diff --git a/.gitreview b/.gitreview index c03ed404a..ce54dba7f 100644 --- a/.gitreview +++ b/.gitreview @@ -13,4 +13,4 @@ host=review.opencontrail.org port=29418 project=Juniper/contrail-test.git -defaultbranch=master +defaultbranch=R3.0 diff --git a/build.xml b/build.xml deleted file mode 100644 index cf442aed7..000000000 --- a/build.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - diff --git a/common/config.py b/common/config.py deleted file mode 100644 index 20ae5f9f3..000000000 --- a/common/config.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function - -import logging as std_logging -import os - -from oslo.config import cfg - -from common import log as logging - - -def register_opt_group(conf, opt_group, options): - conf.register_group(opt_group) - for opt in options: - conf.register_opt(opt, group=opt_group.name) - - -def register_opts(): - pass - - -# this should never be called outside of this class -class TestConfigPrivate(object): - """Provides OpenStack configuration information.""" - -# DEFAULT_CONFIG_DIR = os.path.join( -# os.path.abspath(os.path.dirname(os.path.dirname(__file__))), -# "etc") - DEFAULT_CONFIG_DIR = '.' 
- - DEFAULT_CONFIG_FILE = "logging.conf" - - def _set_attrs(self): - pass - - def __init__(self, parse_conf=True): - """Initialize a configuration from a conf directory and conf file.""" - super(TestConfigPrivate, self).__init__() - config_files = [] - failsafe_path = self.DEFAULT_CONFIG_FILE - - # Environment variables override defaults... - conf_dir = os.environ.get('TEST_CONFIG_DIR', - self.DEFAULT_CONFIG_DIR) - conf_file = os.environ.get('TEST_CONFIG_FILE', self.DEFAULT_CONFIG_FILE) - - path = os.path.join(conf_dir, conf_file) - - if not os.path.isfile(path): - path = failsafe_path - - # only parse the config file if we expect one to exist. This is needed - # to remove an issue with the config file up to date checker. - if parse_conf: - config_files.append(path) - - cfg.CONF([], project='contrailtest', default_config_files=config_files) - logging.setup('contrailtest') - LOG = logging.getLogger('contrailtest') - LOG.info("Using contrailtest config file %s" % path) - register_opts() - self._set_attrs() - if parse_conf: - cfg.CONF.log_opt_values(LOG, std_logging.DEBUG) - - -class TestConfigProxy(object): - _config = None - - _extra_log_defaults = [ - 'keystoneclient.session=WARN', - 'paramiko.transport=WARN', - 'requests.packages.urllib3.connectionpool=WARN', - 'urllib3.connectionpool=WARN', - ] - - def _fix_log_levels(self): - """Tweak the oslo log defaults.""" - for opt in logging.log_opts: - if opt.dest == 'default_log_levels': - opt.default.extend(self._extra_log_defaults) - - def __getattr__(self, attr): - if not self._config: - self._fix_log_levels() - self._config = TestConfigPrivate() - - return getattr(self._config, attr) - - -CONF = TestConfigProxy() diff --git a/common/connections.py b/common/connections.py deleted file mode 100755 index 24de7c588..000000000 --- a/common/connections.py +++ /dev/null @@ -1,374 +0,0 @@ -from vnc_api_test import * -from tcutils.config.vnc_introspect_utils import * -from tcutils.config.svc_mon_introspect_utils import 
SvcMonInspect -from tcutils.control.cn_introspect_utils import * -from tcutils.agent.vna_introspect_utils import * -from tcutils.collector.opserver_introspect_utils import * -from tcutils.collector.analytics_tests import * -from vnc_api.vnc_api import * -from tcutils.vdns.dns_introspect_utils import DnsAgentInspect -from tcutils.config.ds_introspect_utils import * -from tcutils.config.discovery_tests import * -from tcutils.util import custom_dict -import os -from openstack import OpenstackAuth, OpenstackOrchestrator -from vcenter import VcenterAuth, VcenterOrchestrator -from common.contrail_test_init import ContrailTestInit - -try: - from webui.ui_login import UILogin -except ImportError: - pass - -class ContrailConnections(): - def __init__(self, inputs=None, logger=None, project_name=None, - username=None, password=None, domain_name=None, ini_file=None): - project_fq_name = [domain_name or 'default-domain', project_name] \ - if project_name else None - self.inputs = inputs or ContrailTestInit(ini_file, - project_fq_name=project_fq_name) - self.project_name = project_name or self.inputs.project_name - self.domain_name = domain_name or self.inputs.domain_name - self.username = username or self.inputs.stack_user - self.password = password or self.inputs.stack_password - self.logger = logger or self.inputs.logger - self.nova_h = None - self.quantum_h = None - self.api_server_inspects = custom_dict(self.get_api_inspect_handle, - 'api_inspect:'+self.project_name+':'+self.username) - self.dnsagent_inspect = custom_dict(self.get_dns_agent_inspect_handle, - 'dns_inspect') - self.agent_inspect = custom_dict(self.get_vrouter_agent_inspect_handle, - 'agent_inspect') - self.ops_inspects = custom_dict(self.get_opserver_inspect_handle, - 'ops_inspect') - self.cn_inspect = custom_dict(self.get_control_node_inspect_handle, - 'cn_inspect') - self.ds_inspect = custom_dict(self.get_discovery_service_inspect_handle, - 'ds_inspect') - - # ToDo: msenthil/sandipd rest of init needs to 
be better handled - self.vnc_lib = self.get_vnc_lib_h() - self.auth = self.get_auth_h() - if self.inputs.orchestrator == 'openstack': - self.project_id = self.get_project_id() - if self.inputs.verify_thru_gui(): - self.ui_login = UILogin(self, self.inputs, project_name, username, password) - self.browser = self.ui_login.browser - self.browser_openstack = self.ui_login.browser_openstack - - self.orch = OpenstackOrchestrator(username=self.username, - password=self.password, - project_id=self.project_id, - project_name=self.project_name, - inputs=self.inputs, - vnclib=self.vnc_lib, - logger=self.logger, - auth_server_ip=self.inputs.auth_ip) - self.nova_h = self.orch.get_compute_handler() - self.quantum_h = self.orch.get_network_handler() - else: # vcenter - self.orch = VcenterOrchestrator(user=self.username, - pwd=self.password, - host=self.inputs.auth_ip, - port=self.inputs.auth_port, - dc_name=self.inputs.vcenter_dc, - vnc=self.vnc_lib, - inputs=self.inputs, - logger=self.logger) - # end __init__ - - def get_project_id(self, project_name=None): - project_name = project_name or self.project_name - auth = self.get_auth_h(project_name) - return auth.get_project_id(project_name or self.project_name) - - def get_auth_h(self, refresh=False, project_name=None, - username=None, password=None): - project_name = project_name or self.project_name - username = username or self.username - password = password or self.password - attr = '_auth_'+project_name+'_'+username - if not getattr(env, attr, None) or refresh: - if self.inputs.orchestrator == 'openstack': - env[attr] = OpenstackAuth(username, password, - project_name, self.inputs, self.logger) - else: - env[attr] = VcenterAuth(username, password, - project_name, self.inputs) - return env[attr] - - def get_vnc_lib_h(self, refresh=False, project_name=None, - username=None, password=None): - project_name = project_name or self.project_name - username = username or self.username - password = password or self.password - attr = 
'_vnc_lib_'+project_name+'_'+username - if not getattr(env, attr, None) or refresh: - self.vnc_lib_fixture = VncLibFixture( - username=username, password=password, - domain=self.domain_name, project_name=project_name, - inputs = self.inputs, - cfgm_ip=self.inputs.cfgm_ip, - api_server_port=self.inputs.api_server_port, - auth_server_ip=self.inputs.auth_ip, - orchestrator=self.inputs.orchestrator, - logger=self.logger) - self.vnc_lib_fixture.setUp() - self.vnc_lib = self.vnc_lib_fixture.get_handle() - return self.vnc_lib - - def get_api_inspect_handle(self, host): - if host not in self.api_server_inspects: - self.api_server_inspects[host] = VNCApiInspect(host, - args=self.inputs, - logger=self.logger) - return self.api_server_inspects[host] - - def get_control_node_inspect_handle(self, host): - if host not in self.cn_inspect: - self.cn_inspect[host] = ControlNodeInspect(host, logger=self.logger) - return self.cn_inspect[host] - - def get_dns_agent_inspect_handle(self, host): - if host not in self.dnsagent_inspect: - self.dnsagent_inspect[host] = DnsAgentInspect(host, - logger=self.logger) - return self.dnsagent_inspect[host] - - def get_vrouter_agent_inspect_handle(self, host): - if host not in self.agent_inspect: - self.agent_inspect[host] = AgentInspect(host, logger=self.logger) - return self.agent_inspect[host] - - def get_opserver_inspect_handle(self, host): - #ToDo: WA till scripts are modified to use ip rather than hostname - ip = host if is_v4(host) else self.inputs.get_host_ip(host) - if ip not in self.ops_inspects: - self.ops_inspects[ip] = VerificationOpsSrv(ip, logger=self.logger) - return self.ops_inspects[ip] - - def get_discovery_service_inspect_handle(self, host): - if host not in self.ds_inspect: - self.ds_inspect[host] = VerificationDsSrv(host, logger=self.logger) - return self.ds_inspect[host] - - def get_svc_mon_h(self, refresh=False): - if not getattr(self, '_svc_mon_inspect', None) or refresh: - for cfgm_ip in self.inputs.cfgm_ips: - 
#contrail-status would increase run time hence netstat approach - cmd = 'netstat -antp | grep 8088 | grep LISTEN' - if self.inputs.run_cmd_on_server(cfgm_ip, cmd) is not None: - self._svc_mon_inspect = SvcMonInspect(cfgm_ip, - logger=self.logger) - break - return self._svc_mon_inspect - - @property - def api_server_inspect(self): - if not getattr(self, '_api_server_inspect', None): - self._api_server_inspect = self.api_server_inspects[ - self.inputs.cfgm_ips[0]] - return self._api_server_inspect - @api_server_inspect.setter - def api_server_inspect(self, value): - self._api_server_inspect = value - - @property - def ops_inspect(self): - if not getattr(self, '_ops_inspect', None): - self._ops_inspect = self.ops_inspects[self.inputs.collector_ips[0]] - return self._ops_inspect - @ops_inspect.setter - def ops_inspect(self, value): - self._ops_inspect = value - - @property - def analytics_obj(self): - if not getattr(self, '_analytics_obj', None): - self._analytics_obj = AnalyticsVerification(self.inputs, - self.cn_inspect, self.agent_inspect, - self.ops_inspects, logger=self.logger) - return self._analytics_obj - @analytics_obj.setter - def analytics_obj(self, value): - self._analytics_obj = value - - @property - def ds_verification_obj(self): - if not getattr(self, '_ds_verification_obj', None): - self._ds_verification_obj = DiscoveryVerification(self.inputs, - self.cn_inspect, self.agent_inspect, - self.ops_inspects, self.ds_inspect, - logger=self.logger) - return self._ds_verification_obj - @ds_verification_obj.setter - def ds_verification_obj(self, value): - self._ds_verification_obj = value - - def update_inspect_handles(self): - self.api_server_inspects.clear() - self.cn_inspect.clear() - self.dnsagent_inspect.clear() - self.agent_inspect.clear() - self.ops_inspects.clear() - self.ds_inspect.clear() - self.api_server_inspect = None - self.ops_inspect = None - self.ds_verification_obj = None - self._svc_mon_inspect = None - self._api_server_inspect = None - 
self._ops_inspect = None - self._analytics_obj = None - self._ds_verification_obj = None - # end update_inspect_handles - - def update_vnc_lib_fixture(self): - self.vnc_lib = self.get_vnc_lib_h(refresh=True) - # end update_vnc_lib_fixture() - - def set_vrouter_config_encap(self, encap1=None, encap2=None, encap3=None): - self.obj = self.vnc_lib - - try: - # Reading Existing config - current_config=self.obj.global_vrouter_config_read( - fq_name=['default-global-system-config', - 'default-global-vrouter-config']) - current_linklocal=current_config.get_linklocal_services() - except NoIdError as e: - self.logger.exception('No config id found. Creating new one') - current_linklocal='' - - encap_obj = EncapsulationPrioritiesType( - encapsulation=[encap1, encap2, encap3]) - conf_obj = GlobalVrouterConfig(linklocal_services=current_linklocal,encapsulation_priorities=encap_obj) - result = self.obj.global_vrouter_config_create(conf_obj) - return result - # end set_vrouter_config_encap - - def update_vrouter_config_encap(self, encap1=None, encap2=None, encap3=None): - '''Used to change the existing encapsulation priorities to new values''' - self.obj = self.vnc_lib - - try: - # Reading Existing config - current_config=self.obj.global_vrouter_config_read( - fq_name=['default-global-system-config', - 'default-global-vrouter-config']) - current_linklocal=current_config.get_linklocal_services() - except NoIdError as e: - self.logger.exception('No config id found. 
Creating new one') - current_linklocal='' - - encaps_obj = EncapsulationPrioritiesType( - encapsulation=[encap1, encap2, encap3]) - confs_obj = GlobalVrouterConfig(linklocal_services=current_linklocal, - encapsulation_priorities=encaps_obj) - result = self.obj.global_vrouter_config_update(confs_obj) - return result - # end update_vrouter_config_encap - - def delete_vrouter_encap(self): - self.obj = self.vnc_lib - try: - conf_id = self.obj.get_default_global_vrouter_config_id() - self.logger.info("Config id found.Deleting it") - config_parameters = self.obj.global_vrouter_config_read(id=conf_id) - self.inputs.config.obj = config_parameters.get_encapsulation_priorities( - ) - if not self.inputs.config.obj: - # temp workaround,delete default-global-vrouter-config.need to - # review this testcase - self.obj.global_vrouter_config_delete(id=conf_id) - errmsg = "No config id found" - self.logger.info(errmsg) - return (errmsg) - try: - encaps1 = self.inputs.config.obj.encapsulation[0] - encaps2 = self.inputs.config.obj.encapsulation[1] - try: - encaps1 = self.inputs.config.obj.encapsulation[0] - encaps2 = self.inputs.config.obj.encapsulation[1] - encaps3 = self.inputs.config.obj.encapsulation[2] - self.obj.global_vrouter_config_delete(id=conf_id) - return (encaps1, encaps2, encaps3) - except IndexError: - self.obj.global_vrouter_config_delete(id=conf_id) - return (encaps1, encaps2, None) - except IndexError: - self.obj.global_vrouter_config_delete(id=conf_id) - return (encaps1, None, None) - except NoIdError: - errmsg = "No config id found" - self.logger.info(errmsg) - return (errmsg) - # end delete_vrouter_encap - - def read_vrouter_config_encap(self): - result = None - try: - self.obj = self.vnc_lib - conf_id = self.obj.get_default_global_vrouter_config_id() - config_parameters = self.obj.global_vrouter_config_read(id=conf_id) - self.inputs.config.obj = config_parameters.get_encapsulation_priorities( - ) - result = self.inputs.config.obj.encapsulation - except NoIdError: 
- errmsg = "No config id found" - self.logger.info(errmsg) - return result - # end read_vrouter_config_encap - - def set_vrouter_config_evpn(self, evpn_status=True): - self.obj = self.vnc_lib - - # Check if already configured - try: - conf_id = self.obj.get_default_global_vrouter_config_id() - self.obj.global_vrouter_config_delete(id=conf_id) - except Exception: - msg = "No config id found. Configuring new one" - self.logger.info(msg) - pass - if evpn_status == True: - conf_obj = GlobalVrouterConfig(evpn_status=True) - else: - conf_obj = GlobalVrouterConfig(evpn_status=False) - result = self.obj.global_vrouter_config_create(conf_obj) - return result - # end set_vrouter_config_evpn - - def update_vrouter_config_evpn(self, evpn_status=True): - self.obj = self.vnc_lib - if evpn_status == True: - conf_obj = GlobalVrouterConfig(evpn_status=True) - else: - conf_obj = GlobalVrouterConfig(evpn_status=False) - result = self.obj.global_vrouter_config_update(conf_obj) - return result - # end update_vrouter_config_evpn - - def delete_vrouter_config_evpn(self): - try: - self.obj = self.vnc_lib - conf_id = self.obj.get_default_global_vrouter_config_id() - self.obj.global_vrouter_config_delete(id=conf_id) - except NoIdError: - errmsg = "No config id found" - self.logger.info(errmsg) - # end delete_vrouter_config_evpn - - def read_vrouter_config_evpn(self): - result = False - try: - self.obj = self.vnc_lib - conf_id = self.obj.get_default_global_vrouter_config_id() - out = self.obj.global_vrouter_config_read(id=conf_id) - if 'evpn_status' in out.__dict__.keys(): - result = out.evpn_status - except NoIdError: - errmsg = "No config id found" - self.logger.info(errmsg) - return result - # end read_vrouter_config_evpn diff --git a/common/contrail_test_init.py b/common/contrail_test_init.py deleted file mode 100755 index d8fb6bfb2..000000000 --- a/common/contrail_test_init.py +++ /dev/null @@ -1,1010 +0,0 @@ -import os -import re -import sys -import json -import time -import socket 
-import getpass -import ConfigParser -import ast -from netaddr import * - -import fixtures -from fabric.api import env, run, local -from fabric.operations import get, put, reboot -from fabric.context_managers import settings, hide -from fabric.exceptions import NetworkError -from fabric.contrib.files import exists - -from tcutils.util import * -from tcutils.util import custom_dict, read_config_option -from tcutils.custom_filehandler import * -from tcutils.config.vnc_introspect_utils import VNCApiInspect -from tcutils.config.ds_introspect_utils import VerificationDsSrv -from keystone_tests import KeystoneCommands -from tempfile import NamedTemporaryFile -import re - -import subprocess -import ast -from collections import namedtuple - -# monkey patch subprocess.check_output cos its not supported in 2.6 -if "check_output" not in dir(subprocess): # duck punch it in! - def f(*popenargs, **kwargs): - if 'stdout' in kwargs: - raise ValueError( - 'stdout argument not allowed, it will be overridden.') - process = subprocess.Popen( - stdout=subprocess.PIPE, - *popenargs, - **kwargs) - output, unused_err = process.communicate() - retcode = process.poll() - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - raise subprocess.CalledProcessError(retcode, cmd) - return output - subprocess.check_output = f - -class TestInputs(object): - ''' - Class that would populate testbedinfo from parsing the - .ini and .json input files if provided (or) - check the keystone and discovery servers to populate - the same with the certain default value assumptions - ''' - __metaclass__ = Singleton - def __init__(self, ini_file=None): - self.api_server_port = '8082' - self.bgp_port = '8083' - self.ds_port = '5998' - self.jenkins_trigger = self.get_os_env('JENKINS_TRIGGERED') - self.os_type = custom_dict(self.get_os_version, 'os_type') - self.config = None - if ini_file: - self.config = ConfigParser.ConfigParser() - self.config.read(ini_file) - self.orchestrator = 
read_config_option(self.config, - 'Basic', 'orchestrator', 'openstack') - self.prov_file = read_config_option(self.config, - 'Basic', 'provFile', None) - self.key = read_config_option(self.config, - 'Basic', 'key', 'key1') - self.stack_user = read_config_option( - self.config, - 'Basic', - 'stackUser', - os.getenv('OS_USERNAME', 'admin')) - self.stack_password = read_config_option( - self.config, - 'Basic', - 'stackPassword', - os.getenv('OS_PASSWORD', 'contrail123')) - self.stack_tenant = read_config_option( - self.config, - 'Basic', - 'stackTenant', - os.getenv('OS_TENANT_NAME', 'admin')) - self.stack_domain = read_config_option( - self.config, - 'Basic', - 'stackDomain', - os.getenv('OS_DOMAIN_NAME', 'default-domain')) - self.endpoint_type = read_config_option( - self.config, - 'Basic', - 'endpoint_type', - 'publicURL') - self.auth_ip = read_config_option(self.config, - 'Basic', 'auth_ip', None) - self.auth_port = read_config_option(self.config, - 'Basic', 'auth_port', None) - self.multi_tenancy = read_config_option(self.config, - 'Basic', 'multiTenancy', False) - self.enable_ceilometer = read_config_option(self.config, - 'Basic', 'enable_ceilometer', False) - self.fixture_cleanup = read_config_option( - self.config, - 'Basic', - 'fixtureCleanup', - 'yes') - - self.http_proxy = read_config_option(self.config, - 'proxy', 'proxy_url', None) - self.ui_config = read_config_option(self.config, - 'ui', 'ui_config', None) - self.ui_browser = read_config_option(self.config, - 'ui', 'ui_browser', None) - self.verify_webui = read_config_option(self.config, - 'ui', 'webui', False) - self.verify_horizon = read_config_option(self.config, - 'ui', 'horizon', False) - if not self.ui_browser and (self.verify_webui or self.verify_horizon): - raise ValueError( - "Verification via GUI needs 'browser' details. 
Please set the same.") - self.devstack = read_config_option(self.config, - 'devstack', 'devstack', None) - self.use_devicemanager_for_md5 = read_config_option( - self.config, 'use_devicemanager_for_md5', 'use_devicemanager_for_md5', False) - # router options - self.mx_rt = read_config_option(self.config, - 'router', 'route_target', '10003') - self.router_asn = read_config_option(self.config, - 'router', 'asn', '64512') - router_info_tuples_string = read_config_option( - self.config, - 'router', - 'router_info', - '[]') - self.ext_routers = ast.literal_eval(router_info_tuples_string) - self.fip_pool_name = read_config_option( - self.config, - 'router', - 'fip_pool_name', - 'public-pool') - self.fip_pool = read_config_option(self.config, - 'router', 'fip_pool', None) - if self.fip_pool: - update_reserve_cidr(self.fip_pool) - self.public_vn = read_config_option( - self.config, - 'router', - 'public_virtual_network', - 'public-network') - self.public_tenant = read_config_option( - self.config, - 'router', - 'public_tenant_name', - 'public-tenant') - - # HA setup IPMI username/password - self.ha_setup = read_config_option(self.config, 'HA', 'ha_setup', None) - - if self.ha_setup == True: - self.ipmi_username = read_config_option( - self.config, - 'HA', - 'ipmi_username', - 'ADMIN') - self.ipmi_password = read_config_option( - self.config, - 'HA', - 'ipmi_password', - 'ADMIN') - # debug option - self.verify_on_setup = read_config_option( - self.config, - 'debug', - 'verify_on_setup', - True) - self.stop_on_fail = bool( - read_config_option( - self.config, - 'debug', - 'stop_on_fail', - None)) - - self.ha_tmp_list = [] - self.tor_agent_data = {} - self.mysql_token = None - - self.public_host = read_config_option(self.config, 'Basic', - 'public_host', '10.204.216.50') - - self.prov_file = self.prov_file or self._create_prov_file() - self.prov_data = self.read_prov_file() - #vcenter server - self.vcenter_dc = read_config_option( - self.config, 'vcenter', 'vcenter_dc', None) 
- self.vcenter_server = read_config_option( - self.config, 'vcenter', 'vcenter_server', None) - self.vcenter_port = read_config_option( - self.config, 'vcenter', 'vcenter_port', None) - self.vcenter_username = read_config_option( - self.config, 'vcenter', 'vcenter_username', None) - self.vcenter_password = read_config_option( - self.config, 'vcenter', 'vcenter_password', None) - self.vcenter_compute = read_config_option( - self.config, 'vcenter', 'vcenter_compute', None) - if 'vcenter' in self.prov_data.keys(): - try: - self.dv_switch = self.prov_data['vcenter'][0]['dv_switch']['dv_switch_name'] - except Exception as e: - pass - if self.ha_setup == True: - self.update_etc_hosts_for_vip() - - self.username = self.host_data[self.cfgm_ip]['username'] - self.password = self.host_data[self.cfgm_ip]['password'] - # List of service correspond to each module - self.compute_services = [ - 'contrail-vrouter-agent', - 'supervisor-vrouter', - 'contrail-vrouter-nodemgr'] - self.control_services = ['contrail-control', 'supervisor-control', - 'contrail-control-nodemgr', 'contrail-dns', - 'contrail-named'] - self.cfgm_services = [ - 'contrail-api', - 'contrail-schema', - 'contrail-discovery', - 'supervisor-config', - 'contrail-config-nodemgr', - 'contrail-device-manager'] - self.webui_services = ['contrail-webui', 'contrail-webui-middleware', - 'supervisor-webui'] - self.openstack_services = [ - 'openstack-cinder-api', 'openstack-cinder-scheduler', - 'openstack-cinder-scheduler', 'openstack-glance-api', - 'openstack-glance-registry', 'openstack-keystone', - 'openstack-nova-api', 'openstack-nova-scheduler', - 'openstack-nova-cert'] - self.collector_services = [ - 'contrail-collector', 'contrail-analytics-api', - 'contrail-query-engine', 'contrail-analytics-nodemgr', - 'supervisor-analytics', - 'contrail-snmp-collector', 'contrail-topology'] - self.correct_states = ['active', 'backup'] - - def get_os_env(self, var, default=''): - if var in os.environ: - return os.environ.get(var) - 
else: - return default - # end get_os_env - - def get_os_version(self, host_ip): - ''' - Figure out the os type on each node in the cluster - ''' - if host_ip in self.os_type: - return self.os_type[host_ip] - username = self.host_data[host_ip]['username'] - password = self.host_data[host_ip]['password'] - with settings(host_string='%s@%s' % (username, host_ip), - password=password, warn_only=True, - abort_on_prompts=False): - output = run('uname -a') - if 'el6' in output: - self.os_type[host_ip] = 'centos_el6' - elif 'fc17' in output: - self.os_type[host_ip] = 'fc17' - elif 'xen' in output: - self.os_type[host_ip] = 'xenserver' - elif 'Ubuntu' in output: - self.os_type[host_ip] = 'ubuntu' - elif 'el7' in output: - self.os_type[host_ip] = 'redhat' - else: - raise KeyError('Unsupported OS') - return self.os_type[host_ip] - # end get_os_version - - def read_prov_file(self): - prov_file = open(self.prov_file, 'r') - prov_data = prov_file.read() - #json_data = json.loads(prov_data) - json_data = ast.literal_eval(prov_data) - self.host_names = [] - self.cfgm_ip = '' - self.cfgm_ips = [] - self.cfgm_control_ips = [] - self.cfgm_names = [] - self.collector_ips = [] - self.collector_control_ips = [] - self.collector_names = [] - self.database_ips = [] - self.database_names = [] - self.database_control_ips = [] - self.compute_ips = [] - self.compute_names = [] - self.compute_control_ips = [] - self.compute_info = {} - self.bgp_ips = [] - self.bgp_control_ips = [] - self.bgp_names = [] - self.ds_server_ip = [] - self.ds_server_name = [] - self.host_ips = [] - self.webui_ips = [] - self.host_data = {} - self.tor = {} - self.tor_hosts_data = {} - self.physical_routers_data = {} - - self.esxi_vm_ips = {} - self.vgw_data = {} - self.vip = {} - for host in json_data['hosts']: - host['name'] = host['name'] - self.host_names.append(host['name']) - host_ip = str(IPNetwork(host['ip']).ip) - host_data_ip = str(IPNetwork(host['data-ip']).ip) - host_control_ip = 
str(IPNetwork(host['control-ip']).ip) - self.host_ips.append(host_ip) - self.host_data[host_ip] = host - self.host_data[host_data_ip] = host - self.host_data[host_control_ip] = host - self.host_data[host['name']] = host - self.host_data[host['name']]['host_ip'] = host_ip - self.host_data[host['name']]['host_data_ip'] = host_data_ip - self.host_data[host['name']]['host_control_ip'] = host_control_ip - roles = host["roles"] - for role in roles: - if role['type'] == 'openstack': - if self.auth_ip: - if self.ha_setup == True: - self.openstack_ip = host_ip - else: - self.openstack_ip = self.auth_ip - else: - self.openstack_ip = host_ip - self.auth_ip = host_ip - if role['type'] == 'cfgm': - self.cfgm_ip = host_ip - self.cfgm_ips.append(host_ip) - self.cfgm_control_ips.append(host_control_ip) - self.cfgm_control_ip = host_control_ip - self.cfgm_names.append(host['name']) - self.ds_server_ip.append(host_ip) - self.ds_server_name.append(host['name']) - self.masterhost = self.cfgm_ip - self.hostname = host['name'] - if role['type'] == 'compute': - self.compute_ips.append(host_ip) - self.compute_names.append(host['name']) - self.compute_info[host['name']] = host_ip - self.compute_control_ips.append(host_control_ip) - if role['type'] == 'bgp': - - self.bgp_ips.append(host_ip) - self.bgp_control_ips.append(host_control_ip) - self.bgp_names.append(host['name']) -# if role['type'] == 'collector' : -# self.collector_ip= host_ip - if role['type'] == 'webui': - self.webui_ip = host_ip - self.webui_ips.append(host_ip) - if role['type'] == 'collector': - self.collector_ip = host_ip - self.collector_ips.append(host_ip) - self.collector_control_ips.append(host_control_ip) - self.collector_names.append(host['name']) - if role['type'] == 'database': - self.database_ip = host_ip - self.database_ips.append(host_ip) - self.database_names.append(host['name']) - self.database_control_ips.append(host_control_ip) - # end for - # end for - if self.ha_setup == True: - self.vip['keystone'] = 
self.auth_ip - self.vip['contrail'] = self.auth_ip - - if 'vgw' in json_data: - self.vgw_data = json_data['vgw'] - - if 'tor_agent' in json_data: - self.tor_agent_data = json_data['tor_agent'] - - if 'tor_hosts' in json_data: - self.tor_hosts_data = json_data['tor_hosts'] - - if 'physical_routers' in json_data: - self.physical_routers_data = json_data['physical_routers'] - self._process_tor_data() - - if 'esxi_vms' in json_data: - self.esxi_vm_ips = json_data['esxi_vms'] - if 'hosts_ipmi' in json_data: - self.hosts_ipmi = json_data['hosts_ipmi'] - - return json_data - # end read_prov_file - - def _process_tor_data(self): - for (device_name, device_dict) in self.physical_routers_data.iteritems(): - device_dict['tor_agents'] = [] - device_dict['tor_agent_dicts'] = [] - device_dict['tor_tsn_ips'] = [] - for (host_str, ta_list) in self.tor_agent_data.iteritems(): - for ta in ta_list: - if ta['tor_name'] == device_dict['name']: - ta['tor_agent_host_string'] = host_str - device_dict['tor_ovs_port'] = ta['tor_ovs_port'] - device_dict['tor_ovs_protocol'] = ta[ - 'tor_ovs_protocol'] - device_dict['tor_agents'].append('%s:%s' % (host_str, - ta['tor_agent_id'])) - device_dict['tor_agent_dicts'].append(ta) - device_dict['tor_tsn_ips'].append(ta['tor_tsn_ip']) - if self.ha_setup == True: - device_dict['controller_ip'] = self.vip['contrail'] - else: - device_dict['controller_ip'] = ta['tor_tsn_ip'] - - # end _process_tor_data - - def get_host_ip(self, name): - ip = self.host_data[name]['host_ip'] - if ip in self.ha_tmp_list: - ip = self.vip['contrail'] - return ip - - def get_host_data_ip(self, name): - ip = self.host_data[name]['host_data_ip'] - if ip in self.ha_tmp_list: - ip = self.vip['contrail'] - return ip - - def get_node_name(self, ip): - return self.host_data[ip]['name'] - - def update_etc_hosts_for_vip(self): - contrail_vip_name = "contrail-vip" - for host in self.host_ips: - cmd = 'if ! 
grep -Rq "contrail-vip" /etc/hosts; then echo "%s %s" >> /etc/hosts; fi' % ( - self.vip['contrail'], contrail_vip_name) - self.run_cmd_on_server(host, cmd) - if self.vip['contrail'] != self.vip['keystone']: - keystone_vip_name = "keystone-vip" - cmd = 'echo "%s %s" >> /etc/hosts' % ( - self.vip['keystone'], keystone_vip_name) - self.run_cmd_on_server(host, cmd) - - def get_computes(self, cfgm_ip): - kwargs = {'stack_user': self.stack_user, - 'stack_password': self.stack_password, - 'project_name': self.stack_tenant, - 'openstack_ip': self.auth_ip} - api_h = VNCApiInspect(cfgm_ip, args=type('', (), kwargs)) - return api_h.get_computes() - - def _create_prov_file(self): - ''' Creates json data for a single node only. - Optional Env variables: - openstack creds: - * OS_USERNAME (default: admin) - * OS_PASSWORD (default: contrail123) - * OS_TENANT_NAME (default: admin) - * OS_DOMAIN_NAME (default: default-domain) - * OS_AUTH_URL (default: http://127.0.0.1:5000/v2.0) - * OS_INSECURE (default: True) - login creds: - * USERNAME (default: root) - * PASSWORD (default: c0ntrail123) - contrail service: - * DISCOVERY_IP (default: neutron-server ip fetched from keystone endpoint) - ''' - pattern = 'http[s]?://(?P\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}):(?P\d+)' - if self.orchestrator.lower() != 'openstack': - raise Exception('Please specify testbed info in $PARAMS_FILE ' - 'under "Basic" section, keyword "provFile"') - if self.orchestrator.lower() == 'openstack': - auth_url = os.getenv('OS_AUTH_URL', None) or \ - 'http://127.0.0.1:5000/v2.0' - insecure = bool(os.getenv('OS_INSECURE', True)) - keystone = KeystoneCommands(self.stack_user, - self.stack_password, - self.stack_tenant, - auth_url, - insecure=insecure) - match = re.match(pattern, keystone.get_endpoint('identity')[0]) - self.auth_ip = match.group('ip') - self.auth_port = match.group('port') - - # Assume contrail-config runs in the same node as neutron-server - discovery = os.getenv('DISCOVERY_IP', None) or \ - (keystone and 
re.match(pattern, - keystone.get_endpoint('network')[0]).group('ip')) - ds_client = VerificationDsSrv(discovery) - services = ds_client.get_ds_services().info - cfgm = database = services['config'] - collector = services['analytics'] - bgp = services['control-node'] - openstack = [self.auth_ip] if self.auth_ip else [] - computes = self.get_computes(cfgm[0]) - data = {'hosts': list()} - hosts = cfgm + database + collector + bgp + computes + openstack - username = os.getenv('USERNAME', 'root') - password = os.getenv('PASSWORD', 'c0ntrail123') - for host in set(hosts): - with settings(host_string='%s@%s' % (username, host), - password=password, warn_only=True): - hname = run('hostname') - hdict = {'ip': host, - 'data-ip': host, - 'control-ip': host, - 'username': username, - 'password': password, - 'name': hname, - 'roles': [], - } - if host in cfgm: - hdict['roles'].append({'type': 'cfgm'}) - if host in collector: - hdict['roles'].append({'type': 'collector'}) - if host in database: - hdict['roles'].append({'type': 'database'}) - if host in bgp: - hdict['roles'].append({'type': 'bgp'}) - if host in computes: - hdict['roles'].append({'type': 'compute'}) - if host in openstack: - hdict['roles'].append({'type': 'openstack'}) - data['hosts'].append(hdict) - tempfile = NamedTemporaryFile(delete=False) - with open(tempfile.name, 'w') as fd: - json.dump(data, fd) - return tempfile.name - # end _create_prov_data - - def get_mysql_token(self): - if self.mysql_token: - return self.mysql_token - if self.orchestrator == 'vcenter': - return None - if self.devstack: - return 'contrail123' - username = self.host_data[self.openstack_ip]['username'] - password = self.host_data[self.openstack_ip]['password'] - cmd = 'cat /etc/contrail/mysql.token' - with hide('everything'): - with settings( - host_string='%s@%s' % (username, self.openstack_ip), - password=password, warn_only=True, abort_on_prompts=False): - if not exists('/etc/contrail/mysql.token'): - return None - self.mysql_token = 
self.run_cmd_on_server( - self.openstack_ip, - cmd, - username, - password) - return self.mysql_token - # end get_mysql_token - - def run_cmd_on_server(self, server_ip, issue_cmd, username=None, - password=None, pty=True): - if server_ip in self.host_data.keys(): - if not username: - username = self.host_data[server_ip]['username'] - if not password: - password = self.host_data[server_ip]['password'] - with hide('everything'): - with settings( - host_string='%s@%s' % (username, server_ip), password=password, - warn_only=True, abort_on_prompts=False): - output = run('%s' % (issue_cmd), pty=pty) - return output - # end run_cmd_on_server - - -class ContrailTestInit(object): - def __getattr__(self, attr): - return getattr(self.inputs, attr) - - def __init__( - self, - ini_file=None, - stack_user=None, - stack_password=None, - project_fq_name=None, - logger=None): - self.connections = None - self.logger = logger or logging.getLogger(__name__) - self.inputs = TestInputs(ini_file) - self.stack_user = stack_user or self.stack_user - self.stack_password = stack_password or self.stack_password - self.project_fq_name = project_fq_name or \ - [self.stack_domain, self.stack_tenant] - self.project_name = self.project_fq_name[1] - self.domain_name = self.project_fq_name[0] - # Possible af values 'v4', 'v6' or 'dual' - # address_family = read_config_option(self.config, - # 'Basic', 'AddressFamily', 'dual') - self.address_family = 'v4' - # end __init__ - - def set_af(self, af): - self.address_family = af - - def get_af(self): - return self.address_family - - def verify_thru_gui(self): - ''' - Check if GUI based verification is enabled - ''' - if self.ui_browser: - return True - return False - - def is_gui_based_config(self): - ''' - Check if objects have to configured via GUI - ''' - if self.ui_config: - return self.ui_config - return False - - def verify_state(self): - result = True - for host in self.host_ips: - username = self.host_data[host]['username'] - password = 
self.host_data[host]['password'] - if host in self.compute_ips: - for service in self.compute_services: - result = result and self.verify_service_state( - host, - service, - username, - password) - if host in self.bgp_ips: - for service in self.control_services: - result = result and self.verify_service_state( - host, - service, - username, - password) - if host in self.cfgm_ips: - for service in self.cfgm_services: - result = result and self.verify_service_state( - host, - service, - username, - password) - if host in self.collector_ips: - for service in self.collector_services: - result = result and self.verify_service_state( - host, - service, - username, - password) - if host in self.webui_ips: - for service in self.webui_services: - result = result and self.verify_service_state( - host, - service, - username, - password) - # Need to enhance verify_service_state to verify openstack services status as well - # Commenting out openstack service verifcation untill then - # if host == self.openstack_ip: - # for service in self.openstack_services: - # result = result and self.verify_service_state( - # host, - # service, - # username, - # password) - return result - # end verify_state - - def get_service_status(self, m, service): - Service = namedtuple('Service', 'name state') - for keys, values in m.items(): - values = values[0].rstrip().split() - if service in str(values): - cls = Service(values[0], values[1]) - self.logger.info("\n%s:%s" % (cls.name, cls.state)) - return cls - return None - - def verify_service_state(self, host, service, username, password): - m = None - cls = None - try: - m = self.get_contrail_status(host) - cls = self.get_service_status(m, service) - if (cls.state in self.correct_states): - return True - except Exception as e: - self.logger.exception("Got exception as %s" % (e)) - self.logger.exception( - "Service %s not in correct state - its in %s state" % - (cls.name, cls.state)) - return False - self.logger.exception( - "Service %s not in 
correct state - its in %s state" % - (cls.name, cls.state)) - return False - - def verify_control_connection(self, connections): - discovery = connections.ds_verification_obj - return discovery.verify_bgp_connection() - # end verify_control_connection - - def build_compute_to_control_xmpp_connection_dict(self, connections): - agent_to_control_dct = {} - for ip in self.compute_ips: - actual_bgp_peer = [] - inspect_h = connections.agent_inspect[ip] - agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status() - for i in xrange(len(agent_xmpp_status)): - actual_bgp_peer.append(agent_xmpp_status[i]['controller_ip']) - agent_to_control_dct[ip] = actual_bgp_peer - return agent_to_control_dct - # end build_compute_to_control_xmpp_connection_dict - - def reboot(self, server_ip): - i = socket.gethostbyaddr(server_ip)[0] - print "rebooting %s" % i - if server_ip in self.host_data.keys(): - username = self.host_data[server_ip]['username'] - password = self.host_data[server_ip]['password'] - with hide('everything'): - with settings( - host_string='%s@%s' % (username, server_ip), password=password, - warn_only=True, abort_on_prompts=False): - reboot(wait=300) - run('date') - # end reboot - - @retry(delay=10, tries=10) - def confirm_service_active(self, service_name, host): - cmd = 'contrail-status | grep %s | grep " active "' % (service_name) - output = self.run_cmd_on_server( - host, cmd, self.host_data[host]['username'], - self.host_data[host]['password']) - if output is not None: - return True - else: - return False - # end confirm_service_active - - def restart_service( - self, - service_name, - host_ips=[], - contrail_service=True): - result = True - if len(host_ips) == 0: - host_ips = self.host_ips - for host in host_ips: - username = self.host_data[host]['username'] - password = self.host_data[host]['password'] - self.logger.info('Restarting %s.service in %s' % - (service_name, self.host_data[host]['name'])) - if contrail_service: - issue_cmd = 'service %s restart' % 
(service_name) - else: - issue_cmd = 'service %s restart' % (service_name) - self.run_cmd_on_server( - host, issue_cmd, username, password, pty=False) - assert self.confirm_service_active(service_name, host), \ - "Service Restart failed for %s" % (service_name) - # end restart_service - - def stop_service(self, service_name, host_ips=[], contrail_service=True): - result = True - if len(host_ips) == 0: - host_ips = self.host_ips - for host in host_ips: - username = self.host_data[host]['username'] - password = self.host_data[host]['password'] - self.logger.info('Stoping %s.service in %s' % - (service_name, self.host_data[host]['name'])) - if contrail_service: - issue_cmd = 'service %s stop' % (service_name) - else: - issue_cmd = 'service %s stop' % (service_name) - self.run_cmd_on_server( - host, issue_cmd, username, password, pty=False) - # end stop_service - - def start_service(self, service_name, host_ips=[], contrail_service=True): - result = True - if len(host_ips) == 0: - host_ips = self.host_ips - for host in host_ips: - username = self.host_data[host]['username'] - password = self.host_data[host]['password'] - self.logger.info('Starting %s.service in %s' % - (service_name, self.host_data[host]['name'])) - if contrail_service: - issue_cmd = 'service %s start' % (service_name) - else: - issue_cmd = 'service %s start' % (service_name) - self.run_cmd_on_server( - host, issue_cmd, username, password, pty=False) - # end start_service - - def _compare_service_state( - self, host, service, state, state_val, active_str1, active_str1_val, - active_str2, active_str2_val): - result = False - if 'xen' in self.os_type[host] or 'centos' in self.os_type[host]: - if active_str2 != active_str2_val: - result = False - self.logger.warn( - 'On host %s,Service %s state is (%s) .. NOT Expected !!' 
% - (host, service, active_str2)) - elif 'fc' in self.os_type[host]: - if (state, - active_str1, - active_str2) != (state_val, - active_str1_val, - active_str2_val): - result = False - self.logger.warn( - 'On host %s,Service %s states are %s, %s, %s .. NOT Expected !!' % - (host, service, state, active_str1, active_str2)) - return result - # end _compare_service_state - - def get_contrail_status(self, server_ip, username='root', - password='contrail123'): - cache = self.run_cmd_on_server(server_ip, 'contrail-status') - m = dict([(n, tuple(l.split(';'))) - for n, l in enumerate(cache.split('\n'))]) - return m - - def run_provision_control( - self, - router_asn, - api_server_ip, - api_server_port, - oper): - - username = self.host_data[self.cfgm_ip]['username'] - password = self.host_data[self.cfgm_ip]['password'] - bgp_ips = set(self.bgp_ips) - for host in bgp_ips: - host_name = self.host_data[host]['name'] - issue_cmd = "python /opt/contrail/utils/provision_control.py \ - --host_name '%s' --host_ip '%s' --router_asn '%s' \ - --api_server_ip '%s' --api_server_port '%s' --oper '%s'" % (host_name, - host, - router_asn, - api_server_ip, - api_server_port, - oper) - - output = self.run_cmd_on_server( - self.cfgm_ip, issue_cmd, username, password) - if output.return_code != 0: - self.logger.exception('Fail to execute provision_control.py') - return output - - # end run_provision_control - - def run_provision_mx( - self, - api_server_ip, - api_server_port, - router_name, - router_ip, - router_asn, - oper): - - username = self.host_data[self.cfgm_ip]['username'] - password = self.host_data[self.cfgm_ip]['password'] - issue_cmd = "python /opt/contrail/utils/provision_mx.py \ - --api_server_ip '%s' --api_server_port '%s' \ - --router_name '%s' --router_ip '%s' \ - --router_asn '%s' --oper '%s'" % ( - api_server_ip, api_server_port, - router_name, router_ip, router_asn, oper) - output = self.run_cmd_on_server( - self.cfgm_ip, issue_cmd, username, password) - if 
output.return_code != 0: - self.logger.exception('Fail to execute provision_mx.py') - return output - # end run_provision_mx - - def config_route_target( - self, - routing_instance_name, - route_target_number, - router_asn, - api_server_ip, - api_server_port): - - username = self.host_data[self.cfgm_ip]['username'] - password = self.host_data[self.cfgm_ip]['password'] - issue_cmd = "python /opt/contrail/utils/add_route_target.py \ - --routing_instance_name '%s' --route_target_number '%s' \ - --router_asn '%s' --api_server_ip '%s' --api_server_port '%s'" % ( - routing_instance_name, route_target_number, - router_asn, api_server_ip, api_server_port) - - output = self.run_cmd_on_server( - self.cfgm_ip, issue_cmd, username, password) - if output.return_code != 0: - self.logger.exception('Fail to execute add_route_target.py') - return output - # end config_route_target - - def configure_mx( - self, - tunnel_name, - bgp_group, - cn_ip, - mx_ip, - mx_rt, - mx_as, - mx_user, - mx_password, - ri_name, - intf, - vrf_target, - ri_gateway): - - host_ip_with_subnet = "%s/32" % (cn_ip) - - # Initializing list of command need to be configured in MX - command_to_push = ['configure'] - - # Populating the required command - ##command_to_push.append("set routing-options dynamic-tunnels tunnel_name source-address %s" %(mx_ip.split('/')[0])) - #command_to_push.append("set routing-options dynamic-tunnels %s source-address %s" %(tunnel_name,mx_ip)) - #command_to_push.append("set routing-options dynamic-tunnels %s gre" % (tunnel_name ) ) - #command_to_push.append("set routing-options dynamic-tunnels %s destination-networks %s" % (tunnel_name,host_ip_with_subnet)) - #command_to_push.append("set protocols bgp group %s type internal" % (bgp_group)) - ##command_to_push.append("set protocols bgp group %s local-address %s" %(bgp_group,mx_ip.split('/')[0])) - #command_to_push.append("set protocols bgp group %s local-address %s" %(bgp_group,mx_ip)) - #command_to_push.append("set protocols bgp 
group %s family inet-vpn unicast" % (bgp_group)) - #command_to_push.append("set protocols bgp group %s neighbor %s" % (bgp_group,cn_ip)) - #command_to_push.append("set routing-instances %s instance-type vrf" % (ri_name)) - #command_to_push.append("set routing-instances %s interface %s" %(ri_name, intf)) - #command_to_push.append("set routing-instances %s vrf-target %s:%s:%s" %(ri_name, vrf_target,mx_as,mx_rt)) - #command_to_push.append("set routing-instances %s vrf-table-label" %(ri_name)) - #command_to_push.append("set routing-instances %s routing-options static route 0.0.0.0/0 next-hop %s" %(ri_name, ri_gateway)) - # command_to_push.append("commit") - - print "Final commad will be pushed to MX" - print "%s" % command_to_push - - # for command in command_to_push: - # output = self.run_cmd_on_server(mx_ip,command,mx_user,mx_password) - # if output.return_code != 0: - # self.logger.exception('Fail to configure MX') - # return output - command_to_push_string = ";".join(command_to_push) - output = self.run_cmd_on_server( - mx_ip, command_to_push_string, mx_user, mx_password) - - # end configure_mx - - def unconfigure_mx(self, tunnel_name, bgp_group): - - # Initializing list of command need to be configured in MX - command_to_push = ['configure'] - - # Populating the required command - command_to_push.append( - "delete routing-options dynamic-tunnels %s gre" % (tunnel_name)) - command_to_push.append("delete protocols bgp group %s" % (bgp_group)) - command_to_push.append("commit") - - print "Final commad will be pushed to MX" - print "%s" % command_to_push - - for command in command_to_push: - output = self.run_cmd_on_server( - mx_ip, command, mx_user, mx_password) - if output.return_code != 0: - self.logger.exception('Fail to unconfigure MX') - return output - # end unconfigure_mx - - def get_openstack_release(self): - with settings( - host_string='%s@%s' % ( - self.username, self.cfgm_ips[0]), - password=self.password, warn_only=True, abort_on_prompts=False, 
debug=True): - ver = run('contrail-version') - pkg = re.search(r'contrail-install-packages(.*)~(\w+)(.*)', ver) - os_release = pkg.group(2) - self.logger.info("%s" % os_release) - return os_release - # end get_openstack_release - - def copy_file_to_server(self, ip, src, dstdir, dst, force=False): - host = {} - host['ip'] = ip - host['username'] = self.host_data[ip]['username'] - host['password'] = self.host_data[ip]['password'] - copy_file_to_server(host, src, dstdir, dst, force) diff --git a/common/create_public_vn.py b/common/create_public_vn.py deleted file mode 100644 index 2c938deef..000000000 --- a/common/create_public_vn.py +++ /dev/null @@ -1,120 +0,0 @@ -import project_test -from common.contrail_test_init import ContrailTestInit -from common.connections import ContrailConnections -import os -import fixtures -from test import BaseTestCase -import time -from floating_ip import * -from vn_test import * -from control_node import * -from common import isolated_creds -from tcutils.util import Singleton - - -class PublicVn(fixtures.Fixture): - __metaclass__ = Singleton - - def __init__(self, user, password, inputs, ini_file = None ,logger = None, mx_rt = None): - -# self.project_name = project_name - self.user_name = user - self.password = password - self.inputs = inputs - self.ini_file = ini_file - self.logger = logger - self.public_vn = self.inputs.public_vn - self.public_tenant = self.inputs.public_tenant - self.setUp() - self.create_public_vn(mx_rt) - self.create_floatingip_pool() - self.configure_control_nodes() - - def setUp(self): - super(PublicVn, self).setUp() - self.isolated_creds = isolated_creds.IsolatedCreds(self.public_tenant, \ - self.inputs, ini_file = self.ini_file, \ - logger = self.logger, - username=self.user_name, - password=self.password) - self.isolated_creds.setUp() - self.project = self.isolated_creds.create_tenant() - self.isolated_creds.create_and_attach_user_to_tenant() - self.inputs = self.isolated_creds.get_inputs() - 
self.connections = self.isolated_creds.get_conections() - self.isolated_creds.create_and_attach_user_to_tenant(self.user_name,self.password) - self.project.set_sec_group_for_allow_all(\ - self.public_tenant, 'default') - - def create_public_vn(self,mx_rt = None): - if (('MX_GW_TEST' in os.environ) and ( - os.environ.get('MX_GW_TEST') == '1')): - fip_pool_name = self.inputs.fip_pool_name - fvn_name = self.public_vn - fip_subnets = [self.inputs.fip_pool] - if not mx_rt: - mx_rt = self.inputs.mx_rt - self.public_vn_fixture = self.useFixture( - VNFixture( - project_name=self.project.project_name, - connections=self.connections, - vn_name=fvn_name, - inputs=self.inputs, - subnets=fip_subnets, - router_asn=self.inputs.router_asn, - rt_number=mx_rt, - router_external=True)) - assert self.public_vn_fixture.verify_on_setup() - self.logger.info('created public VN:%s' % fvn_name) - # end createPublicVN - - def create_floatingip_pool(self): - if (('MX_GW_TEST' in os.environ) and ( - os.environ.get('MX_GW_TEST') == '1')): - fip_pool_name = self.inputs.fip_pool_name - fvn_name = self.public_vn - fip_subnets = [self.inputs.fip_pool] - self.fip_fixture = self.useFixture( - FloatingIPFixture( - project_name=self.public_tenant, - inputs=self.inputs, - connections=self.connections, - pool_name=fip_pool_name, - vn_id=self.public_vn_fixture.vn_id, - option='neutron', - vn_name=fvn_name)) - assert self.fip_fixture.verify_on_setup() - self.logger.info('created FIP Pool:%s under Project:%s' % - (self.fip_fixture.pool_name, - self.project.project_name)) - # end createfloatingip - - def configure_control_nodes(self): - - # Configuring all control nodes here - if (('MX_GW_TEST' in os.environ) and ( - os.environ.get('MX_GW_TEST') == '1')): - router_name = self.inputs.ext_routers[0][0] - router_ip = self.inputs.ext_routers[0][1] - for entry in self.inputs.bgp_ips: - hostname = self.inputs.host_data[entry]['name'] - entry_control_ip = self.inputs.host_data[ - entry]['host_control_ip'] - 
cn_fixture1 = self.useFixture( - CNFixture( - connections=self.connections, - router_name=hostname, - router_ip=entry_control_ip, - router_type='contrail', - inputs=self.inputs)) - cn_fixturemx = self.useFixture( - CNFixture( - connections=self.connections, - router_name=router_name, - router_ip=router_ip, - router_type='mx', - inputs=self.inputs)) - sleep(10) - assert cn_fixturemx.verify_on_setup() - # TODO Configure MX. Doing Manually For Now - diff --git a/common/device_connection.py b/common/device_connection.py deleted file mode 100644 index 037d750c4..000000000 --- a/common/device_connection.py +++ /dev/null @@ -1,174 +0,0 @@ -import abc -import logging -from fabric.operations import get, put, run, local, sudo -from fabric.context_managers import settings, hide -from fabric.contrib.files import exists - -from jnpr.junos import Device -from jnpr.junos.utils.config import Config -from jnpr.junos.exception import LockError -from jnpr.junos.exception import * - -class AbstractConnection(object): - ''' Abstract connnection class for ssh/netconf etc - ''' - __metaclass__ = abc.ABCMeta - - @abc.abstractmethod - def __init__(self, *args, **kwargs): - pass - - @abc.abstractmethod - def connect(self, *args, **kwargs): - pass - -# end AbstractConnection - - -class SSHConnection(AbstractConnection): - ''' - :param host : Mgmt IP of the host - :param username - :param password - ''' - def __init__(self, host, username='root', password='c0ntrail123', - logger=None, **kwargs): - self.host = host - self.username = username - self.password = password - self.handle = None - self.logger = kwargs.get('logger', logging.getLogger(__name__)) - - def connect(self): - '''Since its a ssh connection, fab will take care, no action needed - ''' - pass - - def disconnect(self): - '''Since its a ssh connection, fab will take care, no action needed - ''' - pass - - def run_cmd(self, cmds, as_sudo=False): - cmd_outputs = [] - for cmd in cmds : - with settings(host_string='%s@%s' % 
(self.username, self.host), - password=self.password): - if as_sudo: - output = sudo(cmd) - else: - output = run(cmd, shell=True) - self.logger.debug('Command :%s, Succeeded: %s' % ( - cmd, output.succeeded)) - self.logger.debug('Output: %s' % (output)) - cmd_outputs.append(output) - return cmd_outputs - - def exists(self, filepath): - with settings(host_string='%s@%s' % (self.username, self.host), - password=self.password): - return exists(filepath) - -# end SSHConnection - -class NetconfConnection(AbstractConnection): - ''' Netconf connection class - ''' - def __init__(self, host, username='root', password='c0ntrail123', - logger=None, **kwargs): - self.host = host - self.username = username - self.password = password - self.handle = None - self.logger = kwargs.get('logger', logging.getLogger(__name__)) - self.config_handle = None - - - def connect(self): - self.handle = Device(host=self.host, user=self.username, - password=self.password) - try: - self.handle.open(gather_facts=False) - self.config_handle = Config(self.handle) - except (ConnectAuthError,ConnectRefusedError, ConnectTimeoutError, - ConnectError) as e: - self.logger.exception(e) - return self.handle - # end connect - - def disconnect(self): - self.handle.close() - - def show_version(self): - return self.handle.show_version() - - def config(self, stmts=[], commit=True, ignore_errors=False): - for stmt in stmts: - try: - self.config_handle.load(stmt, format='set', merge=True) - except ConfigLoadError,e: - if ignore_errors: - self.logger.debug('Exception %s ignored' % (e)) - self.logger.exception(e) - else: - raise e - if commit: - try: - self.config_handle.commit() - except CommitError,e: - self.logger.exception(e) - return (False,e) - return (True, None) - - def restart(self, process_name): - #TODO Not sure of apis other than cli - self.handle.cli('restart %s' % (process_name)) - - def get_mac_address(self, interface): - # Use physical interface - interface = interface.split('.')[0] - xml_resp = 
self.handle.rpc.get_interface_information(interface_name=interface) - mac_address = xml_resp.findtext( - 'physical-interface/current-physical-address') - return mac_address.rstrip('\n').lstrip('\n') - # end get_mac_address - - def get_mac_in_arp_table(self, ip_address): - # From 'show arp' output, get the MAC address - # of a IP - xml_resp = self.handle.rpc.get_arp_table_information(no_resolve=True) - arp_entries = xml_resp.findall('arp-table-entry') - for arp_entry in arp_entries: - if arp_entry.find('ip-address').text.strip() == ip_address: - mac = arp_entry.find('mac-address').text.strip() - self.logger.debug('Found MAC %s for IP %s in arp table of ' - '%s' % (mac, ip_address, self.host)) - return mac - self.logger.warn('IP %s not found in arp table of %s' % ( - ip_address, self.host)) - return None - # end get_mac_in_arp_table - - -# end NetconfConnection - -class ConnectionFactory(object): - ''' Factory for Connection classes - ''' - __connection_classes = { - "juniper": NetconfConnection, - "openvswitch": SSHConnection, - } - - @staticmethod - def get_connection_obj(vendor, *args, **kwargs): - connection_class = ConnectionFactory.__connection_classes.get( - vendor.lower(), None) - - if connection_class: - return connection_class(*args, **kwargs) - raise NotImplementedError("The requested connection has not been implemented") - -if __name__ == "__main__": - nc = ConnectionFactory.get_connection_obj('juniper', - host='10.204.216.186', username = 'root', password='c0ntrail123') diff --git a/common/ecmp/ecmp_test_resource.py b/common/ecmp/ecmp_test_resource.py deleted file mode 100644 index 3052b1741..000000000 --- a/common/ecmp/ecmp_test_resource.py +++ /dev/null @@ -1,142 +0,0 @@ -import fixtures -import testtools -import os -from common.connections import ContrailConnections -from common.contrail_test_init import ContrailTestInit -from vn_test import * -from vm_test import * -from quantum_test import * -from vnc_api_test import * -from nova_test import * -from 
floating_ip import * -from testresources import OptimisingTestSuite, TestResource - - -class ECMPSolnSetup(): - - def get_random_fip(self, vn): - return vn.vn_subnets[0]['cidr'].replace(".0/24", ".100/24").split('/')[0] - #end get_random_fip - - def setup_common_objects(self): - - self.fip_pool_name = 'some-pool1' - self.my_fip_name = 'fip' - self.my_fip = '30.1.1.30' - self.dport1 = '9000' - self.dport2 = '9001' - self.dport3 = '9002' - self.udp_src = unicode(8000) - - self.fvn = self.useFixture( - VNFixture(project_name=self.inputs.project_name, - connections=self.connections, vn_name='fvn', inputs=self.inputs, subnets=['30.1.1.0/24'])) - self.vn1 = self.useFixture( - VNFixture(project_name=self.inputs.project_name, - connections=self.connections, vn_name='vn1', inputs=self.inputs, subnets=['10.1.1.0/29'])) - self.vn2 = self.useFixture( - VNFixture(project_name=self.inputs.project_name, - connections=self.connections, vn_name='vn2', inputs=self.inputs, subnets=['20.1.1.0/29'])) - self.vn3 = self.useFixture( - VNFixture(project_name=self.inputs.project_name, - connections=self.connections, vn_name='vn3', inputs=self.inputs, subnets=['40.1.1.0/29'])) - - self.vm1 = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_obj=self.vn1.obj, flavor='contrail_flavor_small', image_name='ubuntu-traffic', vm_name='vn1_vm1')) - self.vm2 = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_obj=self.vn2.obj, flavor='contrail_flavor_small', image_name='ubuntu-traffic', vm_name='vn2_vm1')) - self.vm3 = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_obj=self.vn3.obj, flavor='contrail_flavor_small', image_name='ubuntu-traffic', vm_name='vn3_vm1')) - self.fvn_vm1 = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_obj=self.fvn.obj, flavor='contrail_flavor_small', 
image_name='ubuntu-traffic', vm_name='fvn_vm1')) - self.fvn_vm2 = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_obj=self.fvn.obj, flavor='contrail_flavor_small', image_name='ubuntu-traffic', vm_name='fvn_vm2')) - self.fvn_vm3 = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_obj=self.fvn.obj, flavor='contrail_flavor_small', image_name='ubuntu-traffic', vm_name='fvn_vm3')) - - assert self.fvn.verify_on_setup() - assert self.vn1.verify_on_setup() - assert self.vn2.verify_on_setup() - assert self.vn3.verify_on_setup() - self.vm1.wait_till_vm_is_up() - self.vm2.wait_till_vm_is_up() - self.vm3.wait_till_vm_is_up() - self.fvn_vm1.wait_till_vm_is_up() - self.fvn_vm2.wait_till_vm_is_up() - self.fvn_vm3.wait_till_vm_is_up() - - all_vm_list = [self.vm1, self.vm2, self.vm3, self.fvn_vm1, self.fvn_vm2, self.fvn_vm3] - for vm in all_vm_list: - vm.install_pkg("Traffic") - sleep(5) - - self.vn1_fq_name = self.vn1.vn_fq_name - self.vn2_fq_name = self.vn2.vn_fq_name - self.vn3_fq_name = self.vn3.vn_fq_name - self.fvn_fq_name = self.fvn.vn_fq_name - - self.fvn_vrf_name = self.fvn.vrf_name - self.vn1_vrf_name = self.vn1.vrf_name - self.vn2_vrf_name = self.vn2.vrf_name - self.vn3_vrf_name = self.vn3.vrf_name - - self.fvn_id = self.fvn.vn_id - self.vm1_id = self.vm1.vm_id - self.vm2_id = self.vm2.vm_id - self.vm3_id = self.vm3.vm_id - - self.fvn_ri_name = self.fvn.ri_name - self.vn1_ri_name = self.vn1.ri_name - self.vn2_ri_name = self.vn2.ri_name - self.vn3_ri_name = self.vn3.ri_name - - self.vmi1_id = self.vm1.tap_intf[self.vn1_fq_name]['uuid'] - self.vmi2_id = self.vm2.tap_intf[self.vn2_fq_name]['uuid'] - self.vmi3_id = self.vm3.tap_intf[self.vn3_fq_name]['uuid'] - - self.fip_fixture = self.useFixture( - FloatingIPFixture( - project_name=self.inputs.project_name, inputs=self.inputs, - connections=self.connections, pool_name=self.fip_pool_name, vn_id=self.fvn_id)) - 
assert self.fip_fixture.verify_on_setup() - self.fvn_obj = self.vnc_lib.virtual_network_read(id=self.fvn_id) - self.fip_pool_obj = FloatingIpPool(self.fip_pool_name, self.fvn_obj) - self.fip_obj = FloatingIp( - self.my_fip_name, self.fip_pool_obj, self.my_fip, True) - - # Get the project_fixture - self.project_fixture = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) - # Read the project obj and set to the floating ip object. - self.fip_obj.set_project(self.project_fixture.project_obj) - - self.vm1_intf = self.vnc_lib.virtual_machine_interface_read( - id=self.vmi1_id) - self.vm2_intf = self.vnc_lib.virtual_machine_interface_read( - id=self.vmi2_id) - self.vm3_intf = self.vnc_lib.virtual_machine_interface_read( - id=self.vmi3_id) - - self.fip_obj.add_virtual_machine_interface(self.vm1_intf) - self.fip_obj.add_virtual_machine_interface(self.vm2_intf) - self.fip_obj.add_virtual_machine_interface(self.vm3_intf) - - self.vnc_lib.floating_ip_create(self.fip_obj) - self.addCleanup(self.vnc_lib.floating_ip_delete, self.fip_obj.fq_name) - errmsg = "Ping to the shared Floating IP ip %s from left VM failed" % self.my_fip - assert self.fvn_vm1.ping_with_certainty( - self.my_fip), errmsg - - diff --git a/common/ecmp/ecmp_traffic.py b/common/ecmp/ecmp_traffic.py deleted file mode 100644 index 9ff1ed732..000000000 --- a/common/ecmp/ecmp_traffic.py +++ /dev/null @@ -1,248 +0,0 @@ -import sys -from time import sleep -from datetime import datetime -import os -import fixtures -import testtools -import unittest -import types -import time -sys.path.append(os.path.realpath('tcutils/pkgs/Traffic')) -from traffic.core.stream import Stream -from traffic.core.profile import create, ContinuousProfile, ContinuousSportRange -from traffic.core.helpers import Host -from traffic.core.helpers import Sender, Receiver -from tcutils.commands import ssh, execute_cmd, execute_cmd_out -from fabric.state import connections as 
fab_connections -from common.servicechain.config import ConfigSvcChain -from common.servicechain.verify import VerifySvcChain - - -class ECMPTraffic(ConfigSvcChain, VerifySvcChain): - - def verify_traffic_flow(self, src_vm, dst_vm_list, si_fix, src_vn, src_ip=None, dst_ip=None): - fab_connections.clear() - src_ip = src_vm.vm_ip - if dst_ip == None: - dst_ip = dst_vm_list[0].vm_ip - src_vm.install_pkg("Traffic") - for vm in dst_vm_list: - vm.install_pkg("Traffic") - sleep(5) - stream_list = self.setup_streams( - src_vm, dst_vm_list, src_ip=src_ip, dst_ip=dst_ip) - sender, receiver = self.start_traffic( - src_vm, dst_vm_list, stream_list, src_ip=src_ip, dst_ip=dst_ip) - self.verify_flow_thru_si(si_fix, src_vn) - self.verify_flow_records(src_vm, src_ip=src_ip, dst_ip=dst_ip) - self.stop_traffic(sender, receiver, dst_vm_list, stream_list) - - return True - - def setup_streams(self, src_vm, dst_vm_list, src_ip=None, dst_ip=None): - - src_ip = src_vm.vm_ip - if dst_ip == None: - dst_ip = dst_vm_list[0].vm_ip - - stream1 = Stream(protocol="ip", proto="udp", src=src_ip, - dst=dst_ip, sport=8000, dport=9000) - stream2 = Stream(protocol="ip", proto="udp", src=src_ip, - dst=dst_ip, sport=8000, dport=9001) - stream3 = Stream(protocol="ip", proto="udp", src=src_ip, - dst=dst_ip, sport=8000, dport=9002) - stream_list = [stream1, stream2, stream3] - - return stream_list - # end setup_streams - - def start_traffic(self, src_vm, dst_vm_list, stream_list, src_ip=None, dst_ip=None): - - self.logger.info("-" * 80) - self.logger.info('Starting Traffic from %s to %s' % - (src_ip, dst_ip)) - self.logger.info("-" * 80) - profile = {} - sender = {} - receiver = {} - tx_vm_node_ip = self.inputs.host_data[ - self.nova_h.get_nova_host_of_vm(src_vm.vm_obj)]['host_ip'] - tx_local_host = Host( - tx_vm_node_ip, - self.inputs.host_data[tx_vm_node_ip]['username'], - self.inputs.host_data[tx_vm_node_ip]['password']) - send_host = Host(src_vm.local_ip, src_vm.vm_username, - src_vm.vm_password) - 
rx_vm_node_ip = {} - rx_local_host = {} - recv_host = {} - - for dst_vm in dst_vm_list: - rx_vm_node_ip[dst_vm] = self.inputs.host_data[ - self.nova_h.get_nova_host_of_vm(dst_vm.vm_obj)]['host_ip'] - rx_local_host[dst_vm] = Host( - rx_vm_node_ip[dst_vm], - self.inputs.host_data[rx_vm_node_ip[dst_vm]]['username'], - self.inputs.host_data[rx_vm_node_ip[dst_vm]]['password']) - recv_host[dst_vm] = Host(dst_vm.local_ip, dst_vm.vm_username, - dst_vm.vm_password) - count = 0 - for stream in stream_list: - profile[stream] = {} - sender[stream] = {} - receiver[stream] = {} - for dst_vm in dst_vm_list: - count = count + 1 - x = datetime.now().microsecond - send_filename = "sendudp_" + str(x) + "_" + "%s" % count - recv_filename = "recvudp_" + str(x) + "_" + "%s" % count - profile[stream][dst_vm] = ContinuousProfile( - stream=stream, listener=dst_vm.vm_ip, chksum=True) - sender[stream][dst_vm] = Sender( - send_filename, profile[stream][dst_vm], tx_local_host, send_host, self.inputs.logger) - receiver[stream][dst_vm] = Receiver( - recv_filename, profile[stream][dst_vm], rx_local_host[dst_vm], recv_host[dst_vm], self.inputs.logger) - receiver[stream][dst_vm].start() - sender[stream][dst_vm].start() - return sender, receiver - # end start_traffic - - def verify_flow_thru_si(self, si_fix, src_vn=None): - self.logger.info( - 'Will start a tcpdump on the left-interfaces of the Service Instances to find out which flow is entering which Service Instance') - flowcount = 0 - result = True - flow_pattern = {} - svms = self.get_svms_in_si(si_fix, self.inputs.project_name) - svms = sorted(set(svms)) - if None in svms: - svms.remove(None) - for svm in svms: - self.logger.info('SVM %s is in %s state' % (svm.name, svm.status)) - if svm.status == 'ACTIVE': - svm_name = svm.name - host = self.get_svm_compute(svm_name) - if src_vn is not None: - tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn) - else: - direction = 'left' - tapintf = self.get_bridge_svm_tapintf(svm_name, direction) - 
session = ssh( - host['host_ip'], host['username'], host['password']) - cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % ( - tapintf, tapintf) - execute_cmd(session, cmd, self.logger) - else: - self.logger.info('%s is not in ACTIVE state' % svm.name) - sleep(15) - - self.logger.info('***** Will check the result of tcpdump *****') - svms = self.get_svms_in_si(si_fix, self.inputs.project_name) - svms = sorted(set(svms)) - if None in svms: - svms.remove(None) - for svm in svms: - self.logger.info('SVM %s is in %s state' % (svm.name, svm.status)) - if svm.status == 'ACTIVE': - svm_name = svm.name - host = self.get_svm_compute(svm_name) - if src_vn is not None: - tapintf = self.get_svm_tapintf_of_vn(svm_name, src_vn) - else: - direction = 'left' - tapintf = self.get_bridge_svm_tapintf(svm_name, direction) - session = ssh( - host['host_ip'], host['username'], host['password']) - output_cmd = 'cat /tmp/%s_out.log' % tapintf - out, err = execute_cmd_out(session, output_cmd, self.logger) - if '9000' in out: - flowcount = flowcount + 1 - self.logger.info( - 'Flow with dport 9000 seen flowing inside %s' % svm_name) - flow_pattern['9000'] = svm_name - if '9001' in out: - flowcount = flowcount + 1 - self.logger.info( - 'Flow with dport 9001 seen flowing inside %s' % svm_name) - flow_pattern['9001'] = svm_name - if '9002' in out: - flowcount = flowcount + 1 - self.logger.info( - 'Flow with dport 9002 seen flowing inside %s' % svm_name) - flow_pattern['9002'] = svm_name - else: - self.logger.info('%s is not in ACTIVE state' % svm.name) - if flowcount > 0: - self.logger.info( - 'Flows are distributed across the Service Instances as :') - self.logger.info('%s' % flow_pattern) - else: - result = False - assert result, 'No Flow distribution seen' - # end verify_flow_thru_si - - def verify_flow_records(self, src_vm, src_ip=None, dst_ip=None): - - self.logger.info('Checking Flow records') - src_port = unicode(8000) - dpi1 = unicode(9000) - dpi2 = unicode(9001) - dpi3 = unicode(9002) - 
dpi_list = [dpi1, dpi2, dpi3] - vn_fq_name = src_vm.vn_fq_name - items_list = src_vm.tap_intf[vn_fq_name].items() - for items, values in items_list: - if items == 'flow_key_idx': - nh_id = values - self.logger.debug('Flow Index of the src_vm is %s' % nh_id) - inspect_h = self.agent_inspect[src_vm.vm_node_ip] - flow_rec = inspect_h.get_vna_fetchflowrecord( - nh=nh_id, sip=src_ip, dip=dst_ip, sport=src_port, dport=dpi1, protocol='17') - - flow_result = True - if flow_rec is None: - flow_result = False - else: - self.logger.info('Flow between %s and %s seen' % (dst_ip, src_ip)) - assert flow_result, 'Flow between %s and %s not seen' % ( - dst_ip, src_ip) - - return True - # end verify_flow_records - - def stop_traffic(self, sender, receiver, dst_vm_list, stream_list): - - self.logger.info('Stopping Traffic now') - - for stream in stream_list: - for dst_vm in dst_vm_list: - sender[stream][dst_vm].stop() - receiver[stream][dst_vm].stop() - time.sleep(5) - stream_sent_count = {} - stream_recv_count = {} - result = True - for stream in stream_list: - stream_sent_count[stream] = 0 - stream_recv_count[stream] = 0 - for dst_vm in dst_vm_list: - if sender[stream][dst_vm].sent == None: - sender[stream][dst_vm].sent = 0 - if receiver[stream][dst_vm].recv == None: - receiver[stream][dst_vm].recv = 0 - stream_sent_count[stream] = stream_sent_count[ - stream] + sender[stream][dst_vm].sent - stream_recv_count[stream] = stream_recv_count[ - stream] + receiver[stream][dst_vm].recv - pkt_diff = (stream_sent_count[stream] - stream_recv_count[stream]) - if pkt_diff < 0: - self.logger.debug('Some problem with Scapy. Please check') - elif pkt_diff in range(0, 6): - self.logger.info( - '%s packets sent and %s packets received in Stream%s. No Packet Loss seen.' % - (stream_sent_count[stream], stream_recv_count[stream], stream_list.index(stream))) - else: - self.logger.error('%s packets sent and %s packets received in Stream%s. Packet Loss.' 
% ( - stream_sent_count[stream], stream_recv_count[stream], stream_list.index(stream))) - return True - # end stop_traffic diff --git a/common/ecmp/ecmp_verify.py b/common/ecmp/ecmp_verify.py deleted file mode 100644 index f243532e4..000000000 --- a/common/ecmp/ecmp_verify.py +++ /dev/null @@ -1,164 +0,0 @@ -import os -import fixtures -import testtools -import unittest -import time -from common.connections import ContrailConnections -from common.contrail_test_init import ContrailTestInit -from vn_test import * -from floating_ip import * -from quantum_test import * -from vnc_api_test import * -from nova_test import * -from vm_test import * -from tcutils.wrappers import preposttest_wrapper - - -class ECMPVerify(): - - def get_rt_info_tap_intf_list(self, src_vn, src_vm, dst_vm, svm_ids): - shared_ip= self.find_rt_in_ctrl_node(src_vn, src_vm, dst_vm, svm_ids) - self.find_rt_in_agent(src_vn, src_vm, dst_vm) - return self.get_tap_intf_list(src_vn, src_vm, dst_vm, shared_ip) - #end get_rt_info_tap_intf_list - - def find_rt_in_ctrl_node(self, src_vn, src_vm, dst_vm, svm_ids): - right_ip = {} - left_ip = {} - count= 0 - self.logger.info('***Get the Route Entry in the control node***') - active_controller = None - inspect_h1 = self.agent_inspect[src_vm.vm_node_ip] - agent_xmpp_status = inspect_h1.get_vna_xmpp_connection_status() - for entry in agent_xmpp_status: - if entry['cfg_controller'] == 'Yes': - count += 1 - active_controller = entry['controller_ip'] - new_controller = self.inputs.host_data[ - active_controller]['host_ip'] - self.logger.info('Active control node is %s' % new_controller) - assert count > 0, 'Some Problem with the setup. 
Pls chk XMPP connection' - svm_route_entry = {} - for svm_id in svm_ids: - svc_obj = self.nova_h.get_vm_by_id(svm_id) - left_ip[svm_id] = svc_obj.addresses[self.si_fixtures[0] - .left_vn_name.split(':')[2]][0]['addr'] - right_ip[svm_id] = svc_obj.addresses[self.si_fixtures[0] - .right_vn_name.split(':')[2]][0]['addr'] - self.logger.info('%s has %s as left_ip and %s as right_ip' % - (svc_obj.name, left_ip[svm_id], right_ip[svm_id])) - shared_ip= left_ip[svm_id] - dst_vm_ip = self.cn_inspect[new_controller].get_cn_route_table_entry( - ri_name=src_vn.ri_name, prefix=dst_vm.vm_ip + '/32') - result = True - if dst_vm_ip: - self.logger.info( - 'Route to %s found in the Active Control-Node %s' % - (dst_vm.vm_ip, new_controller)) - else: - result = False - assert result, 'Route to %s not found in the Active Control-Node %s' %(dst_vm.vm_ip, new_controller) - - return shared_ip - #end find_rt_in_ctrl_node - - def find_rt_in_agent(self, src_vn, src_vm, dst_vm): - self.logger.info('***Get the Route Entry in the agent***') - vn_vrf_id= self.get_vrf_id(src_vn, src_vm) - inspect_h1 = self.agent_inspect[src_vm.vm_node_ip] - paths = inspect_h1.get_vna_active_route( - vrf_id=vn_vrf_id, ip=dst_vm.vm_ip, prefix='32')['path_list'] - self.logger.info('There are %s nexthops to %s on Agent %s' % - (len(paths), dst_vm.vm_ip, src_vm.vm_node_ip)) - next_hops = paths[0]['nh'] - if not paths: - result = False - assert result, 'Route to %s not found in the Agent %s' %(dst_vm.vm_ip, src_vm.vm_node_ip) - return True - #end find_rt_in_agent - - def get_tap_intf_list(self, src_vn, src_vm, dst_vm, shared_ip): - self.logger.info('***Get the Tap Interface List***') - vn_vrf_id= self.get_vrf_id(src_vn, src_vm) - inspect_h1 = self.agent_inspect[src_vm.vm_node_ip] - paths = inspect_h1.get_vna_active_route( - vrf_id=vn_vrf_id, ip=shared_ip, prefix='32')['path_list'] - next_hops = paths[0]['nh'] - (domain, project, vn) = src_vn.vn_fq_name.split(':') - tap_intf_list= [] - if 'mc_list' in next_hops: - 
self.logger.info('Composite Next Hops seen') - inspect_h1 = self.agent_inspect[src_vm.vm_node_ip] - vn_vrf_id= self.get_vrf_id(src_vn, src_vm) - multi_next_hops = inspect_h1.get_vna_active_route( - vrf_id=vn_vrf_id, ip=shared_ip, prefix='32')['path_list'][0]['nh']['mc_list'] - - for nh in multi_next_hops: - if nh['type'] == 'Tunnel': - destn_agent = nh['dip'] - new_destn_agent = self.inputs.host_data[ - destn_agent]['host_ip'] - inspect_hh = self.agent_inspect[new_destn_agent] - vn_vrf_id= self.get_vrf_id(src_vn, src_vm, new_destn_agent) - next_hops_in_tnl = inspect_hh.get_vna_active_route( - vrf_id=vn_vrf_id, ip=shared_ip, prefix='32')['path_list'][0]['nh']['mc_list'] - for next_hop in next_hops_in_tnl: - if next_hop['type'] == 'Interface': - tap_intf_from_tnl = next_hop['itf'] - agent_tap_intf_tuple= (new_destn_agent, tap_intf_from_tnl) - tap_intf_list.append(agent_tap_intf_tuple) - elif nh['type'] == 'Interface': - tap_intf = nh['itf'] - agent_tap_intf_tuple= (src_vm.vm_node_ip, tap_intf) - tap_intf_list.append(agent_tap_intf_tuple) - else: - self.logger.debug('No mc_list seen') - if 'unnel' in next_hops['type']: - destn_agent = next_hops['dip'] - new_destn_agent = self.inputs.host_data[ - destn_agent]['host_ip'] - inspect_hh = self.agent_inspect[new_destn_agent] - vn_vrf_id= self.get_vrf_id(src_vn, src_vm, new_destn_agent) - next_hops_in_tnl = inspect_hh.get_vna_active_route( - vrf_id=vn_vrf_id, ip=shared_ip, prefix='32')['path_list'][0]['nh'] - if 'mc_list' in next_hops_in_tnl: - next_hops_in_tnl= next_hops_in_tnl['mc_list'] - for next_hop in next_hops_in_tnl: - if next_hop['type'] == 'Interface': - tap_intf_from_tnl = next_hop['itf'] - agent_tap_intf_tuple= (new_destn_agent, tap_intf_from_tnl) - tap_intf_list.append(agent_tap_intf_tuple) - elif 'face' in next_hops_in_tnl['type']: - tap_intf_from_tnl = next_hops_in_tnl['itf'] - agent_tap_intf_tuple= (new_destn_agent, tap_intf_from_tnl) - tap_intf_list.append(agent_tap_intf_tuple) - elif 'face' in 
next_hops['type']: - tap_intf = next_hops['itf'] - agent_tap_intf_tuple= (src_vm.vm_node_ip, tap_intf) - tap_intf_list.append(agent_tap_intf_tuple) - self.logger.info( - 'The Tap interface list :%s' % - tap_intf_list) - return tap_intf_list - # end get_tap_intf_list - - def get_vrf_id(self, src_vn, src_vm, destn_agent= None): - if destn_agent is None: - destn_agent= src_vm.vm_node_ip - destn_agent= self.inputs.host_data[destn_agent]['host_ip'] - (domain, project, vn) = src_vn.vn_fq_name.split(':') - inspect_h1 = self.agent_inspect[destn_agent] - agent_vrf_objs = inspect_h1.get_vna_vrf_objs(domain, project, vn) - agent_vrf_obj = src_vm.get_matching_vrf( - agent_vrf_objs['vrf_list'], src_vn.vrf_name) - vn_vrf_id = agent_vrf_obj['ucindex'] - return vn_vrf_id - #end get_vrf_id - - def get_svms_in_si(self, si, proj_name): - svm_ids= si.svm_ids - svm_list= [] - for svm_id in svm_ids: - svm_list.append(self.nova_h.get_vm_by_id(svm_id)) - return svm_list - #end get_svms_in_si diff --git a/common/floatingip/__init__.py b/common/floatingip/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/common/floatingip/config.py b/common/floatingip/config.py deleted file mode 100644 index e0eab7f0d..000000000 --- a/common/floatingip/config.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Floating IP config utilities.""" - -import fixtures - - -class CreateAssociateFip(fixtures.Fixture): - - """Create and associate a floating IP to the Virtual Machine.""" - - def __init__(self, inputs, fip_fixture, vn_id, vm_id): - self.inputs = inputs - self.logger = self.inputs.logger - self.fip_fixture = fip_fixture - self.vn_id = vn_id - self.vm_id = vm_id - - def setUp(self): - self.logger.info("Create associate FIP") - super(CreateAssociateFip, self).setUp() - self.fip_id = self.fip_fixture.create_and_assoc_fip( - self.vn_id, self.vm_id) - - def cleanUp(self): - self.logger.info("Disassociationg FIP") - super(CreateAssociateFip, self).cleanUp() - 
self.fip_fixture.disassoc_and_delete_fip(self.fip_id) diff --git a/common/flow_tests/__init__.py b/common/flow_tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/common/gettextutils.py b/common/gettextutils.py deleted file mode 100644 index 5f4c138ac..000000000 --- a/common/gettextutils.py +++ /dev/null @@ -1,371 +0,0 @@ -# Copyright 2012 Red Hat, Inc. -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -gettext for openstack-common modules. - -Usual usage in an openstack.common module: - - from tempest.openstack.common.gettextutils import _ -""" - -import copy -import gettext -import logging -import os -import re -try: - import UserString as _userString -except ImportError: - import collections as _userString - -from babel import localedata -import six - -_localedir = os.environ.get('contrailtest'.upper() + '_LOCALEDIR') -_t = gettext.translation('contrailtest', localedir=_localedir, fallback=True) - -_AVAILABLE_LANGUAGES = {} -USE_LAZY = False - - -def enable_lazy(): - """Convenience function for configuring _() to use lazy gettext - - Call this at the start of execution to enable the gettextutils._ - function to use lazy gettext functionality. This is useful if - your project is importing _ directly instead of using the - gettextutils.install() way of importing the _ function. 
- """ - global USE_LAZY - USE_LAZY = True - - -def _(msg): - if USE_LAZY: - return Message(msg, 'contrailtest') - else: - if six.PY3: - return _t.gettext(msg) - return _t.ugettext(msg) - - -def install(domain, lazy=False): - """Install a _() function using the given translation domain. - - Given a translation domain, install a _() function using gettext's - install() function. - - The main difference from gettext.install() is that we allow - overriding the default localedir (e.g. /usr/share/locale) using - a translation-domain-specific environment variable (e.g. - NOVA_LOCALEDIR). - - :param domain: the translation domain - :param lazy: indicates whether or not to install the lazy _() function. - The lazy _() introduces a way to do deferred translation - of messages by installing a _ that builds Message objects, - instead of strings, which can then be lazily translated into - any available locale. - """ - if lazy: - # NOTE(mrodden): Lazy gettext functionality. - # - # The following introduces a deferred way to do translations on - # messages in OpenStack. We override the standard _() function - # and % (format string) operation to build Message objects that can - # later be translated when we have more information. - # - # Also included below is an example LocaleHandler that translates - # Messages to an associated locale, effectively allowing many logs, - # each with their own locale. - - def _lazy_gettext(msg): - """Create and return a Message object. - - Lazy gettext function for a given domain, it is a factory method - for a project/module to get a lazy gettext function for its own - translation domain (i.e. nova, glance, cinder, etc.) - - Message encapsulates a string so that we can translate - it later when needed. 
- """ - return Message(msg, domain) - - from six import moves - moves.builtins.__dict__['_'] = _lazy_gettext - else: - localedir = '%s_LOCALEDIR' % domain.upper() - if six.PY3: - gettext.install(domain, - localedir=os.environ.get(localedir)) - else: - gettext.install(domain, - localedir=os.environ.get(localedir), - unicode=True) - - -class Message(_userString.UserString, object): - """Class used to encapsulate translatable messages.""" - def __init__(self, msg, domain): - # _msg is the gettext msgid and should never change - self._msg = msg - self._left_extra_msg = '' - self._right_extra_msg = '' - self._locale = None - self.params = None - self.domain = domain - - @property - def data(self): - # NOTE(mrodden): this should always resolve to a unicode string - # that best represents the state of the message currently - - localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR') - if self.locale: - lang = gettext.translation(self.domain, - localedir=localedir, - languages=[self.locale], - fallback=True) - else: - # use system locale for translations - lang = gettext.translation(self.domain, - localedir=localedir, - fallback=True) - - if six.PY3: - ugettext = lang.gettext - else: - ugettext = lang.ugettext - - full_msg = (self._left_extra_msg + - ugettext(self._msg) + - self._right_extra_msg) - - if self.params is not None: - full_msg = full_msg % self.params - - return six.text_type(full_msg) - - @property - def locale(self): - return self._locale - - @locale.setter - def locale(self, value): - self._locale = value - if not self.params: - return - - # This Message object may have been constructed with one or more - # Message objects as substitution parameters, given as a single - # Message, or a tuple or Map containing some, so when setting the - # locale for this Message we need to set it for those Messages too. 
- if isinstance(self.params, Message): - self.params.locale = value - return - if isinstance(self.params, tuple): - for param in self.params: - if isinstance(param, Message): - param.locale = value - return - if isinstance(self.params, dict): - for param in self.params.values(): - if isinstance(param, Message): - param.locale = value - - def _save_dictionary_parameter(self, dict_param): - full_msg = self.data - # look for %(blah) fields in string; - # ignore %% and deal with the - # case where % is first character on the line - keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg) - - # if we don't find any %(blah) blocks but have a %s - if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg): - # apparently the full dictionary is the parameter - params = copy.deepcopy(dict_param) - else: - params = {} - for key in keys: - try: - params[key] = copy.deepcopy(dict_param[key]) - except TypeError: - # cast uncopyable thing to unicode string - params[key] = six.text_type(dict_param[key]) - - return params - - def _save_parameters(self, other): - # we check for None later to see if - # we actually have parameters to inject, - # so encapsulate if our parameter is actually None - if other is None: - self.params = (other, ) - elif isinstance(other, dict): - self.params = self._save_dictionary_parameter(other) - else: - # fallback to casting to unicode, - # this will handle the problematic python code-like - # objects that cannot be deep-copied - try: - self.params = copy.deepcopy(other) - except TypeError: - self.params = six.text_type(other) - - return self - - # overrides to be more string-like - def __unicode__(self): - return self.data - - def __str__(self): - if six.PY3: - return self.__unicode__() - return self.data.encode('utf-8') - - def __getstate__(self): - to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg', - 'domain', 'params', '_locale'] - new_dict = self.__dict__.fromkeys(to_copy) - for attr in to_copy: - new_dict[attr] = 
copy.deepcopy(self.__dict__[attr]) - - return new_dict - - def __setstate__(self, state): - for (k, v) in state.items(): - setattr(self, k, v) - - # operator overloads - def __add__(self, other): - copied = copy.deepcopy(self) - copied._right_extra_msg += other.__str__() - return copied - - def __radd__(self, other): - copied = copy.deepcopy(self) - copied._left_extra_msg += other.__str__() - return copied - - def __mod__(self, other): - # do a format string to catch and raise - # any possible KeyErrors from missing parameters - self.data % other - copied = copy.deepcopy(self) - return copied._save_parameters(other) - - def __mul__(self, other): - return self.data * other - - def __rmul__(self, other): - return other * self.data - - def __getitem__(self, key): - return self.data[key] - - def __getslice__(self, start, end): - return self.data.__getslice__(start, end) - - def __getattribute__(self, name): - # NOTE(mrodden): handle lossy operations that we can't deal with yet - # These override the UserString implementation, since UserString - # uses our __class__ attribute to try and build a new message - # after running the inner data string through the operation. - # At that point, we have lost the gettext message id and can just - # safely resolve to a string instead. - ops = ['capitalize', 'center', 'decode', 'encode', - 'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip', - 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill'] - if name in ops: - return getattr(self.data, name) - else: - return _userString.UserString.__getattribute__(self, name) - - -def get_available_languages(domain): - """Lists the available languages for the given translation domain. 
- - :param domain: the domain to get languages for - """ - if domain in _AVAILABLE_LANGUAGES: - return copy.copy(_AVAILABLE_LANGUAGES[domain]) - - localedir = '%s_LOCALEDIR' % domain.upper() - find = lambda x: gettext.find(domain, - localedir=os.environ.get(localedir), - languages=[x]) - - # NOTE(mrodden): en_US should always be available (and first in case - # order matters) since our in-line message strings are en_US - language_list = ['en_US'] - # NOTE(luisg): Babel <1.0 used a function called list(), which was - # renamed to locale_identifiers() in >=1.0, the requirements master list - # requires >=0.9.6, uncapped, so defensively work with both. We can remove - # this check when the master list updates to >=1.0, and update all projects - list_identifiers = (getattr(localedata, 'list', None) or - getattr(localedata, 'locale_identifiers')) - locale_identifiers = list_identifiers() - for i in locale_identifiers: - if find(i) is not None: - language_list.append(i) - _AVAILABLE_LANGUAGES[domain] = language_list - return copy.copy(language_list) - - -def get_localized_message(message, user_locale): - """Gets a localized version of the given message in the given locale. - - If the message is not a Message object the message is returned as-is. - If the locale is None the message is translated to the default locale. - - :returns: the translated message in unicode, or the original message if - it could not be translated - """ - translated = message - if isinstance(message, Message): - original_locale = message.locale - message.locale = user_locale - translated = six.text_type(message) - message.locale = original_locale - return translated - - -class LocaleHandler(logging.Handler): - """Handler that can have a locale associated to translate Messages. - - A quick example of how to utilize the Message class above. - LocaleHandler takes a locale and a target logging.Handler object - to forward LogRecord objects to after translating the internal Message. 
- """ - - def __init__(self, locale, target): - """Initialize a LocaleHandler - - :param locale: locale to use for translating messages - :param target: logging.Handler object to forward - LogRecord objects to after translation - """ - logging.Handler.__init__(self) - self.locale = locale - self.target = target - - def emit(self, record): - if isinstance(record.msg, Message): - # set the locale and resolve to a string - record.msg.locale = self.locale - - self.target.emit(record) diff --git a/common/importutils.py b/common/importutils.py deleted file mode 100644 index 4fd9ae2bc..000000000 --- a/common/importutils.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Import related utilities and helper functions. 
-""" - -import sys -import traceback - - -def import_class(import_str): - """Returns a class from a string including module and class.""" - mod_str, _sep, class_str = import_str.rpartition('.') - try: - __import__(mod_str) - return getattr(sys.modules[mod_str], class_str) - except (ValueError, AttributeError): - raise ImportError('Class %s cannot be found (%s)' % - (class_str, - traceback.format_exception(*sys.exc_info()))) - - -def import_object(import_str, *args, **kwargs): - """Import a class and return an instance of it.""" - return import_class(import_str)(*args, **kwargs) - - -def import_object_ns(name_space, import_str, *args, **kwargs): - """Tries to import object from default namespace. - - Imports a class and return an instance of it, first by trying - to find the class in a default namespace, then failing back to - a full path if not found in the default namespace. - """ - import_value = "%s.%s" % (name_space, import_str) - try: - return import_class(import_value)(*args, **kwargs) - except ImportError: - return import_class(import_str)(*args, **kwargs) - - -def import_module(import_str): - """Import a module.""" - __import__(import_str) - return sys.modules[import_str] - - -def try_import(import_str, default=None): - """Try to import a module and if it fails return default.""" - try: - return import_module(import_str) - except ImportError: - return default diff --git a/common/isolated_creds.py b/common/isolated_creds.py deleted file mode 100644 index 79e85513e..000000000 --- a/common/isolated_creds.py +++ /dev/null @@ -1,134 +0,0 @@ -import project_test -from common.contrail_test_init import ContrailTestInit -from common.connections import ContrailConnections -import os -import fixtures -from test import BaseTestCase -import time -from tcutils.util import get_random_name - -ADMIN_TENANT = 'admin' - -class IsolatedCreds(fixtures.Fixture): - - def __init__(self,project_name, inputs, ini_file=None, logger=None, - username=None, password=None): - - 
self.inputs = inputs - self.admin_tenant = self.inputs.stack_tenant - if (self.inputs.public_tenant == project_name): - self.project_name = project_name - else: - self.project_name = get_random_name(project_name) - if username: - self.user = username - else: - self.user = project_name - if password: - self.password = password - else: - self.password = project_name - self.ini_file = ini_file - self.logger = logger - if self.inputs.orchestrator == 'vcenter': - self.project_name = self.inputs.stack_tenant - self.user = self.inputs.stack_user - self.password = self.inputs.stack_password - - def setUp(self): - super(IsolatedCreds, self).setUp() - self.connections= ContrailConnections(self.inputs, self.logger) - self.vnc_lib= self.connections.vnc_lib - self.auth = self.connections.auth - - def create_tenant(self): - - self.project = None - time.sleep(4) - try: - self.project = project_test.ProjectFixture(project_name = self.project_name, auth=self.auth, - vnc_lib_h= self.vnc_lib,username= self.user,password= self.password, - connections= self.connections) - self.project.setUp() - except Exception as e: - self.logger.warn("got exception as %s"%(e)) - finally: - return self.project - - def delete_tenant(self): - - self.project.cleanUp() - - def delete_user(self,user=None): - if self.inputs.orchestrator == 'vcenter': - return - if user: - user = user - else: - user = self.user - self.auth.delete_user(user) - - def create_and_attach_user_to_tenant(self,user = None , password=None): - if self.inputs.orchestrator == 'vcenter': - return - user = user if user else self.user - password = password if password else self.password - self.auth.create_user(user,password) - self.auth.add_user_to_project(user, self.project_name) - self.auth.add_user_to_project('admin', self.project_name) - time.sleep(4) - - def get_inputs(self): - - self.project_inputs= ContrailTestInit(self.ini_file, - stack_user=self.project.username, - stack_password=self.project.password, - 
project_fq_name=['default-domain',self.project_name],logger = self.logger) - return self.project_inputs - - def get_conections(self): - self.project_connections= ContrailConnections(self.project_inputs, - project_name= self.project_name, - username=self.project.username, - password= self.project.password, - logger = self.logger) - return self.project_connections - - def get_admin_inputs(self): - - admin = AdminCreds(self.admin_tenant , self.inputs , self.ini_file , self.logger) - return admin.get_inputs() - - def get_admin_connections(self): - - admin = AdminCreds(self.admin_tenant , self.inputs , self.ini_file , self.logger) - return admin.get_conections() - - def cleanUp(self): - super(IsolatedCreds, self).cleanUp() - -class AdminCreds(fixtures.Fixture): - - def __init__(self,project_name,inputs,ini_file = None ,logger = None): - - self.project_name = project_name - self.user = project_name - self.password = project_name - self.inputs = inputs - self.ini_file = ini_file - self.logger = logger - - def get_inputs(self): - - return self.inputs - - def get_conections(self): - - connections= ContrailConnections(self.inputs,project_name= self.project_name, - username=self.inputs.stack_user - ,password= self.inputs.stack_password, - logger = self.logger) - return connections - - def cleanUp(self): - super(AdminCreds, self).cleanUp() diff --git a/common/jsonutils.py b/common/jsonutils.py deleted file mode 100644 index 3f44ca08d..000000000 --- a/common/jsonutils.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -''' -JSON related utilities. - -This module provides a few things: - - 1) A handy function for getting an object down to something that can be - JSON serialized. See to_primitive(). - - 2) Wrappers around loads() and dumps(). The dumps() wrapper will - automatically use to_primitive() for you if needed. - - 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson - is available. -''' - - -import datetime -import functools -import inspect -import itertools -import json -try: - import xmlrpclib -except ImportError: - # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3 - # however the function and object call signatures - # remained the same. This whole try/except block should - # be removed and replaced with a call to six.moves once - # six 1.4.2 is released. See http://bit.ly/1bqrVzu - import xmlrpc.client as xmlrpclib - -import six - -from common import gettextutils -from common import importutils -from common import timeutils - -netaddr = importutils.try_import("netaddr") - -_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, - inspect.isfunction, inspect.isgeneratorfunction, - inspect.isgenerator, inspect.istraceback, inspect.isframe, - inspect.iscode, inspect.isbuiltin, inspect.isroutine, - inspect.isabstract] - -_simple_types = (six.string_types + six.integer_types - + (type(None), bool, float)) - - -def to_primitive(value, convert_instances=False, convert_datetime=True, - level=0, max_depth=3): - """Convert a complex object into primitives. - - Handy for JSON serialization. 
We can optionally handle instances, - but since this is a recursive function, we could have cyclical - data structures. - - To handle cyclical data structures we could track the actual objects - visited in a set, but not all objects are hashable. Instead we just - track the depth of the object inspections and don't go too deep. - - Therefore, convert_instances=True is lossy ... be aware. - - """ - # handle obvious types first - order of basic types determined by running - # full tests on nova project, resulting in the following counts: - # 572754 - # 460353 - # 379632 - # 274610 - # 199918 - # 114200 - # 51817 - # 26164 - # 6491 - # 283 - # 19 - if isinstance(value, _simple_types): - return value - - if isinstance(value, datetime.datetime): - if convert_datetime: - return timeutils.strtime(value) - else: - return value - - # value of itertools.count doesn't get caught by nasty_type_tests - # and results in infinite loop when list(value) is called. - if type(value) == itertools.count: - return six.text_type(value) - - # FIXME(vish): Workaround for LP bug 852095. Without this workaround, - # tests that raise an exception in a mocked method that - # has a @wrap_exception with a notifier will fail. If - # we up the dependency to 0.5.4 (when it is released) we - # can remove this workaround. - if getattr(value, '__module__', None) == 'mox': - return 'mock' - - if level > max_depth: - return '?' - - # The try block may not be necessary after the class check above, - # but just in case ... 
- try: - recursive = functools.partial(to_primitive, - convert_instances=convert_instances, - convert_datetime=convert_datetime, - level=level, - max_depth=max_depth) - if isinstance(value, dict): - return dict((k, recursive(v)) for k, v in six.iteritems(value)) - elif isinstance(value, (list, tuple)): - return [recursive(lv) for lv in value] - - # It's not clear why xmlrpclib created their own DateTime type, but - # for our purposes, make it a datetime type which is explicitly - # handled - if isinstance(value, xmlrpclib.DateTime): - value = datetime.datetime(*tuple(value.timetuple())[:6]) - - if convert_datetime and isinstance(value, datetime.datetime): - return timeutils.strtime(value) - elif isinstance(value, gettextutils.Message): - return value.data - elif hasattr(value, 'iteritems'): - return recursive(dict(value.iteritems()), level=level + 1) - elif hasattr(value, '__iter__'): - return recursive(list(value)) - elif convert_instances and hasattr(value, '__dict__'): - # Likely an instance of something. Watch for cycles. - # Ignore class member vars. - return recursive(value.__dict__, level=level + 1) - elif netaddr and isinstance(value, netaddr.IPAddress): - return six.text_type(value) - else: - if any(test(value) for test in _nasty_type_tests): - return six.text_type(value) - return value - except TypeError: - # Class objects are tricky since they may define something like - # __iter__ defined but it isn't callable as list(). 
- return six.text_type(value) - - -def dumps(value, default=to_primitive, **kwargs): - return json.dumps(value, default=default, **kwargs) - - -def loads(s): - return json.loads(s) - - -def load(s): - return json.load(s) - - -try: - import anyjson -except ImportError: - pass -else: - anyjson._modules.append((__name__, 'dumps', TypeError, - 'loads', ValueError, 'load')) - anyjson.force_implementation(__name__) diff --git a/common/local.py b/common/local.py deleted file mode 100644 index 0819d5b97..000000000 --- a/common/local.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Local storage of variables using weak references""" - -import threading -import weakref - - -class WeakLocal(threading.local): - def __getattribute__(self, attr): - rval = super(WeakLocal, self).__getattribute__(attr) - if rval: - # NOTE(mikal): this bit is confusing. What is stored is a weak - # reference, not the value itself. We therefore need to lookup - # the weak reference and return the inner value here. 
- rval = rval() - return rval - - def __setattr__(self, attr, value): - value = weakref.ref(value) - return super(WeakLocal, self).__setattr__(attr, value) - - -# NOTE(mikal): the name "store" should be deprecated in the future -store = WeakLocal() - -# A "weak" store uses weak references and allows an object to fall out of scope -# when it falls out of scope in the code that uses the thread local storage. A -# "strong" store will hold a reference to the object so that it never falls out -# of scope. -weak_store = WeakLocal() -strong_store = threading.local() diff --git a/common/log.py b/common/log.py deleted file mode 100644 index 1dd08d15f..000000000 --- a/common/log.py +++ /dev/null @@ -1,628 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Openstack logging handler. - -This module adds to logging functionality by adding the option to specify -a context object when calling the various log methods. If the context object -is not specified, default formatting is used. Additionally, an instance uuid -may be passed as part of the log message, which is intended to make it easier -for admins to find messages related to a specific instance. - -It also allows setting of formatting information through conf. 
- -""" - -import inspect -import itertools -import logging -import logging.config -import logging.handlers -import os -import re -import sys -import traceback - -from oslo.config import cfg -import six -from six import moves - -from common.gettextutils import _ -from common import importutils -from common import jsonutils -from common import local - - -_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" - -_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] - -# NOTE(ldbragst): Let's build a list of regex objects using the list of -# _SANITIZE_KEYS we already have. This way, we only have to add the new key -# to the list of _SANITIZE_KEYS and we can generate regular expressions -# for XML and JSON automatically. -_SANITIZE_PATTERNS = [] -_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', - r'(<%(key)s>).*?()', - r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', - r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])'] - -for key in _SANITIZE_KEYS: - for pattern in _FORMAT_PATTERNS: - reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) - _SANITIZE_PATTERNS.append(reg_ex) - - -common_cli_opts = [ - cfg.BoolOpt('debug', - short='d', - default=False, - help='Print debugging output (set logging level to ' - 'DEBUG instead of default WARNING level).'), - cfg.BoolOpt('verbose', - short='v', - default=False, - help='Print more verbose output (set logging level to ' - 'INFO instead of default WARNING level).'), -] - -logging_cli_opts = [ - cfg.StrOpt('log-config-append', - metavar='PATH', - deprecated_name='log-config', - help='The name of logging configuration file. It does not ' - 'disable existing loggers, but just appends specified ' - 'logging configuration to any other existing logging ' - 'options. Please see the Python logging module ' - 'documentation for details on logging configuration ' - 'files.'), - cfg.StrOpt('log-format', - default=None, - metavar='FORMAT', - help='DEPRECATED. 
' - 'A logging.Formatter log message format string which may ' - 'use any of the available logging.LogRecord attributes. ' - 'This option is deprecated. Please use ' - 'logging_context_format_string and ' - 'logging_default_format_string instead.'), - cfg.StrOpt('log-date-format', - default=_DEFAULT_LOG_DATE_FORMAT, - metavar='DATE_FORMAT', - help='Format string for %%(asctime)s in log records. ' - 'Default: %(default)s'), - cfg.StrOpt('log-file', - metavar='PATH', - deprecated_name='logfile', - help='(Optional) Name of log file to output to. ' - 'If no default is set, logging will go to stdout.'), - cfg.StrOpt('log-dir', - deprecated_name='logdir', - help='(Optional) The base directory used for relative ' - '--log-file paths'), - cfg.BoolOpt('use-syslog', - default=False, - help='Use syslog for logging.'), - cfg.StrOpt('syslog-log-facility', - default='LOG_USER', - help='syslog facility to receive log lines') -] - -generic_log_opts = [ - cfg.BoolOpt('use_stderr', - default=True, - help='Log output to standard error') -] - -DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', - 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', - 'oslo.messaging=INFO', 'iso8601=WARN', - 'requests.packages.urllib3.connectionpool=WARN', - 'urllib3.connectionpool=WARN'] - -log_opts = [ - cfg.StrOpt('logging_context_format_string', - default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [%(request_id)s %(user_identity)s] ' - '%(instance)s%(message)s', - help='Format string to use for log messages with context.'), - cfg.StrOpt('logging_default_format_string', - default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [-] %(instance)s%(message)s', - help='Format string to use for log messages without context.'), - cfg.StrOpt('logging_debug_format_suffix', - default='%(funcName)s %(pathname)s:%(lineno)d', - help='Data to append to log format when level is DEBUG.'), - cfg.StrOpt('logging_exception_prefix', - default='%(asctime)s.%(msecs)03d 
%(process)d TRACE %(name)s ' - '%(instance)s', - help='Prefix each line of exception output with this format.'), - cfg.ListOpt('default_log_levels', - default=DEFAULT_LOG_LEVELS, - help='List of logger=LEVEL pairs.'), - cfg.BoolOpt('publish_errors', - default=False, - help='Enables or disables publication of error events.'), - cfg.BoolOpt('fatal_deprecations', - default=False, - help='Enables or disables fatal status of deprecations.'), - - # NOTE(mikal): there are two options here because sometimes we are handed - # a full instance (and could include more information), and other times we - # are just handed a UUID for the instance. - cfg.StrOpt('instance_format', - default='[instance: %(uuid)s] ', - help='The format for an instance that is passed with the log ' - 'message.'), - cfg.StrOpt('instance_uuid_format', - default='[instance: %(uuid)s] ', - help='The format for an instance UUID that is passed with the ' - 'log message.'), -] - -CONF = cfg.CONF -CONF.register_cli_opts(common_cli_opts) -CONF.register_cli_opts(logging_cli_opts) -CONF.register_opts(generic_log_opts) -CONF.register_opts(log_opts) - -# our new audit level -# NOTE(jkoelker) Since we synthesized an audit level, make the logging -# module aware of it so it acts like other levels. 
-logging.AUDIT = logging.INFO + 1 -logging.addLevelName(logging.AUDIT, 'AUDIT') - - -try: - NullHandler = logging.NullHandler -except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 - class NullHandler(logging.Handler): - def handle(self, record): - pass - - def emit(self, record): - pass - - def createLock(self): - self.lock = None - - -def _dictify_context(context): - if context is None: - return None - if not isinstance(context, dict) and getattr(context, 'to_dict', None): - context = context.to_dict() - return context - - -def _get_binary_name(): - return os.path.basename(inspect.stack()[-1][1]) - - -def _get_log_file_path(binary=None): - logfile = CONF.log_file - logdir = CONF.log_dir - - if logfile and not logdir: - return logfile - - if logfile and logdir: - return os.path.join(logdir, logfile) - - if logdir: - binary = binary or _get_binary_name() - return '%s.log' % (os.path.join(logdir, binary),) - - return None - - -def mask_password(message, secret="***"): - """Replace password with 'secret' in message. - - :param message: The string which includes security information. - :param secret: value with which to replace passwords. - :returns: The unicode value of message with the password fields masked. - - For example: - - >>> mask_password("'adminPass' : 'aaaaa'") - "'adminPass' : '***'" - >>> mask_password("'admin_pass' : 'aaaaa'") - "'admin_pass' : '***'" - >>> mask_password('"password" : "aaaaa"') - '"password" : "***"' - >>> mask_password("'original_password' : 'aaaaa'") - "'original_password' : '***'" - >>> mask_password("u'original_password' : u'aaaaa'") - "u'original_password' : u'***'" - """ - message = six.text_type(message) - - # NOTE(ldbragst): Check to see if anything in message contains any key - # specified in _SANITIZE_KEYS, if not then just return the message since - # we don't have to mask any passwords. 
- if not any(key in message for key in _SANITIZE_KEYS): - return message - - secret = r'\g<1>' + secret + r'\g<2>' - for pattern in _SANITIZE_PATTERNS: - message = re.sub(pattern, secret, message) - return message - - -class BaseLoggerAdapter(logging.LoggerAdapter): - - def audit(self, msg, *args, **kwargs): - self.log(logging.AUDIT, msg, *args, **kwargs) - - -class LazyAdapter(BaseLoggerAdapter): - def __init__(self, name='unknown', version='unknown'): - self._logger = None - self.extra = {} - self.name = name - self.version = version - - @property - def logger(self): - if not self._logger: - self._logger = getLogger(self.name, self.version) - return self._logger - - -class ContextAdapter(BaseLoggerAdapter): - warn = logging.LoggerAdapter.warning - - def __init__(self, logger, project_name, version_string): - self.logger = logger - self.project = project_name - self.version = version_string - - @property - def handlers(self): - return self.logger.handlers - - def deprecated(self, msg, *args, **kwargs): - stdmsg = _("Deprecated: %s") % msg - if CONF.fatal_deprecations: - self.critical(stdmsg, *args, **kwargs) - raise DeprecatedConfig(msg=stdmsg) - else: - self.warn(stdmsg, *args, **kwargs) - - def process(self, msg, kwargs): - # NOTE(mrodden): catch any Message/other object and - # coerce to unicode before they can get - # to the python logging and possibly - # cause string encoding trouble - if not isinstance(msg, six.string_types): - msg = six.text_type(msg) - - if 'extra' not in kwargs: - kwargs['extra'] = {} - extra = kwargs['extra'] - - context = kwargs.pop('context', None) - if not context: - context = getattr(local.store, 'context', None) - if context: - extra.update(_dictify_context(context)) - - instance = kwargs.pop('instance', None) - instance_uuid = (extra.get('instance_uuid', None) or - kwargs.pop('instance_uuid', None)) - instance_extra = '' - if instance: - instance_extra = CONF.instance_format % instance - elif instance_uuid: - instance_extra = 
(CONF.instance_uuid_format - % {'uuid': instance_uuid}) - extra['instance'] = instance_extra - - extra.setdefault('user_identity', kwargs.pop('user_identity', None)) - - extra['project'] = self.project - extra['version'] = self.version - extra['extra'] = extra.copy() - return msg, kwargs - - -class JSONFormatter(logging.Formatter): - def __init__(self, fmt=None, datefmt=None): - # NOTE(jkoelker) we ignore the fmt argument, but its still there - # since logging.config.fileConfig passes it. - self.datefmt = datefmt - - def formatException(self, ei, strip_newlines=True): - lines = traceback.format_exception(*ei) - if strip_newlines: - lines = [moves.filter( - lambda x: x, - line.rstrip().splitlines()) for line in lines] - lines = list(itertools.chain(*lines)) - return lines - - def format(self, record): - message = {'message': record.getMessage(), - 'asctime': self.formatTime(record, self.datefmt), - 'name': record.name, - 'msg': record.msg, - 'args': record.args, - 'levelname': record.levelname, - 'levelno': record.levelno, - 'pathname': record.pathname, - 'filename': record.filename, - 'module': record.module, - 'lineno': record.lineno, - 'funcname': record.funcName, - 'created': record.created, - 'msecs': record.msecs, - 'relative_created': record.relativeCreated, - 'thread': record.thread, - 'thread_name': record.threadName, - 'process_name': record.processName, - 'process': record.process, - 'traceback': None} - - if hasattr(record, 'extra'): - message['extra'] = record.extra - - if record.exc_info: - message['traceback'] = self.formatException(record.exc_info) - - return jsonutils.dumps(message) - - -def _create_logging_excepthook(product_name): - def logging_excepthook(exc_type, value, tb): - extra = {} - if CONF.verbose: - extra['exc_info'] = (exc_type, value, tb) - getLogger(product_name).critical(str(value), **extra) - return logging_excepthook - - -class LogConfigError(Exception): - - message = _('Error loading logging config %(log_config)s: %(err_msg)s') - 
- def __init__(self, log_config, err_msg): - self.log_config = log_config - self.err_msg = err_msg - - def __str__(self): - return self.message % dict(log_config=self.log_config, - err_msg=self.err_msg) - - -def _load_log_config(log_config_append): - try: - logging.config.fileConfig(log_config_append, - disable_existing_loggers=False) - except moves.configparser.Error as exc: - raise LogConfigError(log_config_append, str(exc)) - - -def setup(product_name): - """Setup logging.""" - if CONF.log_config_append: - _load_log_config(CONF.log_config_append) - else: - _setup_logging_from_conf() - sys.excepthook = _create_logging_excepthook(product_name) - - -def set_defaults(logging_context_format_string): - if default_log_levels is None: - default_log_levels = DEFAULT_LOG_LEVELS - cfg.set_defaults( - log_opts, - logging_context_format_string=logging_context_format_string, - default_log_levels=default_log_levels) - - - -def _find_facility_from_conf(): - facility_names = logging.handlers.SysLogHandler.facility_names - facility = getattr(logging.handlers.SysLogHandler, - CONF.syslog_log_facility, - None) - - if facility is None and CONF.syslog_log_facility in facility_names: - facility = facility_names.get(CONF.syslog_log_facility) - - if facility is None: - valid_facilities = facility_names.keys() - consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', - 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', - 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', - 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', - 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] - valid_facilities.extend(consts) - raise TypeError(_('syslog facility must be one of: %s') % - ', '.join("'%s'" % fac - for fac in valid_facilities)) - - return facility - - -def _setup_logging_from_conf(): - log_root = getLogger(None).logger - for handler in log_root.handlers: - log_root.removeHandler(handler) - - if CONF.use_syslog: - facility = _find_facility_from_conf() - syslog = 
logging.handlers.SysLogHandler(address='/dev/log', - facility=facility) - log_root.addHandler(syslog) - - logpath = _get_log_file_path() - if logpath: - filelog = logging.handlers.WatchedFileHandler(logpath) - log_root.addHandler(filelog) - - if CONF.use_stderr: - streamlog = ColorHandler() - log_root.addHandler(streamlog) - - elif not logpath: - # pass sys.stdout as a positional argument - # python2.6 calls the argument strm, in 2.7 it's stream - streamlog = logging.StreamHandler(sys.stdout) - log_root.addHandler(streamlog) - - if CONF.publish_errors: - handler = importutils.import_object( - "common.log_handler.PublishErrorsHandler", - logging.ERROR) - log_root.addHandler(handler) - - datefmt = CONF.log_date_format - for handler in log_root.handlers: - # NOTE(alaski): CONF.log_format overrides everything currently. This - # should be deprecated in favor of context aware formatting. - if CONF.log_format: - handler.setFormatter(logging.Formatter(fmt=CONF.log_format, - datefmt=datefmt)) - log_root.info('Deprecated: log_format is now deprecated and will ' - 'be removed in the next release') - else: - handler.setFormatter(ContextFormatter(datefmt=datefmt)) - - if CONF.debug: - log_root.setLevel(logging.DEBUG) - elif CONF.verbose: - log_root.setLevel(logging.INFO) - else: - log_root.setLevel(logging.WARNING) - - for pair in CONF.default_log_levels: - mod, _sep, level_name = pair.partition('=') - level = logging.getLevelName(level_name) - logger = logging.getLogger(mod) - logger.setLevel(level) - -_loggers = {} - - -def getLogger(name='unknown', version='unknown'): - if name not in _loggers: - _loggers[name] = ContextAdapter(logging.getLogger(name), - name, - version) - return _loggers[name] - - -def getLazyLogger(name='unknown', version='unknown'): - """Returns lazy logger. - - Creates a pass-through logger that does not create the real logger - until it is really needed and delegates all calls to the real logger - once it is created. 
- """ - return LazyAdapter(name, version) - - -class WritableLogger(object): - """A thin wrapper that responds to `write` and logs.""" - - def __init__(self, logger, level=logging.INFO): - self.logger = logger - self.level = level - - def write(self, msg): - self.logger.log(self.level, msg) - - -class ContextFormatter(logging.Formatter): - """A context.RequestContext aware formatter configured through flags. - - The flags used to set format strings are: logging_context_format_string - and logging_default_format_string. You can also specify - logging_debug_format_suffix to append extra formatting if the log level is - debug. - - For information about what variables are available for the formatter see: - http://docs.python.org/library/logging.html#formatter - - """ - - def format(self, record): - """Uses contextstring if request_id is set, otherwise default.""" - # NOTE(sdague): default the fancier formating params - # to an empty string so we don't throw an exception if - # they get used - for key in ('instance', 'color'): - if key not in record.__dict__: - record.__dict__[key] = '' - - if record.__dict__.get('request_id', None): - self._fmt = CONF.logging_context_format_string - else: - self._fmt = CONF.logging_default_format_string - - if (record.levelno == logging.DEBUG and - CONF.logging_debug_format_suffix): - self._fmt += " " + CONF.logging_debug_format_suffix - - # Cache this on the record, Logger will respect our formated copy - if record.exc_info: - record.exc_text = self.formatException(record.exc_info, record) - return logging.Formatter.format(self, record) - - def formatException(self, exc_info, record=None): - """Format exception output with CONF.logging_exception_prefix.""" - if not record: - return logging.Formatter.formatException(self, exc_info) - - stringbuffer = moves.StringIO() - traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], - None, stringbuffer) - lines = stringbuffer.getvalue().split('\n') - stringbuffer.close() - - if 
CONF.logging_exception_prefix.find('%(asctime)') != -1: - record.asctime = self.formatTime(record, self.datefmt) - - formatted_lines = [] - for line in lines: - pl = CONF.logging_exception_prefix % record.__dict__ - fl = '%s%s' % (pl, line) - formatted_lines.append(fl) - return '\n'.join(formatted_lines) - - -class ColorHandler(logging.StreamHandler): - LEVEL_COLORS = { - logging.DEBUG: '\033[00;32m', # GREEN - logging.INFO: '\033[00;36m', # CYAN - logging.AUDIT: '\033[01;36m', # BOLD CYAN - logging.WARN: '\033[01;33m', # BOLD YELLOW - logging.ERROR: '\033[01;31m', # BOLD RED - logging.CRITICAL: '\033[01;31m', # BOLD RED - } - - def format(self, record): - record.color = self.LEVEL_COLORS[record.levelno] - return logging.StreamHandler.format(self, record) - - -class DeprecatedConfig(Exception): - message = _("Fatal call to deprecated config: %(msg)s") - - def __init__(self, msg): - super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/common/log_orig.py b/common/log_orig.py deleted file mode 100644 index 0a92b9e28..000000000 --- a/common/log_orig.py +++ /dev/null @@ -1,116 +0,0 @@ -import ConfigParser -import logging -import logging.config -import logging.handlers -import os -import sys -import subprocess -import time -import fixtures -import datetime - -cwd = os.getcwd() -LOG_CONFIG = '%s/log_conf.ini'%cwd -LOG_KEY = 'log01' -TS = time.time() -ST = datetime.datetime.fromtimestamp(TS).strftime('%Y-%m-%d_%H:%M:%S') -LOG_FORMAT = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - -_loggers = {} - -class NullHandler(logging.Handler): - """ - For backward-compatibility with Python 2.6, a local class definition - is used instead of logging.NullHandler - """ - - def handle(self, record): - pass - - def emit(self, record): - pass - - def createLock(self): - self.lock = None - -def getLogger(log_file = 'abcd', name='unknown'): - if name not in _loggers: - _loggers[name] = ContrailLogger(log_file ,name = name) - _loggers[name].setUp() - 
return _loggers[name] - -class ContrailLogger: - def __init__(self,log_file,name=None): - - - self.name = name - logging.config.fileConfig(LOG_CONFIG) - self.logger = logging.getLogger(LOG_KEY) - self.log_file = log_file - - def setUp(self): - self.fileHandler = CustomFileHandler(fileName = self.log_file) - self.fileHandler.setFormatter(LOG_FORMAT) - self.logger.addHandler(self.fileHandler) - #return self.logger - #self.memHandler = self.logger.handlers[0] - #self.memHandler.setTarget(self.fileHandler) - #self.logger.addHandler(self.fileHandler) - - self.console_h= logging.StreamHandler() - self.console_h.setLevel(logging.INFO) - self.console_h.setFormatter(LOG_FORMAT) - self.logger.addHandler(self.console_h) - #self.logger.addHandler(logging.NullHandler()) - self.logger.addHandler(NullHandler()) - - def cleanUp(self): - pass - #self.memHandler.flush() - #self.memHandler.close() - #self.logger.removeHandler(self.memHandler) - self.logger.removeHandler(self.console_h) - - def handlers(self): - return self.logger.handlers - -class CustomFileHandler(logging.FileHandler): - def __init__( self, fileName='test_details.log', mode='a', build_id='0000'): - if 'SCRIPT_TS' in os.environ: - ts= os.environ.get('SCRIPT_TS') - else: - ts='' - if 'BUILD_ID' in os.environ : - build_id= os.environ.get('BUILD_ID') - #path=os.environ.get('%s',%cwd)+'/logs/' - path=('%s'+'/logs/')%cwd - try: - os.mkdir( path ) - except OSError: - subprocess.call('mkdir -p %s' %(path), shell=True) - fileName= path + '/' + fileName.lower() +'.log' - print "\nLog file : %s \n" %(os.path.realpath(fileName)) - logging.FileHandler.__init__(self,fileName,mode) - -def dolog(logger,message = ''): - - logger.debug("Debug %s"%message) - logger.info("Info %s"%message) - logger.warning("Warning %s"%message) - logger.error("Error %s"%message) - logger.critical("Critical %s"%message) - -def main(): - - logger = Contrail_Logger('Dummy_file') - logger.setUp() - dolog(logger.logger,'message1') - logger.cleanUp() - - 
logger = Contrail_Logger('Dummy_file_1') - logger.setUp() - dolog(logger.logger,'message2') - logger.cleanUp() - -if __name__ == "__main__": - main() diff --git a/common/neutron/__init__.py b/common/neutron/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/common/neutron/attributes.py b/common/neutron/attributes.py deleted file mode 100644 index 2d5c6a161..000000000 --- a/common/neutron/attributes.py +++ /dev/null @@ -1,382 +0,0 @@ -from common.neutron.neutron_util import * -from tcutils.util import * -import string - -network = { - 'id': { - 'perm': 'r', - 'default': 'generated', - 'type': 'uuid-str', - 'required': 'false', - }, - 'name': { - 'perm': 'cru', - 'default': 'none', - 'type': 'string', - 'required': 'false', - }, - 'admin_state_up': { - 'perm': 'cru', - 'default': 'none', - 'type': 'boolean', - 'required': 'false', - }, - 'status': { - 'perm': 'r', - 'default': 'none', - 'type': 'string', - 'required': 'false', - }, - 'subnets': { - 'perm': 'r', - 'default': 'empty list', - 'type': 'list(uuid-str)', - 'required': 'false', - }, - 'shared': { - 'perm': 'cru', - 'default': 'false', - 'type': 'boolean', - 'required': 'false', - }, - 'tenant_id': { - 'perm': 'cr', - 'default': 'none', - 'type': 'uuid-str', - 'required': 'false', - }, - 'router:external': { - 'perm': 'cru', - 'default': 'false', - 'type': 'boolean', - 'required': 'false', - }, -} - -subnet = { - 'id': { - 'perm': 'r', - 'default': 'generated', - 'type': 'uuid-str', - 'required': 'false', - }, - 'name': { - 'perm': 'cru', - 'default': 'none', - 'type': 'string', - 'required': 'false', - }, - 'network_id': { - 'perm': 'cr', - 'default': 'none', - 'type': 'uuid-str', - 'required': 'true', - }, - 'ip_version': { - 'perm': 'cr', - 'default': '4', - 'type': 'int-46', - 'required': 'true', - }, - 'cidr': { - 'perm': 'cr', - 'default': 'none', - 'type': 'cidr-string', - 'required': 'true', - }, - 'gateway_ip': { - #'perm':'crud', - 'perm': 'cr', # gateway update and delete 
are not supported - 'default': 'none', - 'type': 'gw-ip', - 'required': 'false', - }, - 'dns_nameservers': { - 'perm': 'cru', - 'default': '[]', - 'type': 'list-ip', - 'required': 'false', - }, - 'allocation_pools': { - 'perm': 'cr', - 'default': 'none', - 'type': 'list-pool-dict', - 'required': 'false', - }, - 'host_routes': { - 'perm': 'cru', - 'default': '[]', - 'type': 'list-route-dict', - 'required': 'false', - }, - 'enable_dhcp': { - 'perm': 'cru', - 'default': 'true', - 'type': 'boolean', - 'required': 'false', - }, - 'tenant_id': { - 'perm': 'cr', - 'default': 'none', - 'type': 'uuid-str', - 'required': 'false', - }, -} - -port = { - 'id': { - 'perm': 'r', - 'default': 'generated', - 'type': 'uuid-str', - 'required': 'false', - }, - 'network_id': { - 'perm': 'cr', - 'default': 'none', - 'type': 'uuid-str', - 'required': 'true', - }, - 'name': { - 'perm': 'cru', - 'default': 'none', - 'type': 'string', - 'required': 'false', - }, - 'admin_state_up': { - 'perm': 'cru', - 'default': 'true', - 'type': 'boolean', - 'required': 'false', - }, - 'status': { - 'perm': 'r', - 'default': 'none', - 'type': 'string', - 'required': 'false', - }, - 'mac_address': { - 'perm': 'cr', - 'default': 'generated', - 'type': 'mac-string', - 'required': 'false', - }, - 'fixed_ips': { - 'perm': 'cru', - 'default': 'generated', - 'type': 'list-fixed-ip-dict', - 'required': 'false', - }, - 'device_id': { - # TODO Bug 1337457 - # 'perm':'crud', - 'perm': 'r', - 'default': 'none', - 'type': 'uuid-str', - 'required': 'false', - }, - 'device_owner': { - # 'perm':'crud', # TODO Bug 1337457 - 'perm': 'r', - 'default': 'none', - 'type': 'string', - 'required': 'false', - }, - 'tenant_id': { - 'perm': 'cr', - 'default': 'none', - 'type': 'uuid-str', - 'required': 'false', - }, - 'security_groups': { - 'perm': 'crud', - 'default': 'none', - 'type': 'list-sg-str', - 'required': 'false', - }, - -} - -router = { - 'id': { - 'perm': 'r', - 'default': 'generated', - 'type': 'uuid-str', - 
'required': 'false', - }, - 'name': { - 'perm': 'cru', - 'default': 'none', - 'type': 'string', - 'required': 'false', - }, - 'admin_state_up': { - 'perm': 'cru', - 'default': 'true', - 'type': 'boolean', - 'required': 'false', - }, - 'status': { - 'perm': 'r', - 'default': 'none', - 'type': 'string', - 'required': 'false', - }, - 'tenant_id': { - 'perm': 'cr', - 'default': 'none', - 'type': 'uuid-str', - 'required': 'false', - }, - # Igore external_gateway_info since its not supported -} - - -def get_matching_perm_attributes(obj, conditions): - return_list = [] - for (attr, value) in obj.items(): - needed = True - for (cond_attr, cond_value) in conditions.items(): - if not cond_value in value[cond_attr]: - needed = needed and False - if needed: - return_list.append(attr) - return return_list - - -def get_network_c_reqd_attributes1(): - c_reqd_attributes = [] - for (attr, value) in network.items(): - if 'c' in value['perm'] and 'true' in value['required']: - c_reqd_attributes.append(attr) - return c_reqd_attributes -# end get_network_c_reqd_attributes - - -def get_network_create_required_attributes(): - return get_matching_perm_attributes(network, conditions={'perm': 'c', 'required': 'true'}) - - -def get_subnet_create_required_attributes(): - return get_matching_perm_attributes(subnet, conditions={'perm': 'c', 'required': 'true'}) - - -def get_port_create_required_attributes(): - return get_matching_perm_attributes(port, conditions={'perm': 'c', 'required': 'true'}) - - -def get_router_create_required_attributes(): - return get_matching_perm_attributes(router, conditions={'perm': 'c', 'required': 'true'}) - - -def get_other_network_create_attributes(): - all_attributes = get_matching_perm_attributes(network, - conditions={'perm': 'c', }) - must = get_network_create_required_attributes() - return list(set(all_attributes) - set(must)) - - -def get_other_subnet_create_attributes(): - all_attributes = get_matching_perm_attributes(subnet, - conditions={'perm': 'c', }) 
- must = get_subnet_create_required_attributes() - return list(set(all_attributes) - set(must)) - - -def get_other_port_create_attributes(): - all_attributes = get_matching_perm_attributes(port, - conditions={'perm': 'c', }) - must = get_port_create_required_attributes() - return list(set(all_attributes) - set(must)) - - -def get_other_router_create_attributes(): - all_attributes = get_matching_perm_attributes(router, - conditions={'perm': 'c', }) - must = get_router_create_required_attributes() - return list(set(all_attributes) - set(must)) - - -def get_network_read_attributes(): - return get_matching_perm_attributes(network, - conditions={'perm': 'r', }) - - -def get_subnet_read_attributes(): - return get_matching_perm_attributes(subnet, - conditions={'perm': 'r', }) - - -def get_port_read_attributes(): - return get_matching_perm_attributes(port, - conditions={'perm': 'r', }) - - -def get_router_read_attributes(): - return get_matching_perm_attributes(router, - conditions={'perm': 'r', }) - - -def get_network_update_attributes(): - return get_matching_perm_attributes(network, - conditions={'perm': 'u', }) - - -def get_subnet_update_attributes(): - return get_matching_perm_attributes(subnet, - conditions={'perm': 'u', }) - - -def get_port_update_attributes(): - return get_matching_perm_attributes(port, - conditions={'perm': 'u', }) - - -def get_router_update_attributes(): - return get_matching_perm_attributes(router, - conditions={'perm': 'u', }) - - -def get_random_value(obj, attribute, dep_attribute1=None, dep_attribute2=None): - item = obj[attribute]['type'] - if item == 'string': - return attribute + '-' + get_random_string() - elif 'boolean' == item: - return str(get_random_boolean()) - elif 'uuid-str' == item: - return get_uuid() - elif 'int-46' == item: - # IPv4 always - return '4' - elif 'cidr-string' == item: - return get_random_cidr('16') - elif 'gw-ip' == item: - return get_random_ip(dep_attribute1) - elif 'list-string' == item: - return 
get_random_string_list( - max_list_length=4, prefix=attribute + '-', - length=8) - elif 'list-pool-dict' == item: - # eg: allocation_pool - return get_pool_dict_list(dep_attribute1) - elif 'list-route-dict' == item: - # host routes - return get_route_dict_list(dep_attribute1) - elif 'mac-string' == item: - return get_random_mac() - elif 'list-fixed-ip-dict' == item: - return get_fixed_ip_dict_list(dep_attribute1, dep_attribute2) - elif 'list-ip' == item: - return get_random_ip_list(max_list_length=4) - - -if __name__ == "__main__": - import pdb - pdb.set_trace() -# print get_network_c_reqd_attributes1() - print get_network_create_required_attributes() - print get_other_network_create_attributes() - - # for i in util.combos(get_other_network_create_attributes()): - # print i - print get_fixed_ip_dict_list('some_uuid', '10.1.1.0/24') - print get_pool_dict_list('10.1.1.0/24') diff --git a/common/neutron/base.py b/common/neutron/base.py deleted file mode 100644 index 525358f58..000000000 --- a/common/neutron/base.py +++ /dev/null @@ -1,837 +0,0 @@ -import time -import test -from netaddr import * - -from common.connections import ContrailConnections -from common import isolated_creds -from common import create_public_vn -from vn_test import VNFixture -from vm_test import VMFixture -from project_test import ProjectFixture -from policy_test import PolicyFixture -from port_fixture import PortFixture -from tcutils.util import get_random_name, retry, get_random_cidr -from fabric.context_managers import settings -from fabric.api import run -from fabric.operations import get, put -from tcutils.commands import ssh, execute_cmd, execute_cmd_out -import ConfigParser -import re - -contrail_api_conf = '/etc/contrail/contrail-api.conf' - - -class BaseNeutronTest(test.BaseTestCase): - - @classmethod - def setUpClass(cls): - super(BaseNeutronTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - 
logger=cls.logger) - cls.admin_connections = cls.isolated_creds.get_admin_connections() - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - cls.admin_inputs = cls.isolated_creds.get_admin_inputs() - cls.admin_connections = cls.isolated_creds.get_admin_connections() - cls.quantum_h = cls.connections.quantum_h - cls.nova_h = cls.connections.nova_h - cls.vnc_lib = cls.connections.vnc_lib - cls.agent_inspect = cls.connections.agent_inspect - cls.cn_inspect = cls.connections.cn_inspect - cls.analytics_obj = cls.connections.analytics_obj - cls.api_s_inspect = cls.connections.api_server_inspect - cls.public_vn_obj = create_public_vn.PublicVn( - cls.__name__, - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - # end setUpClass - - @classmethod - def tearDownClass(cls): - cls.isolated_creds.delete_tenant() - super(BaseNeutronTest, cls).tearDownClass() - # end tearDownClass - - def create_vn(self, vn_name=None, vn_subnets=None, vxlan_id=None, - enable_dhcp=True, cleanup=True): - if not vn_name: - vn_name = get_random_name('vn') - if not vn_subnets: - vn_subnets = [get_random_cidr()] - vn_fixture = VNFixture(project_name=self.inputs.project_name, - connections=self.connections, - inputs=self.inputs, - vn_name=vn_name, - subnets=vn_subnets, - vxlan_id=vxlan_id, - enable_dhcp=enable_dhcp) - vn_fixture.setUp() - if cleanup: - self.addCleanup(vn_fixture.cleanUp) - - return vn_fixture - # end create_vn - - def create_vm(self, vn_fixture, vm_name=None, node_name=None, - flavor='contrail_flavor_small', - image_name='ubuntu-traffic', - port_ids=[]): - if not vm_name: - vm_name = 'vm-%s' % (get_random_name(vn_fixture.vn_name)) - return self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn_fixture.obj, - 
vm_name=vm_name, - image_name=image_name, - flavor=flavor, - node_name=node_name, - port_ids=port_ids)) - - def create_router(self, router_name, tenant_id=None): - obj = self.quantum_h.create_router(router_name, tenant_id) - if obj: - self.addCleanup(self.quantum_h.delete_router, obj['id']) - return obj - - def delete_router(self, router_id=None): - val = self.quantum_h.delete_router(router_id) - - def create_port(self, net_id, fixed_ips=[], - mac_address=None, no_security_group=False, - security_groups=[], extra_dhcp_opts=None): - port_rsp = self.quantum_h.create_port( - net_id, - fixed_ips, - mac_address, - no_security_group, - security_groups, - extra_dhcp_opts) - self.addCleanup(self.delete_port, port_rsp['id'], quiet=True) - return port_rsp - - def delete_port(self, port_id, quiet=False): - self._remove_from_cleanup(self.quantum_h.delete_port, (port_id)) - if quiet and not self.quantum_h.get_port(port_id): - return - self.quantum_h.delete_port(port_id) - - def update_port(self, port_id, port_dict): - if not self.quantum_h.get_port(port_id): - self.logger.error('Port with port_id %s not found' % port_id) - return - else: - port_rsp = self.quantum_h.update_port(port_id, port_dict) - return port_rsp - - def add_router_interface(self, router_id, subnet_id=None, port_id=None, - cleanup=True): - if subnet_id: - result = self.quantum_h.add_router_interface( - router_id, subnet_id) - elif port_id: - result = self.quantum_h.add_router_interface(router_id, - port_id=port_id) - - if cleanup: - self.addCleanup(self.delete_router_interface, - router_id, subnet_id, port_id) - return result - - def delete_router_interface(self, router_id, subnet_id=None, port_id=None): - self._remove_from_cleanup(self.delete_router_interface, - (router_id, subnet_id, port_id)) - self.quantum_h.delete_router_interface( - router_id, subnet_id, port_id) - - def add_vn_to_router(self, router_id, vn_fixture, cleanup=True): - return self.add_router_interface( - router_id, - 
subnet_id=vn_fixture.vn_subnet_objs[0]['id'], cleanup=cleanup) - - def delete_vn_from_router(self, router_id, vn_fixture): - return self.delete_router_interface( - router_id, - vn_fixture.vn_subnet_objs[0]['id']) - - def create_security_group(self, name, quantum_handle=None): - q_h = None - if quantum_handle: - q_h = quantum_handle - else: - q_h = self.quantum_h - obj = q_h.create_security_group(name) - if obj: - self.addCleanup(self.delete_security_group, obj['id']) - return obj - # end create_security_group - - def delete_security_group(self, sg_id, quantum_handle=None): - q_h = None - if quantum_handle: - q_h = quantum_handle - else: - q_h = self.quantum_h - q_h.delete_security_group(sg_id) - - def update_default_quota_list( - self, - subnet=-1, - virtual_network=-1, - floating_ip=-1, - logical_router=-1, - security_group_rule=-1, - virtual_machine_interface=-1, - security_group=-1): - contrail_api_file_list = [] - - # Copy the contrail-api.conf to /tmp/ and restore it later - - for cfgm_ip in self.inputs.cfgm_ips: - api_file_name = get_random_name('contrail-api') - contrail_api_file_list.append(api_file_name) - issue_cmd = "cp " + contrail_api_conf + " /tmp/" + \ - api_file_name - output = self.inputs.run_cmd_on_server( - cfgm_ip, - issue_cmd, - self.inputs.host_data[cfgm_ip]['username'], - self.inputs.host_data[cfgm_ip]['password']) - - self.addCleanup( - self.restore_default_quota_list, - contrail_api_file_list) - - # Fetch the contrail-api.conf from all config nodes to active cfgm's - # /tmp/ - - api_file_list = [] - api_file_list.append(contrail_api_conf) - for cfgm_ip in self.inputs.cfgm_ips[1:]: - with settings( - host_string='%s@%s' % ( - self.inputs.host_data[cfgm_ip]['username'], cfgm_ip)): - api_conf_file = get_random_name('contrail-api-remote') - api_file_list.append('/tmp/' + api_conf_file) - get(contrail_api_conf, '/tmp/' + api_conf_file) - - # Edit the contrail-api.conf files adding quota sections - - for api_conf in api_file_list: - api_conf_h = 
open(api_conf, 'a') - config = ConfigParser.ConfigParser() - config.add_section('QUOTA') - config.set('QUOTA', 'subnet', subnet) - config.set('QUOTA', 'virtual_network', virtual_network) - config.set('QUOTA', 'floating_ip', floating_ip) - config.set('QUOTA', 'logical_router', logical_router) - config.set('QUOTA', 'security_group', security_group) - config.set('QUOTA', 'security_group_rule', security_group_rule) - config.set( - 'QUOTA', - 'virtual_machine_interface', - virtual_machine_interface) - config.write(api_conf_h) - api_conf_h.close() - - # Put back updated contrail-api.conf file to respective cfgm's remove - # temp files - - count = 1 - for cfgm_ip in self.inputs.cfgm_ips[1:]: - with settings( - host_string='%s@%s' % ( - self.inputs.host_data[cfgm_ip]['username'], cfgm_ip)): - put(api_file_list[count], contrail_api_conf) - issue_cmd = "rm -rf " + api_file_list[count] - output = self.inputs.run_cmd_on_server( - cfgm_ip, - issue_cmd, - self.inputs.host_data[cfgm_ip]['username'], - self.inputs.host_data[cfgm_ip]['password']) - count = count + 1 - - # Restart contrail-api service on all cfgm nodes - - for cfgm_ip in self.inputs.cfgm_ips: - self.inputs.restart_service('contrail-api', [cfgm_ip]) - - time.sleep(30) - - # end update_default_quota_list - - def restore_default_quota_list(self, file_list=[]): - # Restore default contrail-api.conf on respective cfgm nodes remove - # temp files - - file_itr = iter(file_list) - for cfgm_ip in self.inputs.cfgm_ips: - api_conf_backup = next(file_itr) - issue_cmd = "cp /tmp/" + api_conf_backup + \ - " " + contrail_api_conf + "; rm -rf /tmp/" + api_conf_backup - output = self.inputs.run_cmd_on_server( - cfgm_ip, - issue_cmd, - self.inputs.host_data[cfgm_ip]['username'], - self.inputs.host_data[cfgm_ip]['password']) - - for cfgm_ip in self.inputs.cfgm_ips: - self.inputs.restart_service('contrail-api', [cfgm_ip]) - - time.sleep(30) - - # end restore_default_quota_list - - def create_external_network(self, connections, inputs): 
- ext_vn_name = get_random_name('ext_vn') - ext_subnets = [self.inputs.fip_pool] - mx_rt = self.inputs.mx_rt - ext_vn_fixture = self.useFixture( - VNFixture( - project_name=inputs.project_name, - connections=connections, - vn_name=ext_vn_name, - inputs=inputs, - subnets=ext_subnets, - router_asn=self.inputs.router_asn, - rt_number=mx_rt, - router_external=True)) - assert ext_vn_fixture.verify_on_setup() - return ext_vn_fixture - - # end create_external_network - - def allow_default_sg_to_allow_all_on_project(self, project_name): - - self.project_fixture = self.useFixture( - ProjectFixture( - vnc_lib_h=self.vnc_lib, - project_name=self.inputs.project_name, - connections=self.connections)) - self.logger.info( - 'Default SG to be edited for allow all on project: %s' % - project_name) - self.project_fixture.set_sec_group_for_allow_all( - project_name, 'default') - - # end allow_default_sg_to_allow_all_on_project - - def verify_snat(self, vm_fixture, expectation=True, timeout=200): - result = True - self.logger.info("Ping to 8.8.8.8 from vm %s" % (vm_fixture.vm_name)) - if not vm_fixture.ping_with_certainty('8.8.8.8', - expectation=expectation): - self.logger.error("Ping to 8.8.8.8 from vm %s Failed" % - (vm_fixture.vm_name)) - result = result and False - self.logger.info('Testing FTP...Copying VIM files to VM via FTP') - run_cmd = "wget http://ftp.vim.org/pub/vim/unix/vim-7.3.tar.bz2" - vm_fixture.run_cmd_on_vm(cmds=[run_cmd], timeout=timeout) - output = vm_fixture.return_output_values_list[0] - if not output or 'saved' not in output: - self.logger.error("FTP failed from VM %s" % - (vm_fixture.vm_name)) - result = result and False - else: - self.logger.info("FTP successful from VM %s via FIP" % - (vm_fixture.vm_name)) - return result - # end verify_snat - - def get_active_snat_node(self, vm_fixture, vn_fixture): - (domain, project, vn) = vn_fixture.vn_fq_name.split(':') - inspect_h = self.agent_inspect[vm_fixture.vm_node_ip] - agent_vrf_objs = 
inspect_h.get_vna_vrf_objs(domain, project, vn) - agent_vrf_obj = vm_fixture.get_matching_vrf( - agent_vrf_objs['vrf_list'], vn_fixture.vrf_name) - vn_vrf_id9 = agent_vrf_obj['ucindex'] - next_hops = inspect_h.get_vna_active_route( - vrf_id=vn_vrf_id9, ip=vm_fixture.vm_ip, prefix='32')['path_list'][0]['nh'] - if next_hops['type'] == 'interface': - return vm_fixture.vm_node_ip - else: - return next_hops['itf'] - # end get_active_snat_node - - def config_aap(self, port1, port2, ip, vsrx=False): - self.logger.info('Configuring AAP on ports %s and %s' % - (port1['id'], port2['id'])) -# port1_dict = {'allowed_address_pairs': [ -# {"ip_address": ip + '/32', "mac_address": port1['mac_address']}]} -# port2_dict = {'allowed_address_pairs': [ -# {"ip_address": ip + '/32', "mac_address": port2['mac_address']}]} - if vsrx: - port1_dict = {'allowed_address_pairs': [ - {"ip_address": ip + '/32', "mac_address": '00:00:5e:00:01:01'}]} - port2_dict = {'allowed_address_pairs': [ - {"ip_address": ip + '/32', "mac_address": '00:00:5e:00:01:01'}]} - else: - port1_dict = {'allowed_address_pairs': [ - {"ip_address": ip + '/32'}]} - port2_dict = {'allowed_address_pairs': [ - {"ip_address": ip + '/32'}]} - port1_rsp = self.update_port(port1['id'], port1_dict) - port2_rsp = self.update_port(port2['id'], port2_dict) - return True - # end config_aap - - def config_vrrp_on_vsrx(self, vm_fix, vip, priority): - cmdList = [] - cmdList.append('deactivate security nat source rule-set TestNat') - cmdList.append( - 'deactivate interfaces ge-0/0/1 unit 0 family inet filter') - cmdList.append('deactivate interfaces ge-0/0/1.0 family inet dhcp') - cmdList.append('deactivate security policies') - cmdList.append( - 'set security forwarding-options family inet6 mode packet-based') - cmdList.append( - 'set security forwarding-options family mpls mode packet-based') - cmdList.append( - 'set security forwarding-options family iso mode packet-based') - vsrx_vrrp_config = ['set interfaces ge-0/0/1.0 family inet 
address ' + vm_fix.vm_ips[ - 1] + '/' + '24 vrrp-group 1 priority ' + priority + ' virtual-address ' + vip + ' accept-data'] - cmdList = cmdList + vsrx_vrrp_config - cmd_string = (';').join(cmdList) - result = vm_fix.config_via_netconf(cmds=cmd_string) - return result - - @retry(delay=5, tries=10) - def config_vrrp(self, vm_fix, vip, priority): - self.logger.info('Configuring VRRP on %s ' % vm_fix.vm_name) - vrrp_cmd = 'nohup vrrpd -n -D -i eth0 -v 1 -a none -p %s -d 3 %s' % ( - priority, vip) - vm_fix.run_cmd_on_vm(cmds=[vrrp_cmd], as_sudo=True) - result = self.vrrp_chk(vm_fix) - return result - # end config_vrrp - - def vrrp_chk(self, vm): - vrrp_chk_cmd = 'netstat -anp | grep vrrpd' - vm.run_cmd_on_vm(cmds=[vrrp_chk_cmd], as_sudo=True) - vrrp_op = vm.return_output_cmd_dict[vrrp_chk_cmd] - if '/vrrpd' in vrrp_op: - result = True - self.logger.info('vrrpd running in %s' % vm.vm_name) - else: - result = False - self.logger.error('vrrpd not running in %s' % vm.vm_name) - return result - # end vrrp_mas_chk - - @retry(delay=5, tries=10) - def vrrp_mas_chk(self, vm, vn, ip, vsrx=False): - self.logger.info( - 'Will verify who the VRRP master is and the corresponding route entries in the Agent') - if vsrx: - vrrp_mas_chk_cmd = 'show vrrp' - result = vm.config_via_netconf(cmds=vrrp_mas_chk_cmd) - if 'master' in result: - self.logger.info( - '%s is selected as the VRRP Master' % vm.vm_name) - result = True - else: - result = False - self.logger.error('VRRP Master not selected') - else: - vrrp_mas_chk_cmd = 'ip -4 addr ls' - vm.run_cmd_on_vm(cmds=[vrrp_mas_chk_cmd], as_sudo=True) - output = vm.return_output_cmd_dict[vrrp_mas_chk_cmd] - result = False - if ip in output: - self.logger.info( - '%s is selected as the VRRP Master' % vm.vm_name) - result = True - else: - result = False - self.logger.error('VRRP Master not selected') - inspect_h = self.agent_inspect[vm.vm_node_ip] - (domain, project, vnw) = vn.vn_fq_name.split(':') - agent_vrf_objs = 
inspect_h.get_vna_vrf_objs(domain, project, vnw) - agent_vrf_obj = vm.get_matching_vrf( - agent_vrf_objs['vrf_list'], vn.vrf_name) - vn1_vrf_id = agent_vrf_obj['ucindex'] - paths = inspect_h.get_vna_active_route( - vrf_id=vn1_vrf_id, ip=ip, prefix='32')['path_list'] - for path in paths: - if path['peer'] == 'LocalVmPort' and path['path_preference_data']['wait_for_traffic'] == 'false': - result = True - break - else: - result = False - return result - # end vrrp_mas_chk - - @retry(delay=5, tries=10) - def verify_vrrp_action(self, src_vm, dst_vm, ip, vsrx=False): - result = False - self.logger.info('Will ping %s from %s and check if %s responds' % ( - ip, src_vm.vm_name, dst_vm.vm_name)) - compute_ip = dst_vm.vm_node_ip - compute_user = self.inputs.host_data[compute_ip]['username'] - compute_password = self.inputs.host_data[compute_ip]['password'] - session = ssh(compute_ip, compute_user, compute_password) - if vsrx: - vm_tapintf = dst_vm.tap_intf[dst_vm.vn_fq_names[1]]['name'] - else: - vm_tapintf = dst_vm.tap_intf[dst_vm.vn_fq_name]['name'] - cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % ( - vm_tapintf, vm_tapintf) - execute_cmd(session, cmd, self.logger) - assert src_vm.ping_with_certainty(ip), 'Ping to vIP failure' - output_cmd = 'cat /tmp/%s_out.log' % vm_tapintf - output, err = execute_cmd_out(session, output_cmd, self.logger) - if ip in output: - result = True - self.logger.info( - '%s is seen responding to ICMP Requests' % dst_vm.vm_name) - else: - self.logger.error('ICMP Requests not seen on the VRRP Master') - result = False - return result - # end verify_vrrp_sction - - def _remove_from_cleanup(self, func_call, *args): - for cleanup in self._cleanups: - if func_call in cleanup and args == cleanup[1]: - self._cleanups.remove(cleanup) - return True - return False - - def create_lb_pool(self, name, lb_method, protocol, subnet_id): - lb_pool_resp = None - lb_pool_resp = self.quantum_h.create_lb_pool( - name, lb_method, protocol, subnet_id) - if 
lb_pool_resp: - self.addCleanup(self.verify_on_pool_delete, lb_pool_resp['id']) - self.addCleanup(self.quantum_h.delete_lb_pool, - lb_pool_resp['id']) - return lb_pool_resp - # end create_lb_pool - - def verify_on_pool_delete(self, pool_id): - result, msg = self.verify_pool_not_in_api_server(pool_id) - assert result, msg - - @retry(delay=10, tries=10) - def verify_pool_not_in_api_server(self, pool_id): - pool = self.api_s_inspect.get_lb_pool(pool_id, refresh=True) - if pool: - self.logger.warn("pool with pool id %s still present in API" - " server even after pool delete.retrying..." % (pool_id)) - errmsg = "API server verification failed for pool with pool id %s" % ( - pool_id) - return False, errmsg - self.logger.debug( - "pool with pool id %s not present in API server" % (pool_id)) - return True, None - - def create_lb_member(self, ip_address, protocol_port, pool_id): - lb_member_resp = None - lb_member_resp = self.quantum_h.create_lb_member( - ip_address, protocol_port, pool_id) - if lb_member_resp: - self.addCleanup(self.verify_on_member_delete, lb_member_resp['id']) - self.addCleanup(self.quantum_h.delete_lb_member, - lb_member_resp['id']) - return lb_member_resp - # end create_lb_member - - def verify_on_member_delete(self, member_id): - result, msg = self.verify_member_not_in_api_server(member_id) - assert result, msg - - @retry(delay=10, tries=10) - def verify_member_not_in_api_server(self, member_id): - member = self.api_s_inspect.get_lb_member(member_id) - if member: - self.logger.warn("member with member id %s still present in API" - " server even after member delete" % (member_id)) - errmsg = "API server verification failed for member with member id %s" % ( - member_id) - assert False, errmsg - self.logger.debug( - "member with member id %s not present in API server" % (member_id)) - return True, None - - def create_health_monitor(self, delay, max_retries, probe_type, timeout): - hm_resp = None - hm_resp = self.quantum_h.create_health_monitor( - delay, 
max_retries, probe_type, timeout) - if hm_resp: - self.addCleanup(self.verify_on_healthmonitor_delete, hm_resp['id']) - self.addCleanup(self.quantum_h.delete_health_monitor, - hm_resp['id']) - return hm_resp - # end create_health_monitor - - def verify_on_healthmonitor_delete(self, healthmonitor_id): - result, msg = self.verify_healthmonitor_not_in_api_server( - healthmonitor_id) - assert result, msg - - @retry(delay=10, tries=10) - def verify_healthmonitor_not_in_api_server(self, healthmonitor_id): - healthmonitor = self.api_s_inspect.get_lb_healthmonitor( - healthmonitor_id) - if healthmonitor: - self.logger.warn("healthmonitor with id %s still present in API" - " server even after healthmonitor delete" % (healthmonitor_id)) - errmsg = "API server verification failed for healthmonitor with id %s" % ( - healthmonitor_id) - assert False, errmsg - self.logger.debug( - "healthmonitor with id %s not present in API server" % (healthmonitor_id)) - return True, None - - def create_vip(self, name, protocol, protocol_port, subnet_id, pool_id): - vip_resp = None - vip_resp = self.quantum_h.create_vip( - name, protocol, protocol_port, pool_id, subnet_id) - if vip_resp: - self.addCleanup(self.verify_on_vip_delete, pool_id, vip_resp['id']) - self.addCleanup(self.quantum_h.delete_vip, - vip_resp['id']) - return vip_resp - # end create_vip - - def verify_on_vip_delete(self, pool_id, vip_id): - result = True - result, msg = self.verify_vip_delete(vip_id) - assert result, msg - for compute_ip in self.inputs.compute_ips: - result, msg = self.verify_netns_delete(compute_ip, pool_id) - assert result, msg - for compute_ip in self.inputs.compute_ips: - result, msg = self.verify_haproxy_kill(compute_ip, pool_id) - assert result, msg - result, msg = self.verify_vip_not_in_api_server(vip_id) - assert result, msg - # end verify_on_vip_delete - - @retry(delay=10, tries=10) - def verify_vip_delete(self, vip_id): - vip = self.quantum_h.show_vip(vip_id) - if vip: - errmsg = "vip %s still 
exists after delete" % vip_id - self.logger.error(errmsg) - return (False, errmsg) - self.logger.debug("vip %s deleted successfully" % vip_id) - return (True, None) - # end verify_vip_delete - - @retry(delay=10, tries=10) - def verify_netns_delete(self, compute_ip, pool_id): - cmd = 'ip netns list | grep %s' % pool_id - pool_obj = self.quantum_h.get_lb_pool(pool_id) - out = self.inputs.run_cmd_on_server( - compute_ip, cmd, - self.inputs.host_data[compute_ip]['username'], - self.inputs.host_data[compute_ip]['password']) - if out: - self.logger.warn("NET NS: %s still present for pool name: %s with UUID: %s" - " even after VIP delete in compute node %s" - % (out, pool_obj['name'], pool_id, compute_ip)) - errmsg = "NET NS still present after vip delete, failed in compute %s" % compute_ip - return False, errmsg - self.logger.debug("NET NS deleted successfully for pool name: %s with" - " UUID :%s in compute node %s" % (pool_obj['name'], pool_id, compute_ip)) - return True, None - # end verify_netns_delete - - @retry(delay=10, tries=10) - def verify_haproxy_kill(self, compute_ip, pool_id): - cmd = 'ps -aux | grep loadbalancer | grep %s' % pool_id - pool_obj = self.quantum_h.get_lb_pool(pool_id) - pid = [] - out = self.inputs.run_cmd_on_server( - compute_ip, cmd, - self.inputs.host_data[compute_ip]['username'], - self.inputs.host_data[compute_ip]['password']) - output = out.split('\n') - for out in output: - match = re.search("nobody\s+(\d+)\s+", out) - if match: - pid.append(match.group(1)) - if pid: - self.loger.warn("haproxy still running even after VIP delete for pool name: %s," - " with UUID: %s in compute node %s" % (pool_obj['name'], pool_id, compute_ip)) - errmsg = "HAPROXY still running after VIP delete failed in compute node %s" % ( - compute_ip) - return False, errmsg - self.logger.debug("haproxy process got killed successfully with vip delete for pool" - " name: %s UUID :%s on compute %s" % (pool_obj['name'], pool_id, compute_ip)) - return True, None - # end 
verify_haproxy_kill - - @retry(delay=10, tries=10) - def verify_vip_not_in_api_server(self, vip_id): - vip = self.api_s_inspect.get_lb_vip(vip_id) - if vip: - self.logger.warn("vip with vip id %s still present in API" - " server even after vip delete" % (vip_id)) - errmsg = "API server verification failed for vip with id %s" % ( - vip_id) - return False, errmsg - self.logger.debug( - "vip with vip id %s not present in API server" % (vip_id)) - #msg = "vip with vip id %s not present in API server" % (vip_id) - return True, None - - def associate_health_monitor(self, pool_id, hm_id): - hm_resp = self.quantum_h.associate_health_monitor( - pool_id, hm_id) - if hm_resp: - self.addCleanup(self.verify_on_disassociate_health_monitor, - pool_id, hm_id) - self.addCleanup(self.quantum_h.disassociate_health_monitor, - pool_id, hm_id) - # end associate_health_monitor - - def verify_on_disassociate_health_monitor(self, pool_id, hm_id): - result, msg = self.verify_disassociate_health_monitor(pool_id, hm_id) - assert result, msg - # end verify_on_disassociate_health_monitor - - @retry(delay=10, tries=10) - def verify_disassociate_health_monitor(self, pool_id, hm_id): - pool = self.api_s_inspect.get_lb_pool(pool_id) - try: - healthmonitor_refs = pool[ - 'loadbalancer-pool']['loadbalancer_healthmonitor_refs'] - for href in healthmonitor_refs: - if href['uuid'] == healthmonitor_id: - self.logger.warn("healthmonitor with id %s associated with pool" - " %s" % (healthmonitor_id, pool['loadbalancer-pool']['name'])) - errmsg = ("API server verification failed, health monitor %s still associated" - " with pool %s" % (healthmonitor_id, ool['loadbalancer-pool']['name'])) - return False, errmsg - else: - self.logger.debug("healthmonitor with id %s successfully disassociated with pool" - " %s" % (healthmonitor_id, pool['loadbalancer-pool']['name'])) - return True, None - except KeyError: - self.logger.debug("healthmonitor refs not found in API server for pool %s" - % 
(pool['loadbalancer-pool']['name'])) - return True, None - # end verify_disassociate_health_monitor - - def remove_method_from_cleanups(self, method): - for cleanup in self._cleanups: - if method == cleanup: - self._cleanups.remove(cleanup) - break - # end remove_from_cleanups - - def extend_vn_to_physical_router(self, vn_fixture, phy_router_fixture): - # Attach VN to router in Contrail API so that Device manager - # can configure the device - phy_router_fixture.add_virtual_network(vn_fixture.vn_id) - self.addCleanup(self.delete_vn_from_physical_router, vn_fixture, - phy_router_fixture) - # end extend_vn_to_physical_router - - def delete_vn_from_physical_router(self, vn_fixture, phy_router_fixture): - # Disassociate VN from the physical router so that Device manager - # can delete corresponding configs from the device - phy_router_fixture.delete_virtual_network(vn_fixture.vn_id) - # end delete_vn_from_physical_router - - def allow_all_traffic_between_vns(self, vn1_fixture, vn2_fixture): - policy_name = get_random_name('policy-allow-all') - rules = [ - { - 'direction': '<>', 'simple_action': 'pass', - 'protocol': 'any', - 'source_network': vn1_fixture.vn_name, - 'dest_network': vn2_fixture.vn_name, - }, - ] - policy_fixture = self.useFixture( - PolicyFixture( - policy_name=policy_name, rules_list=rules, inputs=self.inputs, - connections=self.connections)) - - vn1_fixture.bind_policies( - [policy_fixture.policy_fq_name], vn1_fixture.vn_id) - self.addCleanup(vn1_fixture.unbind_policies, - vn1_fixture.vn_id, [policy_fixture.policy_fq_name]) - - vn2_fixture.bind_policies( - [policy_fixture.policy_fq_name], vn2_fixture.vn_id) - self.addCleanup(vn2_fixture.unbind_policies, - vn2_fixture.vn_id, [policy_fixture.policy_fq_name]) - # end allow_all_traffic_between_vns - - def create_dhcp_server_vm(self, - vn1_fixture, - vn2_fixture, - vm_name=None, - node_name=None, - flavor='contrail_flavor_large', - image_name='ubuntu-dhcpdns-server', - port_ids=[]): - if not vm_name: - 
vm_name = get_random_name('dhcp-server') - vm_fixture = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_objs=[vn1_fixture.obj, vn2_fixture.obj], - vm_name=vm_name, - image_name=image_name, - flavor=flavor, - node_name=node_name, - port_ids=port_ids)) - assert vm_fixture.verify_on_setup(), ( - "DHCP Server VM Verification failed ") - assert vm_fixture.wait_till_vm_is_up() - vn2_fq_name = vn2_fixture.vn_fq_name - vm_ip = vm_fixture.vm_ip_dict[vn2_fq_name][0] - cmds = ['ifconfig eth1 up', - 'ifconfig eth1 %s netmask 255.255.255.0' % (vm_ip), - 'service isc-dhcp-server restart'] - vm_fixture.run_cmd_on_vm(cmds, as_sudo=True) - time.sleep(5) - return vm_fixture - - # end create_dhcp_server_vm - - def setup_vmi(self, vn_id, fixed_ips=[], - mac_address=None, - security_groups=[], - extra_dhcp_opts=[], - cleanup=True): - if mac_address: - mac_address = EUI(mac_address) - mac_address.dialect = mac_unix - port_fixture = PortFixture( - vn_id, - mac_address=mac_address, - fixed_ips=fixed_ips, - security_groups=security_groups, - extra_dhcp_opts=extra_dhcp_opts, - connections=self.connections, - ) - port_fixture.setUp() - if cleanup: - self.addCleanup(port_fixture.cleanUp) - return port_fixture - # end setup_vmi - - def do_ping_test(self, fixture_obj, sip, dip, expectation=True): - assert fixture_obj.ping_with_certainty(dip, expectation=expectation),\ - 'Ping from %s to %s with expectation %s failed!' 
% ( - sip, dip, str(expectation)) - self.logger.info('Ping test from %s to %s with expectation %s passed' % (sip, - dip, str(expectation))) - # end do_ping_test - diff --git a/common/neutron/lbaas/__init__.py b/common/neutron/lbaas/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/common/neutron/lbaas/base.py b/common/neutron/lbaas/base.py deleted file mode 100644 index a55535aef..000000000 --- a/common/neutron/lbaas/base.py +++ /dev/null @@ -1,282 +0,0 @@ -from common.neutron.base import BaseNeutronTest -from tcutils.commands import ssh, execute_cmd, execute_cmd_out -from fabric.context_managers import settings, hide -from tcutils.util import run_fab_cmd_on_node, retry -import re -from time import sleep - -class BaseTestLbaas(BaseNeutronTest): - - @classmethod - def setUpClass(cls): - super(BaseTestLbaas, cls).setUpClass() - - @classmethod - def tearDownClass(cls): - super(BaseTestLbaas, cls).tearDownClass() - - - def verify_active_standby(self, compute_ips, pool_uuid): - cmd1 = 'ip netns list | grep %s' % pool_uuid - cmd2 = 'ps -aux | grep loadbalancer | grep %s' % pool_uuid - netns_list = {} - haproxy_pid = {} - result = True - errmsg = [] - for compute_ip in compute_ips: - out = self.inputs.run_cmd_on_server( - compute_ip, cmd1, - self.inputs.host_data[compute_ip]['username'], - self.inputs.host_data[compute_ip]['password']) - output = [] if out == '' else out.strip().split('\n') - if not output: - self.logger.warn("'ip netns list' with the pool id %s returned no output. 
" - "NET NS is not created on node %s" - % (pool_uuid, compute_ip)) - continue - if len(output) != 1: - self.logger.error("More than one NET NS found for pool with id %s" - " on node %s" % (pool_uuid, compute_ip)) - return False, ('Found more than one NETNS (%s) while' - 'expecting one with pool ID (%s) in node %s' - % (output, pool_uuid, compute_ip)) - - netns_list[compute_ip] = output[0] - out = self.inputs.run_cmd_on_server( - compute_ip, cmd2, - self.inputs.host_data[compute_ip]['username'], - self.inputs.host_data[compute_ip]['password']) - pid = [] - output = out.split('\n') - for out in output: - match = re.search("nobody\s+(\d+)\s+",out) - if match: - pid.append(match.group(1)) - if not pid: - self.logger.error("Haproxy seems to be not running when checked with pool id %s" - " on node %s" % (pool_uuid, compute_ip)) - return False, "Haproxy not running in compute node %s" % (compute_ip) - if len(pid) != 1: - self.logger.debug("More than one instance of haproxy running for pool with id %s" - " on node %s" % (pool_uuid, compute_ip)) - return False, ('Found more than one instance of haproxy running while' - ' expecting one with pool ID (%s) in node %s' - % (pool_uuid, compute_ip)) - haproxy_pid[compute_ip] = pid - - self.logger.info("Created net ns: %s" % (netns_list.values())) - if len(self.inputs.compute_ips) >= 2: - if len(netns_list.values()) == 2: - self.logger.info('More than 1 compute in setup: Active and Standby nets got' - ' created on compute nodes: (%s)' % (netns_list.keys())) - else: - errmsg.append("More than 1 compute in setup: " - "2 netns did not get created for Active and Standby") - result = False - if len(haproxy_pid.values()) == 2: - self.logger.info('More than 1 compute in setup: Active and Standby haproxy running on' - ' compute node: (%s)' % (haproxy_pid.keys())) - else: - errmsg.append("More than 1 compute in setup: " - "Haproxy not running in 2 computes for Active and Standby") - result = False - else: - if(netns_list.values()): - 
self.logger.info('one compute in setup, sinlge netns got created' - ' on compute:(%s)' % (netns_list.keys())) - else: - errmsg.append("NET NS didnot get created") - result = False - if(haproxy_pid.values()): - self.logger.info('one compute in setup, haproxy running on' - ' compute:(%s)' % (haproxy_pid.keys())) - else: - errmsg.append("haproxy not running on compute node") - result = False - - return (result,errmsg) - - def start_simpleHTTPserver(self, servers): - output = '' - for server in servers: - with hide('everything'): - with settings(host_string='%s@%s' % (self.inputs.username,server.vm_node_ip), - password=self.inputs.password, warn_only=True,abort_on_prompts= False): - cmd1 = 'sudo hostname > index.html' - cmd2 = 'sudo python -m SimpleHTTPServer 80 & sleep 600' - output = run_fab_cmd_on_node(host_string = '%s@%s'%(server.vm_username,server.local_ip), - password = server.vm_password, cmd = cmd1, as_sudo=False) - output = run_fab_cmd_on_node(host_string = '%s@%s'%(server.vm_username,server.local_ip), - password = server.vm_password, cmd = cmd2, as_sudo=False, timeout=2) - return - - def run_wget(self, vm, vip): - response = '' - out = '' - result = False - with hide('everything'): - with settings(host_string='%s@%s' % (self.inputs.username,vm.vm_node_ip), - password=self.inputs.password, warn_only=True,abort_on_prompts= False): - cmd1 = 'sudo wget http://%s' % vip - cmd2 = 'cat index.html' - cmd3 = 'rm -rf index.html' - result = run_fab_cmd_on_node(host_string = '%s@%s'%(vm.vm_username,vm.local_ip), - password = vm.vm_password, cmd = cmd1, as_sudo=False) - if result.count('200 OK'): - result = True - self.logger.info("connections to vip %s successful" % (vip)) - response = run_fab_cmd_on_node(host_string = '%s@%s'%(vm.vm_username,vm.local_ip), - password = vm.vm_password, cmd = cmd2, as_sudo=False) - out = run_fab_cmd_on_node(host_string = '%s@%s'%(vm.vm_username,vm.local_ip), - password = vm.vm_password, cmd = cmd3, as_sudo=False) - 
self.logger.info("Request went to server: %s" % (response)) - else: - self.logger.error("Error in response on connecting to vip %s. Error is %s" % (vip, result)) - result = False - return (result,response) - #end run_wget - - def get_netns_left_intf(self, server_ip, pool_uuid): - cmd = 'ip netns list | grep %s' % pool_uuid - left_int = '' - out = self.inputs.run_cmd_on_server( - server_ip, cmd, - self.inputs.host_data[server_ip]['username'], - self.inputs.host_data[server_ip]['password']) - pattern = "vrouter-((\w+-)+\w+):" - match = re.match(pattern, out) - if match: - netns = match.group(1) - inspect_h = self.agent_inspect[server_ip] - for tapint in inspect_h.get_vna_tap_interface_by_vm(netns): - if 'left interface' in tapint['vm_name']: - left_int = tapint['name'] - return left_int - - def start_tcpdump(self, server_ip, tap_intf): - session = ssh(server_ip,self.inputs.host_data[server_ip]['username'],self.inputs.host_data[server_ip]['password']) - pcap = '/tmp/%s.pcap' % tap_intf - cmd = "tcpdump -nei %s tcp -w %s" % (tap_intf, pcap) - self.logger.info("Staring tcpdump to capture the packets on server %s" % (server_ip)) - execute_cmd(session, cmd, self.logger) - return pcap, session - - def stop_tcpdump(self,session, pcap): - self.logger.info("Waiting for the tcpdump write to complete.") - sleep(30) - cmd = 'kill $(pidof tcpdump)' - execute_cmd(session, cmd, self.logger) - cmd = 'tcpdump -r %s | wc -l' % pcap - out, err = execute_cmd_out(session, cmd, self.logger) - count = int(out.strip('\n')) - cmd = 'rm -f %s' % pcap - execute_cmd(session, cmd, self.logger) - return count - - def start_stop_service(self, server_ip, service, action): - cmd = "service %s %s" % (service, action) - out = self.inputs.run_cmd_on_server( - server_ip, cmd, - self.inputs.host_data[server_ip]['username'], - self.inputs.host_data[server_ip]['password']) - cmd = "service %s status" % (service) - output = self.inputs.run_cmd_on_server( - server_ip, cmd, - 
self.inputs.host_data[server_ip]['username'], - self.inputs.host_data[server_ip]['password']) - if action == 'stop' and 'STOPPED' in output: - self.logger.info("%s service stopped in server %s" % (service, server_ip)) - elif action == 'start' and 'RUNNING' in output: - self.logger.info("%s service running in server %s" % (service, server_ip)) - else: - self.logger.warn("requested action is %s for service %s, but current staus is %s" % (action, service, output)) - return - - @retry(delay=10, tries=20) - def verify_agent_process_active(self, vrouter_node): - try: - status = self.connections.ops_inspects[self.inputs.collector_ips[0]] \ - .get_ops_vrouter(vrouter_node)['NodeStatus']['process_status'][0]['state'] - if status == 'Functional': - self.logger.info("agent process is in active state in compute node %s" - % vrouter_node) - return True, None - except KeyError: - self.logger.warn("Agent process is still not in Active state in node %s." - "retrying.." % (vrouter_node)) - errmsg = ("Agent process not in active state in compute node %s " - % vrouter_node) - return False, errmsg - - @retry(delay=10, tries=10) - def verify_lb_pool_in_api_server(self,pool_id): - pool = self.api_s_inspect.get_lb_pool(pool_id) - if not pool: - self.logger.warn("pool with pool id %s not found in api server" % (pool_id)) - return False - self.logger.info("pool with pool id %s created successfully in api server" % (pool_id)) - return True - - @retry(delay=10, tries=10) - def verify_vip_in_api_server(self,vip_id): - vip = self.api_s_inspect.get_lb_vip(vip_id) - if not vip: - self.logger.warn("vip with vip id %s not found in api server" % (vip_id)) - return False - self.logger.info("vip with vip id %s created successfully in api server" % (vip_id)) - try: - if vip['virtual-ip']['virtual_machine_interface_refs']: - self.logger.info("virtual machine ref created successfully for VIP with id" - " %s" %(vip_id)) - except KeyError: - self.logger.warn("virtual machine ref not found in vip with id 
%s" - % (vip_id)) - return False - try: - if vip['virtual-ip']['loadbalancer_pool_refs']: - self.logger.info("pool ref created successfully for VIP with id %s" - % (vip_id)) - except KeyError: - self.logger.warn("pool ref not found in vip with id %s" % (vip_id)) - return False - return True - - @retry(delay=10, tries=10) - def verify_member_in_api_server(self,member_id): - member = self.api_s_inspect.get_lb_member(member_id) - if not member: - self.logger.warn("member with member id %s not found in api server" % (member_id)) - return False - self.logger.info("member with member id %s created successfully in api server" % (member_id)) - return True - - @retry(delay=10, tries=10) - def verify_healthmonitor_in_api_server(self,healthmonitor_id): - healthmonitor = self.api_s_inspect.get_lb_healthmonitor(healthmonitor_id) - if not healthmonitor: - self.logger.warn("healthmonitor with id %s not found in api server" % (healthmonitor_id)) - return False - self.logger.info("healthmonitor with id %s created successfully in api server" % (healthmonitor_id)) - return True - - @retry(delay=10, tries=10) - def verify_healthmonitor_association_in_api_server(self, pool_id, healthmonitor_id): - result = True - pool = self.api_s_inspect.get_lb_pool(pool_id) - healthmonitor_refs = pool['loadbalancer-pool']['loadbalancer_healthmonitor_refs'] - if not healthmonitor_refs: - errmsg = ("healthmonitor refs not found in API server for pool %s" - % (pool['loadbalancer-pool']['name'])) - self.logger.warn(errmsg) - return False, errmsg - self.logger.debug("healthmonitor refs found in API server for pool %s" - % (pool['loadbalancer-pool']['name'])) - for href in healthmonitor_refs: - if href['uuid'] == healthmonitor_id: - self.logger.debug("healthmonitor with id %s associated with pool" - " %s" % (healthmonitor_id, pool['loadbalancer-pool']['name'])) - else: - errmsg = ("healthmonitor with id %s not associated with pool" - " %s" % (healthmonitor_id, pool['loadbalancer-pool']['name'])) - result = 
False - return result, None diff --git a/common/neutron/neutron_util.py b/common/neutron/neutron_util.py deleted file mode 100644 index 623397246..000000000 --- a/common/neutron/neutron_util.py +++ /dev/null @@ -1,64 +0,0 @@ -import string -import random -import itertools -import uuid -from netaddr import IPAddress, IPNetwork -from tcutils.util import get_random_ip, get_random_cidr - - -def get_pool_dict_list(cidr, max_length=4): - # Assuming a /24 subnet in cidr - final_list = [] - cidr_object = IPNetwork(cidr) - subnets_list = list(cidr_object.subnet(29)) - random.shuffle(subnets_list) - list_length = random.randint(1, max_length) - - for i in range(0, list_length): - subnet = subnets_list[i] - pool_dict = {'start': str(subnet[0]), 'end': str(subnet[-1])} - final_list.append(pool_dict) - return final_list - - -def get_route_dict_list(cidr, max_length=4): - list_length = random.randint(1, max_length) - final_list = [] - - for i in range(0, list_length): - route_dict = {'destination': get_random_cidr(), - 'nexthop': str(get_random_ip(cidr))} - final_list.append(route_dict) - return final_list - - -def get_fixed_ip_dict_list(subnet_id, cidr, max_length=1): - # Need to test separately for a list of fixed ips - list_length = random.randint(1, max_length) - final_list = [] - for i in range(0, list_length): - fixed_ip_dict = {} - random_ip = get_random_ip(cidr) - if subnet_id is not None: - fixed_ip_dict['subnet_id'] = subnet_id - fixed_ip_dict['ip_address'] = random_ip - final_list.append(fixed_ip_dict) - return final_list - - -def get_random_ip_list(max_list_length=4): - list_length = random.randint(1, max_list_length) - final_list = [] - for i in range(0, list_length): - cidr = get_random_cidr() - random_ip = get_random_ip(cidr) - final_list.append(random_ip) - return final_list - - -def combos(list_obj): - all_combos = [] - for i in range(1, len(list_obj)): - all_combos += itertools.combinations(list_obj, i) - for j in all_combos: - yield j diff --git 
a/common/openstack_libs.py b/common/openstack_libs.py deleted file mode 100644 index fd233a2a3..000000000 --- a/common/openstack_libs.py +++ /dev/null @@ -1,61 +0,0 @@ -# import handling for quantum & neutron -try: - from quantumclient.quantum import client as quantum_client - from quantumclient.client import HTTPClient as quantum_http_client - from quantumclient.common import exceptions as quantum_exception - from quantumclient.common.exceptions import QuantumClientException as quantum_client_exception -except: - quantum_client = None - quantum_http_client = None - quantum_exception = None - quantum_client_exception = None - -try: - from neutronclient.neutron import client as neutron_client - from neutronclient.client import HTTPClient as neutron_http_client - from neutronclient.common import exceptions as neutron_exception - from neutronclient.common.exceptions import NeutronClientException as neutron_client_exception -except: - neutron_client = None - neutron_http_client = None - neutron_exception = None - neutron_client_exception = None - -network_client = quantum_client if quantum_client else neutron_client -network_http_client = quantum_http_client if quantum_http_client else neutron_http_client -network_exception = quantum_exception if quantum_exception else neutron_exception -if quantum_client_exception: - network_client_exception = quantum_client_exception -elif neutron_client_exception: - network_client_exception = neutron_client_exception -else: - network_client_exception = Exception - -# import handling for keystone -try: - from keystoneclient.v2_0 import client as ks_client - from keystoneclient import exceptions as ks_exceptions - from keystoneclient.auth.identity import v2 as ks_auth_identity_v2 - from keystoneclient import session as ks_session - import keystoneclient -except: - ks_client = None - ks_exceptions = None - keystoneclient = None - ks_auth_identity_v2 = None - ks_session = None - -# import handling for nova -try: - from novaclient import 
client as nova_client - from novaclient import exceptions as nova_exception -except: - nova_client = None - nova_exception = None - -# import handling for ceilometer -try: - from ceilometerclient import client as ceilo_client -except: - ceilo_client = None - diff --git a/common/policy/__init__.py b/common/policy/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/common/policy/config.py b/common/policy/config.py deleted file mode 100644 index 68ab3c495..000000000 --- a/common/policy/config.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Policy config utilities.""" - -import random - -import fixtures - -from policy_test import PolicyFixture - -from vnc_api.gen.resource_xsd import TimerType, SequenceType,\ - VirtualNetworkPolicyType -from vnc_api.vnc_api import NetworkPolicy - - -class AttachPolicyFixture(fixtures.Fixture): - - """Policy attach fixture to attach policy to Virtuak Networks.""" - - def __init__(self, inputs, connections, vn_fixture, policy_fixture, policy_type=None): - self.inputs = inputs - self.logger = self.inputs.logger - self.orch = connections.orch - self.vnc_lib = connections.vnc_lib - self.vn_fixture = vn_fixture - self.policy_fixture = policy_fixture - self.vn_obj = self.vnc_lib.virtual_network_read( - fq_name_str=self.vn_fixture.vn_fq_name) - self.policy_obj = self.vnc_lib.network_policy_read( - fq_name=self.policy_fixture.policy_fq_name) - seq = random.randint(1, 655535) - kwargs = {'sequence': SequenceType(seq, 0)} - if policy_type == 'dynamic': - kwargs.update({'timer': TimerType()}) - self.policy_type = VirtualNetworkPolicyType(**kwargs) - - def setUp(self): - self.logger.info("Attaching policy %s to vn %s", - self.policy_fixture.policy_name, self.vn_fixture.vn_name) - super(AttachPolicyFixture, self).setUp() - self.vn_obj.add_network_policy(self.policy_obj, self.policy_type) - self.vnc_lib.virtual_network_update(self.vn_obj) - # Required for verification by VNFixture in vn_test.py - policy = 
self.orch.get_policy(self.policy_fixture.policy_fq_name) - policy_name_objs = dict((policy_obj['policy']['name'], policy_obj) - for policy_obj in self.vn_fixture.policy_objs) - if isinstance(policy, NetworkPolicy): - policy_name = policy.fq_name[-1] - else: - policy_name = policy['policy']['name'] - if policy_name not in policy_name_objs.keys(): - self.vn_fixture.policy_objs.append(policy) - - def cleanUp(self): - self.logger.info("Dettaching policy %s from vn %s", - self.policy_fixture.policy_name, self.vn_fixture.vn_name) - super(AttachPolicyFixture, self).cleanUp() - self.vn_obj.del_network_policy(self.policy_obj) - self.vnc_lib.virtual_network_update(self.vn_obj) - # Required for verification by VNFixture in vn_test.py - policy = self.orch.get_policy(self.policy_fixture.policy_fq_name) - if isinstance(policy, NetworkPolicy): - policy_name = policy.fq_name[-1] - policy_name_objs = dict((policy_obj.fq_name[-1], policy_obj) - for policy_obj in self.vn_fixture.policy_objs) - else: - policy_name = policy['policy']['name'] - policy_name_objs = dict((policy_obj['policy']['name'], policy_obj) - for policy_obj in self.vn_fixture.policy_objs) - if policy_name in policy_name_objs.keys(): - self.vn_fixture.policy_objs.remove(policy_name_objs[policy_name]) - - -class ConfigPolicy(): - - def remove_from_cleanups(self, fix): - for cleanup in self._cleanups: - if fix.cleanUp in cleanup: - self._cleanups.remove(cleanup) - break - - def config_policy(self, policy_name, rules): - """Configures policy.""" - # create policy - policy_fix = self.useFixture(PolicyFixture( - policy_name=policy_name, rules_list=rules, - inputs=self.inputs, connections=self.connections)) - return policy_fix - - def attach_policy_to_vn(self, policy_fix, vn_fix, policy_type=None): - policy_attach_fix = self.useFixture(AttachPolicyFixture( - self.inputs, self.connections, vn_fix, policy_fix, policy_type)) - return policy_attach_fix - - def detach_policy(self, vn_policy_fix): - self.logger.debug("Removing 
policy from '%s'", - vn_policy_fix.vn_fixture.vn_name) - vn_policy_fix.cleanUp() - self.remove_from_cleanups(vn_policy_fix) - - def unconfig_policy(self, policy_fix): - """Un Configures policy.""" - self.logger.debug("Delete policy '%s'", policy_fix.policy_name) - policy_fix.cleanUp() - self.remove_from_cleanups(policy_fix) diff --git a/common/policy/get_version.py b/common/policy/get_version.py deleted file mode 100644 index 2cf3d2f55..000000000 --- a/common/policy/get_version.py +++ /dev/null @@ -1,32 +0,0 @@ -import re - - -def get_OsVersion(self): - OSVersion = self.inputs.os_type[self.inputs.compute_ips[0]] - OSVersion = OSVersion.capitalize() - return OSVersion -# end get_OsVersion - - -def get_VrouterReleaseVersion(self): - buildlist = [] - myBuild = self.inputs.run_cmd_on_server( - self.inputs.compute_ips[0], 'contrail-version | grep contrail-vrouter-agent | awk \'{print $2}\'') - myRel = myBuild.split("-", 1) - return myRel[0] -# end get_VrouterReleaseVersion - - -def get_VrouterBuildVersion(self): - buildlist = [] - myBuild = self.inputs.run_cmd_on_server( - self.inputs.compute_ips[0], 'contrail-version | grep contrail-vrouter-agent | awk \'{print $3}\'') - return myBuild -# end get_VrouterBuildVersion - - -def get_OS_Release_BuildVersion(self): - BuildTag = str(get_OsVersion(self)) + '-' + str(get_VrouterReleaseVersion(self)) + \ - '-' + str(get_VrouterBuildVersion(self)) - return BuildTag -# end get_OS_Release_BuildVersion diff --git a/common/policy/policy_test_helper.py b/common/policy/policy_test_helper.py deleted file mode 100644 index 0af3ac489..000000000 --- a/common/policy/policy_test_helper.py +++ /dev/null @@ -1,741 +0,0 @@ -import os -import copy -import traceback -from vnc_api.vnc_api import * -from vnc_api.gen.resource_test import * -from quantum_test import * -from nova_test import * -from policy_test import * -from vn_test import * -import string - - -def comp_rules_from_policy_to_system(self): - """ Comparing Policy rule to system 
rule(agent) . - """ - # Initializing the connections to quantum/api/nova/agent fixtures from self - self.connections = ContrailConnections(self.project_inputs, self.logger) - self.agent_inspect = self.connections.agent_inspect - self.quantum_h = self.connections.quantum_h - self.nova_h = self.connections.nova_h - self.orch = self.connections.orch - self.api_s_inspect = self.connections.api_server_inspect - self.logger = self.inputs.logger - self.project_name = self.project_inputs.project_name - - result = True - msg = [] - # - # Step 1 :Get all projects - project_names, project_ids, project_domains = get_project_list(self) - for pr in range(len(project_names)): - pro_vm_list = None - if self.project_name == project_names[pr]: - # Step 2:Check VMs are exist for selected project - pro_vm_list = self.orch.get_vm_list(project_id=project_ids[pr]) - else: - pro_vm_list = None - if pro_vm_list is not None: - # Arragenging all VM's - vm_list = [] - old_vn = '' - for i in range(len(pro_vm_list)): - vm = str(pro_vm_list[i].name) - vm_list.append(vm) - - # Step 2:Verify quantum rules for each VM. - for vm in range(len(vm_list)): - policys_list = [] - vn_list = [] - - # Step 3 :Get All VNs of selected VM - vns_of_vm = self.orch.get_networks_of_vm(pro_vm_list[vm]) - for i in range(len(vns_of_vm)): - vn_obj = str(vns_of_vm[i]) - vn_list.append(vn_obj) - # Verifying the quntum rules for each VN - for vn in vn_list: - if old_vn != vn: - # step 4:Get the policys associated with vn from API - # server - policys_list = self.api_s_inspect.get_cs_vn_policys( - project=project_names[pr], - domain=project_domains[pr], - vn=vn, - refresh=True) - if policys_list == []: - break - else: - pass - self.logger.info("Order of the policy's list:%s" % - (policys_list)) - user_rules_tx = {} - rules_by_vn = {} - rules_by_vn[vn] = [] - - # Step 5 :Aggregating all attached policys rules for - # each network. 
- for policy in policys_list: - - # Get the rules from quantum client - policy_detail = self.vnc_lib.network_policy_read(fq_name=[u'default-domain', - unicode(project_names[pr]), unicode(policy)]) - - self.logger.info( - "%s, %s, %s, %s, %s" % - (policy_detail, - policys_list, - project_names, - pro_vm_list, - vn_list)) - # Total no of rules for each policy - list_of_rules = policy_detail.network_policy_entries.exportDict() - list_of_rules = list_of_rules['PolicyEntriesType']['policy_rule'] - - no_of_rules = [] - for each_rule in list_of_rules: - if ((each_rule['dst_addresses'][0] - ['network_policy'] is not None) and - (each_rule['src_addresses'][0] - ['network_policy'] is not None)): - dst_pol = str(each_rule['dst_addresses'][0] - ['network_policy']) - src_pol = str(each_rule['src_addresses'][0] - ['network_policy']) - for each_vn in self.topo.policy_vn[ - dst_pol.split(':')[2]]: - new_rule = copy.deepcopy(each_rule) - new_fqn = [project_domains[pr], - project_names[pr], each_vn] - new_vnfqn = ':'.join(new_fqn) - new_rule['dst_addresses'][0][ - 'virtual_network'] = new_vnfqn - new_rule['dst_addresses'][0][ - 'network_policy'] = None - for srcvn in self.topo.policy_vn[ - src_pol.split(':')[2]]: - new_rule2 = copy.deepcopy(new_rule) - new_fqns = [project_domains[pr], - project_names[pr], srcvn] - new_vnfqns = ':'.join(new_fqns) - new_rule2['src_addresses'][0][ - 'virtual_network'] = new_vnfqns - new_rule2['src_addresses'][0][ - 'network_policy'] = None - no_of_rules.append(new_rule2) - elif ((each_rule['dst_addresses'][0][ - 'network_policy'] is not None) and - (each_rule['src_addresses'][0][ - 'network_policy'] is None)): - dst_pol = str(each_rule['dst_addresses'][0][ - 'network_policy']) - for each_vn in self.topo.policy_vn[ - dst_pol.split(':')[2]]: - new_rule = copy.deepcopy(each_rule) - new_fqn = [project_domains[pr], - project_names[pr], each_vn] - new_vnfqn = ':'.join(new_fqn) - new_rule['dst_addresses'][0][ - 'virtual_network'] = new_vnfqn - 
new_rule['dst_addresses'][0][ - 'network_policy'] = None - no_of_rules.append(new_rule) - elif ((each_rule['dst_addresses'][0][ - 'network_policy'] is None) and - (each_rule['src_addresses'][0][ - 'network_policy'] is not None)): - src_pol = str(each_rule['src_addresses'][0][ - 'network_policy']) - for srcvn in self.topo.policy_vn[ - src_pol.split(':')[2]]: - new_rule = copy.deepcopy(each_rule) - new_fqn = [project_domains[pr], - project_names[pr], srcvn] - new_vnfqn = ':'.join(new_fqn) - new_rule['src_addresses'][0][ - 'virtual_network'] = new_vnfqn - new_rule['src_addresses'][0][ - 'network_policy'] = None - no_of_rules.append(new_rule) - else: - no_of_rules.append(each_rule) - - # Traslation of quantum rules to ACES - fq_name = [project_domains[pr], - project_names[pr], vn] - fq_vn = ':'.join(fq_name) - self.logger.info( - "Traslation of quantum rules to ACES format") - updated_quantum_rules, uni_rule = tx_quantum_rules_to_aces( - no_of_rules, fq_vn) - user_rules_tx[policy] = updated_quantum_rules - # Step 5b: Aggregate rules by network - self.logger.info("vn is %s, vn_policy is %s" % - (vn, policy)) - rules_by_vn[vn] += user_rules_tx[policy] - - # Step 6:Remove the duplicate rules if the multilple - # policies have same rule - rules_by_vn[vn] = trim_duplicate_rules(rules_by_vn[vn]) - - # Step 7:Translate quantum- ACEs to system format and - # update ACE IDs - if rules_by_vn[vn] != []: - rules_by_vn[vn] = tx_quntum_def_aces_to_system( - fq_vn, rules_by_vn[vn], uni_rule) - rules_by_vn[vn] = policy_test_utils.update_rule_ace_id( - rules_by_vn[vn]) - self.logger.debug("VN: %s, expected ACE's is " % (vn)) - for r in rules_by_vn[vn]: - self.logger.info("%s" % - (json.dumps(r, sort_keys=True))) - # end building VN ACE's from user rules - - # Step 8:Get actual from vna in compute nodes [referred - # as cn] and compare with quntum rules and update the - # result - rules_by_all_vn = rules_by_vn[vn] - project_name = project_names[pr] - result, msg = 
comp_user_rules_to_system_rules( - self, vn, rules_by_all_vn, policys_list, pro_vm_list, vm_list, vm, project_name) - self.logger.info( - "Verify policy rules for other vn if it is present") - old_vn = vn - else: - pass - else: - self.logger.info( - "Skipping the policy rule comparison since VM's are not exist for selected project:%s" % - (project_names[pr])) - self.logger.info( - "Policy rules comparison with system for all Virtual networks are done") - return (result, msg) - - # end comp_rules_from_policy_to_system - - -def get_project_list(self): - all_projects = self.api_s_inspect.get_cs_domain()['domain']['projects'] - project_names = [] - project_ids = [] - project_domains = [] - for i in range(len(all_projects)): - pro_domain = str(all_projects[i]['to'][0]) - pro_name = str(all_projects[i]['to'][1]) - pro_id = str(all_projects[i]['uuid']) - if all( - x != pro_name for x in ( - 'default-project', - 'invisible_to_admin', - 'service')): - if pro_name.startswith('vpc'): - pass - else: - project_names.append(pro_name) - project_ids.append(pro_id) - project_domains.append(pro_domain) - else: - pass - return (project_names, project_ids, project_domains) - - -def tx_quantum_rules_to_aces(no_of_rules, fq_vn): - ''' Generating the quantum rules to aces ''' - total_rules = len(no_of_rules) - user_rules_tx = [] - uni_rule = {} - # step 1: Getting all tuples list from quantum rules : - for i in range(total_rules): - temp_rule = {} - temp_rule['direction'] = str(no_of_rules[i]['direction']) - temp_rule['proto_l'] = str(no_of_rules[i]['protocol']) - dest = str(no_of_rules[i]['dst_addresses'][0]['virtual_network']) - if dest == 'any': - temp_rule['dst'] = 'any' - elif dest == 'local': - temp_rule['dst'] = fq_vn - else: - # dst_ntw=string.split(dest,':') - # temp_rule['dst']=dst_ntw[2] - temp_rule['dst'] = dest - temp_rule['simple_action'] = str( - no_of_rules[i]['action_list']['simple_action']) - temp_rule['action_l'] = [ - no_of_rules[i]['action_list']] - source_addr = 
str( - no_of_rules[i]['src_addresses'][0]['virtual_network']) - if source_addr == 'any': - temp_rule['src'] = 'any' - elif source_addr == 'local': - temp_rule['src'] = fq_vn - else: - # src_addr=string.split(source_addr,':') - # temp_rule['src']=src_addr[2] - temp_rule['src'] = source_addr - if ((no_of_rules[i]['src_ports'][0]['start_port']) == -1 - and (no_of_rules[i]['src_ports'][0]['end_port']) == -1): - temp_rule['src_port_l'] = {'max': '65535', 'min': '0'} - else: - a = str(no_of_rules[i]['src_ports'][0]['start_port']) - b = str(no_of_rules[i]['src_ports'][0]['end_port']) - temp_rule['src_port_l'] = {'max': a, 'min': b} - if ((no_of_rules[i]['dst_ports'][0]['start_port']) == - - 1 and (no_of_rules[i]['dst_ports'][0]['end_port']) == - - 1): - temp_rule['dst_port_l'] = {'max': '65535', 'min': '0'} - else: - a = str(no_of_rules[i]['dst_ports'][0]['start_port']) - b = str(no_of_rules[i]['dst_ports'][0]['end_port']) - temp_rule['dst_port_l'] = {'max': a, 'min': b} - user_rules_tx.append(temp_rule) - - # step 2 :protocol value mapping - for rule in user_rules_tx: - if rule['proto_l'] == 'any': - rule['proto_l'] = {'max': '255', 'min': '0'} - else: - rule['proto_l'] = {'max': str(rule['proto_l']), - 'min': str(rule['proto_l'])} - - # step 3: if the rules are unidirectional - for rule in user_rules_tx: - if rule['direction'] == '>': - if (rule['src'] != rule['dst']): - uni_rule = copy.deepcopy(rule) - # update newly copied rule: swap address and insert 'any' to - # protocol and src/dst ports - uni_rule['src'], uni_rule['dst'] = uni_rule[ - 'dst'], uni_rule['src'] - uni_rule['src_port_l'], uni_rule['dst_port_l'] = { - 'max': '65535', 'min': '0'}, {'max': '65535', 'min': '0'} - uni_rule['proto_l'] = {'max': '255', 'min': '0'} - uni_rule['simple_action'] = 'deny' - uni_rule['action_l'] = ['deny'] - break - - # step 4: expanding rules if bidir rule - for rule in user_rules_tx: - if rule['direction'] == '<>': - rule['direction'] = '>' - pos = user_rules_tx.index(rule) - 
new_rule = copy.deepcopy(rule) - # update newly copied rule: swap address/ports & insert - new_rule['src'], new_rule['dst'] = new_rule['dst'], new_rule['src'] - new_rule['src_port_l'], new_rule['dst_port_l'] = new_rule[ - 'dst_port_l'], new_rule['src_port_l'] - - if new_rule.has_key('action_l') and new_rule['action_l'][0].has_key('apply_service'): - if rule['src'] == fq_vn: - new_rule['action_l'][0]['apply_service'] = [] - if rule['dst'] == fq_vn: - user_rules_tx[pos]['action_l'][0]['apply_service'] = [] - - user_rules_tx.insert(pos + 1, new_rule) - - return (user_rules_tx, uni_rule) - -# end of tx_quantum_rules_to_aces - - -def trim_duplicate_rules(rules_by_vn): - temp_rule = rules_by_vn - for i, left in enumerate(temp_rule): - for j, right in enumerate(temp_rule): - if left != right: - if ( - ( - left['src'] == right['src']) and ( - left['dst'] == right['dst']) and ( - left['src_port_l'] == right['src_port_l']) and ( - left['dst_port_l'] == right['dst_port_l']) and ( - left['proto_l'] == right['proto_l'])): - temp_rule.pop(j) - else: - pass - return temp_rule -# end of trim_duplicate_rules - - -def comp_user_rules_to_system_rules( - self, - vn, - rules_by_all_vn, - policy, - all_vms, - vm_list, - vm, - project_name): - # Step 1:Get actual from vna in compute nodes [referred as cn] - result = True - cn_vna_rules_by_vn = {} # {'vn1':[{...}, {..}], 'vn2': [{..}]} - err_msg = {} # To capture error {compute: {vn: error_msg}} - for compNode in self.inputs.compute_ips: - self.logger.info("Verify rules expected in CN if VN-VM in CN") - self.logger.info("CN: %s, Check for expected data" % (compNode)) - inspect_h = self.agent_inspect[compNode] - got_vm_name = inspect_h.get_vna_tap_interface_by_vm( - str(all_vms[vm].id)) - if got_vm_name: - print "checking for vn %s in compute %s" % (vn, compNode) - vn_fq_name = inspect_h.get_vna_vn( - vn_name=vn, project=project_name)['name'] - vna_acl = inspect_h.get_vna_acl_by_vn(vn_fq_name) - if vna_acl: - cn_vna_rules_by_vn[vn] = 
vna_acl['entries'] # system_rules - else: - cn_vna_rules_by_vn[vn] = [] - # compare with test input & assert on failure - ret = policy_test_utils.compare_rules_list( - rules_by_all_vn, cn_vna_rules_by_vn[vn]) - if ret: - result = ret['state'] - msg = ret['msg'] - err_msg[compNode] = {vn: msg} - self.logger.error("CN: %s, VN: %s, test result not expected, \ - msg: %s" % (compNode, vn, msg)) - self.logger.debug("expected rules: ") - for r in rules_by_all_vn: - self.logger.debug(r) - self.logger.debug("actual rules from system: ") - for r in cn_vna_rules_by_vn[vn]: - self.logger.debug(r) - result = False - else: - self.logger.info( - "CN: %s, VN: %s, result of expected rules check passed" % - (compNode, vn)) - self.logger.info( - "Done the rule verification for vm:%s with attached policy:%s and vn:%s " % - (vm_list[vm], policy, vn)) - else: - pass - return (result, err_msg) -# end of comp_user_rules_to_system_rules - - -def tx_quntum_def_aces_to_system(test_vn, user_rules_tx, uni_rule): - '''convert ACEs derived from user rules to system format: - 1. For every user rule, add deny rule; skip adding duplicates - 2. For non-empty policy, add permit-all at the end - 3. add ace_id, rule_type - 4. Update VN to FQDN format - 5. remove direction and simple_action fields @end.. 
- ''' - if user_rules_tx == []: - return user_rules_tx - any_proto_port_rule = { - 'direction': '>', 'proto_l': {'max': '255', 'min': '0'}, - 'src_port_l': {'max': '65535', 'min': '0'}, - 'dst_port_l': {'max': '65535', 'min': '0'}} - - # step 0: check & build allow_all for local VN if rules are defined in - # policy - test_vn_allow_all_rule = copy.copy(any_proto_port_rule) - test_vn_allow_all_rule['simple_action'] = 'pass' - test_vn_allow_all_rule['action_l'] = ['pass'] - test_vn_allow_all_rule['src'], test_vn_allow_all_rule[ - 'dst'] = test_vn, test_vn - - # check the rule for any protocol with same network exist and for deny - # rule - test_vn_deny_all_rule = copy.copy(any_proto_port_rule) - test_vn_deny_all_rule['simple_action'] = 'deny' - test_vn_deny_all_rule['action_l'] = ['deny'] - test_vn_deny_all_rule['src'], test_vn_deny_all_rule[ - 'dst'] = test_vn, test_vn - - # step 1: check & add permit-all rule for same VN but not for 'any' - # network - last_rule = copy.copy(any_proto_port_rule) - last_rule['simple_action'] = 'pass' - last_rule['action_l'] = [{'simple_action': 'pass', 'gateway_name': None, - 'apply_service': [], 'mirror_to': None, - 'assign_routing_instance': None, - 'log': False, 'alert': False}] - last_rule['src'], last_rule['dst'] = 'any', 'any' - - # check any rule exist in policy : - final_user_rule = get_any_rule_if_exist(last_rule, user_rules_tx) - - # step 2: check & add deny_all for every user-created rule - system_added_rules = [] - for rule in user_rules_tx: - pos = len(user_rules_tx) - new_rule = copy.deepcopy(rule) - new_rule['proto_l'] = {'max': '255', 'min': - '0'} - new_rule['direction'] = '>' - new_rule['src_port_l'], new_rule['dst_port_l'] = { - 'max': '65535', 'min': '0'}, {'max': '65535', 'min': '0'} - new_rule['simple_action'] = 'deny' - new_rule['action_l'] = ['deny'] - system_added_rules.append(new_rule) - - # step to check any one of the rule is any protocol and source and dst ntw - # is test vn then check for the duplicate 
rules - final_any_rules = get_any_rule_if_src_dst_same_ntw_exist( - test_vn_allow_all_rule, test_vn_deny_all_rule, user_rules_tx) - if final_any_rules: - user_rules_tx = final_any_rules - else: - pass - - # Skip adding rules if they already exist... - # print json.dumps(system_added_rules, sort_keys=True) - if not policy_test_utils.check_rule_in_rules( - test_vn_allow_all_rule, - user_rules_tx): - user_rules_tx.append(test_vn_allow_all_rule) - for rule in system_added_rules: - if not policy_test_utils.check_rule_in_rules(rule, user_rules_tx): - user_rules_tx.append(rule) - - # step 3: check & add permit-all rule for same VN but not for 'any' - # network - last_rule = copy.copy(any_proto_port_rule) - last_rule['simple_action'], last_rule['action_l'] = 'pass', ['pass'] - last_rule['src'], last_rule['dst'] = 'any', 'any' - - # if rule is unidirectional then append the deny rule if src and dst is - # different - if uni_rule: - user_rules_tx.append(uni_rule) - else: - pass - - # if the first rule is not 'any rule ' then append the last rule defined - # above. 
- for rule in user_rules_tx: - any_rule_flag = True - if ((rule['src'] == 'any') and (rule['dst'] == 'any')): - any_rule_flag = False - else: - pass - if any_rule_flag: - user_rules_tx.append(last_rule) - else: - pass - # triming the duplicate rules - user_rules_tx = policy_test_utils.remove_dup_rules(user_rules_tx) - # triming the protocol with any option for rest of the fileds - tcp_any_rule = { - 'proto_l': { - 'max': 'tcp', 'min': 'tcp'}, 'src': 'any', 'dst': 'any', 'src_port_l': { - 'max': '65535', 'min': '0'}, 'dst_port_l': { - 'max': '65535', 'min': '0'}} - udp_any_rule = { - 'proto_l': { - 'max': 'udp', 'min': 'udp'}, 'src': 'any', 'dst': 'any', 'src_port_l': { - 'max': '65535', 'min': '0'}, 'dst_port_l': { - 'max': '65535', 'min': '0'}} - icmp_any_rule = { - 'proto_l': { - 'max': 'icmp', 'min': 'icmp'}, 'src': 'any', 'dst': 'any', 'src_port_l': { - 'max': '65535', 'min': '0'}, 'dst_port_l': { - 'max': '65535', 'min': '0'}} - icmp_match, index_icmp = check_5tuple_in_rules( - icmp_any_rule, user_rules_tx) - tcp_match, index_tcp = check_5tuple_in_rules(tcp_any_rule, user_rules_tx) - udp_match, index_udp = check_5tuple_in_rules(udp_any_rule, user_rules_tx) - if icmp_match: - for rule in user_rules_tx[index_icmp + 1:len(user_rules_tx)]: - if rule['proto_l'] == {'max': 'icmp', 'min': 'icmp'}: - user_rules_tx.remove(rule) - else: - pass - if tcp_match: - for rule in user_rules_tx[index_tcp + 1:len(user_rules_tx)]: - if rule['proto_l'] == {'max': 'tcp', 'min': 'tcp'}: - user_rules_tx.remove(rule) - else: - pass - if udp_match: - for rule in user_rules_tx[index_udp + 1:len(user_rules_tx)]: - if rule['proto_l'] == {'max': 'udp', 'min': 'udp'}: - user_rules_tx.remove(rule) - else: - pass - # if any rule is exist the it will execute - if final_user_rule: - user_rules_tx = final_user_rule - else: - pass - # step 4: add ace_id, type, src to all rules - for rule in user_rules_tx: - rule['ace_id'] = str(user_rules_tx.index(rule) + 1) - rule['rule_type'] = 'Terminal' # 
currently checking policy aces only - # if rule['src'] != 'any' : - # m = re.match(r"(\S+):(\S+):(\S+)", rule['src']) - # if not m: rule['src'] = ':'.join(self.inputs.project_fq_name) + ':' + rule['src'] - # if rule['dst'] != 'any': - # m = re.match(r"(\S+):(\S+):(\S+)", rule['dst']) - # if not m: rule['dst'] = ':'.join(self.inputs.project_fq_name) + ':' + rule['dst'] - try: - del rule['direction'] - except: - continue - try: - del rule['simple_action'] - except: - continue - - return user_rules_tx - -# end tx_user_def_aces_to_system - - -def get_any_rule_if_exist(all_rule, user_rules_tx): - final_rules = [] - if policy_test_utils.check_rule_in_rules(all_rule, user_rules_tx): - for rule in user_rules_tx: - if rule == all_rule: - final_rules.append(rule) - break - else: - final_rules.append(rule) - else: - pass - return final_rules -# end get_any_rule_if_exist - - -def get_any_rule_if_src_dst_same_ntw_exist( - test_vn_allow_all_rule, - test_vn_deny_all_rule, - user_rules_tx): - final_any_rules = [] - if ( - policy_test_utils.check_rule_in_rules( - test_vn_allow_all_rule, - user_rules_tx) or policy_test_utils.check_rule_in_rules( - test_vn_deny_all_rule, - user_rules_tx)): - for rule in user_rules_tx: - if ((rule == test_vn_allow_all_rule) - or (rule == test_vn_deny_all_rule)): - final_any_rules.append(rule) - break - else: - final_any_rules.append(rule) - else: - pass - return final_any_rules -# end get_any_rule_if_src_dst_same_ntw_exist - - -def check_5tuple_in_rules(rule, rules): - '''check if 5-tuple of given rule exists in given rule-set..Return True if rule exists; else False''' - #print ("check rule %s in rules" %(json.dumps(rule, sort_keys=True))) - match_keys = ['proto_l', 'src', 'dst', 'src_port_l', 'dst_port_l'] - for r in rules: - match = True - for k in match_keys: - if r[k] != rule[k]: - # print ("current rule not matching due to key %s, move on.." 
%k) - match = False - break - if match: - break - return (match, rules.index(r)) -# end check_5tuple_in_rules - -def _create_n_policy_n_rules(self, number_of_policy, valid_rules, number_of_dummy_rules, option='quantum'): - ''' Create n number of policy & n number of rules - created policy will be policy1,policy2,policy3...policyn so on - Sample rules_list: - src_ports and dst_ports : can be 'any'/tuple/list as shown below - protocol : 'any' or a string representing a protocol number : ICMP(1), TCP(6), UDP(17) - simple_action : pass/deny - source_network/dest_network : VN name - rules= [ - { - 'direction' : '<>', 'simple_action' : 'pass', - 'protocol' : 'any', - 'source_network': vn1_name, - 'src_ports' : 'any', - 'src_ports' : (10,100), - 'dest_network' : vn1_name, - 'dst_ports' : [100,10], - }, - ] - ''' - x = 80 - y = 80 - rules_list = [] - policy_name = 'policy' - self.logger.info('Creating %d dummy rules' % (number_of_dummy_rules)) - total_policy = number_of_policy - while len(rules_list) < number_of_dummy_rules: - if option == 'quantum': - rules = [ - { - 'direction': '<>', 'simple_action': 'deny', - 'protocol': 'udp', 'src_ports': (x, x), - 'dst_ports': (y, y), - 'source_network': 'any', - 'dest_network': 'any', - }, - ] - else: - rules = [ - PolicyRuleType( - direction='<>', protocol='udp', dst_addresses=[AddressType(virtual_network='any')], - src_addresses=[AddressType(virtual_network='any')], dst_ports=[PortType(x, x)], - action_list=ActionListType(simple_action='deny'), - src_ports=[PortType(y, y)]), - ] - rules_list.append(rules[0]) - x += 1 - y += 1 - # end while - # append valid rule at the end - self.logger.info('Appending %d valid rules to end of the rule list' % - (len(valid_rules))) - for rule in valid_rules: - rules_list.append(rule) - self.logger.info('Using policy fixture to create %d policy with %d rules' % - (number_of_policy, len(rules_list))) - number_of_policy += 1 - policy_objs_list = [] - for i in range(1, number_of_policy): - try: - if 
option == 'quantum': - policy_fixture = self.useFixture( - PolicyFixture(policy_name=policy_name + str(i), - rules_list=rules_list, inputs=self.inputs, - connections=self.connections)) - else: - proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=self.inputs.project_name)) - policy_fixture = self.useFixture( - NetworkPolicyTestFixtureGen( - self.vnc_lib, network_policy_name=policy_name + - str(i), - parent_fixt=proj_fixt, network_policy_entries=PolicyEntriesType(rules_list))) - - except Exception as e: - self.logger.error( - 'Exception %s occured while creating %d policy with %d rules' % - (e, total_policy, len(rules_list))) - self.assertTrue( - False, 'Exception occured while creating %d policy with %d rules' % - (total_policy, len(rules_list))) - if option == 'quantum': - self.logger.info('Created policy %s' % - (policy_fixture.policy_fq_name)) - policy_objs_list.append(policy_fixture.policy_obj) - policy_fixture.verify_policy_in_api_server() - else: - self.logger.info('Created policy %s' % (policy_fixture._obj.name)) - policy_objs_list.append(policy_fixture._obj) - policy_read = self.vnc_lib.network_policy_read( - id=str(policy_fixture._obj.uuid)) - if not policy_read: - self.logger.error("Policy %s read on API server failed" % - policy_name + str(i)) - return {'result': False, 'msg': "Policy %s read failed on API server" % policy_name + str(i)} - - # end for - return policy_objs_list -# end _create_n_policy_n_rules - diff --git a/common/policy/policy_test_utils.py b/common/policy/policy_test_utils.py deleted file mode 100644 index e194a08b3..000000000 --- a/common/policy/policy_test_utils.py +++ /dev/null @@ -1,445 +0,0 @@ -''' This module provides utils for Policy tests ''' -import inspect -import copy -import json -import fixtures -from tcutils.topo.topo_helper import topology_helper -from vnc_api.vnc_api import * -from vnc_api.gen.resource_test import * -import re - - -def update_rule_ace_id(rules_list): - ''' After combining 
multiple policies, renumber ace_id of the rules by - index of rules in the combined list - Return updated rules_list. - ''' - for rule in rules_list: - rule['ace_id'] = str(rules_list.index(rule) + 1) - return rules_list - - -def remove_dup_rules(rules_list): - ''' After combining multiple policies, there can be duplicate rules. - Leave only one copy and remove duplicate rules. - Return updated rules_list. - XXX For now tested for permit_all rules only. - TODO: handle duplicate rules within a policy and duplicate rules - across policies, in case of both policies attached to a VN. - ''' - print "==>", inspect.getframeinfo(inspect.currentframe())[2] - new_list = [] - while len(rules_list) > 0: - match = None - ref_rule = rules_list.pop(0) - new_list.append(ref_rule) - match = compare_dict(ref_rule, rules_list) - if match: - print "matching idx are", match - for i in reversed(match): - rules_list.pop(i) - # end for - # end if - # end for - # print "length of list after removing dup is ", len(new_list) - return new_list - - -def compare_dict(ref_dict, test_dict_l): - ''' Comapare a rule with the rules_list. Return a list of indices - that match the ref_dict''' - matching_idx = [] - for idx, rule in enumerate(test_dict_l): - match = 0 - for key in filter(lambda x: x != 'ace_id', ref_dict): - if ref_dict[key] != rule[key]: - match = 0 - break - else: - match = 1 - # end for - if match == 1: - matching_idx.append(idx) - return matching_idx - - -def move_matching_rule_to_bottom(rules_list, rule={}, vn=None): - ''' check if permit_all exists.. if yes, pop the rule out and append the - rule to the end of the list. 
- Return updated rules_list''' - print "==>", inspect.getframeinfo(inspect.currentframe())[2] - idx = check_if_rule_present(rules_list, rule) - if idx: - permit_all_rule = rules_list.pop(idx) - rules_list.append(permit_all_rule) - return rules_list - - -def check_rule_in_rules(rule, rules): - '''check if 5-tuple of given rule exists in given rule-set..Return True if rule exists; else False''' - #print ("check rule %s in rules" %(json.dumps(rule, sort_keys=True))) - match_keys = ['src', 'proto_l', 'dst', 'src_port_l', 'dst_port_l'] - for r in rules: - match = True - for k in match_keys: - if r[k] != rule[k]: - # print ("current rule not matching due to key %s, move on.." %k) - match = False - break - if match == True: - break - return match - - -def check_if_rule_present(rules_list, rule={}, vn=None): - ''' if present, return index of the rule, else return None ''' - print "==>", inspect.getframeinfo(inspect.currentframe())[2] - match_rule = rule - for rule in rules_list: - match = 1 - for key in match_rule: - if match_rule[key] != rule[key]: - match = 0 - break - # endif - # end for key - if match: - return rules_list.index(rule) - return None - - -def trim_realign_rules(rules_list): - ''' remove duplicate rules & move permit_all rule to bottom - Return updated rules_list''' - new_list = remove_dup_rules(rules_list) - permit_all_rule = {'proto_l': {'max': '255', 'min': '0'}, 'src_port_l': { - 'max': '65535', 'min': '0'}, 'dst_port_l': {'max': '65535', 'min': '0'}, 'action_l': ['pass']} - final_list = move_matching_rule_to_bottom(new_list, rule=permit_all_rule) - return final_list - - -def compare_rules_list(user_rules_tx, system_rules, exp_name='user_rules_tx', act_name='system_rules'): - ''' Compares 2 list of rules [as dictionary] returns a dictionary with keys - as result & msg_list. - For success, return is empty. For failure, result is set to False & msg has - the error info. 
''' - print "-" * 40 - proto_map = {'1': 'icmp', '6': 'tcp', '17': 'udp'} - result = True - ret = {} - msg = [] - # Check for empty policy, 0 rule_list - if len(user_rules_tx) == 0: - if len(system_rules) == 0: - print "empty policy check pass.." - return ret - # For non-zero rule policies, continue checking num rules - if len(system_rules) != len(user_rules_tx): - msg = "No of rules in system: %s is not same as expected: %s " % ( - len(system_rules), len(user_rules_tx)) - print "expected: " - for r in user_rules_tx: - print json.dumps(r, sort_keys=True) - print "-" * 40 - print "got: " - for r in system_rules: - print json.dumps(r, sort_keys=True) - ret['state'] = 'False' - ret['msg'] = msg - return ret - # If num_rules are ok, compare rule contents - # i. build key list to be checked - non_port_keys = [] - port_keys = [] - rule_keys = list(user_rules_tx[0].keys()) - for k in rule_keys: - if k.find('port') == -1: - non_port_keys.append(k) - else: - port_keys.append(k) - if k.find('proto') != -1: - proto_key = k - for i in range(len(system_rules)): - if isinstance(system_rules[i][k], dict): - if system_rules[i][k]['min'] in proto_map: - system_rules[i][k]['min'] = proto_map[ - system_rules[i][k]['min']] - if system_rules[i][k]['max'] in proto_map: - system_rules[i][k]['max'] = proto_map[ - system_rules[i][k]['max']] - elif system_rules[i][k] in proto_map: - system_rules[i][k] = proto_map[system_rules[i][k]] - - # ii. match non_port_key values first - for i in range(len(user_rules_tx)): - match = None - for k in non_port_keys: - if user_rules_tx[i][k] != system_rules[i][k]: - if k == 'action_l': - match, mesg = compare_action_list(user_rules_tx[i][k], system_rules[i][k]) - if not match: - msg.append(mesg) - else: - msg.append("Rule mismatch found: value for key: %s not matching: expected- %s, got- %s" - % (k, user_rules_tx[i][k], system_rules[i][k])) - match = False - if match != False: - # iii. if good, check port keys.. 
need special handling for icmp proto - # skip src/dst port check for icmp proto - # icmp in policy rules can appear in following formats in different - # datasets - icmp_names = [{'max': '1', 'min': '1'}, '1', - {'max': 'icmp', 'min': 'icmp'}, 'icmp'] - if user_rules_tx[i][proto_key] not in icmp_names: - for k in port_keys: - if user_rules_tx[i][k] != system_rules[i][k]: - msg.append( - "key: %s not matching between expected & system rules - %s, %s" % - (k, user_rules_tx[i][k], system_rules[i][k])) - if msg != []: - result = False - print "-" * 40 - print "Compare failed..!, msg is: ", msg - ret['state'] = 'False' - ret['msg'] = msg - print "-" * 40 - return ret - -# end compare_rules_list - - -def compare_args(key, a, b, exp_name='expected', act_name='actual'): - ''' For a given key, compare values a, b got from 2 different databases. - If instance is dict and not matching, call compare_rules_list to get details''' - ret = None - if a != b: - ret = key + " not matching --->expected: " + \ - str(a) + " --->got: " + str(b) - if a != b and isinstance(a, dict): - ret = compare_rules_list(a, b, exp_name, act_name) - if a != b and isinstance(a, list): - ret = compare_rules_list(a, b, exp_name, act_name) - return ret - -# This procedure compare list1 is exists in list2 or not. 
- - -def compare_list(self, list1, list2): - diff_list = [] - compare = True - for item in list1: - if not item in list2: - diff_list.append(item) - compare = False - if not compare: - self.logger.info("List compare failed: expected is %s and actual is %s" % (list1, list2)) - return compare -# end compare_list - - -def get_dict_with_matching_key_val(key, value, dict_l, scope): - - match = 0 - for d in dict_l: - if d[scope][key] == value: - print "match found" - match = 1 - return {'state': 1, 'ret': d} - if not match: - msg = "No matching rule found with key: " + key + "value: " + value - print msg - return {'state': None, 'ret': msg} - - -def get_policy_not_in_vn(initial_policy_list, complete_policy_list): - ret = list(set(complete_policy_list) - set(initial_policy_list)) - if len(ret) > 0: - return ret[0] - else: - return None - - -def xlate_cn_rules(rules_list): - ''' Take rules from control node and translate to quantum rules data format to compare..''' - new_rule_list = [] - for rule in rules_list: - new_rule = {} - for key, value in rule.items(): - key = key.replace('-', '_') - if type(value) == dict: - value = replace_key(value) - new_rule[key] = value - # Ignore following for now... 
- if not new_rule['dst_addresses'].has_key('subnet_list'): - new_rule['dst_addresses']['subnet_list'] = [] - if not new_rule['src_addresses'].has_key('subnet_list'): - new_rule['src_addresses']['subnet_list'] = [] - new_rule['src_addresses']['subnet'] = None - new_rule['dst_addresses']['subnet'] = None - new_rule['src_addresses'] = [new_rule['src_addresses']] - new_rule['dst_addresses'] = [new_rule['dst_addresses']] - new_rule['dst_ports']['end_port'] = int( - new_rule['dst_ports']['end_port']) - new_rule['dst_ports']['start_port'] = int( - new_rule['dst_ports']['start_port']) - new_rule['src_ports']['end_port'] = int( - new_rule['src_ports']['end_port']) - new_rule['src_ports']['start_port'] = int( - new_rule['src_ports']['start_port']) - new_rule['dst_ports'] = [new_rule['dst_ports']] - new_rule['src_ports'] = [new_rule['src_ports']] - if new_rule['action_list']['mirror_to']['analyzer_name'] != None: - new_rule['action_list']['mirror_to']['udp_port'] = None - else: - new_rule['action_list']['mirror_to'] = None - new_rule['action_list']['gateway_name'] = None - if new_rule['action_list'].has_key('apply_service'): - new_rule['action_list']['apply_service'] = [new_rule['action_list']['apply_service']] - else: - new_rule['action_list']['apply_service'] = [] - new_rule['rule_sequence']['major'] = int( - new_rule['rule_sequence']['major']) - new_rule['rule_sequence']['minor'] = int( - new_rule['rule_sequence']['minor']) - new_rule['rule_sequence'] = None - if 'log' in new_rule['action_list'].keys(): - new_rule['action_list']['log'] = json.loads(new_rule['action_list']['log']) - new_rule['action_list']['alert'] = json.loads(new_rule['action_list']['alert']) - # appending each rule to new list - new_rule_list.append(new_rule) - print "after xlate: ", new_rule_list - return new_rule_list - -# end of def xlate_cn_rules - - -def replace_key(d): - new = {} - for k, v in d.iteritems(): - if isinstance(v, dict): - v = replace_key(v) - new[k.replace('-', '_')] = v - return new 
- -# end of replace_key - - -def update_topo(topo, test_vn, new_policy): - ''' Purpose of this def is to update & return topology object as needed. - Example: Need to modify VN's policy list.. This change requires update - of every [affected] policy's VN list as well. - Reference info of how data is maintained: - vn_policy= {'vnet0': ['policy0', 'policy1'], 'vnet1': ['policy2', 'policy3']} - policy_vn= {'policy0': ['vnet0']} - ''' - n_topo = copy.deepcopy(topo) - # i] remove test_vn from currently associated policies - topo_helper_obj = topology_helper(n_topo) - n_topo.policy_vn = topo_helper_obj.get_policy_vn() - for p in topo.vn_policy[test_vn]: - n_topo.policy_vn[p].remove(test_vn) - # ii] update vn_policy[test_vn] and policy_vn[new_policy] with new info - n_topo.vn_policy[test_vn] = [new_policy] - n_topo.policy_vn[new_policy].append(test_vn) - return n_topo - - -def get_policy_peer_vns(self, vnet_list, vn_fixture): - ''' For each VN, get the allowed peers based on rule action to the peer VN's. - Every VN pair needs to allow for route exchange to happen.. - vnet_list is the vn pair for which policy peering is inspected - input vnet_list as vn_name & not fqdn. 
- return dict, with key as vn_name and value as again vn_name''' - - vn_policys_peer_vns = {} # allowed peer vns for a vn - for vn in vnet_list: - vn_policys_peer_vns[vn] = vn_fixture[ - vn].get_allowed_peer_vns_by_policy() - print "vn_policys_peer_vns is: ", vn_policys_peer_vns - - all_vns = [] # Build all vns list to replace any - for i, j in vn_fixture.items(): - x = j.vn_fq_name - all_vns.append(x) - - actual_peer_vns_by_policy = {} # return dict with policy peer vn list - # allowed vn peers any any keyword expanded - final_vn_policys_peer_vns = {} - # Expanding any keyword to all created VNs - for vn in vnet_list: - final_vn_policys_peer_vns[vn] = [] - vppvns = vn_policys_peer_vns[vn] - for vppvn in vppvns: - if vppvn != 'any': - final_vn_policys_peer_vns[vn].append(vppvn) - else: - final_vn_policys_peer_vns[vn].extend(all_vns) - final_vn_policys_peer_vns[vn] = list( - set(final_vn_policys_peer_vns[vn])) - - print "final_vn_policys_peer_vns: ", final_vn_policys_peer_vns - for vn in vnet_list: - actual_peer_vns_by_policy[vn] = [] - fqvn = vn_fixture[vn].vn_fq_name - if final_vn_policys_peer_vns[vn] != []: - for pvn in final_vn_policys_peer_vns[vn]: - #get pvn name based on fqdn format- domain:project:vn - m = re.match(r"(\S+):(\S+):(\S+)", pvn) - if m: - m = re.search(r"(\S+):(\S+):(\S+)", pvn) - pvn_name = m.group(3) - else: - pvn_name = pvn - self.logger.info("vn %s sees %s as peer vn" %(vn, pvn)) - if fqvn in final_vn_policys_peer_vns[pvn_name]: - self.logger.info( - "peer vn %s sees vn %s as peer, add if not already in the actual peer list" %(pvn_name, fqvn)) - if pvn not in actual_peer_vns_by_policy[vn]: - actual_peer_vns_by_policy[vn].append(pvn) - self.logger.info("vn %s has following vn's as actual peers -%s" %(vn, actual_peer_vns_by_policy[vn])) - - return actual_peer_vns_by_policy - -def compare_action_list(user_action_l, system_action_l): - - mesg = None; ret = False - for item_u in user_action_l: - ret = False - #[TBD]may need to change the index 
below if multiple SI in system_action_l - if item_u['simple_action'] != system_action_l[0]: - ret = False - break - else: - ret = True - if item_u.has_key('apply_service') and item_u['apply_service'] != []: - - for si in item_u['apply_service']: - si = si.replace(':','_') - ret = False - if si in system_action_l[2]: - ret = True - if not ret: - mesg = "user action list does not match in system,user action list:%s, \ - system action list:%s" % (user_action_l,system_action_l) - - return (ret, mesg) - -if __name__ == '__main__': - ''' Unit test to invoke policy utils.. ''' - - input_data = [ - {'proto_l': {'max': '6', 'min': '6'}, 'src': 'default-domain:admin:vnet0', 'ace_id': '1', 'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'rule_type': 'Terminal', 'src_port_l': {'max': '0', 'min': '0'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'proto_l': {'max': '6', 'min': '6'}, 'src': 'default-domain:admin:vnet0', 'ace_id': '2', 'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'rule_type': 'Terminal', 'src_port_l': {'max': '1', 'min': '1'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'proto_l': {'max': '6', 'min': '6'}, 'src': 'default-domain:admin:vnet0', 'ace_id': '3', 'dst': 'default-domain:admin:vnet0', 'action_l': [ - 'deny'], 'rule_type': 'Terminal', 'src_port_l': {'max': '2', 'min': '2'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'proto_l': {'max': '6', 'min': '6'}, 'src': 'default-domain:admin:vnet0', 'ace_id': '4', 'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'rule_type': 'Terminal', 'src_port_l': {'max': '3', 'min': '3'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'rule_type': 'Terminal', 'src': 'default-domain:admin:vnet0', 'dst_port_l': {'max': '65535', 'min': '0'}, 'src_port_l': {'max': '65535', 'min': '0'}, 'ace_id': '5', 'dst': 'default-domain:admin:vnet0', 'action_l': ['pass'], 'proto_l': {'max': '255', 'min': '0'}}, - {'proto_l': {'max': '17', 'min': '17'}, 'src': 'default-domain:admin:vnet0', 'ace_id': '1', 
'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'rule_type': 'Terminal', 'src_port_l': {'max': '0', 'min': '0'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'proto_l': {'max': '17', 'min': '17'}, 'src': 'default-domain:admin:vnet0', 'ace_id': '2', 'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'rule_type': 'Terminal', 'src_port_l': {'max': '1', 'min': '1'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'proto_l': {'max': '17', 'min': '17'}, 'src': 'default-domain:admin:vnet0', 'ace_id': '3', 'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'rule_type': 'Terminal', 'src_port_l': {'max': '2', 'min': '2'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'proto_l': {'max': '17', 'min': '17'}, 'src': 'default-domain:admin:vnet0', 'ace_id': '4', 'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'rule_type': 'Terminal', 'src_port_l': {'max': '3', 'min': '3'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'rule_type': 'Terminal', 'src': 'default-domain:admin:vnet0', 'dst_port_l': {'max': '65535', 'min': '0'}, 'src_port_l': {'max': '65535', 'min': '0'}, 'ace_id': '5', 'dst': 'default-domain:admin:vnet0', 'action_l': ['pass'], 'proto_l': {'max': '255', 'min': '0'}}] - - system_data = [{'rule_type': 'Terminal', 'src': 'default-domain:admin:vnet0', 'proto_l': {'max': '6', 'min': '6'}, 'ace_id': '1', 'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'src_port_l': {'max': '0', 'min': '0'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'rule_type': 'Terminal', 'src': 'default-domain:admin:vnet0', 'proto_l': {'max': '6', 'min': '6'}, 'ace_id': '2', 'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'src_port_l': {'max': '1', 'min': '1'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'rule_type': 'Terminal', 'src': 'default-domain:admin:vnet0', 'proto_l': {'max': '6', 'min': '6'}, 'ace_id': '3', 'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'src_port_l': {'max': '2', 'min': '2'}, 'dst_port_l': {'max': '65535', 
'min': '0'}}, {'rule_type': 'Terminal', 'src': 'default-domain:admin:vnet0', 'proto_l': {'max': '6', 'min': '6'}, 'ace_id': '4', 'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'src_port_l': {'max': '3', 'min': '3'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'rule_type': 'Terminal', 'src': 'default-domain:admin:vnet0', 'proto_l': {'max': '17', 'min': '17'}, 'ace_id': '5', 'dst': - 'default-domain:admin:vnet0', 'action_l': ['deny'], 'src_port_l': {'max': '0', 'min': '0'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'rule_type': 'Terminal', 'src': 'default-domain:admin:vnet0', 'proto_l': {'max': '17', 'min': '17'}, 'ace_id': '6', 'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'src_port_l': {'max': '1', 'min': '1'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'rule_type': 'Terminal', 'src': 'default-domain:admin:vnet0', 'proto_l': {'max': '17', 'min': '17'}, 'ace_id': '7', 'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'src_port_l': {'max': '2', 'min': '2'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'rule_type': 'Terminal', 'src': 'default-domain:admin:vnet0', 'proto_l': {'max': '17', 'min': '17'}, 'ace_id': '8', 'dst': 'default-domain:admin:vnet0', 'action_l': ['deny'], 'src_port_l': {'max': '3', 'min': '3'}, 'dst_port_l': {'max': '65535', 'min': '0'}}, {'rule_type': 'Terminal', 'src': 'default-domain:admin:vnet0', 'proto_l': {'max': '255', 'min': '0'}, 'ace_id': '9', 'dst': 'default-domain:admin:vnet0', 'action_l': ['pass'], 'src_port_l': {'max': '65535', 'min': '0'}, 'dst_port_l': {'max': '65535', 'min': '0'}}] - - updated_list = trim_realign_rules(input_data) - if updated_list == system_data: - print "Data compare of user-defined combined rules with system data successful!" - else: - print "Data compare after update failed!" 
- compare_rules_list(system_data, updated_list) - -# end __main__ diff --git a/common/servicechain/__init__.py b/common/servicechain/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/common/servicechain/config.py b/common/servicechain/config.py deleted file mode 100644 index 2a3d3c463..000000000 --- a/common/servicechain/config.py +++ /dev/null @@ -1,252 +0,0 @@ -import time - -import paramiko -import fixtures -from fabric.api import run, hide, settings -from tcutils.commands import ssh, execute_cmd, execute_cmd_out -from vn_test import VNFixture -from vm_test import VMFixture -from policy_test import PolicyFixture -from floating_ip import FloatingIPFixture -from svc_instance_fixture import SvcInstanceFixture -from svc_template_fixture import SvcTemplateFixture -from common.connections import ContrailConnections -from common.policy.config import AttachPolicyFixture -from tcutils.util import retry - - -class ConfigSvcChain(fixtures.TestWithFixtures): - - def delete_si_st(self, si_fixtures, st_fix): - for si_fix in si_fixtures: - self.logger.debug("Delete SI '%s'", si_fix.si_name) - si_fix.cleanUp() - self.remove_from_cleanups(si_fix) - - self.logger.debug("Delete ST '%s'", st_fix.st_name) - st_fix.cleanUp() - self.remove_from_cleanups(st_fix) - - def config_st_si(self, st_name, si_name_prefix, si_count, - svc_scaling=False, max_inst=1, domain='default-domain', project='admin', left_vn=None, - right_vn=None, svc_type='firewall', svc_mode='transparent', flavor='contrail_flavor_2cpu', static_route=['None', 'None', 'None'], ordered_interfaces=True, svc_img_name="vsrx"): - if svc_scaling == True: - if svc_mode == 'in-network-nat': - if_list = [['management', False, False], - ['left', True, False], ['right', False, False]] - else: - if_list = [['management', False, False], - ['left', True, False], ['right', True, False]] - else: - if_list = [['management', False, False], - ['left', False, False], ['right', False, False]] - - for entry in 
static_route: - if entry != 'None': - if_list[static_route.index(entry)][2] = True -# svc_img_name = "vsrx" - if left_vn and right_vn: - # In network/routed mode - if svc_mode == 'in-network': - svc_img_name = 'ubuntu-in-net' - if svc_scaling == True: - if_list = [['left', True, False], ['right', True, False]] - else: - if_list = [['left', False, False], ['right', False, False]] - elif left_vn: - # Analyzer mode - if svc_img_name != "analyzer": - svc_img_name = svc_img_name - else: - svc_img_name = "analyzer" - if_list = [['left', False, False]] - if svc_mode == 'transparent': - # No need to pass left vn for transparent mode. - left_vn = None - else: - # Transperent/bridge mode - svc_img_name = svc_img_name - # create service template - st_fixture = self.useFixture(SvcTemplateFixture( - connections=self.connections, inputs=self.inputs, domain_name=domain, - st_name=st_name, svc_img_name=svc_img_name, svc_type=svc_type, - if_list=if_list, svc_mode=svc_mode, svc_scaling=svc_scaling, flavor=flavor, ordered_interfaces=ordered_interfaces)) - assert st_fixture.verify_on_setup() - - # create service instances - si_fixtures = [] - for i in range(0, si_count): - verify_vn_ri = True - if i: - verify_vn_ri = False - si_name = si_name_prefix + str(i + 1) - si_fixture = self.useFixture(SvcInstanceFixture( - connections=self.connections, inputs=self.inputs, - domain_name=domain, project_name=project, si_name=si_name, - svc_template=st_fixture.st_obj, if_list=if_list, - left_vn_name=left_vn, right_vn_name=right_vn, do_verify=verify_vn_ri, max_inst=max_inst, static_route=static_route)) - si_fixtures.append(si_fixture) - - return (st_fixture, si_fixtures) - - def chain_si(self, si_count, si_prefix, project_name): - action_list = [] - for i in range(0, si_count): - si_name = si_prefix + str(i + 1) - # chain services by appending to action list - si_fq_name = 'default-domain' + ':' + project_name + ':' + si_name - action_list.append(si_fq_name) - return action_list - - def 
config_policy(self, policy_name, rules): - """Configures policy.""" - # create policy - policy_fix = self.useFixture(PolicyFixture( - policy_name=policy_name, rules_list=rules, - inputs=self.inputs, connections=self.connections)) - return policy_fix - - def config_vn(self, vn_name, vn_net): - vn_fixture = self.useFixture(VNFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_name=vn_name, inputs=self.inputs, subnets=vn_net)) - assert vn_fixture.verify_on_setup() - return vn_fixture - - def attach_policy_to_vn(self, policy_fix, vn_fix, policy_type=None): - policy_attach_fix = self.useFixture(AttachPolicyFixture( - self.inputs, self.connections, vn_fix, policy_fix, policy_type)) - return policy_attach_fix - - def config_and_verify_vm(self, vn_fix, vm_name, image_name='ubuntu-traffic'): - vm_fixture = self.config_vm(vn_fix, vm_name, image_name=image_name) - assert vm_fixture.verify_on_setup(), 'VM verification failed' - assert vm_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' - return vm_fixture - - def config_vm(self, vn_fix, vm_name, node_name=None, image_name='ubuntu-traffic', flavor='contrail_flavor_small'): - vm_fixture = self.useFixture(VMFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_obj=vn_fix.obj, vm_name=vm_name, node_name=node_name, image_name=image_name, flavor=flavor)) - return vm_fixture - - def config_fip(self, vn_id, pool_name): - fip_fixture = self.useFixture(FloatingIPFixture( - project_name=self.inputs.project_name, inputs=self.inputs, - connections=self.connections, pool_name=pool_name, - vn_id=vn_id)) - return fip_fixture - - def detach_policy(self, vn_policy_fix): - self.logger.debug("Removing policy from '%s'", - vn_policy_fix.vn_fixture.vn_name) - vn_policy_fix.cleanUp() - self.remove_from_cleanups(vn_policy_fix) - - def unconfig_policy(self, policy_fix): - """Un Configures policy.""" - self.logger.debug("Delete policy '%s'", policy_fix.policy_name) - 
policy_fix.cleanUp() - self.remove_from_cleanups(policy_fix) - - def delete_vn(self, vn_fix): - self.logger.debug("Delete vn '%s'", vn_fix.vn_name) - vn_fix.cleanUp() - self.remove_from_cleanups(vn_fix) - - def delete_vm(self, vm_fix): - self.logger.debug("Delete vm '%s'", vm_fix.vm_name) - vm_fix.cleanUp() - self.remove_from_cleanups(vm_fix) - - def get_svm_obj(self, vm_name): - for vm_obj in self.nova_h.get_vm_list(): - if vm_obj.name == vm_name: - return vm_obj - errmsg = "No VM named '%s' found in the compute" % vm_name - self.logger.error(errmsg) - assert False, errmsg - - @retry(delay=10, tries=15) - def is_svm_active(self, vm_name): - vm_status = self.get_svm_obj(vm_name).status - if vm_status == 'ACTIVE': - self.logger.debug('SVM state is active') - return True - else: - self.logger.warn('SVM %s is not yet active. Current state: %s' % - (vm_name, vm_status)) - return False - - def get_svm_compute(self, svm_name): - svm_obj = self.get_svm_obj(svm_name) - vm_nodeip = self.inputs.host_data[ - self.nova_h.get_nova_host_of_vm(svm_obj)]['host_ip'] - return self.inputs.host_data[vm_nodeip] - - def get_svm_tapintf(self, svm_name): - self.is_svm_active(svm_name) - svm_obj = self.get_svm_obj(svm_name) - vm_nodeip = self.inputs.host_data[ - self.nova_h.get_nova_host_of_vm(svm_obj)]['host_ip'] - inspect_h = self.agent_inspect[vm_nodeip] - self.logger.debug( - "svm_obj:'%s' compute_ip:'%s' agent_inspect:'%s'", svm_obj.__dict__, - vm_nodeip, inspect_h.get_vna_tap_interface_by_vm(vm_id=svm_obj.id)) - return inspect_h.get_vna_tap_interface_by_vm(vm_id=svm_obj.id)[0]['name'] - - def get_bridge_svm_tapintf(self, svm_name, direction): - self.is_svm_active(svm_name) - svm_obj = self.get_svm_obj(svm_name) - vm_nodeip = self.inputs.host_data[ - self.nova_h.get_nova_host_of_vm(svm_obj)]['host_ip'] - inspect_h = self.agent_inspect[vm_nodeip] - self.logger.debug( - "svm_obj:'%s' compute_ip:'%s' agent_inspect:'%s'", svm_obj.__dict__, - vm_nodeip, 
inspect_h.get_vna_tap_interface_by_vm(vm_id=svm_obj.id)) - tap_intf_list = [] - vn = 'svc-vn-' + direction - vrf = ':'.join(self.inputs.project_fq_name) + ':' + vn + ':' + vn - for entry in inspect_h.get_vna_tap_interface_by_vm(vm_id=svm_obj.id): - if entry['vrf_name'] == vrf: - self.logger.debug( - 'The %s tap-interface of %s is %s' % - (direction, svm_name, entry['name'])) - return entry['name'] - - def get_svm_tapintf_of_vn(self, svm_name, vn): - self.is_svm_active(svm_name) - svm_obj = self.get_svm_obj(svm_name) - vm_nodeip = self.inputs.host_data[ - self.nova_h.get_nova_host_of_vm(svm_obj)]['host_ip'] - inspect_h = self.agent_inspect[vm_nodeip] - self.logger.debug( - "svm_obj:'%s' compute_ip:'%s' agent_inspect:'%s'", svm_obj.__dict__, - vm_nodeip, inspect_h.get_vna_tap_interface_by_vm(vm_id=svm_obj.id)) - tap_intf_list = [] - for entry in inspect_h.get_vna_tap_interface_by_vm(vm_id=svm_obj.id): - if entry['vrf_name'] == vn.vrf_name: - self.logger.debug( - 'The tap interface corresponding to %s on %s is %s' % - (vn.vn_name, svm_name, entry['name'])) - return entry['name'] - - def get_svm_metadata_ip(self, svm_name): - tap_intf = self.get_svm_tapintf(svm_name) - tap_object = inspect_h.get_vna_intf_details(tap_intf['name']) - return tap_object['mdata_ip_addr'] - - def start_tcpdump_on_intf(self, host, tapintf): - session = ssh(host['host_ip'], host['username'], host['password']) - cmd = 'tcpdump -nni %s -c 10 > /tmp/%s_out.log' % (tapintf, tapintf) - execute_cmd(session, cmd, self.logger) - # end start_tcpdump_on_intf - - def stop_tcpdump_on_intf(self, host, tapintf): - session = ssh(host['host_ip'], host['username'], host['password']) - self.logger.info('Waiting for tcpdump to complete') - time.sleep(20) - output_cmd = 'cat /tmp/%s_out.log' % tapintf - out, err = execute_cmd_out(session, output_cmd, self.logger) - return out - # end stop_tcpdump_on_intf diff --git a/common/servicechain/firewall/__init__.py b/common/servicechain/firewall/__init__.py deleted file 
mode 100644 index e69de29bb..000000000 diff --git a/common/servicechain/firewall/verify.py b/common/servicechain/firewall/verify.py deleted file mode 100644 index 116076ced..000000000 --- a/common/servicechain/firewall/verify.py +++ /dev/null @@ -1,907 +0,0 @@ -from time import sleep -from common.servicechain.config import ConfigSvcChain -from common.servicechain.verify import VerifySvcChain -from common.servicechain.mirror.verify import VerifySvcMirror -from common.servicechain.mirror.config import ConfigSvcMirror -from tcutils.util import get_random_cidr -from tcutils.util import get_random_name - - -class VerifySvcFirewall(VerifySvcMirror): - - def verify_svc_span(self, in_net=False): - vn1_name = "left_vn" - vn1_subnets = ['31.1.1.0/24'] - vm1_name = 'left_vm' - vn2_name = "right_vn" - vn2_subnets = ['41.2.2.0/24'] - vm2_name = 'right_vm' - if in_net: - vn1_name = "in_left_vn" - vn1_subnets = ['32.1.1.0/24'] - vm1_name = 'in_left_vm' - vn2_name = "in_right_vn" - vn2_subnets = ['42.2.2.0/24'] - vm2_name = 'in_right_vm' - vn1_fixture = self.config_vn(vn1_name, vn1_subnets) - vn2_fixture = self.config_vn(vn2_name, vn2_subnets) - - vm1_fixture = self.config_vm(vn1_fixture, vm1_name) - vm2_fixture = self.config_vm(vn2_fixture, vm2_name) - assert vm1_fixture.verify_on_setup() - assert vm2_fixture.verify_on_setup() - vm1_fixture.wait_till_vm_is_up() - vm2_fixture.wait_till_vm_is_up() - - si_count = 3 - st_name = "tcp_svc_template" - si_prefix = "tcp_bridge_" - policy_name = "allow_tcp" - if in_net: - st_name = "in_tcp_svc_template" - si_prefix = "in_tcp_bridge_" - policy_name = "in_allow_tcp" - tcp_st_fixture, tcp_si_fixtures = self.config_st_si( - st_name, si_prefix, si_count, - left_vn=vn1_name, right_vn=vn2_name) - else: - tcp_st_fixture, tcp_si_fixtures = self.config_st_si( - st_name, si_prefix, si_count) - action_list = self.chain_si(si_count, si_prefix) - # Update rule with specific port/protocol - rule = [{'direction': '<>', - 'protocol': 'tcp', - 
'source_network': vn1_name, - 'src_ports': [8000, 8000], - 'dest_network': vn2_name, - 'dst_ports': [9000, 9000], - 'simple_action': None, - 'action_list': {'apply_service': action_list} - }] - - # Create new policy with rule to allow traffci from new VN's - tcp_policy_fixture = self.config_policy(policy_name, rule) - - self.verify_si(tcp_si_fixtures) - - st_name = "udp_svc_template" - si_prefix = "udp_bridge_" - policy_name = "allow_udp" - if in_net: - st_name = "in_udp_svc_template" - si_prefix = "in_udp_bridge_" - policy_name = "in_allow_udp" - udp_st_fixture, udp_si_fixtures = self.config_st_si( - st_name, si_prefix, si_count, - left_vn=vn1_name, right_vn=vn2_name) - else: - udp_st_fixture, udp_si_fixtures = self.config_st_si( - st_name, si_prefix, si_count) - action_list = self.chain_si(si_count, si_prefix) - # Update rule with specific port/protocol - rule = [{'direction': '<>', - 'protocol': 'udp', - 'source_network': vn1_name, - 'src_ports': [8001, 8001], - 'dest_network': vn2_name, - 'dst_ports': [9001, 9001], - 'simple_action': None, - 'action_list': {'apply_service': action_list} - }] - - # Create new policy with rule to allow traffci from new VN's - udp_policy_fixture = self.config_policy(policy_name, rule) - vn1_udp_policy_fix = self.attach_policy_to_vn( - [tcp_policy_fixture, udp_policy_fixture], vn1_fixture) - vn2_udp_policy_fix = self.attach_policy_to_vn( - [tcp_policy_fixture, udp_policy_fixture], vn2_fixture) - - result, msg = self.validate_vn(vn1_name) - assert result, msg - result, msg = self.validate_vn(vn2_name) - assert result, msg - self.verify_si(udp_si_fixtures) - - # Install traffic package in VM - vm1_fixture.install_pkg("Traffic") - vm2_fixture.install_pkg("Traffic") - - sport = 8001 - dport = 9001 - sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - sport = 8000 - 
dport = 9000 - sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture, - 'tcp', sport=sport, dport=dport) - errmsg = "TCP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - self.delete_si_st(tcp_si_fixtures, tcp_st_fixture) - - sport = 8001 - dport = 9001 - sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - sport = 8000 - dport = 9000 - sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture, - 'tcp', sport=sport, dport=dport) - errmsg = "TCP traffic with src port %s and dst port %s passed; Expected to fail" % ( - sport, dport) - assert sent and recv == 0, errmsg - - st_name = "tcp_svc_template" - si_prefix = "tcp_bridge_" - policy_name = "allow_tcp" - if in_net: - st_name = "in_tcp_svc_template" - si_prefix = "in_tcp_bridge_" - policy_name = "in_allow_tcp" - tcp_st_fixture, tcp_si_fixtures = self.config_st_si( - st_name, si_prefix, si_count, - left_vn=vn1_name, right_vn=vn2_name) - else: - tcp_st_fixture, tcp_si_fixtures = self.config_st_si( - st_name, si_prefix, si_count) - action_list = self.chain_si(si_count, si_prefix) - result, msg = self.validate_vn(vn1_name) - assert result, msg - result, msg = self.validate_vn(vn2_name) - assert result, msg - self.verify_si(tcp_si_fixtures) - - sport = 8001 - dport = 9001 - sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - sport = 8000 - dport = 9000 - sent, recv = self.verify_traffic(vm1_fixture, vm2_fixture, - 'tcp', sport=sport, dport=dport) - errmsg = "TCP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - def verify_svc_transparent_datapath(self, 
si_count=1, svc_scaling=False, max_inst=1, flavor='contrail_flavor_2cpu', proto='any', src_ports=[0, -1], dst_ports=[0, -1], svc_img_name='vsrx-bridge', ci=False): - """Validate the service chaining datapath""" - self.vn1_name = get_random_name('bridge_vn1') - self.vn1_subnets = [get_random_cidr()] - self.vm1_name = get_random_name('bridge_vm1') - self.vn2_name = get_random_name('bridge_vn2') - self.vn2_subnets = [get_random_cidr()] - self.vm2_name = get_random_name('bridge_vm2') - self.action_list = [] - self.if_list = [] - self.st_name = get_random_name('service_template_1') - si_prefix = get_random_name('bridge_si') + '_' - self.policy_name = get_random_name('policy_transparent') - self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) - self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets) - - self.st_fixture, self.si_fixtures = self.config_st_si( - self.st_name, si_prefix, si_count, svc_scaling, max_inst, flavor=flavor, project=self.inputs.project_name, svc_img_name=svc_img_name) - self.action_list = self.chain_si( - si_count, si_prefix, self.inputs.project_name) - - self.rules = [ - { - 'direction': '<>', - 'protocol': proto, - 'source_network': self.vn1_name, - 'src_ports': src_ports, - 'dest_network': self.vn2_name, - 'dst_ports': dst_ports, - 'simple_action': None, - 'action_list': {'apply_service': self.action_list} - }, - ] - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - if ci: - image_name = 'cirros-0.3.0-x86_64-uec' - else: - image_name = 'ubuntu-traffic' - self.vm1_fixture = self.config_and_verify_vm( - self.vn1_fixture, self.vm1_name, image_name) - self.vm2_fixture = self.config_and_verify_vm( - self.vn2_fixture, self.vm2_name, image_name) - self.verify_si(self.si_fixtures) - result, msg = self.validate_vn( - 
self.vn1_name, project_name=self.inputs.project_name) - assert result, msg - result, msg = self.validate_vn( - self.vn2_name, project_name=self.inputs.project_name) - assert result, msg - if proto not in ['any', 'icmp']: - self.logger.info('Will skip Ping test') - else: - # Ping from left VM to right VM - errmsg = "Ping to Right VM %s from Left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip, count='3'), errmsg - return True - - def verify_svc_in_network_datapath(self, si_count=1, svc_scaling=False, max_inst=1, svc_mode='in-network-nat', flavor='contrail_flavor_2cpu', static_route=['None', 'None', 'None'], ordered_interfaces=True, svc_img_name='vsrx', vn1_subnets=[get_random_cidr()], vn2_fixture=None, vn2_subnets=[get_random_cidr()], ci=False): - """Validate the service chaining in network datapath""" - - self.vn1_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn1") - self.vn1_name = self.vn1_fq_name.split(':')[2] - self.vn1_subnets = vn1_subnets - self.vm1_name = get_random_name("in_network_vm1") - self.vn2_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn2") - self.vn2_name = self.vn2_fq_name.split(':')[2] - self.vn2_subnets = vn2_subnets - self.vm2_name = get_random_name("in_network_vm2") - self.action_list = [] - self.if_list = [['management', False, False], - ['left', True, False], ['right', True, False]] - for entry in static_route: - if entry != 'None': - self.if_list[static_route.index(entry)][2] = True - self.st_name = get_random_name("in_net_svc_template_1") - si_prefix = get_random_name("in_net_svc_instance") + "_" - - self.policy_name = get_random_name("policy_in_network") - self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) - if vn2_fixture is None: - self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets) - else: - self.vn2_fixture = vn2_fixture - self.vn2_fq_name = 
vn2_fixture.vn_fq_name - self.vn2_name = self.vn2_fq_name.split(':')[2] - self.st_fixture, self.si_fixtures = self.config_st_si( - self.st_name, si_prefix, si_count, svc_scaling, max_inst, left_vn=self.vn1_fq_name, - right_vn=self.vn2_fq_name, svc_mode=svc_mode, flavor=flavor, static_route=static_route, ordered_interfaces=ordered_interfaces, svc_img_name=svc_img_name, project=self.inputs.project_name) - self.action_list = self.chain_si( - si_count, si_prefix, self.inputs.project_name) - self.rules = [ - { - 'direction': '<>', - 'protocol': 'any', - 'source_network': self.vn1_fq_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_fq_name, - 'dst_ports': [0, -1], - 'simple_action': None, - 'action_list': {'apply_service': self.action_list} - }, - ] - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - if ci: - image_name = 'cirros-0.3.0-x86_64-uec' - else: - image_name = 'ubuntu-traffic' - self.vm1_fixture = self.config_and_verify_vm( - self.vn1_fixture, self.vm1_name, image_name) - self.vm2_fixture = self.config_and_verify_vm( - self.vn2_fixture, self.vm2_name, image_name) - for si_fix in self.si_fixtures: - si_fix.verify_on_setup() - result, msg = self.validate_vn( - self.vn1_name, project_name=self.vn1_fixture.project_name) - assert result, msg - result, msg = self.validate_vn( - self.vn2_name, project_name=self.vn2_fixture.project_name) - assert result, msg - # Ping from left VM to right VM - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - return True - - def verify_multi_inline_svc(self, si_list=[('bridge', 1), ('in-net', 1), ('nat', 1)], flavor='contrail_flavor_2cpu', ordered_interfaces=True, vn1_subnets=[get_random_cidr()], 
vn2_subnets=[get_random_cidr()]): - """Validate in-line multi service chaining in network datapath""" - - self.vn1_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn1") - self.vn1_name = self.vn1_fq_name.split(':')[2] - self.vn1_subnets = vn1_subnets - self.vm1_name = get_random_name("in_network_vm1") - self.vn2_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn2") - self.vn2_name = self.vn2_fq_name.split(':')[2] - self.vn2_subnets = vn2_subnets - self.vm2_name = get_random_name("in_network_vm2") - self.action_list = [] - self.si_list = [] - self.policy_name = get_random_name("policy_in_network") - self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) - self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets) - for si in si_list: - self.if_list = [['management', False, False], - ['left', True, False], ['right', True, False]] - svc_scaling = False - si_count = 1 - self.st_name = get_random_name( - ("multi_sc_") + si[0] + "_" + str(si_list.index(si)) + ("_st")) - si_prefix = get_random_name( - ("multi_sc_") + si[0] + "_" + str(si_list.index(si)) + ("_si")) + "_" - max_inst = si[1] - left_vn = self.vn1_fq_name - right_vn = self.vn2_fq_name - if max_inst > 1: - svc_scaling = True - if si[0] == 'nat': - svc_mode = 'in-network-nat' - svc_img_name = 'vsrx' - elif si[0] == 'in-net': - svc_mode = 'in-network' - svc_img_name = 'ubuntu-in-net' - else: - svc_mode = 'transparent' - svc_img_name = 'tiny_trans_fw' - left_vn = None - right_vn = None - self.st_fixture, self.si_fixtures = self.config_st_si( - self.st_name, si_prefix, si_count, svc_scaling, max_inst, left_vn=left_vn, - right_vn=right_vn, svc_mode=svc_mode, flavor=flavor, - ordered_interfaces=ordered_interfaces, project=self.inputs.project_name, svc_img_name=svc_img_name) - action_step = self.chain_si( - si_count, si_prefix, self.inputs.project_name) - self.action_list += action_step - self.si_list += 
self.si_fixtures - self.rules = [ - { - 'direction': '<>', - 'protocol': 'any', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': None, - 'action_list': {'apply_service': self.action_list} - }, - ] - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - self.vm1_fixture = self.config_and_verify_vm( - self.vn1_fixture, self.vm1_name) - self.vm2_fixture = self.config_and_verify_vm( - self.vn2_fixture, self.vm2_name) - for si_fix in self.si_fixtures: - si_fix.verify_on_setup() - result, msg = self.validate_vn( - self.vn1_name, project_name=self.inputs.project_name) - assert result, msg - result, msg = self.validate_vn( - self.vn2_name, project_name=self.inputs.project_name) - assert result, msg - # Ping from left VM to right VM - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - return True - # end verify_multi_inline_svc - - def verify_policy_delete_add(self): - # Delete policy - self.detach_policy(self.vn1_policy_fix) - self.detach_policy(self.vn2_policy_fix) - self.unconfig_policy(self.policy_fixture) - # Ping from left VM to right VM; expected to fail - errmsg = "Ping to right VM ip %s from left VM passed; expected to fail" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip, expectation=False), errmsg - - # Create policy again - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - self.verify_si(self.si_fixtures) - - # Wait for 
the existing flow entry to age - sleep(40) - - # Ping from left VM to right VM - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - - return True - - def verify_protocol_port_change(self, mode='transparent'): - # Install traffic package in VM - self.vm1_fixture.install_pkg("Traffic") - self.vm2_fixture.install_pkg("Traffic") - - sport = 8000 - dport = 9000 - sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - sport = 8000 - dport = 9001 - sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture, - 'tcp', sport=sport, dport=dport) - errmsg = "TCP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - # Delete policy - self.detach_policy(self.vn1_policy_fix) - self.detach_policy(self.vn2_policy_fix) - self.unconfig_policy(self.policy_fixture) - - # Update rule with specific port/protocol - action_list = {'apply_service': self.action_list} - new_rule = {'direction': '<>', - 'protocol': 'tcp', - 'source_network': self.vn1_name, - 'src_ports': [8000, 8000], - 'dest_network': self.vn2_name, - 'dst_ports': [9001, 9001], - 'simple_action': None, - 'action_list': action_list - } - self.rules = [new_rule] - - # Create new policy with rule to allow traffci from new VN's - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - self.verify_si(self.si_fixtures) - - self.logger.debug("Send udp traffic; with policy rule %s", new_rule) - sport = 8000 - dport = 9000 - sent, recv = self.verify_traffic(self.vm1_fixture, 
self.vm2_fixture, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s passed; Expected to fail" % ( - sport, dport) - assert sent and recv == 0, errmsg - - sport = 8000 - dport = 9001 - self.logger.debug("Send tcp traffic; with policy rule %s", new_rule) - sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture, - 'tcp', sport=sport, dport=dport) - errmsg = "TCP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - return True - - def verify_add_new_vns(self): - # Delete policy - self.detach_policy(self.vn1_policy_fix) - self.detach_policy(self.vn2_policy_fix) - self.unconfig_policy(self.policy_fixture) - - # Create one more left and right VN's - new_left_vn = "new_left_bridge_vn" - new_left_vn_net = ['51.1.1.0/24'] - new_right_vn = "new_right_bridge_vn" - new_right_vn_net = ['52.2.2.0/24'] - new_left_vn_fix = self.config_vn(new_left_vn, new_left_vn_net) - new_right_vn_fix = self.config_vn(new_right_vn, new_right_vn_net) - - # Launch VMs in new left and right VN's - new_left_vm = 'new_left_bridge_vm' - new_right_vm = 'new_right_bridge_vm' - new_left_vm_fix = self.config_vm(new_left_vn_fix, new_left_vm) - new_right_vm_fix = self.config_vm(new_right_vn_fix, new_right_vm) - assert new_left_vm_fix.verify_on_setup() - assert new_right_vm_fix.verify_on_setup() - # Wait for VM's to come up - new_left_vm_fix.wait_till_vm_is_up() - new_right_vm_fix.wait_till_vm_is_up() - - # Add rule to policy to allow traffic from new left_vn to right_vn - # through SI - new_rule = {'direction': '<>', - 'protocol': 'any', - 'source_network': new_left_vn, - 'src_ports': [0, -1], - 'dest_network': new_right_vn, - 'dst_ports': [0, -1], - 'simple_action': None, - 'action_list': {'apply_service': self.action_list} - } - self.rules.append(new_rule) - - # Create new policy with rule to allow traffci from new VN's - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - 
self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - # attach policy to new VN's - new_policy_left_vn_fix = self.attach_policy_to_vn( - self.policy_fixture, new_left_vn_fix) - new_policy_right_vn_fix = self.attach_policy_to_vn( - self.policy_fixture, new_right_vn_fix) - - self.verify_si(self.si_fixtures) - - # Ping from left VM to right VM - sleep(5) - self.logger.info("Verfiy ICMP traffic between new VN's.") - errmsg = "Ping to right VM ip %s from left VM failed" % new_right_vm_fix.vm_ip - assert new_left_vm_fix.ping_with_certainty( - new_right_vm_fix.vm_ip), errmsg - - self.logger.info( - "Verfiy ICMP traffic between new left VN and existing right VN.") - errmsg = "Ping to right VM ip %s from left VM passed; \ - Expected tp Fail" % self.vm2_fixture.vm_ip - assert new_left_vm_fix.ping_with_certainty(self.vm2_fixture.vm_ip, - expectation=False), errmsg - - self.logger.info( - "Verfiy ICMP traffic between existing VN's with allow all.") - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - - self.logger.info( - "Verfiy ICMP traffic between existing left VN and new right VN.") - errmsg = "Ping to right VM ip %s from left VM passed; \ - Expected to Fail" % new_right_vm_fix.vm_ip - assert self.vm1_fixture.ping_with_certainty(new_right_vm_fix.vm_ip, - expectation=False), errmsg - - # Ping between left VN's - self.logger.info( - "Verfiy ICMP traffic between new left VN and existing left VN.") - errmsg = "Ping to left VM ip %s from another left VM in different VN \ - passed; Expected to fail" % self.vm1_fixture.vm_ip - assert new_left_vm_fix.ping_with_certainty(self.vm1_fixture.vm_ip, - expectation=False), errmsg - - self.logger.info( - "Verfiy ICMP traffic between new right VN and existing right VN.") - errmsg = "Ping to right VM ip %s 
from another right VM in different VN \ - passed; Expected to fail" % self.vm2_fixture.vm_ip - assert new_right_vm_fix.ping_with_certainty(self.vm2_fixture.vm_ip, - expectation=False), errmsg - # Delete policy - self.detach_policy(self.vn1_policy_fix) - self.detach_policy(self.vn2_policy_fix) - self.detach_policy(new_policy_left_vn_fix) - self.detach_policy(new_policy_right_vn_fix) - self.unconfig_policy(self.policy_fixture) - - # Add rule to policy to allow only tcp traffic from new left_vn to right_vn - # through SI - self.rules.remove(new_rule) - udp_rule = {'direction': '<>', - 'protocol': 'udp', - 'source_network': new_left_vn, - 'src_ports': [8000, 8000], - 'dest_network': new_right_vn, - 'dst_ports': [9000, 9000], - 'simple_action': None, - 'action_list': {'apply_service': self.action_list} - } - self.rules.append(udp_rule) - - # Create new policy with rule to allow traffci from new VN's - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - # attach policy to new VN's - new_policy_left_vn_fix = self.attach_policy_to_vn( - self.policy_fixture, new_left_vn_fix) - new_policy_right_vn_fix = self.attach_policy_to_vn( - self.policy_fixture, new_right_vn_fix) - self.verify_si(self.si_fixtures) - - # Ping from left VM to right VM with udp rule - self.logger.info( - "Verify ICMP traffic with allow udp only rule from new left VN to new right VN") - errmsg = "Ping to right VM ip %s from left VM passed; Expected to fail" % new_right_vm_fix.vm_ip - assert new_left_vm_fix.ping_with_certainty(new_right_vm_fix.vm_ip, - expectation=False), errmsg - # Install traffic package in VM - self.vm1_fixture.install_pkg("Traffic") - self.vm2_fixture.install_pkg("Traffic") - new_left_vm_fix.install_pkg("Traffic") - new_right_vm_fix.install_pkg("Traffic") - - self.logger.info( - "Verify 
UDP traffic with allow udp only rule from new left VN to new right VN") - sport = 8000 - dport = 9000 - sent, recv = self.verify_traffic(new_left_vm_fix, new_right_vm_fix, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - self.logger.info("Verfiy ICMP traffic with allow all.") - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - self.logger.info("Verify UDP traffic with allow all") - sport = 8001 - dport = 9001 - sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - # Delete policy - self.delete_vm(new_left_vm_fix) - self.delete_vm(new_right_vm_fix) - self.detach_policy(new_policy_left_vn_fix) - self.detach_policy(new_policy_right_vn_fix) - self.delete_vn(new_left_vn_fix) - self.delete_vn(new_right_vn_fix) - self.verify_si(self.si_fixtures) - - self.logger.info( - "Icmp traffic with allow all after deleting the new left and right VN.") - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - - return True - - def verify_add_new_vms(self): - # Launch VMs in new left and right VN's - new_left_vm = 'new_left_bridge_vm' - new_right_vm = 'new_right_bridge_vm' - new_left_vm_fix = self.config_vm(self.vn1_fixture, new_left_vm) - new_right_vm_fix = self.config_vm(self.vn2_fixture, new_right_vm) - assert new_left_vm_fix.verify_on_setup() - assert new_right_vm_fix.verify_on_setup() - # Wait for VM's to come up - new_left_vm_fix.wait_till_vm_is_up() - new_right_vm_fix.wait_till_vm_is_up() - - # Ping from left VM to right VM - errmsg = "Ping to right VM ip %s from left VM 
failed" % new_right_vm_fix.vm_ip - assert new_left_vm_fix.ping_with_certainty( - new_right_vm_fix.vm_ip), errmsg - - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert new_left_vm_fix.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - - errmsg = "Ping to right VM ip %s from left VM failed" % new_right_vm_fix.vm_ip - assert self.vm1_fixture.ping_with_certainty( - new_right_vm_fix.vm_ip), errmsg - - # Install traffic package in VM - self.vm1_fixture.install_pkg("Traffic") - self.vm2_fixture.install_pkg("Traffic") - self.logger.debug("Send udp traffic; with policy rule allow all") - sport = 8000 - dport = 9000 - sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - # Delete policy - self.detach_policy(self.vn1_policy_fix) - self.detach_policy(self.vn2_policy_fix) - self.unconfig_policy(self.policy_fixture) - - # Add rule to policy to allow traffic from new left_vn to right_vn - # through SI - new_rule = {'direction': '<>', - 'protocol': 'udp', - 'source_network': self.vn1_name, - 'src_ports': [8000, 8000], - 'dest_network': self.vn2_name, - 'dst_ports': [9000, 9000], - 'simple_action': None, - 'action_list': {'apply_service': self.action_list} - } - self.rules = [new_rule] - - # Create new policy with rule to allow traffci from new VN's - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - self.verify_si(self.si_fixtures) - - # Install traffic package in VM - 
new_left_vm_fix.install_pkg("Traffic") - new_right_vm_fix.install_pkg("Traffic") - - self.logger.debug("Send udp traffic; with policy rule %s", new_rule) - sport = 8000 - dport = 9000 - sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - sent, recv = self.verify_traffic(self.vm1_fixture, new_right_vm_fix, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - sent, recv = self.verify_traffic(new_left_vm_fix, new_right_vm_fix, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - sent, recv = self.verify_traffic(new_left_vm_fix, self.vm2_fixture, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - - # Ping from left VM to right VM - errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % new_right_vm_fix.vm_ip - assert new_left_vm_fix.ping_with_certainty( - new_right_vm_fix.vm_ip, expectation=False), errmsg - - errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % self.vm2_fixture.vm_ip - assert new_left_vm_fix.ping_with_certainty( - self.vm2_fixture.vm_ip, expectation=False), errmsg - - errmsg = "Ping to right VM ip %s from left VM failed; Expected to fail" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip, expectation=False), errmsg - - errmsg = "Ping to right VM ip %s from left VM passed; Expected to fail" % new_right_vm_fix.vm_ip - assert self.vm1_fixture.ping_with_certainty( - new_right_vm_fix.vm_ip, expectation=False), errmsg - - return True - - def verify_firewall_with_mirroring( 
- self, si_count=1, svc_scaling=False, max_inst=1, - firewall_svc_mode='in-network', mirror_svc_mode='transparent', flavor='contrail_flavor_2cpu', vn1_subnets=[get_random_cidr()], vn2_subnets=[get_random_cidr()]): - """Validate the service chaining in network datapath""" - - self.vn1_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn1") - self.vn1_name = self.vn1_fq_name.split(':')[2] - self.vn1_subnets = vn1_subnets - self.vm1_name = get_random_name("in_network_vm1") - self.vn2_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn2") - self.vn2_name = self.vn2_fq_name.split(':')[2] - self.vn2_subnets = vn2_subnets - self.vm2_name = get_random_name("in_network_vm2") - self.action_list = [] - self.firewall_st_name = get_random_name("svc_firewall_template_1") - firewall_si_prefix = get_random_name("svc_firewall_instance") + "_" - self.mirror_st_name = get_random_name("svc_mirror_template_1") - mirror_si_prefix = get_random_name("svc_mirror_instance") + "_" - self.policy_name = get_random_name("policy_in_network") - self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) - self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets) - if firewall_svc_mode == 'transparent': - self.if_list = [] - self.st_fixture, self.firewall_si_fixtures = self.config_st_si( - self.firewall_st_name, - firewall_si_prefix, si_count, - svc_scaling, max_inst, - left_vn=None, right_vn=None, - svc_mode=firewall_svc_mode, flavor=flavor, project=self.inputs.project_name) - if firewall_svc_mode == 'in-network'or firewall_svc_mode == 'in-network-nat': - self.if_list = [['management', False], - ['left', True], ['right', True]] - self.st_fixture, self.firewall_si_fixtures = self.config_st_si( - self.firewall_st_name, - firewall_si_prefix, si_count, - svc_scaling, max_inst, - left_vn=self.vn1_fq_name, - right_vn=self.vn2_fq_name, - svc_mode=firewall_svc_mode, flavor=flavor, 
project=self.inputs.project_name) - self.action_list = self.chain_si( - si_count, firewall_si_prefix, self.inputs.project_name) - self.st_fixture, self.mirror_si_fixtures = self.config_st_si( - self.mirror_st_name, - mirror_si_prefix, si_count, - left_vn=self.vn1_fq_name, - svc_type='analyzer', - svc_mode=mirror_svc_mode, flavor=flavor, project=self.inputs.project_name) - self.action_list += (self.chain_si(si_count, - mirror_si_prefix, self.inputs.project_name)) - self.rules = [ - { - 'direction': '<>', - 'protocol': 'any', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass', - 'mirror_to': {'analyzer_name': self.action_list[1]}, - 'apply_service': self.action_list[:1]} - }, - ] - - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - - self.vm1_fixture = self.config_vm(self.vn1_fixture, self.vm1_name) - self.vm2_fixture = self.config_vm(self.vn2_fixture, self.vm2_name) - self.vm1_fixture.wait_till_vm_is_up() - self.vm2_fixture.wait_till_vm_is_up() - - result, msg = self.validate_vn( - self.vn1_name, project_name=self.inputs.project_name) - assert result, msg - result, msg = self.validate_vn( - self.vn2_name, project_name=self.inputs.project_name) - assert result, msg - self.verify_si(self.firewall_si_fixtures) - self.verify_si(self.mirror_si_fixtures) - - for si_fix in self.firewall_si_fixtures: - svm_node_ip = si_fix.svm_compute_node_ip() - # Ping from left VM to right VM - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - - # Verify ICMP mirror - sessions = self.tcpdump_on_all_analyzer( - self.mirror_si_fixtures, 
mirror_si_prefix, si_count) - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - for svm_name, (session, pcap) in sessions.items(): - if self.vm1_fixture.vm_node_ip == self.vm2_fixture.vm_node_ip: - if firewall_svc_mode == 'transparent': - count = 20 - else: - count = 10 - if self.vm1_fixture.vm_node_ip != self.vm2_fixture.vm_node_ip: - if firewall_svc_mode == 'in-network' and self.vm1_fixture.vm_node_ip == svm_node_ip: - count = 10 - else: - count = 20 - self.verify_icmp_mirror(svm_name, session, pcap, count) - return True diff --git a/common/servicechain/mirror/__init__.py b/common/servicechain/mirror/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/common/servicechain/mirror/config.py b/common/servicechain/mirror/config.py deleted file mode 100644 index 7932d3c0a..000000000 --- a/common/servicechain/mirror/config.py +++ /dev/null @@ -1,43 +0,0 @@ -from time import sleep - -from common.servicechain.config import ConfigSvcChain -from tcutils.commands import ssh, execute_cmd, execute_cmd_out - - -class ConfigSvcMirror(ConfigSvcChain): - - def start_tcpdump(self, session, tap_intf): - pcap = '/tmp/mirror-%s.pcap' % tap_intf - cmd = 'rm -f %s' % pcap - execute_cmd(session, cmd, self.logger) - sleep(5) - cmd = "tcpdump -ni %s udp port 8099 -w %s" % (tap_intf, pcap) - self.logger.info("Staring tcpdump to capture the mirrored packets.") - execute_cmd(session, cmd, self.logger) - return pcap - - def stop_tcpdump(self, session, pcap): - self.logger.info("Waiting for the tcpdump write to complete.") - sleep(30) - cmd = 'kill $(pidof tcpdump)' - execute_cmd(session, cmd, self.logger) - execute_cmd(session, 'sync', self.logger) - cmd = 'tcpdump -r %s | wc -l' % pcap - out, err = execute_cmd_out(session, cmd, self.logger) - count = int(out.strip('\n')) - cmd = 'rm -f %s' % pcap - execute_cmd(session, cmd, self.logger) - return count - - 
def tcpdump_on_all_analyzer(self, si_fixtures, si_prefix, si_count=1): - sessions = {} - for i in range(0, si_count): - si_fixture = si_fixtures[i] - svm_name = "__".join(si_fixture.si_fq_name) + "__" + str(1) - host = self.get_svm_compute(svm_name) - tapintf = self.get_svm_tapintf(svm_name) - session = ssh(host['host_ip'], host['username'], host['password']) - pcap = self.start_tcpdump(session, tapintf) - sessions.update({svm_name: (session, pcap)}) - - return sessions diff --git a/common/servicechain/mirror/verify.py b/common/servicechain/mirror/verify.py deleted file mode 100644 index dadef86fa..000000000 --- a/common/servicechain/mirror/verify.py +++ /dev/null @@ -1,1422 +0,0 @@ -import os -from time import sleep -from tcutils.util import get_random_cidr -from tcutils.util import get_random_name -from tcutils.util import retry -from tcutils.commands import ssh, execute_cmd, execute_cmd_out -from config import ConfigSvcMirror -from common.servicechain.verify import VerifySvcChain -from common.ecmp.ecmp_verify import ECMPVerify -from common.floatingip.config import CreateAssociateFip -from random import randint -from common.openstack_libs import network_exception as exceptions - - -class VerifySvcMirror(ConfigSvcMirror, VerifySvcChain, ECMPVerify): - - def verify_svc_mirroring(self, si_count=1, svc_mode='transparent', ci=False): - """Validate the service chaining datapath - Test steps: - 1. Create the SI/ST in svc_mode specified. - 2. Create vn11/vm1, vn21/vm2 - 3. Create the policy rule for ICMP/UDP and attach to vn's - 4. Send the traffic from vm1 to vm2 and verify if the packets gets mirrored to the analyzer - 5. If its a single analyzer only ICMP(5 pkts) will be sent else ICMP and UDP traffic will be sent. 
- Pass criteria : - count = sent - single node : Pkts mirrored to the analyzer should be equal to 'count' - multinode :Pkts mirrored to the analyzer should be equal to '2xcount' - """ - vn1_subnets = [get_random_cidr()] - vn2_subnets = [get_random_cidr()] - self.vn1_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn1") - self.vn1_name = self.vn1_fq_name.split(':')[2] - self.vn1_subnets = vn1_subnets - self.vm1_name = get_random_name("in_network_vm1") - self.vn2_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn2") - self.vn2_name = self.vn2_fq_name.split(':')[2] - self.vn2_subnets = vn2_subnets - self.vm2_name = get_random_name("in_network_vm2") - - si_count = si_count - self.action_list = [] - self.if_list = [] - self.st_name = get_random_name("st1") - self.si_prefix = get_random_name("mirror_si") + "_" - self.policy_name = get_random_name("mirror_policy") - self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) - self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets) - if ci: - svc_img_name = 'cirros-0.3.0-x86_64-uec' - image_name = 'cirros-0.3.0-x86_64-uec' - else: - svc_img_name = "vsrx" - image_name = 'ubuntu-traffic' - self.st_fixture, self.si_fixtures = self.config_st_si(self.st_name, - self.si_prefix, si_count, left_vn=self.vn1_fq_name, svc_type='analyzer', svc_mode=svc_mode, project=self.inputs.project_name, svc_img_name=svc_img_name) - self.action_list = self.chain_si( - si_count, self.si_prefix, self.inputs.project_name) - - self.rules = [{'direction': '<>', - 'protocol': 'icmp', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass', - 'mirror_to': {'analyzer_name': self.action_list[0]}} - } - ] - if len(self.action_list) == 2: - self.rules.append({'direction': '<>', - 'protocol': 'udp', - 'source_network': 
self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass', - 'mirror_to': {'analyzer_name': self.action_list[1]}} - } - ) - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - - # Making sure VM falls on diffrent compute host - host_list = [] - for host in self.inputs.compute_ips: - host_list.append(self.inputs.host_data[host]['name']) - compute_1 = host_list[0] - compute_2 = host_list[0] - if len(host_list) > 1: - compute_1 = host_list[0] - compute_2 = host_list[1] - self.vm1_fixture = self.config_vm( - self.vn1_fixture, self.vm1_name, node_name=compute_1, image_name=image_name) - self.vm2_fixture = self.config_vm( - self.vn2_fixture, self.vm2_name, node_name=compute_2, image_name=image_name) - assert self.vm1_fixture.verify_on_setup() - assert self.vm2_fixture.verify_on_setup() - self.nova_h.wait_till_vm_is_up(self.vm1_fixture.vm_obj) - self.nova_h.wait_till_vm_is_up(self.vm2_fixture.vm_obj) - result, msg = self.validate_vn( - self.vn1_name, project_name=self.inputs.project_name) - assert result, msg - result, msg = self.validate_vn( - self.vn2_name, project_name=self.inputs.project_name) - assert result, msg - self.verify_si(self.si_fixtures) - # Verify ICMP traffic mirror - if ci: - return self.verify_mirroring(self.si_fixtures, self.vm1_fixture, self.vm2_fixture) - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - for svm_name, (session, pcap) in sessions.items(): - svm = {} - svm = self.get_svms_in_si( - self.si_fixtures[0], self.inputs.project_name) - if 
svm_name == svm[0].name: - count = 10 - if svc_mode == 'transparent' and self.vm1_fixture.vm_node_ip != self.vm2_fixture.vm_node_ip: - count = count * 2 - self.verify_icmp_mirror(svm_name, session, pcap, count) - - # One mirror instance - if len(self.action_list) != 2: - return True - - # Verify UDP traffic mirror - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - # Install traffic package in VM - self.vm1_fixture.install_pkg("Traffic") - self.vm2_fixture.install_pkg("Traffic") - - sport = 8001 - dport = 9001 - sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - for svm_name, (session, pcap) in sessions.items(): - count = sent - svm = {} - svm = self.get_svms_in_si( - self.si_fixtures[1], self.inputs.project_name) - if svm_name == svm[0].name: - count = sent - if svc_mode == 'transparent' and self.vm1_fixture.vm_node_ip != self.vm2_fixture.vm_node_ip: - count = count * 2 - self.verify_l4_mirror(svm_name, session, pcap, count, 'udp') - - return True - - def verify_svc_mirroring_with_floating_ip(self, si_count=1): - """Validate the service mirrroring with flaoting IP - Test steps: - 1. Create the SI/ST in svc_mode specified. - 2. Create vn11/vm1, vn21/vm2 - 3. Assosciate vm2 with floating IP - 3. Create the policy rule for ICMP/UDP and attach to vn's - 4. Send the traffic from vm1 to vm2(floating ip) and verify if the packets gets mirrored to the analyzer - 5. If its a single analyzer only ICMP(5 pkts) will be sent else ICMP and UDP traffic will be sent. 
- Pass criteria : - count = sent - single node : Pkts mirrored to the analyzer should be equal to 'count' - multinode :Pkts mirrored to the analyzer should be equal to '2xcount' - """ - - vn1_subnets = [get_random_cidr()] - vn2_subnets = [get_random_cidr()] - self.vn1_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn1") - self.vn1_name = self.vn1_fq_name.split(':')[2] - self.vn1_subnets = vn1_subnets - self.vm1_name = get_random_name("in_network_vm1") - self.vn2_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn2") - self.vn2_name = self.vn2_fq_name.split(':')[2] - self.vn2_subnets = vn2_subnets - self.vm2_name = get_random_name("in_network_vm2") - - si_count = si_count - self.action_list = [] - self.if_list = [] - self.st_name = get_random_name("st1") - self.si_prefix = get_random_name("mirror_si") + "_" - self.policy_name = get_random_name("mirror_policy") - self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) - self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets) - - fip_pool_name = get_random_name('testpool') - - self.st_fixture, self.si_fixtures = self.config_st_si(self.st_name, - self.si_prefix, si_count, svc_type='analyzer', left_vn=self.vn1_name, project=self.inputs.project_name) - self.action_list = self.chain_si( - si_count, self.si_prefix, self.inputs.project_name) - self.rules = [{'direction': '<>', - 'protocol': 'icmp', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn1_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass', - 'mirror_to': {'analyzer_name': self.action_list[0]}} - } - ] - if len(self.action_list) == 2: - self.rules.append({'direction': '<>', - 'protocol': 'udp', - 'source_network': self.vn1_name, - 'src_ports': [8001, 8001], - 'dest_network': self.vn1_name, - 'dst_ports': [9001, 9001], - 'simple_action': 'pass', - 'action_list': 
{'simple_action': 'pass', - 'mirror_to': {'analyzer_name': self.action_list[1]}} - } - ) - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - - # Making sure VM falls on diffrent compute host - host_list = [] - for host in self.inputs.compute_ips: - host_list.append(self.inputs.host_data[host]['name']) - compute_1 = host_list[0] - compute_2 = host_list[0] - if len(host_list) > 1: - compute_1 = host_list[0] - compute_2 = host_list[1] - self.vm1_fixture = self.config_vm( - self.vn1_fixture, self.vm1_name, node_name=compute_1) - self.vm2_fixture = self.config_vm( - self.vn2_fixture, self.vm2_name, node_name=compute_2) - assert self.vm1_fixture.verify_on_setup() - assert self.vm2_fixture.verify_on_setup() - self.nova_h.wait_till_vm_is_up(self.vm1_fixture.vm_obj) - self.nova_h.wait_till_vm_is_up(self.vm2_fixture.vm_obj) - - result, msg = self.validate_vn( - self.vn1_name, project_name=self.inputs.project_name) - assert result, msg - self.verify_si(self.si_fixtures) - - self.fip_fixture = self.config_fip( - self.vn1_fixture.vn_id, pool_name=fip_pool_name) - self.fip_ca = self.useFixture(CreateAssociateFip(self.inputs, self.fip_fixture, - self.vn1_fixture.vn_id, - self.vm2_fixture.vm_id)) - fip = self.vm2_fixture.vnc_lib_h.floating_ip_read(id=self.fip_ca.fip_id).\ - get_floating_ip_address() - - # Verify ICMP traffic mirror - # sessions = self.tcpdump_on_all_analyzer(self.si_prefix, si_count) - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty(fip), errmsg - for svm_name, (session, pcap) in sessions.items(): - svm = {} - svm = self.get_svms_in_si( - self.si_fixtures[0], self.inputs.project_name) - if svm_name == svm[0].name: - count = 10 - if self.vm1_fixture.vm_node_ip != 
self.vm2_fixture.vm_node_ip: - count = count * 2 - self.verify_icmp_mirror(svm_name, session, pcap, count) - - # One mirror instance - if len(self.action_list) != 2: - return True - - # Verify UDP traffic mirror - # sessions = self.tcpdump_on_all_analyzer(self.si_prefix, si_count) - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - # Install traffic package in VM - self.vm1_fixture.install_pkg("Traffic") - self.vm2_fixture.install_pkg("Traffic") - - sport = 8001 - dport = 9001 - sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture, - 'udp', sport=sport, dport=dport, fip=fip) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - for svm_name, (session, pcap) in sessions.items(): - count = sent - svm = {} - svm = self.get_svms_in_si( - self.si_fixtures[1], self.inputs.project_name) - if svm_name == svm[0].name: - count = sent - if self.vm1_fixture.vm_node_ip != self.vm2_fixture.vm_node_ip: - count = count * 2 - self.verify_l4_mirror(svm_name, session, pcap, count, 'udp') - - return True - - def verify_svc_mirror_with_deny(self, si_count=1): - """Validate the service chaining mirroring with deny rule - Test steps: - 1. Create the SI/ST in svc_mode specified. - 2. Create vn11/vm1, vn21/vm2 - 3. Create the policy rule for ICMP/UDP with deny rule and attach to vn's - 4. Cretae the dynamic policy with rule to mirror the pkts to analyzer and attach to VN's - 5. Send the traffic from vm1 to vm2 and verify if the packets gets mirrored to the analyzer - 5. If its a single analyzer only ICMP(5 pkts) will be sent else ICMP and UDP traffic will be sent. - Pass criteria : - Ping from should fail, only the pkts from vm1 should get mirrored. 
- count = sent - single node : Pkts mirrored to the analyzer should be equal to 'count' - multinode :Pkts mirrored to the analyzer should be equal to '2xcount' - """ - vn1_subnets = [get_random_cidr()] - vn2_subnets = [get_random_cidr()] - self.vn1_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn1") - self.vn1_name = self.vn1_fq_name.split(':')[2] - self.vn1_subnets = vn1_subnets - self.vm1_name = get_random_name("in_network_vm1") - self.vn2_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn2") - self.vn2_name = self.vn2_fq_name.split(':')[2] - self.vn2_subnets = vn2_subnets - self.vm2_name = get_random_name("in_network_vm2") - - si_count = si_count - self.action_list = [] - self.if_list = [] - self.st_name = get_random_name("st1") - self.si_prefix = get_random_name("mirror_si") + "_" - self.policy_name = get_random_name("mirror_policy") - self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) - self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets) - - self.dynamic_policy_name = get_random_name("mirror_policy") - self.rules = [{'direction': '<>', - 'protocol': 'icmp', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': 'deny', - }] - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - - # Making sure VM falls on diffrent compute host - host_list = [] - for host in self.inputs.compute_ips: - host_list.append(self.inputs.host_data[host]['name']) - compute_1 = host_list[0] - compute_2 = host_list[0] - if len(host_list) > 1: - compute_1 = host_list[0] - compute_2 = host_list[1] - self.vm1_fixture = self.config_vm( - self.vn1_fixture, self.vm1_name, 
node_name=compute_1) - self.vm2_fixture = self.config_vm( - self.vn2_fixture, self.vm2_name, node_name=compute_2) - assert self.vm1_fixture.verify_on_setup() - assert self.vm2_fixture.verify_on_setup() - self.nova_h.wait_till_vm_is_up(self.vm1_fixture.vm_obj) - self.nova_h.wait_till_vm_is_up(self.vm2_fixture.vm_obj) - - self.st_fixture, self.si_fixtures = self.config_st_si(self.st_name, - self.si_prefix, si_count, svc_type='analyzer', left_vn=self.vn1_name, project=self.inputs.project_name) - self.action_list = self.chain_si( - si_count, self.si_prefix, self.inputs.project_name) - - dynamic_rules = [{'direction': '<>', - 'protocol': 'icmp', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass', - 'mirror_to': {'analyzer_name': self.action_list[0]}} - }] - dynamic_policy_fixture = self.config_policy( - self.dynamic_policy_name, dynamic_rules) - vn1_dynamic_policy_fix = self.attach_policy_to_vn( - dynamic_policy_fixture, self.vn1_fixture, policy_type='dynamic') - vn2_dynamic_policy_fix = self.attach_policy_to_vn( - dynamic_policy_fixture, self.vn2_fixture, policy_type='dynamic') - result, msg = self.validate_vn( - self.vn1_name, project_name=self.inputs.project_name) - assert result, msg - result, msg = self.validate_vn( - self.vn2_name, project_name=self.inputs.project_name) - assert result, msg - self.verify_si(self.si_fixtures) - - # Verify ICMP traffic mirror - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - errmsg = "Ping to right VM ip %s from left VM passed; expected to fail" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip, expectation=False), errmsg - for svm_name, (session, pcap) in sessions.items(): - svm = {} - svm = self.get_svms_in_si( - self.si_fixtures[0], self.inputs.project_name) - if svm_name == svm[0].name: - count = 5 - 
self.verify_icmp_mirror(svm_name, session, pcap, count) - - return True - - def verify_icmp_mirror_on_all_analyzer(self, sessions, left_vm_fix, right_vm_fix, expectation=True): - # Ping from left VM to right VM - errmsg = "Ping to right VM ip %s from left VM failed" % right_vm_fix.vm_ip - if not expectation: - errmsg = "Ping to right VM ip %s from left VM passed, Expected to fail" % right_vm_fix.vm_ip - assert left_vm_fix.ping_with_certainty( - right_vm_fix.vm_ip, expectation=expectation), errmsg - - count = 10 - if not expectation: - count = 0 - if left_vm_fix.vm_node_ip != right_vm_fix.vm_node_ip: - count = count * 2 - for svm_name, (session, pcap) in sessions.items(): - self.verify_icmp_mirror(svm_name, session, pcap, count) - - return True - - def verify_icmp_mirror(self, svm_name, session, pcap, exp_count): - mirror_pkt_count = self.stop_tcpdump(session, pcap) - errmsg = "%s ICMP Packets mirrored to the analyzer VM %s,"\ - "Expected %s packets" % ( - mirror_pkt_count, svm_name, exp_count) - if not mirror_pkt_count == exp_count: - self.logger.error(errmsg) - assert False, errmsg - self.logger.info("%s ICMP packets are mirrored to the analyzer " - "service VM '%s'", mirror_pkt_count, svm_name) - - return True - - def verify_udp_mirror_on_all_analyzer(self, sessions, left_vm_fix, right_vm_fix, expectation=True): - return self.verify_l4_mirror_on_all_analyzer(sessions, left_vm_fix, right_vm_fix, proto='udp', expectation=True) - - def verify_tcp_mirror_on_all_analyzer(self, sessions, left_vm_fix, right_vm_fix, expectation=True): - return self.verify_l4_mirror_on_all_analyzer(sessions, left_vm_fix, right_vm_fix, proto='tcp', expectation=True) - - def verify_l4_mirror_on_all_analyzer(self, sessions, left_vm_fix, right_vm_fix, proto, expectation=True): - # Install traffic package in VM - left_vm_fix.install_pkg("Traffic") - right_vm_fix.install_pkg("Traffic") - - sport = 8001 - dport = 9001 - sent, recv = self.verify_traffic(left_vm_fix, right_vm_fix, - proto, 
sport=sport, dport=dport) - errmsg = "'%s' traffic with src port %s and dst port %s failed" % ( - proto, sport, dport) - count = sent - if not expectation: - count = 0 - errmsg = "'%s' traffic with src port %s and dst port %s passed; Expected to fail" % ( - proto, sport, dport) - if left_vm_fix.vm_node_ip != right_vm_fix.vm_node_ip: - count = count * 2 - assert sent and recv == sent, errmsg - for svm_name, (session, pcap) in sessions.items(): - self.verify_l4_mirror(svm_name, session, pcap, exp_count, proto) - - return True - - @retry(delay=2, tries=6) - def verify_l4_mirror(self, svm_name, session, pcap, exp_count, proto): - mirror_pkt_count = self.stop_tcpdump(session, pcap) - errmsg = "%s '%s' Packets mirrored to the analyzer VM, "\ - "Expected %s packets" % (mirror_pkt_count, proto, exp_count) - assert mirror_pkt_count == exp_count, errmsg - self.logger.info("%s '%s' packets are mirrored to the analyzer " - "service VM '%s'", mirror_pkt_count, proto, svm_name) - return True - - @retry(delay=2, tries=6) - def verify_mirroring(self, si_fix, src_vm, dst_vm): - result = True - svms = self.get_svms_in_si(si_fix[0], self.inputs.project_name) - svm = svms[0] - if svm.status == 'ACTIVE': - svm_name = svm.name - host = self.get_svm_compute(svm_name) - tapintf = self.get_bridge_svm_tapintf(svm_name, 'left') - session = ssh(host['host_ip'], host['username'], host['password']) - cmd = 'tcpdump -nni %s -c 5 > /tmp/%s_out.log' % (tapintf, tapintf) - execute_cmd(session, cmd, self.logger) - assert src_vm.ping_with_certainty(dst_vm.vm_ip) - sleep(10) - output_cmd = 'cat /tmp/%s_out.log' % tapintf - out, err = execute_cmd_out(session, output_cmd, self.logger) - print out - if '8099' in out: - self.logger.info('Mirroring action verified') - else: - result = False - self.logger.info('No mirroring action seen') - return result - - def verify_policy_delete_add(self, si_prefix, si_count=1): - # Delete policy - self.detach_policy(self.vn1_policy_fix) - 
self.detach_policy(self.vn2_policy_fix) - self.unconfig_policy(self.policy_fixture) - # Ping from left VM to right VM; expected to fail - errmsg = "Ping to right VM ip %s from left VM passed; expected to fail" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip, expectation=False), errmsg - # sessions = self.tcpdump_on_all_analyzer(si_prefix, si_count) - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - for svm_name, (session, pcap) in sessions.items(): - count = 0 - self.verify_icmp_mirror(svm_name, session, pcap, count) - - # Create policy again - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - self.attach_policy_to_vn(self.policy_fixture, self.vn1_fixture) - self.attach_policy_to_vn(self.policy_fixture, self.vn2_fixture) - self.verify_si(self.si_fixtures) - - # Verify ICMP traffic mirror - # sessions = self.tcpdump_on_all_analyzer(si_prefix, si_count) - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - svmname = si_prefix + str('2_1') - for svm_name, (session, pcap) in sessions.items(): - count = 10 - if svm_name == svmname: - count = 0 - if self.vm1_fixture.vm_node_ip != self.vm2_fixture.vm_node_ip: - count = count * 2 - self.verify_icmp_mirror(svm_name, session, pcap, count) - - return True - - def verify_add_new_vns(self, si_prefix, si_count=1): - # Delete policy - self.detach_policy(self.vn1_policy_fix) - self.detach_policy(self.vn2_policy_fix) - self.unconfig_policy(self.policy_fixture) - - # Create one more left and right VN's - new_left_vn = "new_left_bridge_vn" - new_left_vn_net = ['51.1.1.0/24'] - new_right_vn = "new_right_bridge_vn" - new_right_vn_net = ['52.2.2.0/24'] - new_left_vn_fix = self.config_vn(new_left_vn, new_left_vn_net) - 
new_right_vn_fix = self.config_vn(new_right_vn, new_right_vn_net) - - # Launch VMs in new left and right VN's - new_left_vm = 'new_left_bridge_vm' - new_right_vm = 'new_right_bridge_vm' - new_left_vm_fix = self.config_vm(new_left_vn_fix, new_left_vm) - new_right_vm_fix = self.config_vm(new_right_vn_fix, new_right_vm) - assert new_left_vm_fix.verify_on_setup() - assert new_right_vm_fix.verify_on_setup() - # Wait for VM's to come up - self.nova_h.wait_till_vm_is_up(new_left_vm_fix.vm_obj) - self.nova_h.wait_till_vm_is_up(new_right_vm_fix.vm_obj) - - # Add rule to policy to allow traffic from new left_vn to right_vn - # through SI - new_rule = {'direction': '<>', - 'protocol': 'udp', - 'source_network': new_left_vn, - 'src_ports': [0, -1], - 'dest_network': new_right_vn, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass', - 'mirror_to': {'analyzer_name': self.action_list[0]}} - } - self.rules.append(new_rule) - if len(self.action_list) == 2: - self.rules.append({'direction': '<>', - 'protocol': 'icmp', - 'source_network': new_left_vn, - 'src_ports': [0, -1], - 'dest_network': new_right_vn, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass', - 'mirror_to': {'analyzer_name': self.action_list[1]}} - } - ) - - # Create new policy with rule to allow traffic from new VN's - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - self.attach_policy_to_vn(self.policy_fixture, self.vn1_fixture) - self.attach_policy_to_vn(self.policy_fixture, self.vn2_fixture) - self.attach_policy_to_vn(self.policy_fixture, new_left_vn_fix) - self.attach_policy_to_vn(self.policy_fixture, new_right_vn_fix) - self.verify_si(self.si_fixtures) - - # Verify ICMP traffic mirror between existing VN's - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert 
self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - svmname = si_prefix + str('2_1') - for svm_name, (session, pcap) in sessions.items(): - count = 10 - if svm_name == svmname: - count = 0 - if self.vm1_fixture.vm_node_ip != self.vm2_fixture.vm_node_ip: - count = count * 2 - self.verify_icmp_mirror(svm_name, session, pcap, count) - - # Verify UDP traffic mirror between New VN's - # sessions = self.tcpdump_on_all_analyzer(self.si_prefix, si_count) - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - # Install traffic package in VM - new_left_vm_fix.install_pkg("Traffic") - new_right_vm_fix.install_pkg("Traffic") - - sport = 8001 - dport = 9001 - sent, recv = self.verify_traffic(new_left_vm_fix, new_right_vm_fix, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - svmname = si_prefix + str('2_1') - for svm_name, (session, pcap) in sessions.items(): - count = sent - if svm_name == svmname: - count = 0 - if new_left_vm_fix.vm_node_ip != new_right_vm_fix.vm_node_ip: - count = count * 2 - self.verify_l4_mirror(svm_name, session, pcap, count, 'udp') - - # One mirror instance - if len(self.action_list) != 2: - return True - - # Verify UDP traffic mirror traffic between existing VN's - # sessions = self.tcpdump_on_all_analyzer(self.si_prefix, si_count) - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - # Install traffic package in VM - self.vm1_fixture.install_pkg("Traffic") - self.vm2_fixture.install_pkg("Traffic") - - sport = 8001 - dport = 9001 - sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - svmname = si_prefix + str('1_1') - for svm_name, (session, pcap) in sessions.items(): - count = 
sent - if svm_name == svmname: - count = 0 - if self.vm1_fixture.vm_node_ip != self.vm2_fixture.vm_node_ip: - count = count * 2 - self.verify_l4_mirror(svm_name, session, pcap, count, 'udp') - - # Verify ICMP traffic mirror between new VN's - # sessions = self.tcpdump_on_all_analyzer(si_prefix, si_count) - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - errmsg = "Ping to right VM ip %s from left VM failed" % new_right_vm_fix.vm_ip - assert new_left_vm_fix.ping_with_certainty( - new_right_vm_fix.vm_ip), errmsg - svmname = si_prefix + str('1_1') - for svm_name, (session, pcap) in sessions.items(): - count = 10 - if svm_name == svmname: - count = 0 - if left_vm_fix.vm_node_ip != right_vm_fix.vm_node_ip: - count = count * 2 - self.verify_icmp_mirror(svm_name, session, pcap, count) - - return True - - def verify_svc_mirroring_unidirection(self, si_count=1, svc_mode='transparent'): - """Validate the service chaining datapath with unidirection traffic - Test steps: - 1. Create the SI/ST in svc_mode specified. - 2. Create vn11/vm1, vn21/vm2 - 3. Create the policy rule for ICMP/UDP with 'unidirection rule' and attach to vn's - 4. Send the traffic from vm1 to vm2 and verify if the packets gets mirrored to the analyzer - 5. If its a single analyzer only ICMP(5 pkts) will be sent else ICMP and UDP traffic will be sent. - Pass criteria : - Pinf from vm1 to vm2 should fail. Only the pkts from vm1 should get mirrored. 
- count = sent - single node : Pkts mirrored to the analyzer should be equal to 'count' - multinode :Pkts mirrored to the analyzer should be equal to '2xcount' - """ - vn1_subnets = [get_random_cidr()] - vn2_subnets = [get_random_cidr()] - self.vn1_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn1") - self.vn1_name = self.vn1_fq_name.split(':')[2] - self.vn1_subnets = vn1_subnets - self.vm1_name = get_random_name("in_network_vm1") - self.vn2_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn2") - self.vn2_name = self.vn2_fq_name.split(':')[2] - self.vn2_subnets = vn2_subnets - self.vm2_name = get_random_name("in_network_vm2") - - si_count = si_count - self.action_list = [] - self.if_list = [] - self.st_name = get_random_name("st1") - self.si_prefix = get_random_name("mirror_si") + "_" - self.policy_name = get_random_name("mirror_policy") - self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) - self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets) - - self.st_fixture, self.si_fixtures = self.config_st_si(self.st_name, - self.si_prefix, si_count, left_vn=self.vn1_name, svc_type='analyzer', svc_mode=svc_mode, project=self.inputs.project_name) - self.action_list = self.chain_si( - si_count, self.si_prefix, self.inputs.project_name) - - self.rules = [{'direction': '>', - 'protocol': 'icmp', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass', - 'mirror_to': {'analyzer_name': self.action_list[0]}} - } - ] - if len(self.action_list) == 2: - self.rules.append({'direction': '>', - 'protocol': 'udp', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass', - 'mirror_to': {'analyzer_name': 
self.action_list[1]}} - } - ) - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - - # Making sure VM falls on diffrent compute host - host_list = [] - for host in self.inputs.compute_ips: - host_list.append(self.inputs.host_data[host]['name']) - compute_1 = host_list[0] - compute_2 = host_list[0] - if len(host_list) > 1: - compute_1 = host_list[0] - compute_2 = host_list[1] - self.vm1_fixture = self.config_vm( - self.vn1_fixture, self.vm1_name, node_name=compute_1) - self.vm2_fixture = self.config_vm( - self.vn2_fixture, self.vm2_name, node_name=compute_2) - assert self.vm1_fixture.verify_on_setup() - assert self.vm2_fixture.verify_on_setup() - self.nova_h.wait_till_vm_is_up(self.vm1_fixture.vm_obj) - self.nova_h.wait_till_vm_is_up(self.vm2_fixture.vm_obj) - - result, msg = self.validate_vn( - self.vn1_name, project_name=self.inputs.project_name) - assert result, msg - result, msg = self.validate_vn( - self.vn2_name, project_name=self.inputs.project_name) - assert result, msg - self.verify_si(self.si_fixtures) - - # Verify ICMP traffic mirror - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - errmsg = "Ping to right VM ip %s from left VM passed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip, expectation=False), errmsg - for svm_name, (session, pcap) in sessions.items(): - svm = {} - svm = self.get_svms_in_si( - self.si_fixtures[0], self.inputs.project_name) - if svm_name == svm[0].name: - count = 5 - if svc_mode == 'transparent' and self.vm1_fixture.vm_node_ip != self.vm2_fixture.vm_node_ip: - count = count * 2 - self.verify_icmp_mirror(svm_name, session, pcap, count) - - # One mirror instance - if len(self.action_list) != 2: - return True - - # Verify UDP traffic mirror - 
sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - # Install traffic package in VM - self.vm1_fixture.install_pkg("Traffic") - self.vm2_fixture.install_pkg("Traffic") - - sport = 8001 - dport = 9001 - sent, recv = self.verify_traffic(self.vm1_fixture, self.vm2_fixture, - 'udp', sport=sport, dport=dport) - errmsg = "UDP traffic with src port %s and dst port %s failed" % ( - sport, dport) - assert sent and recv == sent, errmsg - for svm_name, (session, pcap) in sessions.items(): - count = sent - svm = {} - svm = self.get_svms_in_si( - self.si_fixtures[1], self.inputs.project_name) - if svm_name == svm[0].name: - count = sent - if svc_mode == 'transparent' and self.vm1_fixture.vm_node_ip != self.vm2_fixture.vm_node_ip: - count = count * 2 - self.verify_l4_mirror(svm_name, session, pcap, count, 'udp') - - return True - - def verify_attach_detach_policy_with_svc_mirroring(self, si_count=1): - """Validate the detach and attach policy with SI doesn't block traffic""" - - vn1_subnets = [get_random_cidr()] - vn2_subnets = [get_random_cidr()] - self.vn1_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn1") - self.vn1_name = self.vn1_fq_name.split(':')[2] - self.vn1_subnets = vn1_subnets - self.vm1_name = get_random_name("in_network_vm1") - self.vn2_fq_name = "default-domain:" + self.inputs.project_name + \ - ":" + get_random_name("in_network_vn2") - self.vn2_name = self.vn2_fq_name.split(':')[2] - self.vn2_subnets = vn2_subnets - self.vm2_name = get_random_name("in_network_vm2") - - si_count = si_count - self.action_list = [] - self.if_list = [] - self.st_name = get_random_name("st1") - self.si_prefix = get_random_name("mirror_si") + "_" - self.policy_name = get_random_name("mirror_policy") - self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) - self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets) - svc_mode = 'in-network' - - self.st_fixture, self.si_fixtures = 
self.config_st_si(self.st_name, - self.si_prefix, si_count, left_vn=self.vn1_fq_name, svc_type='analyzer', svc_mode=svc_mode, project=self.inputs.project_name) - self.action_list = self.chain_si( - si_count, self.si_prefix, self.inputs.project_name) - self.rules = [{'direction': '<>', - 'protocol': 'icmp', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass', - 'mirror_to': {'analyzer_name': self.action_list[0]}} - } - ] - - self.policy_fixture = self.config_policy(self.policy_name, self.rules) - - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - - # Making sure VM falls on diffrent compute host - host_list = [] - for host in self.inputs.compute_ips: - host_list.append(self.inputs.host_data[host]['name']) - compute_1 = host_list[0] - compute_2 = host_list[0] - if len(host_list) > 1: - compute_1 = host_list[0] - compute_2 = host_list[1] - self.vm1_fixture = self.config_vm( - self.vn1_fixture, self.vm1_name, node_name=compute_1) - self.vm2_fixture = self.config_vm( - self.vn2_fixture, self.vm2_name, node_name=compute_2) - assert self.vm1_fixture.verify_on_setup() - assert self.vm2_fixture.verify_on_setup() - self.nova_h.wait_till_vm_is_up(self.vm1_fixture.vm_obj) - self.nova_h.wait_till_vm_is_up(self.vm2_fixture.vm_obj) - - result, msg = self.validate_vn( - self.vn1_name, project_name=self.inputs.project_name) - assert result, msg - self.verify_si(self.si_fixtures) - - # Verify ICMP traffic mirror - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - for svm_name, (session, pcap) in sessions.items(): - svm = {} 
- svm = self.get_svms_in_si( - self.si_fixtures[0], self.inputs.project_name) - if svm_name == svm[0].name: - count = 10 - if svc_mode == 'transparent' and self.vm1_fixture.vm_node_ip != self.vm2_fixture.vm_node_ip: - count = count * 2 - self.verify_icmp_mirror(svm_name, session, pcap, count) - - # detach the policy and attach again to both the network - self.detach_policy(self.vn1_policy_fix) - self.detach_policy(self.vn2_policy_fix) - - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, self.vn2_fixture) - - # Verify ICMP traffic mirror - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - errmsg = "Ping to right VM ip %s from left VM failed" % self.vm2_fixture.vm_ip - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - for svm_name, (session, pcap) in sessions.items(): - svm = {} - svm = self.get_svms_in_si( - self.si_fixtures[0], self.inputs.project_name) - if svm_name == svm[0].name: - count = 10 - if svc_mode == 'transparent' and self.vm1_fixture.vm_node_ip != self.vm2_fixture.vm_node_ip: - count = count * 2 - self.verify_icmp_mirror(svm_name, session, pcap, count) - - return True - - def verify_detach_attach_diff_policy_with_mirroring(self, si_count=1): - """validate attaching a policy with analyzer and detaching again removes all the routes and does not impact other policies""" - random_number = randint(700, 800) - self.domain_name = "default-domain" - self.project_name = self.inputs.project_name - - self.vn1_name = "VN1%s" % si_count + str(random_number) - self.vn1_subnets = [get_random_cidr()] - self.vn2_subnets = [get_random_cidr()] - self.vm1_name = 'VM-traffic' + str(random_number) - self.vn2_name = "VN2%s" % si_count + str(random_number) - self.vm2_name = 'VM-ubuntu' + str(random_number) - self.vn1_fq_name = ':'.join( - [self.domain_name, self.project_name, self.vn1_name]) - 
self.vn2_fq_name = ':'.join( - [self.domain_name, self.project_name, self.vn2_name]) - - self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) - self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets) - - si_count = si_count - self.action_list = [] - self.if_list = [] - self.st_name = 'st-analyzer-left' + str(random_number) - self.si_prefix = 'mirror_si_' + str(random_number) - self.policy_name1 = 'pol1' + str(random_number) - self.policy_name2 = 'pol-analyzer' + str(random_number) - self.svc_mode = 'transparent' - self.svc_type = 'analyzer' - - self.st_fixture, self.si_fixtures = self.config_st_si(self.st_name, - self.si_prefix, si_count, left_vn=self.vn1_fq_name, svc_type=self.svc_type, svc_mode=self.svc_mode, project=self.inputs.project_name) - self.action_list = self.chain_si( - si_count, self.si_prefix, self.inputs.project_name) - self.rules1 = [{'direction': '<>', - 'protocol': 'any', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass'} - } - ] - - self.rules2 = [{'direction': '<>', - 'protocol': 'any', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass', - 'mirror_to': {'analyzer_name': self.action_list[0]}} - } - ] - - self.pol1_fixture = self.config_policy(self.policy_name1, self.rules1) - self.pol_analyzer_fixture = self.config_policy( - self.policy_name2, self.rules2) - - self.vn1_policy_fix = self.attach_policy_to_vn( - self.pol1_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.pol1_fixture, self.vn2_fixture) - - self.vm1_fixture = self.config_vm( - self.vn1_fixture, self.vm1_name, image_name='ubuntu-traffic') - self.vm2_fixture = self.config_vm( - self.vn2_fixture, self.vm2_name, image_name='ubuntu') - assert 
self.vm1_fixture.verify_on_setup() - assert self.vm2_fixture.verify_on_setup() - - self.nova_h.wait_till_vm_is_up(self.vm1_fixture.vm_obj) - self.nova_h.wait_till_vm_is_up(self.vm2_fixture.vm_obj) - - result, msg = self.validate_vn( - self.vn1_name, project_name=self.inputs.project_name) - assert result, msg - result, msg = self.validate_vn( - self.vn2_name, project_name=self.inputs.project_name) - assert result, msg - self.verify_si(self.si_fixtures) - # Verify ICMP traffic b/w VN1 and VN2 - errmsg = "Ping b/w VN1 and VN2 failed in step1" - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip), errmsg - - self.detach_policy(self.vn1_policy_fix) - # Verify no ICMP traffic b/w VN1 and VN2 - errmsg = "Ping b/w VN1 and VN2 success in step2" - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip, expectation=False), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip, expectation=False), errmsg - - self.detach_policy(self.vn2_policy_fix) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.pol1_fixture, self.vn2_fixture) - # Verify no ICMP traffic b/w VN1 and VN2 - errmsg = "Ping b/w VN1 and VN2 success in step3" - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip, expectation=False), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip, expectation=False), errmsg - - self.detach_policy(self.vn2_policy_fix) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.pol_analyzer_fixture, self.vn2_fixture) - # Verify ICMP traffic b/w VN1 and VN2 - errmsg = "Ping b/w VN1 and VN2 success in step4" - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip, expectation=False), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip, expectation=False), errmsg - - self.detach_policy(self.vn2_policy_fix) - # Verify no ICMP traffic b/w VN1 and VN2 - 
errmsg = "Ping b/w VN1 and VN2 success in step5" - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip, expectation=False), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip, expectation=False), errmsg - - self.vn2_policy_fix = self.attach_policy_to_vn( - self.pol1_fixture, self.vn2_fixture) - # Verify no ICMP traffic b/w VN1 and VN2 - errmsg = "Ping b/w VN1 and VN2 success in step6" - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip, expectation=False), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip, expectation=False), errmsg - - return True - - def verify_detach_attach_policy_change_rules(self, si_count=1): - random_number = randint(800, 900) - self.domain_name = "default-domain" - self.project_name = self.inputs.project_name - - self.vn1_name = "VN1%s" % si_count + str(random_number) - self.vn1_subnets = [get_random_cidr()] - self.vm1_name = 'VM-traffic' + str(random_number) - self.vn2_name = "VN2%s" % si_count + str(random_number) - self.vn2_subnets = [get_random_cidr()] - self.vm2_name = 'VM-ubuntu' + str(random_number) - - self.vn1_fq_name = ':'.join( - [self.domain_name, self.project_name, self.vn1_name]) - self.vn2_fq_name = ':'.join( - [self.domain_name, self.project_name, self.vn2_name]) - self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) - self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets) - - si_count = si_count - self.action_list = [] - self.if_list = [] - self.st_name = 'st-analyzer-left' + str(random_number) - self.si_prefix = 'mirror_si_' + str(random_number) - self.policy_name1 = 'pol1' + str(random_number) - self.policy_name2 = 'pol-analyzer' + str(random_number) - self.svc_mode = 'in-network' - self.svc_type = 'analyzer' - - self.st_fixture, self.si_fixtures = self.config_st_si(self.st_name, - self.si_prefix, si_count, left_vn=self.vn1_fq_name, svc_type=self.svc_type, svc_mode=self.svc_mode, 
project=self.inputs.project_name) - self.action_list = self.chain_si( - si_count, self.si_prefix, self.inputs.project_name) - self.rules1 = [{'direction': '<>', - 'protocol': 'any', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass'} - } - ] - - self.rules2 = [{'direction': '<>', - 'protocol': 'any', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass', - 'mirror_to': {'analyzer_name': self.action_list[0]}} - } - ] - - self.pol1_fixture = self.config_policy(self.policy_name1, self.rules1) - self.pol_analyzer_fixture = self.config_policy( - self.policy_name2, self.rules2) - self.vn1_policy_fix = self.attach_policy_to_vn( - self.pol_analyzer_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.pol_analyzer_fixture, self.vn2_fixture) - - self.vm1_fixture = self.config_vm( - self.vn1_fixture, self.vm1_name, image_name='ubuntu-traffic') - self.vm2_fixture = self.config_vm( - self.vn2_fixture, self.vm2_name, image_name='ubuntu') - assert self.vm1_fixture.verify_on_setup() - assert self.vm2_fixture.verify_on_setup() - - self.nova_h.wait_till_vm_is_up(self.vm1_fixture.vm_obj) - self.nova_h.wait_till_vm_is_up(self.vm2_fixture.vm_obj) - - result, msg = self.validate_vn( - self.vn1_name, project_name=self.inputs.project_name) - assert result, msg - result, msg = self.validate_vn( - self.vn2_name, project_name=self.inputs.project_name) - assert result, msg - self.verify_si(self.si_fixtures) - - # Verify ICMP traffic b/w VN1 and VN2 - errmsg = "Ping b/w VN1 and VN2 failed in step1" - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip), errmsg - - self.detach_policy(self.vn1_policy_fix) - 
# Verify ICMP traffic b/w VN1 and VN2 - errmsg = "Ping b/w VN1 and VN2 failed in step2" - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip), errmsg - - # change policy rules to rules1 and Verify no ICMP traffic b/w VN1 and - # VN2 - data = { - 'policy': {'entries': self.pol1_fixture.policy_obj['policy']['entries']}} - self.pol_analyzer_fixture.update_policy( - self.pol_analyzer_fixture.policy_obj['policy']['id'], data) - errmsg = "Ping b/w VN1 and VN2 success in step3" - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip, expectation=False), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip, expectation=False), errmsg - - self.detach_policy(self.vn2_policy_fix) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.pol_analyzer_fixture, self.vn2_fixture) - # Verify no ICMP traffic b/w VN1 and VN2 - errmsg = "Ping b/w VN1 and VN2 success in step5" - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip, expectation=False), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip, expectation=False), errmsg - - return True - - def verify_policy_order_change(self, si_count=1): - random_number = randint(901, 950) - self.domain_name = "default-domain" - self.project_name = self.inputs.project_name - - self.vn1_name = "VN1%s" % si_count + str(random_number) - self.vn1_subnets = [get_random_cidr()] - self.vm1_name = 'VM-traffic' + str(random_number) - self.vn2_name = "VN2%s" % si_count + str(random_number) - self.vn2_subnets = [get_random_cidr()] - self.vm2_name = 'VM-ubuntu' + str(random_number) - - self.vn1_fq_name = ':'.join( - [self.domain_name, self.project_name, self.vn1_name]) - self.vn2_fq_name = ':'.join( - [self.domain_name, self.project_name, self.vn2_name]) - - self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets) - self.vn2_fixture = 
self.config_vn(self.vn2_name, self.vn2_subnets) - - si_count = si_count - self.action_list = [] - self.if_list = [] - self.st_name = 'st-analyzer-left' + str(random_number) - self.si_prefix = 'mirror_si_' + str(random_number) - self.policy_name1 = 'pol1' + str(random_number) - self.policy_name2 = 'pol-analyzer' + str(random_number) - self.svc_mode = 'transparent' - self.svc_type = 'analyzer' - - self.st_fixture, self.si_fixtures = self.config_st_si(self.st_name, - self.si_prefix, si_count, left_vn=self.vn1_fq_name, svc_type=self.svc_type, svc_mode=self.svc_mode, project=self.inputs.project_name) - self.action_list = self.chain_si( - si_count, self.si_prefix, self.inputs.project_name) - self.rules1 = [{'direction': '<>', - 'protocol': 'any', - 'source_network': 'any', - 'src_ports': [0, -1], - 'dest_network': 'any', - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass'} - } - ] - - self.rules2 = [{'direction': '<>', - 'protocol': 'any', - 'source_network': self.vn1_name, - 'src_ports': [0, -1], - 'dest_network': self.vn2_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'simple_action': 'pass', - 'mirror_to': {'analyzer_name': self.action_list[0]}} - } - ] - - self.pol1_fixture = self.config_policy(self.policy_name1, self.rules1) - self.pol_analyzer_fixture = self.config_policy( - self.policy_name2, self.rules2) - self.vn1_policy_fix = self.attach_policy_to_vn( - self.pol_analyzer_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.pol_analyzer_fixture, self.vn2_fixture) - - self.vm1_fixture = self.config_vm( - self.vn1_fixture, self.vm1_name, image_name='ubuntu-traffic') - self.vm2_fixture = self.config_vm( - self.vn2_fixture, self.vm2_name, image_name='ubuntu') - assert self.vm1_fixture.verify_on_setup() - assert self.vm2_fixture.verify_on_setup() - - self.nova_h.wait_till_vm_is_up(self.vm1_fixture.vm_obj) - self.nova_h.wait_till_vm_is_up(self.vm2_fixture.vm_obj) - - 
result, msg = self.validate_vn( - self.vn1_name, project_name=self.inputs.project_name) - assert result, msg - result, msg = self.validate_vn( - self.vn2_name, project_name=self.inputs.project_name) - assert result, msg - self.verify_si(self.si_fixtures) - # Verify ICMP traffic b/w VN1 and VN2 and mirror - errmsg = "Ping b/w VN1 and VN2 failed in step1" - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip), errmsg - for svm_name, (session, pcap) in sessions.items(): - count = 20 - self.verify_icmp_mirror(svm_name, session, pcap, count) - - self.detach_policy(self.vn1_policy_fix) - self.detach_policy(self.vn2_policy_fix) - self.vn1_policy_fix = self.attach_policy_to_vn( - self.pol1_fixture, self.vn1_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.pol1_fixture, self.vn2_fixture) - self.vn1_policy_a_fix = self.attach_policy_to_vn( - self.pol_analyzer_fixture, self.vn1_fixture) - self.vn2_policy_a_fix = self.attach_policy_to_vn( - self.pol_analyzer_fixture, self.vn2_fixture) - vn1_seq_num = {} - vn2_seq_num = {} - vn1_seq_num[self.policy_name1] = self.get_seq_num( - self.vn1_fixture, self.policy_name1) - vn1_seq_num[self.policy_name2] = self.get_seq_num( - self.vn1_fixture, self.policy_name2) - vn2_seq_num[self.policy_name1] = self.get_seq_num( - self.vn2_fixture, self.policy_name1) - vn2_seq_num[self.policy_name2] = self.get_seq_num( - self.vn2_fixture, self.policy_name2) - - # Verify ICMP traffic b/w VN1 and VN2 but no mirror - errmsg = "Ping b/w VN1 and VN2 failed in step2" - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip), errmsg - for svm_name, (session, pcap) in 
sessions.items(): - if vn1_seq_num[self.policy_name2] < vn1_seq_num[self.policy_name1] or vn2_seq_num[self.policy_name2] < vn2_seq_num[self.policy_name1]: - self.logger.info( - '%s is assigned first. Mirroring expected' % self.policy_name2) - count = 20 - else: - self.logger.info( - '%s is assigned first. No mirroring expected' % self.policy_name1) - count = 0 - self.verify_icmp_mirror(svm_name, session, pcap, count) - - self.detach_policy(self.vn1_policy_fix) - self.vn1_policy_fix = self.attach_policy_to_vn( - self.pol1_fixture, self.vn1_fixture) - self.detach_policy(self.vn2_policy_fix) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.pol1_fixture, self.vn2_fixture) - - vn1_seq_num[self.policy_name1] = self.get_seq_num( - self.vn1_fixture, self.policy_name1) - vn1_seq_num[self.policy_name2] = self.get_seq_num( - self.vn1_fixture, self.policy_name2) - vn2_seq_num[self.policy_name1] = self.get_seq_num( - self.vn2_fixture, self.policy_name1) - vn2_seq_num[self.policy_name2] = self.get_seq_num( - self.vn2_fixture, self.policy_name2) - - # Verify ICMP traffic b/w VN1 and VN2 and mirror - errmsg = "Ping b/w VN1 and VN2 failed in step3 and step4" - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip), errmsg - for svm_name, (session, pcap) in sessions.items(): - if vn1_seq_num[self.policy_name2] < vn1_seq_num[self.policy_name1] or vn2_seq_num[self.policy_name2] < vn2_seq_num[self.policy_name1]: - self.logger.info( - '%s is assigned first. Mirroring expected' % self.policy_name2) - count = 20 - else: - self.logger.info( - '%s is assigned first. 
No mirroring expected' % self.policy_name1) - count = 0 - self.verify_icmp_mirror(svm_name, session, pcap, count) - - self.detach_policy(self.vn1_policy_fix) - self.detach_policy(self.vn2_policy_fix) - - # Verify ICMP traffic b/w VN1 and VN2 and mirror - errmsg = "Ping b/w VN1 and VN2 failed in step5" - sessions = self.tcpdump_on_all_analyzer( - self.si_fixtures, self.si_prefix, si_count) - assert self.vm1_fixture.ping_with_certainty( - self.vm2_fixture.vm_ip), errmsg - assert self.vm2_fixture.ping_with_certainty( - self.vm1_fixture.vm_ip), errmsg - for svm_name, (session, pcap) in sessions.items(): - count = 20 - self.verify_icmp_mirror(svm_name, session, pcap, count) - - return True - - def get_seq_num(self, vn_fix, pol_name): - vn_obj = self.vnc_lib.virtual_network_read( - id=vn_fix.vn_id) - for net_pol_ref in vn_obj.get_network_policy_refs(): - if net_pol_ref['to'][-1] == pol_name: - vn_seq_num = net_pol_ref['attr'].sequence.major - return vn_seq_num - - def cleanUp(self): - super(VerifySvcMirror, self).cleanUp() diff --git a/common/servicechain/verify.py b/common/servicechain/verify.py deleted file mode 100644 index 425144391..000000000 --- a/common/servicechain/verify.py +++ /dev/null @@ -1,92 +0,0 @@ -import os -import sys -from time import sleep - -import fixtures -import testtools -import unittest -import types -import time -sys.path.append(os.path.realpath('tcutils/pkgs/Traffic')) -from tcutils.util import retry -from traffic.core.stream import Stream -from traffic.core.profile import StandardProfile, ContinuousProfile, ContinuousSportRange -from traffic.core.helpers import Host, Sender, Receiver - -class VerifySvcChain(fixtures.TestWithFixtures): - - def verify_si(self, si_fixtures): - for si_fix in si_fixtures: - si_fix.verify_on_setup() - - @retry(delay=5, tries=6) - def validate_vn(self, vn_name, domain_name='default-domain', - project_name='admin'): - ri_fq_name = [domain_name, project_name, vn_name, vn_name] - ri_obj = 
self.vnc_lib.routing_instance_read(fq_name=ri_fq_name) - errmsg = "RI object not found for RI: %s" % ri_fq_name - if not ri_obj: - self.logger.warn(errmsg) - return False, errmsg - - vmi_refs = ri_obj.get_virtual_machine_interface_back_refs() - errmsg = "VMI refs is none for RI %s" % ri_fq_name - if not vmi_refs: - self.logger.warn(errmsg) - return False, errmsg - - ri_refs = ri_obj.get_routing_instance_refs() - errmsg = "RI refs is none for RI %s" % ri_fq_name - if not ri_refs: - self.logger.warn(errmsg) - return False, errmsg - - return True, "VN valdation passed." - - def verify_traffic(self, sender_vm, receiver_vm, proto, sport, dport, count=None, fip=None): - # Create stream and profile - if fip: - stream = Stream( - protocol="ip", sport=sport, dport=dport, proto=proto, src=sender_vm.vm_ip, - dst=fip) - else: - stream = Stream( - protocol="ip", sport=sport, dport=dport, proto=proto, src=sender_vm.vm_ip, - dst=receiver_vm.vm_ip) - profile_kwargs = {'stream': stream} - if fip: - profile_kwargs.update({'listener': receiver_vm.vm_ip}) - if count: - profile_kwargs.update({'count': count}) - profile = StandardProfile(**profile_kwargs) - else: - profile = ContinuousProfile(**profile_kwargs) - - # Set VM credentials - send_node = Host(sender_vm.vm_node_ip, - self.inputs.host_data[sender_vm.vm_node_ip]['username'], - self.inputs.host_data[sender_vm.vm_node_ip]['password']) - recv_node = Host(receiver_vm.vm_node_ip, - self.inputs.host_data[receiver_vm.vm_node_ip]['username'], - self.inputs.host_data[receiver_vm.vm_node_ip]['password']) - send_host = Host(sender_vm.local_ip, - sender_vm.vm_username, sender_vm.vm_password) - recv_host = Host(receiver_vm.local_ip, - receiver_vm.vm_username, receiver_vm.vm_password) - - # Create send, receive helpers - sender = Sender("send%s" % - proto, profile, send_node, send_host, self.inputs.logger) - receiver = Receiver("recv%s" % - proto, profile, recv_node, recv_host, self.inputs.logger) - - # start traffic - receiver.start() - 
sender.start() - sleep(5) - - # stop traffic - sender.stop() - receiver.stop() - self.logger.debug("Sent: %s; Received: %s", sender.sent, receiver.recv) - return (sender.sent, receiver.recv) diff --git a/common/structure.py b/common/structure.py deleted file mode 100644 index 340ee4dbd..000000000 --- a/common/structure.py +++ /dev/null @@ -1,23 +0,0 @@ -class DynamicArgs: - """ Class variable that specifies expected fields - This class helps initialiazing the __init__() for - other classes,which would inherit from this class. - The _fields variable in the subclasses need to be declared - with the positional paraleters as below: - _fields = ['auth_url', 'username', 'password', 'tenant_id', 'insecure'] - """ - _fields= [] - def __init__(self, *args, **kwargs): - if len(args) != len(self._fields): - raise TypeError('Expected {} arguments'.format(len(self._fields))) - - # Set the arguments - for name, value in zip(self._fields, args): - setattr(self, name, value) - - # Set the additional arguments (if any) - extra_args = set(kwargs.keys()) - set(self._fields) - for name in extra_args: - setattr(self, name, kwargs.pop(name)) - if kwargs: - raise TypeError('Duplicate values for {}'.format(','.join(kwargs))) diff --git a/common/system/__init__.py b/common/system/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/common/system/system_verification.py b/common/system/system_verification.py deleted file mode 100644 index 9a0bd944f..000000000 --- a/common/system/system_verification.py +++ /dev/null @@ -1,94 +0,0 @@ -from tcutils.test_lib.test_utils import assertEqual - -def system_vna_verify_policy(self, policy_fixt, topo, state): - # Verify all policies in all compute nodes.. 
- self.logger.info("Starting Verifications after %s" % (state)) - ret = policy_fixt.verify_policy_in_vna(topo) - assertEqual(ret['result'], True, ret['msg']) -# end of system_vna_verify_policy - - -def all_policy_verify( - self, - config_topo, - topo, - state='unspecified', - fixture_only='no'): - '''Call all policy related verifications.. - Useful to debug failures... call this on failure.. - Verify & assert on fail''' - self.logger.info("Starting Verifications after %s" % (state)) - # calling policy fixture verifications - for policy_name, policy_fixt in config_topo['policy'].items(): - ret = policy_fixt.verify_on_setup() - assertEqual(ret['result'], True, ret['msg']) - # calling vn-policy verification - for vn_name, vn_fixt in config_topo['vn'].items(): - ret = vn_fixt.verify_vn_policy_in_api_server() - assertEqual(ret['result'], True, ret['msg']) - if fixture_only == 'no': - # This is not a fixture verfication, - # requires runtime[config_topo] & user-def[topo] topology to be in sync to verify - # calling vna-acl verification - # pick any policy configured - policy_fixt = config_topo['policy'][str(topo.policy_list[0])] - system_vna_verify_policy(self, policy_fixt, topo, state) -# end of all_policy_verify - - -def verify_system_parameters(self, verification_obj): - for projects in verification_obj['data'][1]: - for poj_obj in verification_obj['data'][1][projects]['project']: - # for each project in the topology verify the project parameters. - assert verification_obj['data'][1][projects][ - 'project'][poj_obj].verify_on_setup() - for vn_obj in verification_obj['data'][1][projects]['vn']: - # for each vn in all the projects in the topology verify the vn - # parameters. - assert verification_obj['data'][1][ - projects]['vn'][vn_obj].verify_on_setup() - for vm_obj in verification_obj['data'][1][projects]['vm']: - # for each vm in all the projects in the topology verify the vm - # parameters. 
- assert verification_obj['data'][1][ - projects]['vm'][vm_obj].verify_on_setup() - for policy_obj in verification_obj['data'][1][projects]['policy']: - # for each policy in all the projects in the topology verify the - # policies. - assert verification_obj['data'][1][projects][ - 'policy'][policy_obj].verify_on_setup() -# end verify_system_parameters - -def get_comp_node_by_vn(self, vn_fq_name): - vn_fq_name=vn_fq_name.split(":") - sing_comp_node_vn_list={} - domain= vn_fq_name[-3] - project=vn_fq_name[-2] - vn= vn_fq_name[-1] - #Get the list of vn names from each compute node - for compute_ip in self.inputs.compute_ips : - no_of_vns=self.agent_inspect[compute_ip].get_vna_vn_list( domain=domain, project=project)['VNs'] - vn_list=[] - for i in range(len(no_of_vns)) : - x=no_of_vns[i]['name'] - x=x.split(":") - vn_list.append(x[-1]) - sing_comp_node_vn_list[compute_ip]= vn_list - #Get the compute node list for each vn - vn_comp_node_list={} - vn_list=sing_comp_node_vn_list.values() - for i in range(len(vn_list)) : - compute_ips=[] - for j in range(len(vn_list[i])): - for compute_ip in sing_comp_node_vn_list.keys() : - if vn_list[i][j] in sing_comp_node_vn_list[compute_ip] : - compute_ips.append(compute_ip) - vn_comp_node_list[vn_list[i][j]]=compute_ips - #Compare the vn from the compute nodes - vn_comp_nodes ={} - for vn_list in vn_comp_node_list.keys() : - if vn_list == vn : - vn_comp_nodes[vn_list]= vn_comp_node_list[vn_list] - return vn_comp_nodes[vn] - #end of get_comp_node_by_vn - diff --git a/common/timeutils.py b/common/timeutils.py deleted file mode 100644 index d5ed81d3e..000000000 --- a/common/timeutils.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Time related utilities and helper functions. -""" - -import calendar -import datetime -import time - -import iso8601 -import six - - -# ISO 8601 extended time format with microseconds -_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' -_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' -PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND - - -def isotime(at=None, subsecond=False): - """Stringify time in ISO 8601 format.""" - if not at: - at = utcnow() - st = at.strftime(_ISO8601_TIME_FORMAT - if not subsecond - else _ISO8601_TIME_FORMAT_SUBSECOND) - tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' - st += ('Z' if tz == 'UTC' else tz) - return st - - -def parse_isotime(timestr): - """Parse time from ISO 8601 format.""" - try: - return iso8601.parse_date(timestr) - except iso8601.ParseError as e: - raise ValueError(six.text_type(e)) - except TypeError as e: - raise ValueError(six.text_type(e)) - - -def strtime(at=None, fmt=PERFECT_TIME_FORMAT): - """Returns formatted utcnow.""" - if not at: - at = utcnow() - return at.strftime(fmt) - - -def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): - """Turn a formatted time back into a datetime.""" - return datetime.datetime.strptime(timestr, fmt) - - -def normalize_time(timestamp): - """Normalize time in arbitrary timezone to UTC naive object.""" - offset = timestamp.utcoffset() - if offset is None: - return timestamp - return timestamp.replace(tzinfo=None) - offset - - -def is_older_than(before, seconds): - """Return True if before is older than seconds.""" - if isinstance(before, six.string_types): - 
before = parse_strtime(before).replace(tzinfo=None) - else: - before = before.replace(tzinfo=None) - - return utcnow() - before > datetime.timedelta(seconds=seconds) - - -def is_newer_than(after, seconds): - """Return True if after is newer than seconds.""" - if isinstance(after, six.string_types): - after = parse_strtime(after).replace(tzinfo=None) - else: - after = after.replace(tzinfo=None) - - return after - utcnow() > datetime.timedelta(seconds=seconds) - - -def utcnow_ts(): - """Timestamp version of our utcnow function.""" - if utcnow.override_time is None: - # NOTE(kgriffs): This is several times faster - # than going through calendar.timegm(...) - return int(time.time()) - - return calendar.timegm(utcnow().timetuple()) - - -def utcnow(): - """Overridable version of utils.utcnow.""" - if utcnow.override_time: - try: - return utcnow.override_time.pop(0) - except AttributeError: - return utcnow.override_time - return datetime.datetime.utcnow() - - -def iso8601_from_timestamp(timestamp): - """Returns a iso8601 formated date from timestamp.""" - return isotime(datetime.datetime.utcfromtimestamp(timestamp)) - - -utcnow.override_time = None - - -def set_time_override(override_time=None): - """Overrides utils.utcnow. - - Make it return a constant time or a list thereof, one at a time. - - :param override_time: datetime instance or list thereof. If not - given, defaults to the current UTC time. 
- """ - utcnow.override_time = override_time or datetime.datetime.utcnow() - - -def advance_time_delta(timedelta): - """Advance overridden time using a datetime.timedelta.""" - assert(not utcnow.override_time is None) - try: - for dt in utcnow.override_time: - dt += timedelta - except TypeError: - utcnow.override_time += timedelta - - -def advance_time_seconds(seconds): - """Advance overridden time by seconds.""" - advance_time_delta(datetime.timedelta(0, seconds)) - - -def clear_time_override(): - """Remove the overridden time.""" - utcnow.override_time = None - - -def marshall_now(now=None): - """Make an rpc-safe datetime with microseconds. - - Note: tzinfo is stripped, but not required for relative times. - """ - if not now: - now = utcnow() - return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, - minute=now.minute, second=now.second, - microsecond=now.microsecond) - - -def unmarshall_time(tyme): - """Unmarshall a datetime dict.""" - return datetime.datetime(day=tyme['day'], - month=tyme['month'], - year=tyme['year'], - hour=tyme['hour'], - minute=tyme['minute'], - second=tyme['second'], - microsecond=tyme['microsecond']) - - -def delta_seconds(before, after): - """Return the difference between two timing objects. - - Compute the difference in seconds between two date, time, or - datetime objects (as a float, to microsecond resolution). - """ - delta = after - before - return total_seconds(delta) - - -def total_seconds(delta): - """Return the total seconds of datetime.timedelta object. - - Compute total seconds of datetime.timedelta, datetime.timedelta - doesn't have method total_seconds in Python2.6, calculate it manually. - """ - try: - return delta.total_seconds() - except AttributeError: - return ((delta.days * 24 * 3600) + delta.seconds + - float(delta.microseconds) / (10 ** 6)) - - -def is_soon(dt, window): - """Determines if time is going to happen in the next window seconds. 
- - :param dt: the time - :param window: minimum seconds to remain to consider the time not soon - - :return: True if expiration is within the given duration - """ - soon = (utcnow() + datetime.timedelta(seconds=window)) - return normalize_time(dt) <= soon diff --git a/common/topo/__init__.py b/common/topo/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/common/topo/sdn_policy_topo_with_multi_project.py b/common/topo/sdn_policy_topo_with_multi_project.py deleted file mode 100644 index 42e5814d5..000000000 --- a/common/topo/sdn_policy_topo_with_multi_project.py +++ /dev/null @@ -1,471 +0,0 @@ -'''*******AUTO-GENERATED TOPOLOGY*********''' - - -class sdn_basic_policy_topo_with_3_project (): - - def __init__(self, domain='default-domain'): - print "building dynamic topo" - self.project_list = ['project1', 'project2', 'project3', 'admin'] - # end __init__ - - def build_topo_project1(self, domain='default-domain', project='project1', username='juniper', password='juniper123'): - # - # Topo for project: project1 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet1'] - # - # Define network info for each VN: - self.vn_nets = {'vnet1': ['10.1.1.0/24', '11.1.1.0/24']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet1': 'project1-ipam'} - # - # Define network policies - self.policy_list = ['policy1', 'policy4'] - self.vn_policy = {'vnet1': ['policy1', 'policy4']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = {'vmc1': 'vnet1'} - # - # Define network policy rules - self.rules = {} - self.rules[ - 'policy1'] = [{'direction': '<>', 'protocol': 'icmp', 'dest_network': 'default-domain:project2:vnet2', - 'source_network': 
'default-domain:project1:vnet1', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - self.rules[ - 'policy4'] = [{'direction': '<>', 'protocol': 'tcp', 'dest_network': 'default-domain:project3:vnet3', - 'source_network': 'default-domain:project1:vnet1', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - # Define the security_group and its rules - # Define security_group name - self.sg_list = ['test_sg_p1'] - # - # Define security_group with vm - self.sg_of_vm = {'vmc1': ['test_sg_p1']} - # Define the security group rules - import uuid - uuid_1 = uuid.uuid1().urn.split(':')[2] - uuid_2 = uuid.uuid1().urn.split(':')[2] - self.sg_rules = {} - self.sg_rules['test_sg_p1'] = [ - {'direction': '>', - 'protocol': 'any', - 'dst_addresses': [{'security_group': 'local', 'subnet': None}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'rule_uuid': uuid_1 - }, {'direction': '>', - 'protocol': 'any', - 'src_addresses': [{'security_group': 'local', 'subnet': None}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], 'rule_uuid': uuid_2}, ] - return self - - def build_topo_project2(self, domain='default-domain', project='project2', username='project2', password='project123'): - # - # Topo for project: project2 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet2'] - # - # Define network info for each VN: - self.vn_nets = {'vnet2': ['12.1.1.0/24', '13.1.1.0/24']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {} - # - # Define network policies - 
self.policy_list = ['policy2', 'policy5'] - self.vn_policy = {'vnet2': ['policy2', 'policy5']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = {'vmc2': 'vnet2'} - # - # Define network policy rules - self.rules = {} - self.rules[ - 'policy2'] = [{'direction': '<>', 'protocol': 'icmp', 'dest_network': 'default-domain:project1:vnet1', - 'source_network': 'default-domain:project2:vnet2', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - self.rules[ - 'policy5'] = [{'direction': '<>', 'protocol': 'icmp', 'dest_network': 'default-domain:admin:vnet-admin', - 'source_network': 'default-domain:project2:vnet2', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - # - # Define the security_group and its rules - # Define security_group name - self.sg_list = ['test_sg_p2'] - # - # Define security_group with vm - self.sg_of_vm = {'vmc2': ['test_sg_p2']} - # - # Define the security_group rules - import uuid - uuid_1 = uuid.uuid1().urn.split(':')[2] - uuid_2 = uuid.uuid1().urn.split(':')[2] - self.sg_rules = {} - self.sg_rules['test_sg_p2'] = [ - {'direction': '>', - 'protocol': 'any', - 'dst_addresses': [{'security_group': 'local', 'subnet': None}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'rule_uuid': uuid_1 - }, {'direction': '>', - 'protocol': 'any', - 'src_addresses': [{'security_group': 'local', 'subnet': None}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'rule_uuid': uuid_2}, ] - - return self - - def build_topo_project3(self, domain='default-domain', project='project3', username='project3', password='project123'): - # - # Topo for project: project3 - 
# Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet3'] - # - # Define network info for each VN: - self.vn_nets = {'vnet3': ['99.9.9.0/24']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet3': 'project3-ipam'} - # - # Define network policies - self.policy_list = ['policy3'] - self.vn_policy = {'vnet3': ['policy3']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = {'vmc3': 'vnet3'} - # - # Define network policy rules - self.rules = {} - self.rules[ - 'policy3'] = [{'direction': '<>', 'protocol': 'tcp', 'dest_network': 'default-domain:project1:vnet1', - 'source_network': 'default-domain:project3:vnet3', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - # - # Define the security_group and its rules - # Define security_group name - self.sg_list = ['test_sg_p3'] - # - # Define security_group with vm - self.sg_of_vm = {'vmc3': ['test_sg_p3']} - # - # Define the security_group rules - import uuid - uuid_1 = uuid.uuid1().urn.split(':')[2] - uuid_2 = uuid.uuid1().urn.split(':')[2] - self.sg_rules = {} - self.sg_rules['test_sg_p3'] = [ - {'direction': '>', - 'protocol': 'any', - 'dst_addresses': [{'security_group': 'local', 'subnet': None}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'rule_uuid': uuid_1 - }, {'direction': '>', - 'protocol': 'any', - 'src_addresses': [{'security_group': 'local', 'subnet': None}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 
0}}], - 'rule_uuid': uuid_2 - }, ] - return self - - def build_topo_admin(self, domain='default-domain', project='admin', username=None, password=None): - # - # Topo for project: admin - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet-admin'] - # - # Define network info for each VN: - self.vn_nets = {'vnet-admin': ['33.3.3.0/24']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet-admin': 'admin-ipam'} - # - # Define network policies - self.policy_list = ['policy-admin'] - self.vn_policy = {'vnet-admin': ['policy-admin']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = {'vmc-admin': 'vnet-admin'} - # - # Define network policy rules - self.rules = {} - self.rules[ - 'policy-admin'] = [{'direction': '<>', 'protocol': 'icmp', 'dest_network': 'default-domain:project2:vnet2', - 'source_network': 'default-domain:admin:vnet-admin', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - # - # Define the security_group and its rules - # Define security_group name - self.sg_list = ['test_sg_admin'] - # - # Define security_group with vm - self.sg_of_vm = {'vmc-admin': ['test_sg_admin']} - # - # Define the security_group rules - import uuid - uuid_1 = uuid.uuid1().urn.split(':')[2] - uuid_2 = uuid.uuid1().urn.split(':')[2] - self.sg_rules = {} - self.sg_rules['test_sg_admin'] = [ - {'direction': '>', - 'protocol': 'any', - 'dst_addresses': [{'security_group': 'local', 'subnet': None}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'rule_uuid': uuid_1 - }, {'direction': '>', - 'protocol': 'any', - 
'src_addresses': [{'security_group': 'local', 'subnet': None}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'rule_uuid': uuid_2 - }, ] - return self - # end sdn_basic_policy_topo_with_3_project - - -class sdn_basic_policy_topo_with_fip (): - - def __init__(self, domain='default-domain', compute_node_list=None): - print "building dynamic topo" - self.project_list = ['project1', 'project2', 'project3', 'admin'] - - # Define the vm to compute node mapping to pin a vm to a particular - # compute node or else leave empty. - #self.vm_node_map = {} - self.vm_node_map = {'vmc1': 'CN0', 'vmc2': - 'CN1', 'vmc3': 'CN0', 'vmc-admin': 'CN1'} - - # Logic to create a vm to Compute node mapping. - if self.vm_node_map: - CN = [] - for cn in self.vm_node_map.keys(): - if self.vm_node_map[cn] not in CN: - CN.append(self.vm_node_map[cn]) - my_node_dict = {} - if compute_node_list is not None: - if len(compute_node_list) >= len(CN): - my_node_dict = dict(zip(CN, compute_node_list)) - - if my_node_dict: - for key in my_node_dict: - for key1 in self.vm_node_map: - if self.vm_node_map[key1] == key: - self.vm_node_map[key1] = my_node_dict[key] - # end __init__ - - def build_topo_project1(self, domain='default-domain', project='project1', username='juniper', password='juniper123'): - # - # Topo for project: project1 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet1'] - # - # Define network info for each VN: - self.vn_nets = {'vnet1': ['10.1.1.0/24', '11.1.1.0/24']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet1': 'project1-ipam'} - # - # Define network policies - self.policy_list = ['policy1', 'policy4'] - 
self.vn_policy = {'vnet1': ['policy1', 'policy4']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = {'vmc1': 'vnet1'} - # - # Define network policy rules - self.rules = {} - self.rules[ - 'policy1'] = [{'direction': '<>', 'protocol': 'icmp', 'dest_network': 'default-domain:project2:vnet2', - 'source_network': 'default-domain:project1:vnet1', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - self.rules[ - 'policy4'] = [{'direction': '<>', 'protocol': 'tcp', 'dest_network': 'default-domain:project3:vnet3', - 'source_network': 'default-domain:project1:vnet1', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - return self - - def build_topo_project2(self, domain='default-domain', project='project2', username='project2', password='project123'): - # - # Topo for project: project2 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet2'] - # - # Define network info for each VN: - self.vn_nets = {'vnet2': ['12.1.1.0/24', '13.1.1.0/24']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {} - # - # Define network policies - self.policy_list = ['policy2', 'policy5'] - self.vn_policy = {'vnet2': ['policy2', 'policy5']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = {'vmc2': 'vnet2'} - # - # Define network policy rules - self.rules = {} - self.rules[ - 'policy2'] = [{'direction': '<>', 'protocol': 'icmp', 'dest_network': 'default-domain:project1:vnet1', - 'source_network': 'default-domain:project2:vnet2', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - self.rules[ - 'policy5'] = [{'direction': '<>', 'protocol': 
'icmp', 'dest_network': 'default-domain:admin:vnet-admin', - 'source_network': 'default-domain:project2:vnet2', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - return self - - def build_topo_project3(self, domain='default-domain', project='project3', username='project3', password='project123'): - # - # Topo for project: project3 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet3'] - # - # Define network info for each VN: - self.vn_nets = {'vnet3': ['99.9.9.0/24']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet3': 'project3-ipam'} - # - # Define network policies - self.policy_list = ['policy3'] - self.vn_policy = {'vnet3': ['policy3']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = {'vmc3': 'vnet3'} - # - # Define network policy rules - self.rules = {} - self.rules[ - 'policy3'] = [{'direction': '<>', 'protocol': 'tcp', 'dest_network': 'default-domain:project1:vnet1', - 'source_network': 'default-domain:project3:vnet3', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - return self - - def build_topo_admin(self, domain='default-domain', project='admin', username=None, password=None): - # - # Topo for project: admin - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet-admin'] - # - # Define network info for each VN: - self.vn_nets = {'vnet-admin': ['33.3.3.0/24']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet-admin': 'admin-ipam'} - # - # Define network policies - 
self.policy_list = ['policy-admin'] - self.vn_policy = {'vnet-admin': ['policy-admin']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = {'vmc-admin': 'vnet-admin'} - # - # Define network policy rules - self.rules = {} - self.rules[ - 'policy-admin'] = [{'direction': '<>', 'protocol': 'icmp', 'dest_network': 'default-domain:project2:vnet2', - 'source_network': 'default-domain:admin:vnet-admin', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - # - # Define public VN - self.public_vn = 'public-vn' - return self - # end sdn_basic_policy_topo_with_fip - -if __name__ == '__main__': - print "Currently topology limited to one domain/project.." - print "Based on need, can be extended to cover config for multiple domain/projects" - print "Running unit test for this module ..." - my_topo = sdn_basic_policy_topo_with_3_project(domain='default-domain') - x = my_topo.__dict__ - print "\nprinting keys of topology dict:" - for key, value in x.iteritems(): - print key - print - # print "keys & values:" - # for key, value in x.iteritems(): print key, "-->", value - # Use topology_helper to extend/derive data from user-defined topology to help verifications. - # ex. 
get list of all vm's from topology; get list of vn's associated to a - # policy - import topo_helper - topo_h = topo_helper.topology_helper(my_topo) - #vmc_list= topo_h.get_vmc_list() - policy_vn = topo_h.get_policy_vn() - print "printing derived topo data - vn's associated to a policy: \n", policy_vn -# diff --git a/common/tor/__init__.py b/common/tor/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/common/tor/base.py b/common/tor/base.py deleted file mode 100644 index 4d4381837..000000000 --- a/common/tor/base.py +++ /dev/null @@ -1,365 +0,0 @@ -import time -from netaddr import * - -from common.neutron.base import BaseNeutronTest - -from pif_fixture import PhysicalInterfaceFixture -from lif_fixture import LogicalInterfaceFixture -from physical_router_fixture import PhysicalRouterFixture -from host_endpoint import HostEndpointFixture -from tor_fixture import ToRFixtureFactory -import test -from tcutils.tcpdump_utils import search_in_pcap, delete_pcap -from vm_test import VMFixture - - -class BaseTorTest(BaseNeutronTest): - - @classmethod - def setUpClass(cls): - super(BaseTorTest, cls).setUpClass() - cls.vnc_api_h = cls.vnc_lib - cls.vnc_lib_fixture = cls.connections.vnc_lib_fixture - # end setUpClass - - @classmethod - def tearDownClass(cls): - super(BaseTorTest, cls).tearDownClass() - # end tearDownClass - - def get_available_devices(self, device_type): - ''' device_type is one of router/tor - ''' - available = [] - for (device, device_dict) in self.inputs.physical_routers_data.iteritems(): - if device_dict['type'] == device_type : - available.append(device_dict) - return available - # end get_available_devices - - def get_available_endpoints(self, device_ip): - endpoints = [] - for (ip, ep_list) in self.inputs.tor_hosts_data.iteritems(): - if device_ip == ip: - return ep_list - return endpoints - # end get_available_endpoints - - def setup_routers(self, count=1): - ''' Returns a list of physical router fixtures - ''' - router_objs = 
[] - routers_info_list = self.get_available_devices('router') - assert len(routers_info_list) >= count, ( - 'Not enough devices available! Expected %s, Got %s' % ( - count, len(routers_info_list))) - for i in range(0,count): - router_params = routers_info_list[i] - phy_router_fixture = self.useFixture(PhysicalRouterFixture( - router_params['name'], router_params['mgmt_ip'], - model=router_params['model'], - vendor=router_params['vendor'], - asn=router_params['asn'], - ssh_username=router_params['ssh_username'], - ssh_password=router_params['ssh_password'], - mgmt_ip=router_params['mgmt_ip'], - tunnel_ip=router_params['tunnel_ip'], - ports=router_params['ports'], - connections=self.connections, - logger=self.logger)) - router_objs.append(phy_router_fixture) - return router_objs - # end setup_routers - - def setup_tors(self, count=1): - tor_objs = [] - tors_info_list = self.get_available_devices('tor') - assert len(tors_info_list) >= count, ( - 'Not enough devices available! Expected %s, Got %s' % ( - count, len(tors_info_list))) - for i in range(0, count): - tor_params = tors_info_list[i] - tor_fixture = self.useFixture(ToRFixtureFactory.get_tor( - tor_params['name'], - tor_params['mgmt_ip'], - vendor=tor_params['vendor'], - ssh_username=tor_params['ssh_username'], - ssh_password=tor_params['ssh_password'], - tunnel_ip=tor_params['tunnel_ip'], - ports=tor_params['ports'], - tor_ovs_port=tor_params['tor_ovs_port'], - tor_ovs_protocol=tor_params['tor_ovs_protocol'], - controller_ip=tor_params['controller_ip'], - connections=self.connections, - logger=self.logger)) - tor_objs.append(tor_fixture) - return tor_objs - # end setup_tors - - def setup_vmis(self, vn_id, fixed_ips=[], - mac_address=None, - security_groups=[], - extra_dhcp_opts=[], count=1): - vmis=[] - if mac_address: - mac_address = EUI(mac_address) - mac_address.dialect = mac_unix - real_fixed_ips = fixed_ips - for i in range(0,count): - if fixed_ips: - ip_address = fixed_ips[0]['ip_address'] - 
real_fixed_ips[0]['ip_address'] = str(IPAddress( - real_fixed_ips[0]['ip_address']) + i) - vmi = self.setup_vmi(vn_id, real_fixed_ips, - mac_address, - security_groups, - extra_dhcp_opts) - vmis.append(vmi) - if mac_address: - mac_address = EUI(mac_address.value + 1) - mac_address.dialect = mac_unix - return vmis - # end setup_vmis - - def setup_tor_port(self, tor_fixture, port_index=0, vlan_id=0, vmi_objs=[], - cleanup=True): - device_id = tor_fixture.phy_device.uuid - tor_ip = tor_fixture.mgmt_ip - pif_name = self.inputs.tor_hosts_data[tor_ip][port_index]['tor_port'] - lif_name = pif_name + '.' + str(vlan_id) - pif_fixture = PhysicalInterfaceFixture(pif_name, - device_id=device_id, - connections=self.connections) - pif_fixture.setUp() - if cleanup: - self.addCleanup(pif_fixture.cleanUp) - - lif_fixture = LogicalInterfaceFixture( - lif_name, - pif_id=pif_fixture.uuid, - vlan_id=vlan_id, - vmi_ids=[x.uuid for x in vmi_objs], - connections=self.connections) - lif_fixture.setUp() - if cleanup: - self.addCleanup(lif_fixture.cleanUp) - return (pif_fixture, lif_fixture) - # end setup_tor_port - - def setup_bms(self, tor_fixture, port_index=0, namespace='ns1', - ns_intf='tap1', ns_mac_address=None, - ns_ip_address=None, - ns_netmask=None, - ns_gateway=None, - vlan_id=None, - verify=True, - cleanup=True): - '''Setups up a bms using HostEndpointFixture - - tor_ip : tor mgmt IP - port_index : index of the port in tor_hosts dict of - the ToR - namespace : name of the netns instance - ns_intf : Interface name on the netns instance - ns_mac_address : MAC address of ns_intf on netns instance - ns_ip_address : IP Address of ns_intf - ns_gateway : Gateway IP to be assigned to netns - vlan_id : Vlan id to be assigned to ns_intf, default is - untagged - verify : If True, does dhclient on the netns intf and - verifies if it has got the expected IP - ''' - tor_ip = tor_fixture.mgmt_ip - tor_name = tor_fixture.name - host_info = self.inputs.tor_hosts_data[tor_ip][port_index] - 
self.logger.info('Creating a BMS host on TOR %s , port %s' % ( - tor_ip, host_info['tor_port'])) - bms_obj = HostEndpointFixture( - host_ip=host_info['mgmt_ip'], - namespace=namespace, - interface=host_info['host_port'], - username=host_info['username'] or 'root', - password=host_info['password'] or 'c0ntrail123', - ns_intf=ns_intf, - ns_mac_address=ns_mac_address, - ns_ip_address=ns_ip_address, - ns_netmask=ns_netmask, - ns_gateway=ns_gateway, - connections=self.connections, - vlan_id=vlan_id, - tor_name=tor_name) - bms_obj.setUp() - if cleanup: - self.addCleanup(bms_obj.cleanUp) - if verify: - retval,output = bms_obj.run_dhclient() - assert retval, "BMS %s did not seem to have got an IP" % ( - bms_obj.name) - if ns_ip_address: - self.validate_interface_ip(bms_obj, ns_ip_address) - return bms_obj - # end setup_bms - - def create_vn(self, vn_name=None, vn_subnets=None, disable_dns=False, - vxlan_id=None, enable_dhcp=True, **kwargs): - vn_fixture = super(BaseTorTest, self).create_vn(vn_name, vn_subnets, - vxlan_id, enable_dhcp, **kwargs) - if disable_dns: - dns_dict = {'dns_nameservers': ['0.0.0.0']} - for vn_subnet_obj in vn_fixture.vn_subnet_objs: - vn_fixture.update_subnet(vn_subnet_obj['id'], dns_dict) - return vn_fixture - # end create_vn - - def validate_interface_ip(self, bms_fixture, expected_ip): - assert expected_ip == bms_fixture.info['inet_addr'],\ - 'BMS IP not expected : Seen:%s, Expected:%s' % ( - bms_fixture.info['inet_addr'], expected_ip) - # end validate_interface_ip - - def set_configured_vxlan_mode(self): - self.vnc_lib_fixture.set_vxlan_mode('configured') - self.addCleanup(self.vnc_lib_fixture.set_vxlan_mode, 'automatic') - - def restart_openvwitches(self, tor_fixtures): - '''In some scenarios,(Ex: Vxlan id change), it is required - that one needs to restart the openvswitch processes ourselves - This is unlike QFX where a change is taken care of by itself. 
- ''' - for tor_fixture in tor_fixtures: - if tor_fixture.vendor == 'openvswitch': - tor_fixture.restart_ovs() - # end restart_openvwitches - - def clear_arps(self, bms_fixtures): - for bms_fixture in bms_fixtures: - bms_fixture.clear_arp(all_entries=True) - # end clear_arps - - def set_global_asn(self, asn): - existing_asn = self.vnc_lib_fixture.get_global_asn() - ret = self.vnc_lib_fixture.set_global_asn(asn) - self.addCleanup(self.vnc_lib_fixture.set_global_asn, existing_asn) - return ret - # end set_global_asn - - def add_vmi_to_lif(self, lif_fixture, vmi_uuid): - lif_fixture.add_virtual_machine_interface(vmi_uuid) - self.addCleanup(lif_fixture.delete_virtual_machine_interface, vmi_uuid) - # end add_vmi_to_lif - - - def validate_arp(self, bms_fixture, ip_address=None, - mac_address=None, expected_mac=None, - expected_ip=None): - ''' Method to validate IP/MAC - Given a IP and expected MAC of the IP, - or given a MAC and expected IP, this method validates it - against the arp table in the BMS and returns True/False - ''' - (ip, mac) = bms_fixture.get_arp_entry(ip_address=ip_address, - mac_address=mac_address) - search_term = ip_address or mac_address - if expected_mac : - assert expected_mac == mac, ( - 'Arp entry mismatch for %s, Expected : %s, Got : %s' % ( - search_term, expected_mac, mac)) - if expected_ip : - assert expected_ip == ip, ( - 'Arp entry mismatch for %s, Expected : %s, Got : %s' % ( - search_term, expected_ip, ip)) - self.logger.info('BMS %s:ARP check using %s : Got (%s, %s)' % ( - bms_fixture.identifier, search_term, ip, mac)) - # end validate_arp - - def validate_bms_gw_mac(self, bms_fixture, physical_router_fixture): - ''' - Validate that the Gw MAC of the BMS is the irb MAC of the physical - router - ''' - bms_gw_mac = bms_fixture.get_gateway_mac() - bms_gw_ip = bms_fixture.get_gateway_ip() - router_irb_mac = physical_router_fixture.get_virtual_gateway_mac( - bms_gw_ip) - assert bms_gw_mac == router_irb_mac, ( - "BMS Gateway MAC mismatch! 
Expected: %s, Got: %s" % ( - bms_gw_mac, router_irb_mac)) - self.logger.info('Validated on BMS %s that MAC of gateway is ' - 'same as routers irb MAC : %s' % (bms_fixture.identifier, - router_irb_mac)) - # end validate_bms_gw_mac - - def get_mgmt_ip_of_node(self, ip): - return self.inputs.host_data[ip]['ip'] - - def validate_arp_forwarding(self, source_fixture, - ip, dest_fixture, source_interface=None): - ''' - Validate that arp packet from a VM/BMS destined to 'ip' - is seen on the destination VM/BMS - Returns True in such a case, else False - ''' - (session, pcap) = dest_fixture.start_tcpdump(filters='arp -v') - source_fixture.arping(ip, source_interface) - time.sleep(5) - dest_fixture.stop_tcpdump(session, pcap) - if isinstance(source_fixture, HostEndpointFixture): - source_ip = source_fixture.info['inet_addr'] - elif isinstance(source_fixture, VMFixture): - source_ip = source_fixture.vm_ips[0] - - if isinstance(dest_fixture, HostEndpointFixture): - dest_name = dest_fixture.identifier - elif isinstance(dest_fixture, VMFixture): - dest_name = dest_fixture.vm_name - - result = search_in_pcap(session, pcap, 'Request who-has %s tell %s' % ( - ip, source_ip)) - if result : - message = 'ARP request from %s to %s is seen on %s' % ( - source_ip, ip, dest_name) - else: - message = 'ARP request from %s to %s is NOT seen on %s' % ( - source_ip, ip, dest_name) - - self.logger.info(message) - delete_pcap(session, pcap) - return (result, message) - # end validate_arp_forwarding - - def validate_dhcp_forwarding(self, source_fixture, - dest_fixture, source_interface=None): - ''' - Validate that dhcp discover packet from a VM/BMS - is seen on the destination VM/BMS - Returns True in such a case, else False - ''' - (session, pcap) = dest_fixture.start_tcpdump(filters='udp port 68 -v') - source_fixture.run_dhclient(source_interface, timeout=20) - time.sleep(5) - dest_fixture.stop_tcpdump(session, pcap) - if isinstance(source_fixture, HostEndpointFixture): - source_mac = 
source_fixture.info['hwaddr'] - source_name = source_fixture.identifier - elif isinstance(source_fixture, VMFixture): - source_mac = source_fixture.get_vm_interface_name(source_interface) - source_name = source_fixture.vm_name - - if isinstance(dest_fixture, HostEndpointFixture): - dest_name = dest_fixture.identifier - elif isinstance(dest_fixture, VMFixture): - dest_name = dest_fixture.vm_name - - result = search_in_pcap(session, pcap, 'BOOTP/DHCP, Request from %s' % ( - source_mac)) - if result : - message = 'DHCP discover/request from %s, MAC %s is seen '\ - 'on %s' % (source_name, source_mac, dest_name) - else: - message = 'DHCP discover/request from %s, MAC %s is NOT '\ - 'seen on %s' % (source_name, source_mac, dest_name) - - self.logger.info(message) - delete_pcap(session, pcap) - return (result, message) - # end validate_dhcp_forwarding diff --git a/common/vcenter_libs.py b/common/vcenter_libs.py deleted file mode 100644 index 55fc719f6..000000000 --- a/common/vcenter_libs.py +++ /dev/null @@ -1,120 +0,0 @@ -import time -import atexit -import requests -try: - from pyVim import connect - from pyVmomi import vim - - _vimtype_dict = { - 'dc' : vim.Datacenter, - 'cluster' : vim.ClusterComputeResource, - 'vm' : vim.VirtualMachine, - 'host' : vim.HostSystem, - 'host.NasSpec' : vim.host.NasVolume.Specification, - 'network' : vim.Network, - 'ds' : vim.Datastore, - 'dvs.PortGroup' : vim.dvs.DistributedVirtualPortgroup, - 'dvs.VSwitch' : vim.dvs.VmwareDistributedVirtualSwitch, - 'dvs.PVLan' : vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec, - 'dvs.PortConfig' : vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy, - 'dvs.ConfigSpec' : vim.dvs.DistributedVirtualPortgroup.ConfigSpec, - 'dvs.PortConn' : vim.dvs.PortConnection, - 'dvs.Blob' : vim.dvs.KeyedOpaqueBlob, - 'ip.Config' : vim.vApp.IpPool.IpPoolConfigInfo, - 'ip.Association' : vim.vApp.IpPool.Association, - 'ip.Pool' : vim.vApp.IpPool, - 'dev.E1000' : vim.vm.device.VirtualE1000, - 'dev.VDSpec' : 
vim.vm.device.VirtualDeviceSpec, - 'dev.VD' : vim.vm.device.VirtualDevice, - 'dev.ConnectInfo' : vim.vm.device.VirtualDevice.ConnectInfo, - 'dev.DVPBackingInfo' : vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo, - 'dev.Ops.add' : vim.vm.device.VirtualDeviceSpec.Operation.add, - 'dev.Ops.remove' : vim.vm.device.VirtualDeviceSpec.Operation.remove, - 'dev.Ops.edit' : vim.vm.device.VirtualDeviceSpec.Operation.edit, - 'vm.Config' : vim.vm.ConfigSpec, - 'vm.Reloc' : vim.vm.RelocateSpec, - 'vm.Clone' : vim.vm.CloneSpec, - 'vm.PassAuth' : vim.vm.guest.NamePasswordAuthentication, - 'vm.Prog' : vim.vm.guest.ProcessManager.ProgramSpec, - } - -except: - _vimtype_dict = {} - connect = None - vim = None - _vimtype_dict = None - -def _vim_obj(typestr, **kwargs): - return _vimtype_dict[typestr](**kwargs) - -def _wait_for_task (task): - while (task.info.state == vim.TaskInfo.State.running or - task.info.state == vim.TaskInfo.State.queued): - time.sleep(2) - if task.info.state != vim.TaskInfo.State.success: - if task.info.state == vim.TaskInfo.State.error: - raise ValueError(task.info.error.localizedMessage) - raise ValueError("wait_for_task failed:%s" % task.info) - return - -def _match_obj(obj, param): - attr = param.keys()[0] - attrs = [attr] - if '.' 
in attr: - attrs = attr.split('.') - for i in range(len(attrs) - 1): - if not hasattr(obj, attrs[i]): - break - obj = getattr(obj, attrs[i]) - attr = attrs[-1] - return hasattr(obj, attr) and getattr(obj, attr) == param.values()[0] - -def get_vcenter_connection(inputs): - SI = None - try: - SI = connect.SmartConnect(host=inputs.vcenter_server, - user=inputs.vcenter_username, - pwd=inputs.vcenter_password, - port=int(inputs.vcenter_port)) - atexit.register(connect.Disconnect, SI) - content = SI.RetrieveContent() - return SI - except IOError, ex: - pass - -def get_vm_info_by_uuid(inputs,uuid): - - try: - SI=get_vcenter_connection(inputs) - VM = SI.content.searchIndex.FindByUuid(None, uuid, - True, - True) - return VM - except IOError,ex: - pass - -def get_esxi_host_of_vm_by_uuid(inputs,uuid): - VM = get_vm_info_by_uuid(inputs,uuid) - return VM.runtime.host.name - -def get_contrail_vm_by_vm_uuid(inputs,uuid): - esxi_host = get_esxi_host_of_vm_by_uuid(inputs,uuid) - for esxi in inputs.esxi_vm_ips: - if esxi_host == esxi['ip']: - contrail_vm = esxi['contrail_vm'] - ip = contrail_vm.split('@')[1] - return inputs.host_data[ip]['name'] - -class Inputs: - def __init__(self): - self.vcenter_server='10.204.217.189' - self.vcenter_username='administrator@vsphere.local' - self.vcenter_password='Contrail123!' 
- self.vcenter_port='443' - -def main(): - inputs=Inputs() - print get_contrail_vm_by_vm_uuid(inputs,'9175dc3b-5ff5-45ca-a836-05dc986ef19d') - -if __name__ == "__main__": - main() diff --git a/configs/__init__.py b/configs/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/configs/flavors.cfg b/configs/flavors.cfg deleted file mode 100644 index 55081298f..000000000 --- a/configs/flavors.cfg +++ /dev/null @@ -1,40 +0,0 @@ -############################################################################## -# Descibe flavor details and where its available -# This config file will be used by test scripts to add images through -# glance commands -# -# [flavor name] # Generic name of the flavor -# vcpus=1 -# ram=512 -# disk=5 # Disk in GB -# -# CAUTION: ANY CHANGES TO DEFAULT APPLIES TO ALL SECTIONS -# IF NOT OVERWRITTEN LOCALLY -############################################################################## - -[DEFAULT] -vcpus = 1 -ram = 512 -disk = 5 - -[contrail_flavor_tiny] -ram = 512 -disk = 1 - -[contrail_flavor_small] -ram = 1024 -disk = 10 - -[contrail_flavor_medium] -ram = 1024 -disk = 20 - -[contrail_flavor_large] -vcpus = 1 -ram = 1024 -disk = 40 - -[contrail_flavor_2cpu] -vcpus = 2 -ram = 2048 -disk = 40 diff --git a/configs/images.cfg b/configs/images.cfg deleted file mode 100644 index 668fac03c..000000000 --- a/configs/images.cfg +++ /dev/null @@ -1,168 +0,0 @@ -############################################################################## -# Descibe image details and where its available -# This config file will be used by test scripts to add images through -# glance commands -# -# [image name] # Generic name of the image -# name = # Image name as in the webserver -# location = # Specify location of the image in the web server -# webserver = # Host name or IP address of webserver -# username = # User name considered for the image -# password = # Password considered for the image -# flavor = # Default flavor for the image -# params = # 
Parameter to glance while creating the image -# -# CAUTION: ANY CHANGES TO DEFAULT APPLIES TO ALL SECTIONS -# IF NOT OVERWRITTEN LOCALLY -############################################################################## - -[DEFAULT] -name = -webserver = -location = /images/converts/ -username = -password = -type = os -flavor = contrail_flavor_small -vcpath = /images/vcenter/ -params = --container-format bare --disk-format vmdk --property vmware_disktype="sparse" --property vmware_adaptertype="ide" -name_docker = phusion-baseimage-enablesshd -#params = --container-format ovf --disk-format qcow2 --property hypervisor_type=qemu - -[cirros-0.3.0-x86_64-uec] -name = cirros-0.3.0-x86_64-disk.vmdk.gz -username = cirros -password = cubswin:) -flavor = m1.tiny - -[redmine-fe] -name = turnkey-redmine-12.0-squeeze-x86.vmdk.gz -location = /images/ -username = root -password = c0ntrail123 -params = --container-format ovf --disk-format vmdk - -[redmine-be] -name = turnkey-redmine-12.0-squeeze-x86-mysql.vmdk.gz -location = /images/ -username = root -password = c0ntrail123 -params = --container-format ovf --disk-format vmdk - -[redmine-dhcp-server] -name = redmine-isc-dhcp-server.vmdk.gz -username = root -password = c0ntrail123 - -[ubuntu-dhcp-server] -name = ubuntu-dhcp-server.vmdk.gz -username = ubuntu -password = ubuntu - -[ubuntu-dns-server] -name = ubuntu-dns-server.vmdk.gz -username = ubuntu -password = ubuntu - -[ubuntu-dhcpdns-server] -name = ubuntu-dhcpdns-server.vmdk.gz -username = ubuntu -password = ubuntu - -[ubuntu-with-vlan8021q] -name = ubuntu-with-vlan8021q.vmdk.gz -username = ubuntu -password = ubuntu - -[ubuntu] -name = ubuntu.vmdk.gz -username = ubuntu -password = ubuntu -flavor = contrail_flavor_tiny -vctmpl = ubuntu.vmtx -vcname = ubuntu-disk1.vmdk - -[zeroshell] -name = ZeroShell-qemu-bridge.vmdk.gz - -[vsrx-bridge] -name = junos-vsrx-12.1-transparent.img.gz -location = /images/vsrx/ -params = --container-format ovf --disk-format qcow2 --property 
hypervisor_type=qemu - -[vsrx] -name = junos-vsrx-12.1-in-network.img.gz -location = /images/vsrx/ -username = root -password = c0ntrail123 -params = --container-format ovf --disk-format qcow2 --property hypervisor_type=qemu - -[vsrx-fw] -name = junos-vsrx-12.1-in-network-fw.img.gz -location = /images/vsrx/ -username = root -password = c0ntrail123 -params = --container-format ovf --disk-format qcow2 --property hypervisor_type=qemu - -[tiny_nat_fw] -name = tinycore-in-network-nat.qcow2.gz -location = /images/tinycore/ -params = --container-format bare --disk-format vmdk --property vmware_disktype="sparse" --property vmware_adaptertype="ide" --property hypervisor_type=qemu - -[tiny_trans_fw] -name = tinycore-net-transparent-fw.qcow2.gz -location = /images/tinycore/ -params = --container-format bare --disk-format vmdk --property vmware_disktype="sparse" --property vmware_adaptertype="ide" --property hypervisor_type=qemu - -[nat-service] -name = junos-vsrx-12.1-in-network.img.gz -location = /images/vsrx/ -username = root -password = c0ntrail123 -params = --container-format ovf --disk-format qcow2 --property hypervisor_type=qemu - -[ubuntu-traffic] -name = ubuntu-traffic.vmdk.gz -username = ubuntu -password = ubuntu -vctmpl = ubuntu-traffic.vmtx -vcname = ubuntu-traffic-disk1.vmdk -name_docker = ubuntu-traffic-docker - -[ubuntu-in-net] -name = ubuntu-in-net.vmdk.gz -username = ubuntu -password = ubuntu - -[redmine-web-traffic] -name = redmine-web-traffic.vmdk.gz -location = /images/traffic/ -params = --container-format ovf --disk-format vmdk - -[redmine-db-traffic] -name = redmine-db-traffic.vmdk.gz -location = /images/traffic/ -params = --container-format ovf --disk-format vmdk - -[analyzer] -name = analyzer-vm-console.qcow2.gz -location = /images/analyzer/ -params = --container-format ovf --disk-format qcow2 --property hypervisor_type=qemu - -[phusion-baseimage-enablesshd] -type = docker -name = phusion-baseimage-enablesshd.tar.gz -username = root -password = 
c0ntrail123 -location = /images/docker/ -params = --container-format docker --disk-format raw - -[ubuntu-traffic-docker] -type = docker -name = ubuntu-traffic-docker.tar.gz -username = ubuntu -password = ubuntu -location = /images/docker/ -params = --container-format docker --disk-format raw -name_docker = ubuntu-traffic-docker - diff --git a/fixtures/ceilometer/__init__.py b/fixtures/ceilometer/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/fixtures/ceilometer/ceilometer_client.py b/fixtures/ceilometer/ceilometer_client.py deleted file mode 100755 index 3fd3c23e9..000000000 --- a/fixtures/ceilometer/ceilometer_client.py +++ /dev/null @@ -1,254 +0,0 @@ -import os -from common.openstack_libs import ks_auth_identity_v2 as v2 -from common.openstack_libs import ks_session as session -from common.openstack_libs import ceilo_client as client -from common.structure import DynamicArgs - -VERSION = 2 - -def make_query(user_id=None, tenant_id=None, resource_id=None, - user_ids=None, tenant_ids=None, resource_ids=None): - """Returns query built from given parameters. - This query can be then used for querying resources, meters and - statistics. 
- :Parameters: - - `user_id`: user_id, has a priority over list of ids - - `tenant_id`: tenant_id, has a priority over list of ids - - `resource_id`: resource_id, has a priority over list of ids - - `user_ids`: list of user_ids - - `tenant_ids`: list of tenant_ids - - `resource_ids`: list of resource_ids - """ - user_ids = user_ids or [] - tenant_ids = tenant_ids or [] - resource_ids = resource_ids or [] - - query = [] - if user_id: - user_ids = [user_id] - for u_id in user_ids: - query.append({"field": "user_id", "op": "eq", "value": u_id}) - - if tenant_id: - tenant_ids = [tenant_id] - for t_id in tenant_ids: - query.append({"field": "project_id", "op": "eq", "value": t_id}) - - if resource_id: - resource_ids = [resource_id] - for r_id in resource_ids: - query.append({"field": "resource_id", "op": "eq", "value": r_id}) - - return query - - -class AuthToken(DynamicArgs): - """Returns auth_token - :Parameters: - - `username`: user_id - - `tenant_id`: tenant_id - - `password`: password - - `auth_url`: auth url - """ - - _fields = ['auth_url', 'username', 'password', 'tenant_id', 'insecure'] - - def get_token(self): - '''Return auth token''' - auth=v2.Password(auth_url=self.auth_url - ,username=self.username, - password=self.password, - tenant_id=self.tenant_id) - - sess = session.Session(auth=auth,verify=False) - self.token = auth.get_token(sess) - return self.token - -class CeilometerClient(DynamicArgs): - """Returns ceilometer clent - :Parameters: - - `username`: user_id - - `tenant_id`: tenant_id - - `password`: password - - `auth_url`: auth url - - `ceilometer_url`: ceilometer url - """ - _fields = ['auth_url', 'username', 'password', 'tenant_name', - 'ceilometer_url'] - - def get_cclient(self): - - #TO DO - working with auth token - #auth_client = AuthToken(self.auth_url, - # username = self.username, - # password = self.password, - # tenant_id = self.tenant_name) - #token = auth_client.get_token() - self.cclient = client.get_client(VERSION, os_username = 
self.username, - os_password = self.password, - os_auth_url = self.auth_url, - os_tenant_name = self.tenant_name, - insecure = True) - return self.cclient - -class Meter: - """Represents one Ceilometer meter.""" - - def __init__(self, meter): - self.meter = meter - - @property - def user_id(self): - return self.meter.user_id - - @property - def name(self): - return self.meter.name - - @property - def resource_id(self): - return self.meter.resource_id - - @property - def source(self): - return self.meter.source - - @property - def meter_id(self): - return self.meter.meter_id - - @property - def project_id(self): - return self.meter.project_id - - @property - def type(self): - return self.meter.type - - @property - def unit(self): - return self.meter.unit - -class Resource: - - """Represents one Ceilometer resource.""" - - def __init__(self, resource,ceilometer_usage=None): - self.resource = resource - - @property - def user_id(self): - return self.resource.user_id - - @property - def project_id(self): - return self.resource.project_id - - @property - def resource_id(self): - return self.resource.resource_id - - @property - def source(self): - return self.resource.source - -class Sample: - """Represents one Ceilometer sample.""" - - def __init__(self,sample): - self.sample = sample - - @property - def user_id(self): - return self.sample.user_id - - @property - def project_id(self): - return self.sample.project_id - - @property - def resource_id(self): - return self.sample.resource_id - - @property - def counter_unit(self): - return self.sample.counter_unit - - @property - def resource_metadata(self): - return self.sample.resource_metadata - - @property - def counter_volume(self): - return self.sample.counter_volume - - @property - def counter_name(self): - return self.sample.counter_name - - @property - def counter_type(self): - return self.sample.counter_type - -class Statistic: - """Represents one Ceilometer statistic.""" - def __init__(self,stat): - self.stat = 
stat - -def resource_list(cclient, query=None, ceilometer_usage_object=None): - """List the resources.""" - resources = cclient.resources.list(q=query) - return [Resource(r, ceilometer_usage_object) for r in resources] - - -def sample_list(cclient, meter_name, query=None, limit=None): - """List the samples for this meters.""" - samples = cclient.samples.list(meter_name=meter_name, - q=query, limit=limit) - return [Sample(s) for s in samples] - - -def meter_list(cclient, query=None): - """List the user's meters.""" - meters = cclient.meters.list(query) - return [Meter(m) for m in meters] - - -def statistic_list(cclient, meter_name, query=None, period=None): - """List of statistics.""" - statistics = cclient.\ - statistics.list(meter_name=meter_name, q=query, period=period) - return [Statistic(s) for s in statistics] - - -def main(): - auth_url = os.getenv('OS_AUTH_URL') or \ - 'http://10.204.216.7:5000/v2.0' - username = os.getenv('OS_USERNAME') or \ - 'admin' - password = os.getenv('OS_PASSWORD') or \ - 'contrail123' - tenant_name = os.getenv('OS_TENANT_NAME') or \ - 'admin' - c_url = os.getenv('OS_TELEMETRY_URL') or \ - 'http://10.204.216.7:8777/' - cclient = CeilometerClient(auth_url, username, - password, - tenant_name, - c_url,insecure = True) - cclient = cclient.get_cclient() - #q = make_query(user_id='ffe9ce8cac3a4d3088bff11d34f7c09b', tenant_id='a4faf2a1d086459b89ec1b776ddf42dd') - #q = make_query(tenant_id='a4faf2a1d086459b89ec1b776ddf42dd') - q = make_query(tenant_id='3c07b22cfabb4ba8b9387749250e3ed8') - #abc = statistic_list(cclient,'cpu_util',query=q,period = '5') - #abc = statistic_list(cclient,'cpu_util',query=q) - #abc = statistic_list(cclient,'cpu_util') - #abc = statistic_list(cclient,'ip.floating.receive.packets',period='5') - #abc = statistic_list(cclient,'ip.floating.receive.bytes') - abc = resource_list(cclient,query=q) - #abc = sample_list(cclient,'ip.floating') - print q - print abc - -if __name__ == "__main__": - main() diff --git 
a/fixtures/compute_node_test.py b/fixtures/compute_node_test.py deleted file mode 100644 index 422f3c149..000000000 --- a/fixtures/compute_node_test.py +++ /dev/null @@ -1,341 +0,0 @@ -import fixtures -from tcutils.commands import execute_cmd -from tcutils.util import retry -from fabric.api import run, local -from fabric.operations import put, get -from fabric.context_managers import settings -import ConfigParser -from datetime import datetime -import re -import time -import tempfile - - -class ComputeNodeFixture(fixtures.Fixture): - - """ Fixture to configure, verify agent in compute node... - Also cover vrouter related operations in the node. - """ - - def __init__( - self, - connections, - node_ip, - username='root', - password='c0ntrail123'): - self.agent_conf_file = '/etc/contrail/contrail-vrouter-agent.conf' - self.connections = connections - self.inputs = self.connections.inputs - self.logger = self.inputs.logger - self.already_present = False - self.ip = node_ip - for name, ip in self.inputs.compute_info.iteritems(): - if ip == self.ip: - self.name = name - break - self.new_agent_conf_file = tempfile.NamedTemporaryFile( - mode='w+t', - prefix=self.name) - self.recd_agent_conf = tempfile.NamedTemporaryFile( - prefix=self.name+'-recd-') - self.recd_agent_conf_file = self.recd_agent_conf.name - self.username = username - self.password = password - # set agent params to defaults - self.default_values = {} - self.default_values['DEFAULT'] = { - 'flow_cache_timeout': 180, - 'headless_mode': 'false'} - self.default_values['FLOWS'] = {'max_vm_flows': 100} - self.max_system_flows = 512000 - # end __init__ - - def setUp(self): - super(ComputeNodeFixture, self).setUp() - # end setUp - - def cleanUp(self): - super(ComputeNodeFixture, self).cleanUp() - # end cleanUp - - def get_agent_conf_file(self): - self.file_transfer( - "get", - self.agent_conf_file, - self.recd_agent_conf_file) - - def put_agent_conf_file(self): - self.file_transfer( - "put", - 
self.new_agent_conf_file.name, - self.agent_conf_file) - - def read_agent_config(self): - self.get_agent_conf_file() - self.config = ConfigParser.SafeConfigParser() - try: - self.config.read(self.recd_agent_conf_file) - except ConfigParser.ParsingError as e: - self.logger.error('Hit Parsing Error!!') - self.logger.error('---------------------') - self.logger.error(e) - self.logger.error('---------------------') - - def dump_config(self): - for section_name in self.config.sections(): - self.logger.debug('Section: %s' % section_name) - self.logger.debug( - ' Options: %s' % - self.config.options(section_name)) - for name, value in self.config.items(section_name): - self.logger.debug(' %s = %s' % (name, value)) - self.logger.debug - - def write_agent_config(self): - with open(self.new_agent_conf_file.name, 'w') as file_to_update: - self.config.write(file_to_update) - - def execute_cmd(self, cmd): - return self.inputs.run_cmd_on_server( - self.ip, - cmd, - username=self.username, - password=self.password) - - def file_transfer(self, type, node_file, local_file): - with settings(host_string='%s@%s' % (self.username, self.ip), password=self.password, warn_only=True, - abort_on_prompts=False): - if type == "get": - return get(node_file, local_file) - if type == "put": - return put(node_file, local_file) - - def set_flow_aging_time(self, flow_cache_timeout=100): - self.logger.info( - 'Set flow aging time in node %s to %s' % - (self.ip, flow_cache_timeout)) - self.read_agent_config() - self.config.set( - 'DEFAULT', - 'flow_cache_timeout', - str(flow_cache_timeout)) - self.write_agent_config() - self.put_agent_conf_file() - self.get_config_flow_aging_time() - if self.flow_cache_timeout != flow_cache_timeout: - self.logger.error( - "Problem in setting flow_cache_timeout in node %s, expected %s, got %s" % - (self.name, flow_cache_timeout, self.flow_cache_timeout)) - else: - self.logger.info( - "Flow_cache_timeout set to %s successfully" % - (flow_cache_timeout)) - - def 
get_config_flow_aging_time(self): - self.flow_cache_timeout = int(self.get_option_value('DEFAULT', 'flow_cache_timeout')) - return self.flow_cache_timeout - - def get_config_per_vm_flow_limit(self): - self.max_vm_flows = float(self.get_option_value('FLOWS', 'max_vm_flows')) - - def set_per_vm_flow_limit(self, max_vm_flows=75): - self.logger.info('Set flow limit per VM at %s percent.' % max_vm_flows) - self.read_agent_config() - self.config.set('FLOWS', 'max_vm_flows', str(max_vm_flows)) - self.write_agent_config() - self.put_agent_conf_file() - self.get_config_per_vm_flow_limit() - if self.max_vm_flows != float(max_vm_flows): - self.logger.error( - "Problem in setting per_vm_flow_limit in node %s, expected %s, got %s" % - (self.name, max_vm_flows, self.max_vm_flows)) - else: - self.logger.info( - "Per_vm_flow_limit set to %s successfully" % - (max_vm_flows)) - - def get_headless_mode(self): - self.headless_mode = self.get_option_value('DEFAULT', 'headless_mode') - - def get_option_value(self, section_name, option_name): - self.logger.debug( - 'Get %s in section %s, node %s' % - (option_name, section_name, self.ip)) - self.read_agent_config() - try: - self.config.get(section_name, option_name) - exists = True - except ConfigParser.NoOptionError: - exists = False - pass - if exists: - option_value = self.config.get( - section_name, - option_name) - else: - option_value = self.default_values[section_name][option_name] - self.logger.debug( - "Section: %s, Option: %s not set explicitly in config file, go with default value: %s" % - (section_name, option_name, option_value)) - return option_value - - def set_headless_mode(self, headless_mode='false'): - self.logger.info('Set headless_mode in node %s' % (self.ip)) - self.read_agent_config() - self.config.set('DEFAULT', 'headless_mode', headless_mode) - self.write_agent_config() - self.put_agent_conf_file() - self.get_headless_mode() - if self.headless_mode != headless_mode: - self.logger.error( - "Problem in setting 
headless_mode in node %s, expected %s, got %s" % - (self.name, headless_mode, self.headless_mode)) - else: - self.logger.info( - "Headless mode set to %s successfully" % - (headless_mode)) - - @retry(delay=5, tries=15) - def wait_for_vrouter_agent_state(self, state='active'): - cmd = "contrail-status | grep 'contrail-vrouter-agent'" - service_status = self.execute_cmd(cmd) - if state in service_status: - self.logger.info( - 'contrail-vrouter-agent is in %s state' % state) - return True - else: - self.logger.info( - '%s' % service_status) - self.logger.info( - 'Waiting contrail-vrouter-agent to come up to %s state' % state) - return False - #end wait_for_vrouter_agent_state - - def sup_vrouter_process_restart(self): - self.logger.info( - 'Restart supervisor-vrouter process in node %s' % - (self.ip)) - cmd = "service supervisor-vrouter restart" - self.execute_cmd(cmd) - # This value is set based on experiment.. It takes 5secs after process - # is restarted to start setting up new flows - self.logger.debug( - "Wait for contrail-vrouter-agent to be in active state.") - self.wait_for_vrouter_agent_state(state='active') - - def sup_vrouter_process_start(self): - self.logger.info( - 'start supervisor-vrouter process in node %s' % - (self.ip)) - cmd = "service supervisor-vrouter start" - self.execute_cmd(cmd) - - def sup_vrouter_process_stop(self): - self.logger.info( - 'Stop supervisor-vrouter process in node %s' % - (self.ip)) - cmd = "service supervisor-vrouter stop" - self.execute_cmd(cmd) - - def get_vrouter_flow_count(self): - ''' Return dict of flow count by action - Forward, Deny, NAT ... - Calling code should migrate to get_vrouter_matching_flow_count, which is more specific.. 
- ''' - flow_count = {} - valid_flow_actions = ['F', 'D', 'N'] - for action in valid_flow_actions: - self.logger.debug( - 'Get count of flows in node %s with action %s' % - (self.ip, action)) - cmd = 'flow -l | grep Action | grep %s | wc -l ' % (action) - flow_count[action] = self.execute_cmd(cmd) - now = datetime.now() - self.logger.info( - "Flow count @ time %s in node %s is %s" % - (now, self.name, flow_count)) - return flow_count - - def get_vrouter_matching_flow_count(self, flow_data_l=[]): - '''Return dict of flow data from node matching the parameters supplied - Currently this filters flows based on tx_vm_ip, rx_vm_ip, proto & vrf_id. - Provide forward & reverse flows to be matched as inputs.. - ''' - flow_count = {'all': 0, 'allowed': 0, 'dropped_by_limit': 0} - for flow_data in flow_data_l: - src_ip = flow_data['src_ip'] - dst_ip = flow_data['dst_ip'] - proto = flow_data['proto'] - vrf = flow_data['vrf'] - self.logger.info('Get count of flows in node %s' % (self.ip)) - cmd_1 = 'flow -l | grep %s -A1 | grep %s -A1 | grep \"%s (%s\" -A1 | grep Action | wc -l' % ( - src_ip, dst_ip, proto, vrf) - cmd_2 = 'flow -l |grep %s -A1| grep %s -A1 |grep \"%s (%s\" -A1 |grep Action |grep -v FlowLim| wc -l' % ( - src_ip, dst_ip, proto, vrf) - cmd_3 = 'flow -l |grep %s -A1| grep %s -A1 |grep \"%s (%s\" -A1 |grep Action |grep FlowLim| wc -l' % ( - src_ip, dst_ip, proto, vrf) - flow_count['all'] += int(self.execute_cmd(cmd_1)) - self.logger.debug('Command issued: %s, all flows: %s' %(cmd_1, flow_count['all'])) - flow_count['allowed'] += int(self.execute_cmd(cmd_2)) - self.logger.debug('Command issued: %s, allowed flows: %s' %(cmd_2, flow_count['allowed'])) - flow_count['dropped_by_limit'] += int(self.execute_cmd(cmd_3)) - self.logger.debug('Command issued: %s, Limit dropped flows: %s' %(cmd_3, flow_count['dropped_by_limit'])) - self.logger.info( - "Flow count in node %s is %s" % - (self.name, flow_count['allowed'])) - return flow_count - - def get_agent_headless_mode(self): 
- result = False - try: - self.get_agent_conf_file() - self.config=self.read_agent_config() - opt = self.config.get('DEFAULT','headless_mode') - if opt == 'true': - result = True - except: - self.logger.info ('Headless mode is not set in the cofig file of agent') - - return result - # end get_agent_headless_mode - - def set_agent_headless_mode(self): - """ Reboot the agent to start in headless mode. - """ - mode = 'true' - self.logger.info ('Set the agent in headless mode!!!') - self.get_agent_conf_file() - self.read_agent_config() - self.config.set('DEFAULT', 'headless_mode', mode) - file= self.write_agent_config() - self.write_agent_config() - self.put_agent_conf_file() - self.sup_vrouter_process_restart() - # end set_agent_headless_mode - - # Needs implementation - # def get_OsVersion(self): - - # def get_VrouterReleaseVersion(self): - - # def get_VrouterBuildVersion(self): - - # def get_OS_Release_BuildVersion(self): - - def get_active_controller(self, refresh=False): - ''' Get the active contol node. 
- ''' - if not getattr(self, 'control_node', None) or refresh: - self.control_node = None - inspect_h = self.connections.agent_inspect[self.ip] - agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status() - for entry in agent_xmpp_status: - if entry['cfg_controller'] == 'Yes' \ - and entry['state'] == 'Established': - self.control_node = entry['controller_ip'] - break - if not self.control_node: - self.logger.error('Active controller is not found') - self.control_node = self.inputs.get_host_ip(self.control_node) - self.logger.debug('Active controller for agent %s is %s' - %(self.ip, self.control_node)) - return self.control_node diff --git a/fixtures/contrail_fixtures.py b/fixtures/contrail_fixtures.py deleted file mode 100644 index 65ad63e00..000000000 --- a/fixtures/contrail_fixtures.py +++ /dev/null @@ -1,81 +0,0 @@ - -def contrail_fix_ext(*dargs, **dkwargs): - ''' - Must have methods = (verify_on_setup) - or set verify=False explicitly - - verify function will be run only once unless force=True is set on call - Example: - - @contrail_fix_ext () - class Foo (object): - def __init__ (self): - pass - ## <--- Fail - - @contrail_fix_ext (verify=False) - class Foo (object): - def __init__ (self): - pass - ## <--- Setup will pass - ''' - def inner(cls): - cls._decorator_states = { - 'setup_done': False, - 'setup_verified': False, - 'obj_verified': False, - 'args': dargs, - 'kwargs': dkwargs, - } - cls_setup = cls.setUp - - def setup(self, *args, **kwargs): - if not self._decorator_states['setup_done']: - ret = cls_setup(self) - self._decorator_states['setup_done'] = True - if getattr(self._decorator_states['kwargs'], - 'verify_on_setup', True): - if not (self._decorator_states[ - 'setup_verified'] and not getattr(kwargs, 'force', - False)): - self.verify_on_setup() - self._decorator_states['setup_verified'] = True - return ret - if cls._decorator_states['kwargs'].get('verify_on_setup', True): - for method in ('verify_on_setup', ): - if not (method in dir(cls) and 
callable(getattr( - cls, method))): - raise NotImplementedError, 'class must implement %s' % method - - cls.setUp = setup - return cls - return inner - -# def check_state(): -# print "in check_state " -# def wrapper(function): -# print "in wrapper function " -# def s_wrapped(a,*args,**kwargs): -# print "in wrapped function " + str(a) + str(args) + str(kwargs) -# if not self.inputs.verify_state(): -# self.inputs.logger.warn( "Pre-Test validation failed.. Skipping test %s" %(function.__name__)) -# else : -# return function(self,*args,**kwargs) -# return s_wrapped -# return wrapper -# -# def logger(): -# print "in main logger" -# def log_wrapper(function): -# print "In log wrapper function" -# def l_wrapper(self, *args,**kwargs): -# print "In log wrapped function" -# self.inputs.logger.info('=' * 80) -# self.inputs.logger.info('STARTING TEST : ' + function.__name__ ) -# self.inputs.logger.info('END TEST : '+ function.__name__ ) -# self.inputs.logger.info('-' * 80) -# return function(self, *args, **kwargs) -# return l_wrapper -# return log_wrapper - -# ContrailFixtureExtension end diff --git a/fixtures/contrailapi.py b/fixtures/contrailapi.py deleted file mode 100644 index 658b07c5d..000000000 --- a/fixtures/contrailapi.py +++ /dev/null @@ -1,112 +0,0 @@ -from tcutils.util import * -from vnc_api.vnc_api import * -from orchestrator import Orchestrator - -class ContrailApi(Orchestrator): - - def __init__(self, inputs, vnc, logger): - self._inputs = inputs - self._vnc = vnc - self._log = logger - - def get_policy(self, fq_name, **kwargs): - return self._vnc.network_policy_read(fq_name=fq_name) - - def get_floating_ip(self, fip_id, **kwargs): - fip_obj = self._vnc.floating_ip_read(id=fip_id) - return fip_obj.get_floating_ip_address() - - def create_floating_ip(self, pool_obj, project_obj, **kwargs): - fip_obj = FloatingIp(get_random_name('fip'), pool_obj) - fip_obj.set_project(project_obj) - self._vnc.floating_ip_create(fip_obj) - fip_obj = 
self._vnc.floating_ip_read(fq_name=fip_obj.fq_name) - return (fip_obj.get_floating_ip_address(), fip_obj.uuid) - - def delete_floating_ip(self, fip_id, **kwargs): - self._vnc.floating_ip_delete(id=fip_id) - - def assoc_floating_ip(self, fip_id, vm_id, **kwargs): - fip_obj = self._vnc.floating_ip_read(id=fip_id) - vm_obj = self._vnc.virtual_machine_read(id=vm_id) - vmi = vm_obj.get_virtual_machine_interface_back_refs()[0]['uuid'] - vmintf = self._vnc.virtual_machine_interface_read(id=vmi) - fip_obj.set_virtual_machine_interface(vmintf) - self._log.debug('Associating FIP:%s with VMI:%s' % (fip_id, vm_id)) - self._vnc.floating_ip_update(fip_obj) - return fip_obj - - def disassoc_floating_ip(self, fip_id, **kwargs): - self._log.debug('Disassociating FIP %s' % fip_id) - fip_obj = self._vnc.floating_ip_read(id=fip_id) - fip_obj.virtual_machine_interface_refs=None - self._vnc.floating_ip_update(fip_obj) - return fip_obj - - def add_security_group(self, vm_id, sg_id, **kwargs): - sg = self.get_security_group(sg_id) - vnc_vm = self._vnc.virtual_machine_read(id=vm_id) - vmis = [vmi['uuid'] for vmi in vnc_vm.get_virtual_machine_interface_back_refs()] - vmis = [self._vnc.virtual_machine_interface_read(id=vmi) for vmi in vmis] - for vmi in vmis: - sg_lst = vmi.get_security_group_refs() - if not sg_lst: - sg_lst = [] - sg_lst.append({'uuid': sg.uuid, 'to':sg.fq_name}) - vmi.set_security_group_list(sg_lst) - self._vnc.virtual_machine_interface_update(vmi) - - def remove_security_group(self, vm_id, sg_id, **kwargs): - sg = self.get_security_group(sg_id) - vnc_vm = self._vnc.virtual_machine_read(id=vm_id) - vmis = [vmi['uuid'] for vmi in vnc_vm.get_virtual_machine_interface_back_refs()] - vmis = [self._vnc.virtual_machine_interface_read(id=vmi) for vmi in vmis] - for vmi in vmis: - sg_lst = vmi.get_security_group_refs() - if not sg_lst: - return - for i, sg_ref in enumerate(sg_lst): - if sg_ref['uuid'] == sg.uuid: - break - else: - return - sg_lst.pop(i) - 
vmi.set_security_group_list(sg_lst) - self._vnc.virtual_machine_interface_update(vmi) - - def create_security_group(self, sg_name, parent_fqname, sg_entries, **kwargs): - sg = SecurityGroup(sg_name, parent_type='project', - fq_name=parent_fqname+[sg_name]) - sg.security_group_entries = PolicyEntriesType(sg_entries) - self._vnc.security_group_create(sg) - sg = self._vnc.security_group_read(fq_name=sg.get_fq_name()) - return sg.uuid - - def delete_security_group(self, sg_id, **kwargs): - self._vnc.security_group_delete(id=sg_id) - - def get_security_group(self, sg_id, **kwargs): - try: - return self._vnc.security_group_read(id=sg_id) - except: - try: - return self._vnc.security_group_read(fq_name=sg_id) - except: - return None - - def get_security_group_rules(self, sg_id, **kwargs): - sg_info = self._vnc.security_group_read(id=sg_id) - return sg_info.get_security_group_entries().exportDict()['PolicyEntriesType']['policy_rule'] - - def delete_security_group_rules(self, sg_id, **kwargs): - sg = self._vnc.security_group_read(id=sg_id) - sg.set_security_group_entries(None) - self._vnc.security_group_update(sg) - - def set_security_group_rules(self, sg_id, sg_entries, **kwargs): - sg = self._vnc.security_group_read(id=sg_id) - sg.set_security_group_entries(PolicyEntriesType(sg_entries)) - return self._vnc.security_group_update(sg) - - def get_vn_list(self, **kwargs): - return self._vnc.virtual_networks_list(kwargs['parent_id'])['virtual-networks'] diff --git a/fixtures/control_node.py b/fixtures/control_node.py deleted file mode 100755 index 6a17f6c28..000000000 --- a/fixtures/control_node.py +++ /dev/null @@ -1,266 +0,0 @@ -import fixtures -from ipam_test import * -from vn_test import * -from tcutils.util import * -import time -import traceback -from fabric.api import env -from fabric.api import run -from fabric.state import output -from fabric.operations import get, put -from fabric.context_managers import settings, hide -import socket -from contrail_fixtures import * 
-env.disable_known_hosts = True -import json -import copy -from netaddr import IPNetwork - - -class CNFixture(fixtures.Fixture): - - ''' - Fixture to handle creation, verification and deletion of control node BGP peering. - ''' - - def __init__(self, connections, inputs, router_name, router_ip, router_type='contrail', router_asn='64512'): - self.connections = connections - self.inputs = inputs - self.quantum_h = self.connections.quantum_h - self.vnc_lib_h = self.connections.vnc_lib - self.api_s_inspect = self.connections.api_server_inspect - self.agent_inspect = self.connections.agent_inspect - self.cn_inspect = self.connections.cn_inspect - self.logger = inputs.logger - self.already_present = False - self.verify_is_run = False - self.router_asn = router_asn - self.router_name = router_name - self.router_ip = router_ip - self.router_type = router_type - # end __init__ - - def setUp(self): - super(CNFixture, self).setUp() - if not self.is_cn_present(self.router_ip): - self.create_cn_node(self.router_ip, self.router_type) - self.logger.info('Creating Peering in control node with ip %s' % - (self.router_ip)) - else: - self.logger.info( - 'Control nodel %s already present, not creating it' % - (self.router_ip)) - self.already_present = True - - # end setUp - - def create_cn_node(self, router_ip, router_type): - ''' - Procedure to create control node - ''' - bgp_addr_fams = AddressFamilies(['inet-vpn']) - bgp_sess_attrs = [ - BgpSessionAttributes(address_families=bgp_addr_fams)] - bgp_sessions = [BgpSession(attributes=bgp_sess_attrs)] - bgp_peering_attrs = BgpPeeringAttributes(session=bgp_sessions) - #rt_inst_obj = self._get_rt_inst_obj() - vnc_lib = self.vnc_lib_h - router_type = self.router_type - rt_inst_obj = vnc_lib.routing_instance_read( - fq_name=['default-domain', 'default-project', - 'ip-fabric', '__default__']) - - router_params = BgpRouterParams( - vendor=router_type, autonomous_system=int(self.router_asn), - identifier=str( - IPNetwork(router_ip).ip), - 
address=str( - IPNetwork(router_ip).ip), - port=179, address_families=bgp_addr_fams) - - bgp_router_obj = BgpRouter(self.router_name, rt_inst_obj, - bgp_router_parameters=router_params) - - cur_id = vnc_lib.bgp_router_create(bgp_router_obj) - cur_obj = vnc_lib.bgp_router_read(id=cur_id) - # full-mesh with existing bgp routers - fq_name = rt_inst_obj.get_fq_name() - bgp_router_list = vnc_lib.bgp_routers_list(parent_fq_name=fq_name) - bgp_router_ids = [bgp_dict['uuid'] - for bgp_dict in bgp_router_list['bgp-routers']] - bgp_router_objs = [] - for id in bgp_router_ids: - bgp_router_objs.append(vnc_lib.bgp_router_read(id=id)) - - for other_obj in bgp_router_objs: - if other_obj.uuid == cur_id: - continue - - cur_obj.add_bgp_router(other_obj, bgp_peering_attrs) - - vnc_lib.bgp_router_update(cur_obj) - # end create_cn_node - - def del_cn_node(self, router_ip): - ''' - Delete control node - ''' - vnc_lib = self.vnc_lib_h - #rt_inst_obj = self._get_rt_inst_obj() - rt_inst_obj = vnc_lib.routing_instance_read( - fq_name=['default-domain', 'default-project', - 'ip-fabric', '__default__']) - - fq_name = rt_inst_obj.get_fq_name() + [self.router_name] - cur_obj = vnc_lib.bgp_router_read(fq_name=fq_name) - - # remove full-mesh with existing bgp routers - fq_name = rt_inst_obj.get_fq_name() - bgp_router_list = vnc_lib.bgp_routers_list(parent_fq_name=fq_name) - bgp_router_ids = [bgp_dict['uuid'] - for bgp_dict in bgp_router_list['bgp-routers']] - bgp_router_objs = [] - for id in bgp_router_ids: - bgp_router_objs.append(vnc_lib.bgp_router_read(id=id)) - - for other_obj in bgp_router_objs: - if other_obj.uuid == cur_obj.uuid: - # our refs will be dropped on delete further down - continue - - other_obj.del_bgp_router(cur_obj) - - vnc_lib.bgp_router_delete(id=cur_obj.uuid) - # end del_cn_node - - def verify_on_setup(self): - result = True - if not self.verify_peer_in_control_nodes(): - result = result and False - self.logger.error( - "Either Control node %s does not have any BGP peer 
or is not in Established state" % (self.router_ip)) - self.verify_is_run = True - # TODO Verify in APi Server - # TODO Verify in Agent - return result - # end verify - - def is_cn_present(self, router_ip): - """ - Check if control node is already present - """ - result = False - present_router_list = [] - rt_inst_obj = self.vnc_lib_h.routing_instance_read( - fq_name=['default-domain', 'default-project', - 'ip-fabric', '__default__']) - fq_name = rt_inst_obj.get_fq_name() - bgp_router_list = self.vnc_lib_h.bgp_routers_list( - parent_fq_name=fq_name) - bgp_router_ids = [bgp_dict['uuid'] - for bgp_dict in bgp_router_list['bgp-routers']] - for id in bgp_router_ids: - present_router_list.append( - self.vnc_lib_h.bgp_router_read(id=id).bgp_router_parameters.address) - if router_ip in present_router_list: - result = True - - return result - # end is_cn_present - - @retry(delay=5, tries=6) - def verify_peer_in_control_nodes(self): - """ - Check the configured control node has any peer and if so the state is Established. - """ - result = True - for entry1 in self.inputs.bgp_ips: - cn_bgp_entry = self.cn_inspect[ - entry1].get_cn_bgp_neigh_entry(encoding='BGP') - if not cn_bgp_entry: - result = False - self.logger.error( - 'Control Node %s does not have any BGP Peer' % - (self.router_ip)) - else: - for entry in cn_bgp_entry: - if entry['state'] != 'Established': - result = result and False - self.logger.error('With Peer %s peering is not Established. 
Current State %s ' % ( - entry['peer'], entry['state'])) - else: - self.logger.info( - 'With Peer %s peering is Current State is %s ' % - (entry['peer'], entry['state'])) - return result - # end verify_vn_in_control_node - - def restart_control_node(self, host_ips=[]): - ''' - Restart the control node service - ''' - result = True - service_name = 'contrail-control' - if len(host_ips) == 0: - host_ips = [self.router_ip] - for host in host_ips: - username = self.inputs.host_data[host]['username'] - password = self.inputs.host_data[host]['password'] - self.logger.info('Restarting %s.service in %s' % - (service_name, self.inputs.host_data[host]['name'])) - issue_cmd = 'service %s restart' % (service_name) - self.inputs.run_cmd_on_server(host, issue_cmd, username, password) - # end restart_service - - def stop_control_node(self, host_ips=[]): - ''' - Stop the control node service - ''' - result = True - service_name = 'contrail-control' - if len(host_ips) == 0: - host_ips = [self.router_ip] - for host in host_ips: - username = self.inputs.host_data[host]['username'] - password = self.inputs.host_data[host]['password'] - self.logger.info('Stoping %s.service in %s' % - (service_name, self.inputs.host_data[host]['name'])) - issue_cmd = 'service %s stop' % (service_name) - self.inputs.run_cmd_on_server(host, issue_cmd, username, password) - # end stop_service - - def start_control_node(self, host_ips=[]): - ''' - Start the control node service - ''' - result = True - service_name = 'contrail-control' - if len(host_ips) == 0: - host_ips = [self.router_ip] - for host in host_ips: - username = self.inputs.host_data[host]['username'] - password = self.inputs.host_data[host]['password'] - self.logger.info('Starting %s.service in %s' % - (service_name, self.inputs.host_data[host]['name'])) - issue_cmd = 'service %s start' % (service_name) - self.inputs.run_cmd_on_server(host, issue_cmd, username, password) - # end start_service - - def cleanUp(self): - super(CNFixture, 
self).cleanUp() - do_cleanup = True - if self.inputs.fixture_cleanup == 'no': - do_cleanup = False - if self.already_present: - do_cleanup = False - if self.inputs.fixture_cleanup == 'force': - do_cleanup = True - if do_cleanup: - self.del_cn_node(self.router_ip) - self.logger.info("Deleting the Control Node %s " % - (self.router_ip)) - # TODO Add verification after cleanup here - else: - self.logger.info('Skipping the deletion of the Control Node %s ' % - (self.router_ip)) - # end cleanUp diff --git a/fixtures/ec2_base.py b/fixtures/ec2_base.py deleted file mode 100644 index e85bb100b..000000000 --- a/fixtures/ec2_base.py +++ /dev/null @@ -1,116 +0,0 @@ -from fabric.api import local, run -from fabric.context_managers import shell_env, settings -import time - - -class EC2Base(object): - - def __init__(self, tenant=None, logger=None, inputs=None): - self.inputs = inputs - if not tenant: - tenant = self.inputs.stack_tenant - self.tenant = tenant - self.logger = logger - self.openstack_ip = self.inputs.openstack_ip - self.os_username = self.inputs.host_data[self.openstack_ip]['username'] - self.os_password = self.inputs.host_data[self.openstack_ip]['password'] - if not self._set_ec2_keys(tenant): - if not self.create_ec2_keys(tenant): - self.logger.error('ec2-key create failed for vpc tenant') - self.tenant_id = None - - def run_cmd_on_os_node(self, cmd): - ''' - Run cmd on openstack node - ''' - with settings( - host_string='%s@%s' % (self.os_username, self.openstack_ip), password=self.os_password, - warn_only=True, abort_on_prompts=False): - output = run(cmd) - return output - # end run_cmd_on_os_node - - def _set_ec2_keys(self, tenant): - # export ec2 secret key and access key for admin or VPC - keys = self.run_cmd_on_os_node('(source /etc/contrail/openstackrc; keystone ec2-credentials-list)' - ).split('\n')[3:] - found = False - - for key in keys: - key = [k for k in filter(None, key.split(' ')) if k != '|'] - if key[0] == tenant: - found = True - 
self.logger.info('Exported ec2 keys for %s' % tenant) - self.access_key = key[1] - self.secret_key = key[2] - break - return found - # end set_ec2_keys - - def _shell_with_ec2_env(self, command, ret): - # shell to run Euca commands on machine with ec2 credentials - with settings( - host_string='%s@%s' % (self.os_username, self.openstack_ip), password=self.os_password, - warn_only=True, abort_on_prompts=False): - with shell_env(EC2_ACCESS_KEY=self.access_key, - EC2_SECRET_KEY=self.secret_key, - EC2_URL='http://%s:8773/services/Cloud' % self.openstack_ip): - out = run(command) - self.logger.debug('Command : %s' % (command)) - self.logger.debug('Output : %s' % (out)) - if 'Unauthorized' in out or 'Not Authorized' in out: - # A bad WA for bugs 1890 and 1984 - self.inputs.restart_service( - 'memcached', [self.inputs.openstack_ip]) - self.inputs.restart_service( - 'openstack-nova-api', [self.inputs.openstack_ip]) - # If openstack is not built by us - self.inputs.restart_service( - 'nova-api', [self.inputs.openstack_ip]) - time.sleep(5) - self.logger.debug('Trying the command again') - out = run(command) - self.logger.debug('Command : %s' % (command)) - self.logger.debug('Output : %s' % (out)) - if ret: - return out - # end _shell_with_ec2_env - - def create_ec2_keys(self, tenant_name): - key_data = {} - # create ec2 credentials for VPC - tenantId = self._get_tenant_id(tenant_name) - output = self.run_cmd_on_os_node('(source /etc/contrail/openstackrc; keystone ec2-credentials-create \ - --tenant-id %s)' % tenantId, ).split('\n') - self.logger.info('EC2 keys created for %s' % tenant_name) - for row in output: - if row[0] == '+': - continue - items = [k for k in filter(None, row.split(' ')) if k != '|'] - key_data[items[0]] = items[1] - self.logger.info('Exported ec2 keys for %s' % tenant_name) - self.access_key = key_data['access'] - self.secret_key = key_data['secret'] - self.logger.debug(key_data) - return key_data - # end create_ec2_keys - - def delete_ec2_keys(self, 
accessKey): - self.run_cmd_on_os_node('(source /etc/contrail/openstackrc; keystone ec2-credentials-delete \ - --access %s)' % accessKey) - self.logger.info('EC2 keys deleted for VPC') - # end delete_ec2_keys - - def _get_tenant_id(self, tenantName): - tenants = self.run_cmd_on_os_node('(source /etc/contrail/openstackrc; keystone tenant-get %s)' - % tenantName, ).split('\n') - - for tenant in tenants: - tenant = [k for k in filter(None, tenant.split(' ')) if k != '|'] - if tenant[0] == 'id': - self.tenant_id = tenant[1] - break - - return self.tenant_id - # end _get_tenant_id -# end class EC2Base diff --git a/fixtures/error_string_code.py b/fixtures/error_string_code.py deleted file mode 100644 index 155d1fdd3..000000000 --- a/fixtures/error_string_code.py +++ /dev/null @@ -1,2 +0,0 @@ - -ec2_api_error_noPubNw = "InvalidRequest: public network not provisioned" diff --git a/fixtures/floating_ip.py b/fixtures/floating_ip.py deleted file mode 100644 index 14f1f54ba..000000000 --- a/fixtures/floating_ip.py +++ /dev/null @@ -1,575 +0,0 @@ -import fixtures -from vnc_api.vnc_api import * -from cfgm_common import exceptions as vncExceptions -from project_test import * -import time -from contrail_fixtures import * -import ast -import sys -from tcutils.util import retry -try: - from webui_test import * -except ImportError: - pass - -#@contrail_fix_ext () - -class FloatingIPFixture(fixtures.Fixture): - - def __init__(self, inputs=None, pool_name=None, vn_id=None, - connections=None, vn_name=None, project_name=None, - option=None, uuid=None): - self.connections = connections - self.inputs = inputs or connections.inputs - if not project_name: - project_name = self.inputs.project_name - self.api_s_inspect = self.connections.api_server_inspect - self.orch = self.connections.orch - self.quantum_h = self.connections.quantum_h - self.agent_inspect = self.connections.agent_inspect - self.cn_inspect = self.connections.cn_inspect - self.vnc_lib_h = self.connections.vnc_lib - 
self.analytics_obj = self.connections.analytics_obj - - self.project_name = project_name - self.domain_name = self.inputs.domain_name - self.vn_id = vn_id - self.vn_name = vn_name - self.logger = self.inputs.logger - self.already_present = False - self.verify_is_run = False - self.fip = {} - self.option = option - self.fip_pool_id = uuid - if self.option == 'neutron': - pool_name = 'floating-ip-pool' - self.pool_name = pool_name or 'floating-ip-pool' - if self.inputs.verify_thru_gui(): - self.browser = self.connections.browser - self.browser_openstack = self.connections.browser_openstack - self.webui = WebuiTest(self.connections, self.inputs) - # end __init__ - - def read(self): - if self.fip_pool_id: - self.fip_pool_obj = self.vnc_lib_h.floating_ip_pool_read(id=self.fip_pool_id) - self.fq_name = self.get_fq_name() - self.pool_name = self.fq_name[-1] - self.vn_fq_name = self.fq_name[:-1] - self.vn_id = self.vnc_lib_h.fq_name_to_id('virtual-network', self.vn_fq_name) - self.pub_vn_name = self.vn_fq_name[-1] - self.logger.info('Fetched FIP pool %s(%s)' %(self.fq_name, self.fip_pool_id)) - self.already_present = True - # end read - - def setUp(self): - super(FloatingIPFixture, self).setUp() - self.create() - # end setUp - - def create(self): - if self.fip_pool_id: - return self.read() - self.project_obj = self.get_project_obj() - if not self.is_fip_pool_present(self.pool_name): - if self.inputs.is_gui_based_config(): - self.create_floatingip_pool_webui(self.pool_name, self.vn_name) - else: - self.create_floatingip_pool(self.pool_name, self.vn_id) - else: - self.logger.debug('FIP pool %s already present, not creating it' % - (self.pool_name)) - self.already_present = True - - def get_project_obj(self): - if not getattr(self, 'project_obj', None): - self.project_obj = self.vnc_lib_h.project_read(fq_name=[self.domain_name, self.project_name]) - return self.project_obj - - def create_floatingip_pool_webui(self, pool_name, vn_name): - 
self.webui.create_floatingip_pool(self, pool_name, vn_name) - # end create_floatingip_pool_webui - - def create_and_assoc_fip_webui(self, fip_pool_vn_id, vm_id, vm_name, project=None): - self.webui.create_and_assoc_fip( - self, fip_pool_vn_id, vm_id, vm_name, project=None) - # end create_and_assoc_fip_webui - - def verify_on_setup(self): - result = True - if not self.verify_fip_pool_in_api_server(): - result &= False - self.logger.error( - ' Verification of FIP pool %s in API Server failed' % - (self.pool_name)) - if not self.verify_fip_pool_in_control_node(): - result &= False - self.logger.error( - ' Verification of FIP pool %s in Control-node failed' % - (self.pool_name)) - self.verify_is_run = True - return result - # end verify_on_setup - - def create_floatingip_pool(self, fip_pool_name, vn_id): - self.logger.info('Creating Floating IP pool %s in API Server' % - (fip_pool_name)) - - # create floating ip pool from public network - self.pub_vn_obj = self.vnc_lib_h.virtual_network_read(id=vn_id) - self.pub_vn_name = self.pub_vn_obj.name - self.fip_pool_obj = FloatingIpPool(fip_pool_name, self.pub_vn_obj) - self.fip_pool_id = self.vnc_lib_h.floating_ip_pool_create( - self.fip_pool_obj) - - # allow current project to pick from pool - self.project_obj = self.get_project_obj() - self.project_obj.add_floating_ip_pool(self.fip_pool_obj) - self.vnc_lib_h.project_update(self.project_obj) - # end create_floatingip_pool - - def is_fip_pool_present(self, pool_name): - self.pub_vn_obj = self.vnc_lib_h.virtual_network_read(id=self.vn_id) - self.pub_vn_name = self.pub_vn_obj.name - try: - fip_pool_dict = self.vnc_lib_h.floating_ip_pools_list( - parent_id=self.vn_id) - # Bug 532 - if not fip_pool_dict['floating-ip-pools']: - return False - - fip_fq_name = None - for pool in fip_pool_dict['floating-ip-pools']: - if pool['fq_name'][-1] == pool_name: - fip_fq_name = pool['fq_name'] - break - else: - return False - - if fip_fq_name: - self.fip_pool_obj = 
self.vnc_lib_h.floating_ip_pool_read( - fq_name=fip_fq_name) - self.fip_pool_id = self.fip_pool_obj.uuid - except vncExceptions.HttpError: - return None - return True - # end get_fip_pool_if_present - - def get_uuid(self): - return self.fip_pool_id - - def get_fq_name(self): - return self.fip_pool_obj.get_fq_name() - - def get_vn_id(self): - return self.vn_id - - @retry(delay=2, tries=15) - def verify_fip_pool_in_api_server(self): - result = True - self.pub_vn_obj = self.vnc_lib_h.virtual_network_read(id=self.vn_id) - self.pub_vn_name = self.pub_vn_obj.name - self.cs_fip_pool_obj = self.api_s_inspect.get_cs_alloc_fip_pool( - fip_pool_name=self.pool_name, - vn_name=self.pub_vn_obj.name, project=self.project_name, refresh=True) - if not self.cs_fip_pool_obj: - self.logger.warn("Floating IP pool %s not found in API Server " % - (self.pool_name)) - result = result and False - return result - self.cs_fip_pool_id = self.cs_fip_pool_obj['floating-ip-pool']['uuid'] - self.cs_fvn_obj = self.api_s_inspect.get_cs_vn( - vn=self.pub_vn_obj.name, refresh=True, project=self.project_name) - if result: - self.logger.info( - 'FIP Pool verificatioin in API Server passed for Pool %s' % - (self.pool_name)) - return result - # end verify_fip_pool_in_api_server - - @retry(delay=2, tries=15) - def verify_fip_pool_in_control_node(self): - result = True - self.pub_vn_obj = self.vnc_lib_h.virtual_network_read(id=self.vn_id) - self.pub_vn_name = self.pub_vn_obj.name - for cn in self.inputs.bgp_ips: - cn_object = self.cn_inspect[cn].get_cn_config_fip_pool( - vn_name=self.pub_vn_name, fip_pool_name=self.pool_name, project=self.project_name) - if not cn_object: - self.logger.warn( - "Control-node ifmap object for FIP pool %s , VN %s not found" % - (self.pool_name, self.pub_vn_name)) - result = result and False - else: - self.logger.debug( - 'Control-node Ifmap-view has FIP pool %s information' % (self.pool_name)) - - return result - # end verify_fip_pool_in_control_node - - def 
delete_floatingip_pool(self): - fip_pool_id = self.fip_pool_id - fip_pool_obj = self.vnc_lib_h.floating_ip_pool_read(id=fip_pool_id) - self.project_obj = self.get_project_obj() - self.project_obj.del_floating_ip_pool(fip_pool_obj) - self.vnc_lib_h.project_update(self.project_obj) - self.vnc_lib_h.floating_ip_pool_delete(id=fip_pool_id) - # end delete_floatingip_pool - - @retry(delay=5, tries=3) - def verify_fip_pool_not_in_control_node(self): - result = True - for cn in self.inputs.bgp_ips: - cn_object = self.cn_inspect[cn].get_cn_config_fip_pool( - vn_name=self.pub_vn_name, fip_pool_name=self.pool_name, project=self.project_name) - if cn_object: - self.logger.warn( - "Control-node ifmap object for FIP pool %s , VN %s is found!" % - (self.pool_name, self.pub_vn_name)) - result = result and False - else: - self.logger.debug( - 'Control-node Ifmap-view does not have FIP pool %s information' % (self.pool_name)) - return result - # end verify_fip_pool_not_in_control_node - - def get_associated_fips(self): - fips_dict = self.fip_pool_obj.get_floating_ips() - return [fip['uuid'] for fip in fips_dict] - - def create_and_assoc_fip(self, fip_pool_vn_id=None, vm_id=None, project=None): - ''' Create and associate a floating IP to a VM with vm_id from VN fip_pool_vn_id - - Recommended to call verify_fip() after this method to make sure that the floating IP is correctly installed - ''' - fip_pool_vn_id = fip_pool_vn_id or self.vn_id - try: - fip_obj = self.create_floatingip(fip_pool_vn_id, project) - self.logger.debug('Associating FIP %s to %s' %(fip_obj[0], vm_id)) - self.assoc_floatingip(fip_obj[1], vm_id) - return fip_obj[1] - except: - self.logger.error('Failed to create or asscociate FIP. 
Error: %s' % - (sys.exc_info()[0])) - return None - # end create_and_assoc_fip - - def verify_fip(self, fip_id, vm_fixture, fip_vn_fixture): - result = True - fip = self.orch.get_floating_ip(fip_id) - self.fip[fip_id] = fip - if not self.verify_fip_in_control_node(fip, vm_fixture, fip_vn_fixture): - result &= False - if not self.verify_fip_in_agent(fip, vm_fixture, fip_vn_fixture): - result &= False - if not self.verify_fip_in_api_server(fip_id): - result &= False - return result - # end verify_fip - - def verify_no_fip(self, fip_id, fip_vn_fixture, fip=None): - result = True - fip = fip or self.fip[fip_id] - if not self.verify_fip_not_in_control_node(fip, fip_vn_fixture): - self.logger.error( - 'FIP %s absense verification failed on one or more control-nodes' % (fip)) - result &= False - if not self.verify_fip_not_in_agent(fip, fip_vn_fixture): - self.logger.error( - 'FIP %s absense verification failed on one or more agents ' % (fip)) - result &= False - self.logger.error( - 'FIP %s absense verification failed on API server ' % (fip)) - if not self.verify_fip_not_in_api_server(fip_id): - result &= False - return result - # end verify_no_fip - - @retry(delay=5, tries=3) - def verify_fip_in_control_node(self, fip, vm_fixture, fip_vn_fixture): - self.ctrl_nodes= vm_fixture.get_ctrl_nodes_in_rt_group() - agent_label = vm_fixture.get_agent_label() - for cn in self.ctrl_nodes: - ri_name = fip_vn_fixture.get_vrf_name() - cn_routes = self.cn_inspect[cn].get_cn_route_table_entry( - ri_name=ri_name, prefix=fip) - if not cn_routes: - self.logger.warn('No route found for %s in Control-node %s ' % - (fip, cn)) - return False - if cn_routes[0]['next_hop'] != vm_fixture.get_compute_host(): - self.logger.warn( - 'Expected next-hop for %s in Control-node %s : %s, Found : %s' % - (fip, cn, vm_node_data_ip, cn_routes[0]['next_hop'])) - return False - if cn_routes[0]['label'] not in agent_label[vm_fixture.vn_fq_name]: - self.logger.warn( - 'Expected label for %s in Control-node %s : 
%s, Found : %s' % - (fip, cn, agent_label[vm_fixture.vn_fq_name], cn_routes[0]['label'])) - return False - self.logger.info(' Route for FIP %s is fine on Control-node %s ' % - (fip, cn)) - # end for - self.logger.info( - 'FIP %s verification for passed on all Control-nodes' % (fip)) - return True - # end verify_fip_in_control_node - - @retry(delay=5, tries=3) - def verify_fip_not_in_control_node(self, fip, fip_vn_fixture): - for cn in self.inputs.bgp_ips: - ri_name = fip_vn_fixture.get_vrf_name() - cn_routes = self.cn_inspect[cn].get_cn_route_table_entry( - ri_name=ri_name, prefix=fip) - if cn_routes: - self.logger.warn( - ' FIP %s is still found in route table for Control node %s' % (fip, cn)) - return False - self.logger.info( - 'FIP %s is removed from route table for Control node %s' % (fip, cn)) - return True - # verify_fip_not_in_control_node - - @retry(delay=5, tries=3) - def verify_fip_in_agent(self, fip, vm_fixture, fip_vn_fixture): - label = vm_fixture.get_agent_label() - for compute_ip in self.inputs.compute_ips: - inspect_h = self.agent_inspect[compute_ip] - vn = inspect_h.get_vna_vn(vn_name=fip_vn_fixture.vn_name, project=self.project_name) - if vn is None: - continue - agent_vrf_objs = inspect_h.get_vna_vrf_objs( - vn_name=fip_vn_fixture.vn_name, project=self.project_name) - agent_vrf_obj = self.get_matching_vrf( - agent_vrf_objs['vrf_list'], fip_vn_fixture.get_vrf_name()) - agent_vrf_id = agent_vrf_obj['ucindex'] - agent_path = inspect_h.get_vna_active_route( - vrf_id=agent_vrf_id, ip=fip) - if not agent_path: - self.logger.debug( - 'Not able to get active route from agent.Retry...') - return False - agent_label = agent_path['path_list'][0]['label'] - self.logger.debug('agent_label query returned:%s' % - agent_path['path_list'][0]) - if not agent_label: - self.logger.debug( - 'Not able to retrieve label value from agent.Retry...') - return False - if agent_label not in label[vm_fixture.vn_fq_name]: - self.logger.warn( - 'The route for VM IP %s in Node 
%s is having incorrect label. Expected : %s, Seen : %s' % - (vm_fixture.vm_ip, compute_ip, label[vm_fixture.vn_fq_name], agent_label)) - return False - - self.logger.debug('Route for FIP IP %s is present in agent %s ' % - (fip, compute_ip)) - self.logger.debug( - 'FIP %s verification for VM %s in Agent %s passed ' % - (fip, vm_fixture.vm_name, compute_ip)) - # end for - return True - # end verify_fip_in_agent - - @retry(delay=5, tries=6) - def verify_fip_in_uve(self, fip, vm_fixture, fip_vn_fixture): - found_ip = 0 - found_vn = 0 - result = False - vm_intf = self.analytics_obj.get_ops_vm_uve_interface( - collector=self.inputs.collector_ip, uuid=vm_fixture.vm_id) - for item in vm_intf: - try: - intf = self.analytics_obj.get_intf_uve(item) - for item1 in intf['floating_ips']: - ip_list = [item1['ip_address']] - if item1.has_key('ip6_address'): - ip_list.extend([item1['ip6_address']]) - if fip in ip_list: - found_ip = 1 - if item1['virtual_network'] == fip_vn_fixture.vn_fq_name: - found_vn = 1 - except Exception as e: - self.logger.exception("Exception: %s"%e) - return False - - if found_ip and found_vn: - self.logger.info('FIP %s and Source VN %s found in %s UVE' % - (fip, fip_vn_fixture.vn_name, vm_fixture.vm_name)) - result = True - else: - self.logger.warn( - 'FIP %s and/or Source VN %s NOT found in %s UVE' % - (fip, fip_vn_fixture.vn_name, vm_fixture.vm_name)) - return result - # end verify_fip_in_uve - - @retry(delay=5, tries=3) - def verify_fip_not_in_agent(self, fip, fip_vn_fixture): - for compute_ip in self.inputs.compute_ips: - inspect_h = self.agent_inspect[compute_ip] - vn = inspect_h.get_vna_vn(vn_name=fip_vn_fixture.vn_name, project=self.project_name) - if vn is None: - continue - agent_vrf_objs = inspect_h.get_vna_vrf_objs( - vn_name=fip_vn_fixture.vn_name, project=self.project_name) - agent_vrf_obj = self.get_matching_vrf( - agent_vrf_objs['vrf_list'], fip_vn_fixture.get_vrf_name()) - agent_vrf_id = agent_vrf_obj['ucindex'] - if 
inspect_h.get_vna_active_route(vrf_id=agent_vrf_id, ip=fip): - self.logger.warn('Route for FIP %s present in Agent %s' % - (fip, compute_ip)) - return False - self.logger.info('Route for FIP %s is removed from agent %s' % - (fip, compute_ip)) - return True - # end verify_fip_not_in_agent - - def get_matching_vrf(self, vrf_objs, vrf_name): - return [x for x in vrf_objs if x['name'] == vrf_name][0] - - def disassoc_and_delete_fip(self, fip_id): - ''' Disassociate and then delete the Floating IP . - Strongly recommeded to call verify_no_fip() after this call - ''' - self.disassoc_floatingip(fip_id) - self.delete_floatingip(fip_id) -# time.sleep(10) - # end disassoc_and_delete_fip - - def disassoc_and_delete_fip_webui(self, vm_id): - self.webui.disassoc_floatingip(self, vm_id) - # end disassoc_and_delete_fip_webui - - def create_floatingips(self, fip_pool_vn_id, count=1): - ''' Creates 1 or more floating ips from a pool. - - ''' - # allocate 'count' number of floating ips - fip_dicts = [] - for i in range(count): - fip_resp = self.create_floatingip(fip_pool_vn_id) - if fip_resp: - fip_dicts.append(fip_resp['floatingip']) - # end for - return fip_dicts - # end create_floatingips - - def create_floatingip(self, fip_pool_vn_id, project_obj=None): - ''' Creates a single floating ip from a pool. 
- - ''' - if project_obj is None: - project_obj = self.get_project_obj() - fip_resp = self.orch.create_floating_ip(pool_vn_id=fip_pool_vn_id, - project_obj=project_obj, pool_obj=self.fip_pool_obj) - self.logger.debug('Created Floating IP : %s' % str(fip_resp)) - return fip_resp - # end create_floatingip - - def verify_fip_in_api_server(self, fip_id): - ''' Verify floating ip presence and links in API Server - - ''' - cs_fip_obj = self.api_s_inspect.get_cs_fip(fip_id, refresh=True) - if not cs_fip_obj: - return False - self.logger.info('FIP verification passed in API server') - return True - # end - - def verify_fip_not_in_api_server(self, fip_id): - ''' Verify floating ip removed in API Server - ''' - cs_fip_obj = self.api_s_inspect.get_cs_fip(fip_id, refresh=True) - if cs_fip_obj: - return False - self.logger.info('FIP removal verification passed in API server') - return True - # end - - def delete_floatingips(self, fip_obj_list): - ''' Removes floating ips from a pool. Need to pass a floatingIP object-list - - ''' - for i in fip_obj_list: - index = fip_obj_list.index(i) - self.delete_floatingip(fip_obj_list[index]['id']) - # end for - # end delete_floatingips - - def delete_floatingip(self, fip_id): - self.logger.debug('Deleting FIP ID %s' %(fip_id)) - self.orch.delete_floating_ip(fip_id) - # end delete_floatingip - - def assoc_floatingip(self, fip_id, vm_id): - return self.orch.assoc_floating_ip(fip_id, vm_id) - # end assoc_floatingip - - def disassoc_floatingip(self, fip_id): - return self.orch.disassoc_floating_ip(fip_id) - # end - - def assoc_project(self, project, domain='default-domain'): - result = True - self.logger.info('Associting Floting IP with project %s' % (project)) - - # Create the project object - project_fq_name = [domain, project] - self.new_project_obj = self.vnc_lib_h.project_read( - fq_name=project_fq_name) - - # Associate project with floating IP pool - result = self.new_project_obj.add_floating_ip_pool( - self.fip_pool_obj) - 
self.vnc_lib_h.project_update(self.new_project_obj) - self.new_project_obj = self.vnc_lib_h.project_read( - fq_name=project_fq_name) - return self.new_project_obj - # end assoc_project - - def deassoc_project(self, project, domain='default-domain'): - result = True - self.logger.info('De-associting Floting IP with project %s' % - (project)) - - # Create the project object - project_fq_name = [domain, project] - self.new_project_obj = self.vnc_lib_h.project_read( - fq_name=project_fq_name) - - # Deassociate project with floating IP pool - result = self.new_project_obj.del_floating_ip_pool( - self.fip_pool_obj) - self.vnc_lib_h.project_update(self.new_project_obj) - self.new_project_obj = self.vnc_lib_h.project_read( - fq_name=project_fq_name) - return self.new_project_obj - # end assoc_project - - def cleanUp(self): - super(FloatingIPFixture, self).cleanUp() - self.delete() - # end cleanUp - - def delete(self, verify=False): - do_cleanup = True - if self.inputs.fixture_cleanup == 'no': - do_cleanup = False - if self.already_present: - do_cleanup = False - if self.inputs.fixture_cleanup == 'force': - do_cleanup = True - if do_cleanup: - self.logger.info('Deleting the FIP pool %s' % - (self.pool_name)) - if self.inputs.is_gui_based_config(): - self.webui.delete_floatingip_pool(self) - else: - self.delete_floatingip_pool() - if self.verify_is_run or verify: - assert self.verify_fip_pool_not_in_control_node() - else: - self.logger.info('Skipping deletion of FIP pool %s' % - (self.pool_name)) diff --git a/fixtures/heat_test.py b/fixtures/heat_test.py deleted file mode 100644 index f0760caa0..000000000 --- a/fixtures/heat_test.py +++ /dev/null @@ -1,213 +0,0 @@ -from __future__ import print_function -from tcutils.util import * -import argparse -import logging -import six -import sys -from common.openstack_libs import ks_client as ksclient -import heatclient -from heatclient import client as heat_client -from heatclient.common import utils -from heatclient import exc -from 
oslo.utils import strutils -logger = logging.getLogger(__name__) -from tcutils.util import get_plain_uuid, get_dashed_uuid -import os -import fixtures -from contrail_fixtures import contrail_fix_ext - - -class HeatFixture(fixtures.Fixture): - - def __init__( - self, - connections, - username, - password, - project_fq_name, - inputs, - cfgm_ip, - openstack_ip): - self.connections = connections - httpclient = None - self.heat_port = '8004' - self.heat_api_version = '1' - self.username = username - self.password = password - self.vnc_lib_h = self.connections.vnc_lib - self.project_obj = self.vnc_lib_h.project_read(fq_name=project_fq_name) - self.project_id = get_plain_uuid(self.project_obj.uuid) - self.cfgm_ip = cfgm_ip - insecure = bool(os.getenv('OS_INSECURE', True)) - self.openstack_ip = openstack_ip - self.inputs = inputs - self.openstack_ip = self.inputs.host_data[self.openstack_ip]['host_ip'] - self.obj = None - self.heat_url = 'http://%s:%s/v1/%s' % ( - self.openstack_ip, self.heat_port, self.project_id) - if not self.inputs.ha_setup: - self.auth_url = os.getenv('OS_AUTH_URL') or \ - 'http://' + openstack_ip + ':5000/v2.0' - else: - self.auth_url = os.getenv('OS_AUTH_URL') or \ - 'http://' + openstack_ip + ':5000/v2.0' - self.kc = ksclient.Client( - username=self.inputs.stack_user, - password=self.inputs.stack_password, - tenant_name=self.inputs.project_name, - auth_url=self.auth_url, - insecure=insecure) - self.logger = self.inputs.logger - # end __init__ - - def setUp(self): - super(HeatFixture, self).setUp() - self.auth_token = self.kc.auth_token - kwargs = { - 'token': self.auth_token, - } - - self.obj = heat_client.Client( - self.heat_api_version, self.heat_url, **kwargs) - # end setUp - - def cleanUp(self): - super(HeatFixture, self).cleanUp() - - def get_handle(self): - return self.obj - # end get_handle - - def list_stacks(self): - stack_list = [] - for i in self.obj.stacks.list(): - stack_list.append(i) - return stack_list - # end list_stacks - - 
-class HeatStackFixture(fixtures.Fixture): - - def __init__( - self, - connections, - inputs, - stack_name, - project_fq_name, - template=None, - env=None): - self.connections = connections - self.vnc_lib_h = self.connections.vnc_lib - self.project_obj = self.vnc_lib_h.project_read(fq_name=project_fq_name) - self.project_id = get_plain_uuid(self.project_obj.uuid) - self.inputs = inputs - self.stack_name = stack_name - self.template = template - self.logger = self.inputs.logger - self.env = env - self.already_present = False -# end __init__ - - def setUp(self): - super(HeatStackFixture, self).setUp() - fields = {} - fields = {'stack_name': self.stack_name, - 'template': self.template, 'environment': self.env} - self.heat_obj = self.useFixture( - HeatFixture(connections=self.connections, username=self.inputs.username, password=self.inputs.password, - project_fq_name=self.inputs.project_fq_name, inputs=self.inputs, cfgm_ip=self.inputs.cfgm_ip, openstack_ip=self.inputs.openstack_ip)) - self.heat_client_obj = self.heat_obj.obj - for i in self.heat_client_obj.stacks.list(): - if i.stack_name == self.stack_name: - self.logger.info('Stack %s exists. 
Not creating'%i.stack_name) - self.already_present = True - return i - if self.already_present != True: - stack_obj = self.heat_client_obj.stacks.create(**fields) - self.logger.info('Creating Stack %s' % self.stack_name) - self.wait_till_stack_created(self.stack_name) - return stack_obj - # end create_stack - - def cleanUp(self): - super(HeatStackFixture, self).cleanUp() - do_cleanup = True - self.logger.info('Deleting Stack %s' % self.stack_name) - if self.already_present: - do_cleanup = False - if do_cleanup: - self.heat_obj = self.useFixture( - HeatFixture(connections=self.connections, username=self.inputs.username, password=self.inputs.password, - project_fq_name=self.inputs.project_fq_name, inputs=self.inputs, cfgm_ip=self.inputs.cfgm_ip, openstack_ip=self.inputs.openstack_ip)) - self.heat_client_obj = self.heat_obj.obj - self.heat_client_obj.stacks.delete(self.stack_name) - self.wait_till_stack_is_deleted(self.stack_name) - else: - self.logger.info('Skipping the deletion of Stack %s' %self.stack_name) - # end delete_stack - - def update(self, stack_name, new_parameters): - fields = {} - fields = {'stack_name': self.stack_name, - 'template': self.template, 'environment': {}, - 'parameters': new_parameters} - self.heat_client_obj = self.heat_obj.obj - for i in self.heat_client_obj.stacks.list(): - if i.stack_name == stack_name: - result= True - stack_obj = self.heat_client_obj.stacks.update(i.id, **fields) - self.logger.info('Updating Stack %s' % self.stack_name) - self.wait_till_stack_updated(self.stack_name) - return stack_obj - else: - result= False - assert result, 'Stack %s not seen'%self.stack_name - #end update - - @retry(delay=5, tries=10) - def wait_till_stack_updated(self, stack_name=None): - result = False - for stack_obj in self.heat_obj.list_stacks(): - if stack_obj.stack_name == stack_name: - if stack_obj.stack_status == 'UPDATE_COMPLETE': - self.logger.info( - 'Stack %s updated successfully.' 
% stack_obj.stack_name) - result = True - break - else: - self.logger.info('Stack %s is in %s state. Retrying....' % ( - stack_obj.stack_name, stack_obj.stack_status)) - return result - # end wait_till_stack_updated - - @retry(delay=5, tries=10) - def wait_till_stack_created(self, stack_name=None): - result = False - for stack_obj in self.heat_obj.list_stacks(): - if stack_obj.stack_name == stack_name: - if stack_obj.stack_status == 'CREATE_COMPLETE': - self.logger.info( - 'Stack %s created successfully.' % stack_obj.stack_name) - result = True - break - else: - self.logger.info('Stack %s is in %s state. Retrying....' % ( - stack_obj.stack_name, stack_obj.stack_status)) - return result - # end wait_till_stack_created - - @retry(delay=5, tries=10) - def wait_till_stack_is_deleted(self, stack_name=None): - result = True - for stack_obj in self.heat_obj.list_stacks(): - if stack_obj.stack_name == stack_name: - result = False - self.logger.info('Stack %s is in %s state. Retrying....' % ( - stack_obj.stack_name, stack_obj.stack_status)) - break - else: - continue - if result == True: - self.logger.info('Stack %s is deleted.' 
% stack_name) - return result - # end wait_till_stack_is_deleted diff --git a/fixtures/host_endpoint.py b/fixtures/host_endpoint.py deleted file mode 100644 index 99eae7892..000000000 --- a/fixtures/host_endpoint.py +++ /dev/null @@ -1,395 +0,0 @@ -import logging -import fixtures -from fabric.api import env -from fabric.api import run, sudo -from fabric.contrib.files import exists -from fabric.context_managers import settings, hide -import re -import time - -from tcutils.util import retry, search_arp_entry -from tcutils.tcpdump_utils import start_tcpdump_for_intf,\ - stop_tcpdump_for_intf - -class HostEndpointFixture(fixtures.Fixture): - - ''' HostEndpointFixture sets up a namespace (say ns1) - - Connection will be of the form - (Physical network)----p1p2(brns1)ovsns1tap1----tap1(ns1) - - - openvswitch is required on host_ip to act as a bridge - arping and vlan packages are also required - - ''' - - def __init__(self, - host_ip, - namespace, - username='root', - password='c0ntrail123', - interface='p1p2', - ns_intf='tap1', - ns_mac_address=None, - ns_ip_address=None, - ns_netmask=None, - ns_gateway=None, - connections=None, - vlan_id=None, - tor_name = None, - ): - self.host_ip = host_ip - self.username = username - self.password = password - self.phy_interface = interface - self.namespace = namespace - self.identifier = '%s-%s' % (host_ip, namespace) - self.bridge = 'br%s' % (interface) - self.ns_intf = ns_intf - self.bridge_intf = '%s%s%s' % (interface, namespace, self.ns_intf) - self.ns_mac_address = ns_mac_address - self.ns_ip_address = ns_ip_address - self.ns_netmask = ns_netmask - self.ns_gateway = ns_gateway - self.vlan_id = vlan_id - if vlan_id: - self.interface = interface + '.' 
+ str(vlan_id) - else: - self.interface = interface - - if connections: - self.logger = connections.inputs.logger - self.connections = connections - else: - self.logger = logging.getLogger(__name__) - - - self.name = '[%s-%s]' % (self.bridge, self.namespace) - self.tor_name = tor_name - # end __init__ - - def ovs_vsctl(self, args): - output = None - if exists('/var/run/openvswitch/db-%s.sock ' % (self.tor_name)): - prefix = '--db=unix:/var/run/openvswitch/db-%s.sock ' % (self.tor_name) - else: - prefix = '' - args = prefix + args - output = run('ovs-vsctl %s' % (args)) - return output - # end ovs_vsctl - - def add_vlan_config(self): - output = run('ifconfig | grep "^%s "' % (self.interface)) - if not self.interface in output: - run('vconfig add %s %s' % (self.phy_interface, self.vlan_id)) - # end add_vlan_config - - def delete_vlan_config(self): - br_ports = self.ovs_vsctl('list-ports %s | grep "^%s$"' % (self.bridge, - self.interface)) - if br_ports: - # It means that some other links are present on the bridge. - # Maybe some other ns. Do not remove the vlan config - pass - else: - run('vconfig rem %s' % (self.interface)) - # end delete_vlan_config - - def add_bridge(self, bridge=None): - ''' It is assumed that if the bridge is created, - the corresponding uplink interface(self.interface) - from the bridge is also present. 
- ''' - if not bridge: - bridge = self.bridge - output = self.ovs_vsctl('list-br | grep "^%s$"' % (bridge)) - if output: - # bridge is already present - pass - else: - self.ovs_vsctl('add-br %s' % (bridge)) - time.sleep(1) - run('ip link set %s up' % (bridge)) - self.ovs_vsctl('set bridge %s stp_enable=false' % (bridge)) - time.sleep(1) - self.ovs_vsctl('add-port %s %s' % (bridge, self.interface)) - time.sleep(1) - # end add_bridge - - def delete_bridge(self, bridge=None): - if not bridge: - bridge = self.bridge - # Ignore the uplink intf towards the ToR - output = self.ovs_vsctl('list-ports %s | grep -v "^%s$"' % (bridge, - self.interface)) - if output: - # There are ports possibly from other fixtures - # Dont delete - pass - else: - self.ovs_vsctl('del-port %s %s' % (self.bridge, self.interface)) - time.sleep(1) - self.ovs_vsctl('del-br %s' % (self.bridge)) - time.sleep(1) - # end delete_bridge - - - def setUp(self): - super(HostEndpointFixture, self).setUp() - self.logger.info('Setting up namespace %s on BMS host %s' % ( - self.namespace, self.host_ip)) - with settings( - host_string='%s@%s' % (self.username, self.host_ip), - password=self.password, - warn_only=True, abort_on_prompts=False): - if self.vlan_id: - self.add_vlan_config() - - run('ip netns add %s' % (self.namespace)) - time.sleep(1) - self.add_bridge() - - run('ip link add %s type veth peer name %s' % (self.ns_intf, - self.bridge_intf)) - self.ovs_vsctl('add-port %s %s' % (self.bridge, self.bridge_intf)) - time.sleep(1) - run('ip link set netns %s %s' % (self.namespace, self.ns_intf)) - time.sleep(1) - if self.ns_mac_address: - self.set_interface_mac(self.ns_mac_address) - time.sleep(1) - if self.ns_ip_address: - self.set_interface_ip(self.ns_ip_address, self.ns_netmask, - gateway=self.ns_gateway) - run('ip link set dev %s up' % (self.bridge_intf)) - time.sleep(1) - run('ip link set dev %s up' % (self.interface)) - time.sleep(1) - run('ip netns exec %s ip link set dev %s up' % (self.namespace, - 
self.ns_intf)) - run('ip netns exec %s ifconfig lo up' % (self.namespace)) - time.sleep(1) - - self.info = self.get_interface_info() - # end setUp - - def cleanUp(self): - super(HostEndpointFixture, self).cleanUp() - self.logger.info('Deleting namespace %s on BMS host %s' % ( - self.namespace, self.host_ip)) - with settings( - host_string='%s@%s' % (self.username, self.host_ip), - password=self.password, - warn_only=True, abort_on_prompts=False): - self.ovs_vsctl('del-port %s %s' % (self.bridge, self.bridge_intf)) - time.sleep(1) - if self.vlan_id: - self.delete_vlan_config() - self.delete_bridge() - run('ip netns exec %s dhclient -r -v tap1' % (self.namespace)) - time.sleep(1) - run('ip netns exec %s ip link delete tap1' % (self.namespace)) - time.sleep(1) - run('ip netns pids %s | xargs kill -9 ' % (self.namespace)) - time.sleep(1) - run('ip netns delete %s' % (self.namespace)) - time.sleep(1) - # end cleanUp - - def run_cmd(self, cmd, pty=True, timeout=None, as_sudo=False): - self.logger.debug("Running Command on namespace %s-%s: (%s)" % ( - self.host_ip, self.namespace, cmd)) - if not timeout: - timeout = env.timeout - with settings( - host_string='%s@%s' % (self.username, self.host_ip), - password=self.password, - warn_only=True, abort_on_prompts=False, - timeout=timeout): - cmd = 'ip netns exec %s %s' % (self.namespace, cmd) - if as_sudo: - output = sudo('%s' % (cmd), pty=pty) - else: - output = run('%s' % (cmd), pty=pty) - self.logger.debug(output) - return output - # end run_cmd - - def set_interface_mac(self, mac, interface=None): - if not interface: - interface = self.ns_intf - if not mac: - self.logger.debug('ns %s : No MAC to set on interface %s' % ( - self.namespace, interface)) - return - - with settings( - host_string='%s@%s' % (self.username, self.host_ip), - password=self.password, - warn_only=True, abort_on_prompts=False): - run('ip netns exec %s ip link set %s address %s' % ( - self.namespace, self.ns_intf, mac)) - - def set_interface_ip(self, 
ip, netmask, gateway=None, interface=None): - if not interface: - interface = self.ns_intf - if not ip: - self.logger.debug('ns %s : No IP to set on interface %s' % ( - self.namespace, interface)) - return - with settings( - host_string='%s@%s' % (self.username, self.host_ip), - password=self.password, - warn_only=True, abort_on_prompts=False): - run('ip netns exec %s ifconfig %s %s netmask %s' % ( - self.namespace, self.ns_intf, ip, netmask)) - if gateway: - run('ip netns exec %s route add default gw %s' % (self.namespace, - gateway)) - # end set_interface_ip - - def get_interface_info(self, interface=None): - '''Returns interface info as a dict from ifconfig output - Ex : - info = { 'up' : True, - 'hwaddr' : '00:00:00:00:00:01', - 'inet_addr': '10.1.1.10' - } - ''' - if not interface: - interface = self.ns_intf - info = {'up': False, - 'hwaddr': None, - 'inet_addr': None} - with settings( - host_string='%s@%s' % (self.username, self.host_ip), - password=self.password, - warn_only=True, abort_on_prompts=False): - output = run('ip netns exec %s ifconfig %s' % (self.namespace, - interface)) - info['hwaddr'] = re.search(r'Hwaddr ([:0-9a-z]*)', output, - re.M | re.I).group(1) - s_obj = re.search(r'inet addr:([\.0-9]*)', - output, re.M | re.I) - if s_obj: - info['inet_addr'] = s_obj.group(1) - if 'UP ' in output: - info['up'] = True - return info - # end get_interface_info - - def run_dhclient(self, interface=None, timeout=200, update_dns=False): - if not interface: - interface = self.ns_intf - self.run_cmd('ifconfig %s 0.0.0.0' % (interface)) - output = self.run_cmd('dhclient -r -v %s' % (interface)) - # Disable updating of resolv.conf - if not update_dns: - output = self.run_cmd('resolvconf --disable-updates') - output = self.run_cmd('timeout %s dhclient -v %s' % ( - timeout, interface), timeout=20) - self.logger.info('Dhcp transaction : %s' % (output)) - if not update_dns: - self.run_cmd('resolvconf --enable-updates') - - if not 'bound to' in output: - 
self.logger.warn('DHCP did not complete !!') - return (False, output) - else: - self.info = self.get_interface_info() - - return (True, output) - # end run_dhclient - - def arping(self, ip, interface=None): - if not interface: - interface = self.ns_intf - cmd = 'arping -i %s -c 1 -r %s' % (interface, ip) - output = self.run_cmd(cmd) - self.logger.debug('On %s, arping to %s returned %s' % ( - self.identifier, ip, output)) - return (output.succeeded, output) - - def ping(self, ip, other_opt='', size='56', count='5'): - src_ip = self.info['inet_addr'] - cmd = 'ping -s %s -c %s %s %s' % ( - str(size), str(count), other_opt, ip) - output = self.run_cmd(cmd) - expected_result = ' 0% packet loss' - try: - if expected_result not in output: - self.logger.warn("Ping to IP %s from host %s failed" % - (ip, src_ip)) - return False - else: - self.logger.info('Ping to IP %s from host %s passed' % - (ip, src_ip)) - return True - except Exception as e: - self.logger.warn("Got exception in ping from host ns ip %s: :%s" % ( - src_ip, e)) - return False - return False - # end ping - - @retry(delay=1, tries=10) - def ping_with_certainty(self, ip, other_opt='', size='56', count='5', - expectation=True): - retval = self.ping(ip, other_opt=other_opt, - size=size, - count=count) - return ( retval== expectation ) - # end ping_with_certainty - - def get_arp_entry(self, ip_address=None, mac_address=None): - output = self.run_cmd('arp -an') - return search_arp_entry(output, ip_address, mac_address) - # end get_arp_entry - - def get_gateway_ip(self): - cmd = '''netstat -anr |grep ^0.0.0.0 | awk '{ print $2 }' ''' - gw_ip = self.run_cmd(cmd) - return gw_ip - # end get_gateway_ip - - def get_gateway_mac(self): - return self.get_arp_entry(ip_address=self.get_gateway_ip())[1] - - def clear_arp(self, all_entries=True, ip_address=None, mac_address=None): - if ip_address or mac_address: - (output, ip, mac) = self.get_arp_entry(ip_address, mac_address) - cmd = 'arp -d %s' % (ip_address) - elif 
all_entries: - cmd = 'ip -s -s neigh flush all' - - output = self.run_cmd(cmd) - return output - # end clear_arp - - def start_tcpdump(self, interface=None, filters=''): - if not interface: - interface = self.bridge_intf - (session, pcap) = start_tcpdump_for_intf(self.host_ip, self.username, - self.password, interface, filters, self.logger) - return (session, pcap) - - def stop_tcpdump(self, session, pcap): - stop_tcpdump_for_intf(session, pcap, self.logger) - - def add_static_arp(self, ip, mac): - self.run_cmd('arp -s %s %s' % (ip, mac), as_sudo=True) - self.logger.info('Added static arp %s:%s on BMS %s' % (ip, mac, - self.identifier)) - -if __name__ == "__main__": - host_ip = '10.204.217.16' - # h_f = HostEndpointFixture(host_ip,'ns1', interface='p1p2', - # ns_mac_address='fe:d3:bc:f0:ac:05', ns_ip_address='10.1.1.7', - # ns_netmask='255.255.255.0') - h_f = HostEndpointFixture(host_ip, 'ns1', interface='p1p2', - ns_mac_address='fe:d3:bc:f0:ac:05', vlan_id=5) - h_f.setUp() - h_f.run_dhclient() - h_f.cleanUp() diff --git a/fixtures/ipam_test.py b/fixtures/ipam_test.py deleted file mode 100644 index 6f15005d1..000000000 --- a/fixtures/ipam_test.py +++ /dev/null @@ -1,252 +0,0 @@ -import fixtures -from vn_test import * -from project_test import * -from tcutils.util import * -from vnc_api.vnc_api import * -from netaddr import * -from contrail_fixtures import * -import inspect -from common.policy import policy_test_utils -try: - from webui_test import * -except ImportError: - pass - -class IPAMFixture(fixtures.Fixture): - - def __init__(self, name=None, connections=None, project_obj=None, - ipamtype=IpamType("dhcp"), vdns_obj=None, uuid=None): - self.name = name - self.connections = connections or project_obj.connections - self.inputs = self.connections.inputs - self.logger = self.connections.logger - self.api_s_inspect = self.connections.api_server_inspect - self.ipamtype = ipamtype - self.already_present = False - self.cn_inspect = self.connections.cn_inspect - 
self.agent_inspect = self.connections.agent_inspect - self.project_name = self.connections.project_name - self.vnc = self.connections.get_vnc_lib_h() - self.vdns_obj = vdns_obj - self.ipam_id = uuid - self.verify_is_run = False - self.ri_name = None - self.fq_name = [self.connections.domain_name, self.project_name, self.name] - if self.inputs.verify_thru_gui(): - self.browser = self.connections.browser - self.browser_openstack = self.connections.browser_openstack - self.webui = WebuiTest(self.connections, self.inputs) - if self.inputs.orchestrator == 'vcenter': - # Overide for vcenter, IP allocation is in vcenter - # represented as 'vCenter-ipam' in contrail-cfgm - self.name = 'vCenter-ipam' - # end __init__ - - def read(self): - if self.ipam_id: - self.obj = self.vnc.network_ipam_read(id=self.ipam_id) - self.fq_name = self.obj.get_fq_name() - self.name = self.fq_name[-1] - self.project_name = self.fq_name[-2] - self.logger.info('Found IPAM %s(%s)'%(self.fq_name, self.ipam_id)) - self.already_present = True - - def setUp(self): - super(IPAMFixture, self).setUp() - self.create() - # end setup - - def create(self): - if self.ipam_id: - return self.read() - if not self.name: - self.fq_name = NetworkIpam().get_fq_name() - self.name = str(self.fq_name[2]) - - for ipam in self.vnc.network_ipams_list()['network-ipams']: - if self.name == ipam['fq_name'][2] and self.project_name == ipam['fq_name'][1]: - self.fq_name = ipam['fq_name'] - self.already_present = True - self.ipam_id = ipam['uuid'] - self.obj = NetworkIpam(name=self.name, parent_type='project', - fq_name=self.fq_name, - network_ipam_mgmt=self.ipamtype) - self.logger.info('IPAM %s already present.Not creating it' % - self.name) - break - if not self.already_present: - self.obj = NetworkIpam(name=self.name, parent_type='project', - fq_name=self.fq_name, - network_ipam_mgmt=self.ipamtype) - if self.inputs.is_gui_based_config(): - self.webui.create_ipam(self) - else: - self.vnc.network_ipam_create(self.obj) - for 
ipam in self.vnc.network_ipams_list()['network-ipams']: - if self.name == ipam['fq_name'][2] and self.project_name == ipam['fq_name'][1]: - self.fq_name = ipam['fq_name'] - self.ipam_id = ipam['uuid'] - break - self.obj = self.vnc.network_ipam_read(fq_name=self.fq_name) - if self.vdns_obj: - self.obj.add_virtual_DNS(self.vdns_obj) - if self.ipamtype: - self.old_ipam_type = self.obj.get_network_ipam_mgmt() - self.obj.set_network_ipam_mgmt(self.ipamtype) - self.vnc.network_ipam_update(self.obj) - - def getObj(self): - return self.obj - - def update_vdns(self, vdns_obj): - self.obj = self.vnc.network_ipam_read(id=self.ipam_id) - vdns_server = IpamDnsAddressType(virtual_dns_server_name=vdns_obj.get_fq_name_str()) - ipam_mgmt_obj = IpamType(ipam_dns_method='virtual-dns-server', ipam_dns_server=vdns_server) - self.obj.set_network_ipam_mgmt(ipam_mgmt_obj) - self.obj.add_virtual_DNS(vdns_obj) - self.vnc.network_ipam_update(self.obj) - - def get_uuid(self): - return self.ipam_id - - def get_fq_name(self): - return self.fq_name - - def verify_on_setup(self): - result = True - if not self.verify_ipam_in_api_server(): - result = result and False - self.logger.error( - "One or more verifications in API Server for IPAM: %s failed" % (self.name)) - if not self.verify_ipam_in_control_nodes(): - result = result and False - self.logger.error( - "One or more verifications in Control-nodes for IPAM: %s failed" % (self.name)) - self.verify_is_run = True - return result - # end verify - - def cleanUp(self): - super(IPAMFixture, self).cleanUp() - self.delete() - - def delete(self, verify=False): - do_cleanup = True - if self.inputs.fixture_cleanup == 'no': - do_cleanup = False - if self.inputs.fixture_cleanup == 'force': - do_cleanup = True - if self.already_present: - do_cleanup = False - if do_cleanup: - if self.inputs.is_gui_based_config(): - self.webui.delete_ipam(self) - else: - self.vnc.network_ipam_delete(self.fq_name) - if self.verify_is_run or verify: - assert 
self.verify_ipam_not_in_api_server() - assert self.verify_ipam_not_in_control_nodes() - else: - if self.vdns_obj: - self.obj.del_virtual_DNS(self.vdns_obj) - if self.ipamtype: - self.obj.set_network_ipam_mgmt(self.old_ipam_type) - self.vnc.network_ipam_update(self.obj) - self.logger.info('Skipping the deletion of IPAM %s' % self.fq_name) - - # end cleanUp - - @retry(delay=5, tries=3) - def verify_ipam_in_api_server(self): - """ Checks for IPAM:in API Server. - - False If IPAM Name is not found - False If all Subnet prefixes are not found - """ - self.api_s_vn_obj = self.api_s_inspect.get_cs_ipam( - project=self.project_name, ipam=self.name, refresh=True) - if not self.api_s_vn_obj: - self.logger.warn("IPAM %s is not found in API-Server" % - (self.name)) - return False - if self.api_s_vn_obj['network-ipam']['uuid'] != self.ipam_id: - self.logger.warn("IPAM Object ID %s not found in API-Server" % - (self.ipam_id)) - return False - self.logger.info("Verifications in API Server for IPAM: %s passed" % - (self.name)) - return True - # end Verify_ipam_in_api_server - - @retry(delay=5, tries=3) - def verify_ipam_not_in_api_server(self): - '''Verify that IPAM is removed in API Server. - - ''' - if self.inputs.orchestrator == 'vcenter': - # vcenter IPAM object is never deleted - return True - try: - if self.vnc.network_ipam_read(self.fq_name): - self.logger.warn("IPAM %s is still found in API-Server" % - (self.name)) - return False - except NoIdError: - self.logger.info("IPAM: %s is not found in API Server" % - (self.name)) - return True - # end verify_ipam_not_in_api_server - - @retry(delay=5, tries=3) - def verify_ipam_in_control_nodes(self): - # Checks for IPAM details in Control-nodes. 
- fqname = str(":".join(self.fq_name)) - self.ri_name = fqname + ':' + self.name - for cn in self.inputs.bgp_ips: - cn_config_vn_obj = self.cn_inspect[cn].get_cn_config_ipam( - ipam=self.name, project=self.project_name) - if not cn_config_vn_obj: - self.logger.warn( - 'Control-node %s does not have IPAM %s info ' % - (cn, self.name)) - return False - self.logger.debug("Control-node %s : IPAM object is : %s" % - (cn, cn_config_vn_obj)) - if fqname not in cn_config_vn_obj['node_name']: - self.logger.warn( - 'IFMAP View of Control-node is not having the IPAM detail of %s' % (fqname)) - return False - self.logger.info('Verifications in Control node for IPAM: %s passed' % - (self.name)) - return True - # end verify_ipam_in_control_nodes - - @retry(delay=5, tries=10) - def verify_ipam_not_in_control_nodes(self): - # Verify that IPAM details are not in any Control-node - if self.inputs.orchestrator == 'vcenter': - # vcenter IPAM object is never deleted - return True - fqname = str(":".join(self.fq_name)) - self.ri_name = fqname + ':' + self.name - result = True - for cn in self.inputs.bgp_ips: - cn_object = self.cn_inspect[ - cn].get_cn_routing_instance(ri_name=self.ri_name) - if cn_object: - self.logger.warn( - "Routing instance for IPAM %s is still found in Control-node %s" % (self.name, cn)) - result = result and False - # end for - if self.cn_inspect[cn].get_cn_config_ipam(ipam=self.name, project=self.project_name): - self.logger.warn("Control-node config DB still has IPAM %s" % - (self.name)) - result = result and False - - if result: - self.logger.info("IPAM:%s is not found in control node" % - (self.name)) - return result - # end verify_ipam_not_in_control_nodes - -# end IPAMFixture diff --git a/fixtures/keystone_tests.py b/fixtures/keystone_tests.py deleted file mode 100755 index 55a23ab0f..000000000 --- a/fixtures/keystone_tests.py +++ /dev/null @@ -1,178 +0,0 @@ -import os -from common.openstack_libs import ks_client as keystone_client -from 
common.openstack_libs import ks_exceptions -from common.openstack_libs import keystoneclient -from common import log_orig as logging -from tcutils.util import retry, get_dashed_uuid - -LOG = logging.getLogger(__name__) - -class KeystoneCommands(): - - '''Handle all tenant managements''' - - def __init__(self, username=None, password=None, tenant=None, auth_url=None, token=None, endpoint=None, insecure=True): - - if token: - self.keystone = keystoneclient.Client( - token=token, endpoint=endpoint) - else: - self.keystone = keystone_client.Client( - username=username, password=password, tenant_name=tenant, auth_url=auth_url, - insecure=insecure) - - def get_handle(self): - return self.keystone - - def get_role_dct(self, role_name): - all_roles = self.roles_list() - for x in all_roles: - if (x.name == role_name): - return x - return None - - def get_user_dct(self, user_name): - all_users = self.user_list() - for x in all_users: - if (x.name == user_name): - return x - return None - - def get_tenant_dct(self, tenant_name): - all_tenants = self.tenant_list() - for x in all_tenants: - if (x.name == tenant_name): - return x - return None - - def create_project(self, name): - return get_dashed_uuid(self.keystone.tenants.create(name).id) - - def delete_project(self, name, obj=None): - if not obj: - obj = self.keystone.tenants.find(name=name) - self.keystone.tenants.delete(obj) - - def create_tenant_list(self, tenants=[]): - for tenant in tenants: - return_vlaue = self.create_project(tenant) - - def delete_tenant_list(self, tenants=[]): - for tenant in tenants: - self.delete_project(tenant) - - def update_tenant(self, tenant_id, tenant_name=None, description=None, - enabled=None): - - self.keystone.tenants.update( - tenant_id, tenant_name=tenant_name, description=description, enabled=enabled) - - def add_user_to_tenant(self, tenant, user, role): - ''' inputs have to be string ''' - user = self.get_user_dct(user) - role = self.get_role_dct(role) - tenant = 
self.get_tenant_dct(tenant) - self._add_user_to_tenant(tenant, user, role) - - def _add_user_to_tenant(self, tenant, user, role): - ''' inputs could be id or obj ''' - try: - self.keystone.tenants.add_user(tenant, user, role) - except ks_exceptions.Conflict as e: - LOG.logger.info(str(e)) - - def remove_user_from_tenant(self, tenant, user, role): - - user = self.get_user_dct(user) - role = self.get_role_dct(role) - tenant = self.get_tenant_dct(tenant) - self.keystone.tenants.remove_user(tenant, user, role) - - def tenant_list(self, limit=None, marker=None): - - return self.keystone.tenants.list() - - def create_roles(self, role): - - self.keystone.roles.create(role) - - def delete_roles(self, role): - - role = self.get_role_dct(role) - self.keystone.roles.delete(role) - - def add_user_role(self, user_name, role_name, tenant_name=None): - - user = self.get_user_dct(user_name) - role = self.get_role_dct(role_name) - if tenant_name: - tenant = self.get_tenant_dct(tenant_name) - - self.keystone.roles.add_user_role(user, role, tenant) - - def get_role_for_user(self, user, tenant_name=None): - - user = self.get_user_dct(user) - if tenant_name: - tenant = self.get_tenant_dct(tenant_name) - return self.keystone.roles.roles_for_user(user, tenant) - - def remove_user_role(self, user, role, tenant=None): - - user = self.get_user_dct(user) - role = self.get_role_dct(role) - if tenant: - tenant = self.get_tenant_dct(tenant) - - self.keystone.roles.remove_user_role(user, role, tenant) - - def roles_list(self): - - return self.keystone.roles.list() - - def create_user(self, user, password, email='', tenant_name=None, enabled=True): - - tenant_id = self.get_tenant_dct(tenant_name).id - self.keystone.users.create(user, password, email, tenant_id, enabled) - - @retry(delay=3, tries=5) - def delete_user(self, user): - - user = self.get_user_dct(user) - try: - self.keystone.users.delete(user) - return True - except ks_exceptions.ClientException, e: - # TODO Remove this workaround - if 
'Unable to add token to revocation list' in str(e): - LOG.logger.warn('Exception %s while deleting user' % ( - str(e))) - return False - # end delete_user - - def update_user_tenant(self, user, tenant): - - user = self.get_user_dct(user) - tenant = self.get_tenant_dct(tenant) - self.keystone.users.update_tenant(user, tenant) - - def user_list(self, tenant_id=None, limit=None, marker=None): - - return self.keystone.users.list() - - def services_list(self, tenant_id=None, limit=None, marker=None): - return self.keystone.services.list() - - def get_id(self): - return get_dashed_uuid(self.keystone.auth_tenant_id) - - def get_project_id(self, name): - try: - obj = self.keystone.tenants.find(name=name) - return get_dashed_uuid(obj.id) - except ks_exceptions.NotFound: - return None - - def get_endpoint(self, service): - ''' Given the service-name return the endpoint ip ''' - return self.keystone.service_catalog.get_urls(service_type=service) diff --git a/fixtures/lbaas_fixture.py b/fixtures/lbaas_fixture.py deleted file mode 100644 index 023581e49..000000000 --- a/fixtures/lbaas_fixture.py +++ /dev/null @@ -1,1041 +0,0 @@ -import vnc_api_test -from compute_node_test import ComputeNodeFixture -from tcutils.util import get_random_name, retry - -custom_attributes_dict = { - 'max_conn': 'maxconn', - 'max_conn_rate': 'maxconnrate', - 'max_sess_rate': 'maxsessrate', - 'max_ssl_conn': 'maxsslconn', - 'max_ssl_rate': 'maxsslrate', - 'ssl_ciphers': 'ssl-default-bind-ciphers', - 'tune_http_max_header': 'tune.http.maxhdr', - 'tune_ssl_max_record': 'tune.ssl.maxrecord', - 'server_timeout': 'timeout server', - 'client_timeout': 'timeout client', - 'connect_timeout': 'timeout connect', - 'http_server_close': 'option http-server-close', - 'rate_limit_sessions': 'rate-limit sessions', -} - -class LBaasFixture(vnc_api_test.VncLibFixture): - - '''Fixture to handle LBaas object - - Optional: - :param name : name of the LBaas Pool (random name) - :param uuid : UUID of the LBaas Pool - :param 
network_id : uuid of the network on which pool belongs to - :param members: dict of list of members vmi_ids or ip address - {'vmis': ['...', '...'], 'address': ['...'], 'vms': ['...']} - :param custom_attr : dict of key value pairs (Check custom_attributes_dict - @ https://github.com/Juniper/contrail-controller/blob/master/src/vnsw/opencontrail-vrouter-netns/opencontrail_vrouter_netns/haproxy_validator.py - for supported KV pairs) - :param api_type : one of 'neutron'(default) or 'contrail' - :param lb_method : LB method (ROUND_ROBIN,LEAST_CONNECTIONS,SOURCE_IP) - :param protocol : Protocol one of HTTP, TCP or HTTPS - :param port : L4 Port number - :param vip_name : VIP name (vip-$(LB_Name)) - :param vip_net_id : vip network id, mandatory to create vip - :param vip_protocol : Protocol one of HTTP, TCP or HTTPS - :param vip_port : L4 Port number - :param healthmonitors : List of dicts - id : healthmonitor id in case its precreated - or, the below set of keys - probe_type : Health monitor probe type (PING,TCP,HTTP,HTTPS) - delay : Health monitor - delay in secs between probes - max_retries : Health monitor - max no of retries - timeout : Health monitor - timeout for each probe, must be < delay - :param fip_id : UUID of FloatingIP object - :param fip_net_id : UUID of the FloatingIP network object - - Inherited optional parameters: - :param domain : default is default-domain - :param project_name : default is admin - :param cfgm_ip : default is 127.0.0.1 - :param api_port : default is 8082 - :param connections : ContrailConnections object. 
default is None - :param username : default is admin - :param password : default is contrail123 - :param auth_server_ip : default is 127.0.0.1 - - ''' - - def __init__(self, **kwargs): - super(LBaasFixture, self).__init__(self, **kwargs) - self.name = kwargs.get('name', get_random_name('LB')) - self.uuid = kwargs.get('uuid', None) - self.network_id = kwargs.get('network_id', None) - self.lb_method = kwargs.get('lb_method', 'ROUND_ROBIN') - self.members = kwargs.get('members', None) - self.protocol = kwargs.get('protocol', 'HTTP') - self.port = kwargs.get('port', None) - self.vip_net_id = kwargs.get('vip_net_id', None) - self.vip_name = kwargs.get('vip_name', 'vip-'+self.name) - self.vip_protocol = kwargs.get('vip_protocol', self.protocol) - self.vip_port = kwargs.get('vip_port', self.port) - self.healthmonitors = kwargs.get('healthmonitors', list()) - self.fip_id = kwargs.get('fip_id', None) - self.fip_net_id = kwargs.get('fip_net_id', None) - self.api_type = kwargs.get('api_type', 'neutron') - self.custom_attr = kwargs.get('custom_attr', dict()) - self.already_present = False - self.member_ips = list() - self.member_ids = list() - self.deleted_member_ids = list() - self.is_vip_active = False - self.is_fip_active = False - self.vip_ip = None - self.vip_id = None - self.fip_id = None - self.fip_ip = None - self.hmons = dict() - - # temporary place till vnc_api_test is re-done - super(LBaasFixture, self).setUp() - self.network_h = self.get_network_handle() - self.vnc_api_h = self.get_handle() - - if self.uuid: - self.read(self.uuid) - self.parent_fq_name = [self.domain, self.project_name] - self.fq_name = self.parent_fq_name + [self.name] - self.parent_type = 'project' - - def setUp(self): - super(LBaasFixture, self).setUp() - self.create() - - def cleanUp(self): - super(LBaasFixture, self).cleanUp() - self.inputs.fixture_cleanup = 'force' - if (self.already_present or self.inputs.fixture_cleanup == 'no') and\ - self.inputs.fixture_cleanup != 'force': - 
self.logger.info('Skipping deletion of Load Balancer %s :' - %(self.fq_name)) - else: - self.delete() - - def get_network_handle(self): - if self.api_type == 'contrail': - return self.get_handle() - else: - return self.get_neutron_handle() - - def read(self, uuid): - self.logger.debug('Fetching info about Load Balancer Pool %s'%uuid) - self.obj= self.network_h.get_lb_pool(uuid) - if not self.obj: - raise Exception('load balancer pool with id %s not found'%uuid) - self.uuid = self.obj.get('id', None) or getattr(self.obj, 'uuid', None) - self.name = self.obj.get('name', None) or getattr(self.obj, 'name',None) - self.protocol = self.obj.get('protocol', None) - self.lb_method = self.obj.get('lb_method', None) - self.network_id = self.network_h.get_vn_of_subnet(\ - self.obj.get('subnet_id', None)) - vip_id = self.obj.get('vip_id', None) - if vip_id: - self.vip_id = vip_id - self.vip = self.network_h.show_vip(self.vip_id) - self.vip_port = self.vip.get('protocol_port', None) - self.vip_protocol = self.vip.get('protocol', None) - self._populate_vars_from_vip_obj() - fip = self.network_h.list_floatingips(port_id=self.vip_port_id) - if fip: - self.fip_id = fip[0]['id'] - fip = self.network_h.get_floatingip(self.fip_id) - self.fip_net_id = fip['floating_network_id'] - self.fip_ip = fip['floating_ip_address'] - self.member_ids = self.obj.get('members', []) - self.member_ips = [self.network_h.show_lb_member(x)['address'] - for x in self.member_ids] - if self.member_ids: - self.port = self.network_h.show_lb_member( - self.member_ids[0])['protocol_port'] - health_monitors = self.obj.get('health_monitors', []) - for hmon in health_monitors: - self.hmons[hmon] = self.network_h.get_health_monitor(hmon) - custom_attr_list = self.obj.get('custom_attributes', []) - for attr_dict in custom_attr_list and custom_attr_list[0]: - self.custom_attr.update({k:v for k,v in attr_dict.iteritems()}) - self.logger.info('LB %s, members %s, vip %s, fip %s, protocol %s, port ' - '%s healthmonitors 
%s'%(self.name, self.member_ips, - self.vip_ip, self.fip_ip, self.protocol, - self.port, self.hmons.keys())) - - def create(self): - try: - self.obj = self.network_h.get_lb_pool(name=self.name) - self.uuid = self.obj.get('id') - self.read(self.uuid) - self.already_present = True - self.logger.info('Load Balancer %s is already present'%self.name) - except: - self.logger.info('Creating Load Balancer %s'%self.name) - self.obj = self.network_h.create_lb_pool(self.name, - lb_method=self.lb_method, - protocol=self.protocol, - network_id=self.network_id, - custom_attr=self.custom_attr) - self.uuid = self.obj.get('id', None) or getattr(self, 'id', None) - if self.vip_net_id and not self.vip_id: - self.check_and_create_vip() - if self.members: - for vmi in self.members.get('vmis', []): - self.create_member(vmi=vmi) - for vm in self.members.get('vms', []): - self.create_member(vm=vm) - for address in self.members.get('address', []): - self.create_member(address=address) - for hmon in self.healthmonitors: - self.create_hmon(hmon) - if self.vip_id and (self.fip_net_id or self.fip_id): - self.create_fip_on_vip() - self.logger.info('LoadBalancer: %s, members: %s, vip: %s, fip:%s ' - 'hmons: %s'%(self.name, self.member_ips, self.vip_ip, - self.fip_ip, self.hmons.keys())) - - def create_fip_on_vip(self, fip_net_id=None, fip_id=None): - if not self.is_vip_active: - raise Exception('LB %s doesnt have vip set'%self.uuid) - fip_net_id = fip_net_id or self.fip_net_id - fip_id = fip_id or self.fip_id - if fip_id: - fip = self.network_h.assoc_floatingip(fip_id, - self.vip_port_id)['floatingip'] - elif fip_net_id: - fip = self.network_h.create_floatingip(fip_net_id, - port_id=self.vip_port_id)['floatingip'] - self.fip_ip = fip['floating_ip_address'] - self.fip_id = fip['id'] - self.fip_net_id = fip['floating_network_id'] - self.is_fip_active = True - self.logger.info('Assoc VIP %s with FIP %s'%(self.vip_ip, self.fip_ip)) - - def delete_fip_on_vip(self): - 
self.network_h.delete_floatingip(self.fip_id) - self.is_fip_active = False - self.logger.info('Disassoc VIP %s with FIP %s'%(self.vip_ip, - self.fip_ip)) - - def create_hmon(self, hmon_dict): - if hmon_dict.get('id', None): - if hmon_dict['id'] not in self.hmons.keys(): - hmon_obj = self.network_h.get_health_monitor(hmon_dict['id']) - else: - hmon_obj = self.hmons[hmon_dict['id']] - else: - hmon_obj = self.network_h.create_health_monitor( - hmon_dict['delay'], - hmon_dict['max_retries'], - hmon_dict['probe_type'], - hmon_dict['timeout']) - assert hmon_obj, 'Create Healthmonitor failed' - self.logger.info('Created Health Monitor %s'%hmon_obj['id']) - self.hmons[hmon_obj['id']] = hmon_obj - self.associate_hmon(hmon_obj['id']) - - def associate_hmon(self, hmon_id): - self.network_h.associate_health_monitor(self.uuid, hmon_id) - - def delete_hmon(self, hmon_id): - self.disassociate_hmon(hmon_id) - self.logger.info('Deleting Health Monitor %s'%hmon_id) - self.network_h.delete_health_monitor(hmon_id) - self.hmons.pop(hmon_id) - - def disassociate_hmon(self, hmon_id): - self.network_h.disassociate_health_monitor(self.uuid, hmon_id) - - def create_member(self, address=None, vmi=None, vm=None, port=None): - port = port or self.port - if vm: - vm_obj = self.connections.orch.get_vm_by_id(vm) - address = self.connections.orch.get_vm_ip(vm_obj)[0] - if vmi: - address = self.network_h.get_port_ips(vmi)[0] - if not port: - raise Exception('Protocol port is not defined') - if address not in self.member_ips: - self.logger.info('Creating LB Member %s'%address) - obj = self.network_h.create_lb_member(address, port, self.uuid) - self.member_ids.append(obj.get('id')) - self.member_ips.append(address) - return obj.get('id') - - def delete_member(self, member_id=None, address=None, vmi=None, vm=None): - if not member_id: - if vmi: - address = self.network_h.get_port_ips(vmi)[0] - if vm: - vm_obj = self.connections.orch.get_vm_by_id(vm) - address = 
self.connections.orch.get_vm_ip(vm_obj)[0] - member_ids = [x['id'] for x in self.network_h.list_lb_members( - address=address, fields='id')] - member_id = list(set(member_ids) & set(self.member_ids))[0] - else: - address = self.network_h.show_lb_member(member_id, - fields='address')['address'] - self.logger.info('Deleting LB Member %s'%address) - self.network_h.delete_lb_member(member_id) - self.deleted_member_ids.append(member_id) - self.member_ids.remove(member_id) - self.member_ips.remove(address) - - def _populate_vars_from_vip_obj(self): - self.vip_id = self.vip.get('id') - self.vip_ip = self.vip.get('address') - self.vip_port_id = self.vip.get('port_id', None) - self.vip_net_id = self.network_h.get_vn_of_subnet(\ - self.vip.get('subnet_id', None)) - self.is_vip_active = True - self.si_uuid = None - self.label = None - self.active_vr = None - self.standby_vr = None - self.active_vm = None - self.standby_vm = None - self.control_node = None - - def check_and_create_vip(self): - try: - self.vip = self.network_h.show_vip(name=self.vip_name) - self._populate_vars_from_vip_obj() - self.logger.info('VIP is already present') - except: - self.logger.debug('Creating VIP %s'%self.name) - self.vip_id = self.create_vip(self.vip_name, - protocol=self.vip_protocol, - port=self.vip_port, - network_id=self.vip_net_id) - return self.vip_id - - def create_vip(self, name=None, protocol=None, port=None, network_id=None): - name = name or self.vip_name - protocol = protocol or self.vip_protocol - port = port or self.vip_port - net_id = network_id or self.vip_net_id - self.vip = self.network_h.create_vip(name, protocol, port, self.uuid, - network_id=network_id) - self._populate_vars_from_vip_obj() - self.logger.info('Created vip(%s) %s' %(self.vip_id, self.vip_ip)) - return self.vip_id - - def delete_vip(self): - self.logger.info('Deleting vip(%s) %s' %(self.vip_id, self.vip_ip)) - self.network_h.delete_vip(self.vip_id) - self.is_vip_active = False - self.is_fip_active = False - - 
def reset_vip(self, verify=False): - self.delete_vip() - self.create_vip() - - def delete_custom_attr(self, key): - self.custom_attr.pop(key, None) - self.update_custom_attr() - - def add_custom_attr(self, key, value): - self.custom_attr.update({key:value}) - self.update_custom_attr() - - def update_custom_attr(self, custom_attr_dict=dict()): - self.custom_attr = custom_attr_dict or self.custom_attr - self.network_h.update_lb_pool(self.uuid, {'custom_attributes': - [self.custom_attr]}) - - # The test is expected to add start_active_vrouter in addCleanup - def stop_active_vrouter(self): - active_vr = self.get_active_vrouter() - self.inputs.stop_service('supervisor-vrouter', [active_vr]) - self._populate_vars_from_vip_obj() - - def start_active_vrouter(self): - active_vr = self.get_active_vrouter() - self.inputs.start_service('supervisor-vrouter', [active_vr]) - - # The test is expected to add start_standby_vrouter in addCleanup - def stop_standby_vrouter(self): - standby_vr = self.get_standby_vrouter() - self.inputs.stop_service('supervisor-vrouter', [standby_vr]) - self._populate_vars_from_vip_obj() - - def start_standby_vrouter(self): - standby_vr = self.get_standby_vrouter() - self.inputs.start_service('supervisor-vrouter', [standby_vr]) - - def delete(self): - self.logger.info('Deleting LoadBalancer %s(%s)'%(self.name, self.uuid)) - for member_id in list(self.member_ids): - self.delete_member(member_id) - for hmon_id in self.hmons.keys(): - self.delete_hmon(hmon_id) - if self.is_fip_active: - self.delete_fip_on_vip() - if self.is_vip_active: - self.delete_vip() - self.network_h.delete_lb_pool(self.uuid) - if getattr(self, 'verify_is_run', None): - assert self.verify_on_cleanup() - self.uuid = None - - def verify_on_setup(self): - assert self.verify_in_api_server() - if self.is_vip_active: - assert self.verify_in_agent() - assert self.verify_in_control_node() - self.logger.info('LoadBalancer(%s): verify_on_setup passed'%self.uuid) - self.verify_is_run = True - 
return True - - def verify_on_cleanup(self): - assert self.verify_not_in_api_server() - if self.vip_id: - assert self.verify_vip_not_in_agent() - assert self.verify_vip_not_in_control_node() - if self.fip_id: - assert self.verify_fip_not_in_agent() - assert self.verify_fip_not_in_control_node() - self.logger.info('LoadBalancer(%s): verify_on_cleanup passed'%self.uuid) - return True - - def verify_not_in_api_server(self): - assert self.verify_member_not_in_api_server() - assert self.verify_hm_not_in_api_server() - if self.fip_id: - assert self.verify_fip_not_in_api_server() - if self.vip_id: - assert self.verify_vip_not_in_api_server() - assert self.verify_pool_not_in_api_server() - return True - - @retry(delay=6, tries=10) - def verify_fip_not_in_api_server(self): - self.api_h = self.connections.api_server_inspect - if self.api_h.get_cs_fip(self.fip_id, refresh=True): - return False - self.logger.debug('FIP removal verification passed in API server') - return True - - @retry(delay=6, tries=10) - def verify_pool_not_in_api_server(self): - self.api_h = self.connections.api_server_inspect - pool = self.api_h.get_lb_pool(self.uuid, refresh=True) - if pool: - self.logger.warn("Loadbalancer pool %s still present in API" - " server even after pool delete" %(self.uuid)) - return False - self.logger.debug( - "Load Balancer pool %s got deleted in API server" %(self.uuid)) - return True - - @retry(delay=6, tries=10) - def verify_member_not_in_api_server(self): - self.api_h = self.connections.api_server_inspect - for member_id in self.deleted_member_ids: - member = self.api_h.get_lb_member(member_id, refresh=True) - if member: - self.logger.warn("LB member %s still present in API server" - " even after member delete" % (member_id)) - return False - self.logger.debug( - "LB member %s got deleted in API server" % (member_id)) - return True - - @retry(delay=6, tries=10) - def verify_vip_not_in_api_server(self): - self.api_h = self.connections.api_server_inspect - vip = 
self.api_h.get_lb_vip(self.vip_id, refresh=True) - if vip: - self.logger.warn("LB VIP %s still present in API server" - " even after vip delete" % (self.vip_id)) - return False - self.logger.debug( - "LB vip %s got deleted in API server" % (self.vip_id)) - return True - - @retry(delay=6, tries=10) - def verify_hm_not_in_api_server(self): - self.api_h = self.connections.api_server_inspect - for hmon_id in self.hmons.keys(): - hmon = self.api_h.get_lb_healthmonitor(hmon_id, refresh=True) - if hmon: - self.logger.warn("LB health monitor %s still present"%(hmon_id)) - return False - self.logger.debug("LB health monitor %s got deleted" %(hmon_id)) - return True - - def verify_in_api_server(self): - assert self.verify_lb_pool_in_api_server() - if self.member_ids: - assert self.verify_member_in_api_server() - if self.is_vip_active: - assert self.verify_vip_in_api_server() - assert self.verify_si_launched() - if self.is_fip_active: - assert self.verify_fip_in_api_server() - if self.hmons: - assert self.verify_hm_in_api_server() - return True - - @retry(delay=6, tries=10) - def verify_lb_pool_in_api_server(self): - self.api_h = self.connections.api_server_inspect - pool = self.api_h.get_lb_pool(self.uuid, refresh=True) - if not pool: - self.logger.warn("LB %s not found in api server" % (self.uuid)) - return False - if self.member_ids: - if sorted(self.member_ids) != sorted(pool.members()): - self.logger.warn("LB %s members doesnt match, expected %s" - " got %s"%(self.uuid, self.member_ids, - sorted(pool.members()))) - return False - if self.is_vip_active: - if self.vip_id != pool.vip(): - self.logger.warn("LB %s VIP id doesnt match, expected %s" - " got %s"%(self.uuid, self.vip_id, pool.vip())) - return False - prop = pool.properties() - if self.protocol != prop['protocol']: - self.logger.warn("LB %s protocol doesnt match, expected %s got %s" - %(self.uuid, self.protocol, prop['protocol'])) - return False - if self.lb_method != prop['loadbalancer_method']: - 
self.logger.warn("LB %s lb_method doesnt match, expected %s got %s" - %(self.uuid, self.lb_method, - prop['loadbalancer_method'])) - return False - if self.network_id != self.network_h.get_vn_of_subnet( - prop['subnet_id']): - self.logger.warn("LB %s pool subnet ids doesnt match") - return False - if self.hmons: - if sorted(self.hmons.keys()) != sorted(pool.hmons()): - self.logger.warn("LB %s health monitors dont match, expected %s" - " got %s"%(self.uuid, self.hmons.keys(), - pool.members())) - return False - if self.custom_attr: - custom_attrs = pool.custom_attrs() - if self.custom_attr != custom_attrs: - self.logger.warn("LB %s custom_attributes doesnt match," - "expected %s, got %s"%(self.uuid, - self.custom_attr, custom_attrs)) - return False - self.logger.debug("LB(%s) got created in api server"% (self.uuid)) - return True - - @retry(delay=6, tries=10) - def verify_vip_in_api_server(self): - self.api_h = self.connections.api_server_inspect - vip = self.api_h.get_lb_vip(self.vip_id) - if not vip: - self.logger.warn("LB Vip %s not found in api server" %(self.vip_id)) - return False - if vip.vmi() != self.vip_port_id: - self.logger.warn("vip(%s) port ids dont match, expected %s got %s" - %(self.vip_id, self.vip_port_id, vip.vmi())) - return False - if vip.ip() != self.vip_ip: - self.logger.warn("vip(%s) address dont match, expected %s got %s" - %(self.vip_id, self.vip_ip, vip.ip())) - return False - self.logger.debug("LB VIP %s got created in api server" %(self.vip_id)) - return True - - @retry(delay=6, tries=10) - def verify_fip_in_api_server(self): - self.api_h = self.connections.api_server_inspect - fip_obj = self.api_h.get_cs_fip(self.fip_id, refresh=True) - if not fip_obj: - self.logger.warn("Fip %s not found in api server" %(self.fip_ip)) - return False - vmi = fip_obj.vmi() - if self.vip_port_id not in vmi: - self.logger.warn("FIP doesnt have VIP port id refs, expect %s " - "got %s"%(self.vip_port_id, vmi)) - return False - if len(vmi) != 3: - 
self.logger.warn("FIP doesnt have the netns instance vmis refs") - return False - return True - - @retry(delay=6, tries=10) - def verify_member_in_api_server(self): - self.api_h = self.connections.api_server_inspect - for member_id in self.member_ids: - member = self.api_h.get_lb_member(member_id) - if not member: - self.logger.warn("LB member %s not found" %(member_id)) - return False - if member.ip() not in self.member_ips: - self.logger.warn("member %s ip dont match, expected one of %s" - "got %s"%(member_id, self.member_ips, member.ip())) - return False - self.logger.debug("LB member %s created successfully" % (member_id)) - return True - - @retry(delay=6, tries=10) - def verify_hm_in_api_server(self): - self.api_h = self.connections.api_server_inspect - for hm_id, hm_obj in self.hmons.iteritems(): - hm = self.api_h.get_lb_healthmonitor(hm_id) - if not hm: - self.logger.warn("Health Monitor %s not found"%hm_id) - return False - self.logger.debug("LB Health Monitor %s created successfully"%hm_id) - return True - - @retry(6, 10) - def verify_si_launched(self, refresh=False): - svc_mon_h = self.connections.get_svc_mon_h(refresh) - si = svc_mon_h.get_service_instance(name=self.get_si_name(), - refresh=True) - if si and si.is_launched(): - self.logger.debug('Load Balancer: SI got launched') - return True - self.logger.warn('LB(%s): SI status is not active in svc-mon'%self.uuid) - return False - - def get_si_name(self): - return self.uuid - - def get_si(self): - if not getattr(self, 'si_uuid', None): - self.si_uuid = None - self.api_h = self.connections.api_server_inspect - pool = self.api_h.get_lb_pool(self.uuid, refresh=True) - if pool: - self.si_uuid = pool.si() - self.logger.debug('LB %s: SI uuid is %s'%(self.uuid, self.si_uuid)) - return self.si_uuid - - def get_vms(self): - self.api_h = self.connections.api_server_inspect - si = self.api_h.get_cs_si_by_id(self.get_si(), refresh=True) - if si: - return si.get_vms() - return [] - - def 
get_active_standby_instance(self): - self.active_vm = None; self.standby_vm = None - self.api_h = self.connections.api_server_inspect - for vm_id in self.get_vms(): - vmis = self.api_h.get_cs_vmi_of_vm(vm_id, refresh=True) - pref = vmis[0].properties('local_preference') - if pref == 200: - self.active_vm = vm_id - else: - self.standby_vm = vm_id - return (self.active_vm, self.standby_vm) - - def get_standby_instance(self): - if not getattr(self, 'standby_vm', None): - self.get_active_standby_instance() - if not self.standby_vm: - self.logger.warn('Unable to get standby vm for LB %s'%self.uuid) - return self.standby_vm - - def get_active_instance(self): - if not getattr(self, 'active_vm', None): - self.get_active_standby_instance() - if not self.active_vm: - self.logger.warn('Unable to get active vm for LB %s'%self.uuid) - return self.active_vm - - def get_active_vrouter(self, refresh=False): - if not getattr(self, 'active_vr', None) or refresh: - svc_mon_h = self.connections.get_svc_mon_h(refresh) - try: - self.active_vr = self.inputs.get_host_ip( - svc_mon_h.get_service_instance( - name=self.get_si_name(), - refresh=refresh).active_vrouter()) - if self.active_vr == 'None': - self.active_vr = None - except: - self.logger.warn('Fail to get vrouter for active lbaas') - self.active_vr = None - return self.active_vr - - def get_standby_vrouter(self, refresh=False): - if not getattr(self, 'standby_vr', None) or refresh: - svc_mon_h = self.connections.get_svc_mon_h(refresh) - try: - self.standby_vr = self.inputs.get_host_ip( - svc_mon_h.get_service_instance( - name=self.get_si_name(), - refresh=refresh).standby_vrouter()) - if self.standby_vr == 'None': - self.standby_vr = None - except: - self.logger.warn('Fail to get vrouter for standby lbaas') - self.standby_vr = None - return self.standby_vr - - def get_vip_label(self, refresh=False): - if not getattr(self, 'label', None) or refresh: - self.label = None - vm_id = self.get_active_instance() - active_vr = 
self.get_active_vrouter() - if not (active_vr and vm_id): - self.logger.warn('LB: Unable to fetch either of ' - 'active vm/vrouter info') - return None - inspect_h = self.connections.agent_inspect[active_vr] - vmis = inspect_h.get_vna_tap_interface_by_vm(vm_id) - if vmis: - self.label = [vmi['label'] for vmi in vmis - if vmi['ip_addr'] == self.vip_ip][0] - if not self.label: - self.logger.warn('LB: Unable to fetch label of vip intf') - return self.label - - def get_ctrl_nodes(self, ri_name): - rt_list = [] - peer_list = [] - ri = self.vnc_api_h.routing_instance_read(fq_name=ri_name) - rt_list = [rt['to'][0] for rt in ri.get_route_target_refs()] - ctrl_node = ComputeNodeFixture(self.connections, - self.get_active_vrouter() - ).get_active_controller() - cn_inspect = self.connections.cn_inspect[ctrl_node] - peer_list.append(ctrl_node) - for rt in rt_list: - rt_group_entry = cn_inspect.get_cn_rtarget_group(rt) - if rt_group_entry['peers_interested'] is not None: - for peer in rt_group_entry['peers_interested']: - if peer in self.inputs.host_names: - peer = self.inputs.get_host_ip(peer) - peer_list.append(peer) - else: - self.logger.info('%s is not defined as a control node' - ' in the topology' % peer) - self.logger.debug('Interested control nodes %s'%peer_list) - return list(set(peer_list)) - - def verify_in_control_node(self): - assert self.verify_vip_in_control_node() - if self.is_fip_active: - assert self.verify_fip_in_control_node() - self.logger.debug('LB %s: vip %s: verify_in_control_node passed' - %(self.uuid, self.vip_id)) - return True - - @retry(6, 10) - def verify_vip_in_control_node(self): - exp_label = self.get_vip_label() - if not exp_label: - self.logger.warn('LB: Unable to fetch vip label') - return False - vn_fqname = self.id_to_fq_name(self.vip_net_id) - ri_fqname = vn_fqname + vn_fqname[-1:] - for ctrl_node in self.get_ctrl_nodes(ri_fqname): - cn_inspect = self.connections.cn_inspect[ctrl_node] - routes = 
cn_inspect.get_cn_route_table_entry(prefix=self.vip_ip, - ri_name=':'.join(ri_fqname)) - if not routes: - self.logger.warn('LB: ctrl node %s: vip %s not found in RI %s' - %(ctrl_node, self.vip_ip, ri_fqname)) - return False - match = False - for route in routes: - if route['label'] == exp_label: - match = True - if match == False: - self.logger.warn('LB: label(%s) doesnt match expected(%s)' - %(route['label'], exp_label)) - return False - return True - - @retry(6, 10) - def verify_fip_in_control_node(self): - exp_label = self.get_vip_label() - if not exp_label: - self.logger.warn('LB: Unable to fetch vip label') - return False - vn_fqname = self.id_to_fq_name(self.fip_net_id) - ri_fqname = vn_fqname + vn_fqname[-1:] - for ctrl_node in self.get_ctrl_nodes(ri_fqname): - cn_inspect = self.connections.cn_inspect[ctrl_node] - routes = cn_inspect.get_cn_route_table_entry(prefix=self.fip_ip, - ri_name=':'.join(ri_fqname)) - if not routes: - self.logger.warn('LB: ctrl node %s: fip %s not found in RI %s' - %(ctrl_node, self.fip_ip, ri_fqname)) - return False - match = False - for route in routes: - if route['label'] == exp_label: - match = True - if match == False: - self.logger.warn('LB: label(%s) doesnt match expected(%s)' - %(route['label'], exp_label)) - return False - return True - - @retry(6, 10) - def verify_vip_not_in_control_node(self): - vn_fqname = self.id_to_fq_name(self.vip_net_id) - ri_fqname = vn_fqname + vn_fqname[-1:] - for ctrl_node in self.inputs.bgp_ips: - cn_inspect = self.connections.cn_inspect[ctrl_node] - routes = cn_inspect.get_cn_route_table_entry(prefix=self.vip_ip, - ri_name=':'.join(ri_fqname)) - if routes: - self.logger.warn('ctrl node %s: vip %s not deleted in RI %s' - %(ctrl_node, self.vip_ip, ri_fqname)) - return False - return True - - @retry(6, 10) - def verify_fip_not_in_control_node(self): - vn_fqname = self.id_to_fq_name(self.fip_net_id) - ri_fqname = vn_fqname + vn_fqname[-1:] - for ctrl_node in self.inputs.bgp_ips: - cn_inspect = 
self.connections.cn_inspect[ctrl_node] - routes = cn_inspect.get_cn_route_table_entry(prefix=self.fip_ip, - ri_name=':'.join(ri_fqname)) - if routes: - self.logger.warn('ctrl node %s: fip %s not deleted in RI %s' - %(ctrl_node, self.fip_ip, ri_fqname)) - return False - return True - - def verify_in_agent(self): - assert self.verify_netns_instance_launched() - assert self.verify_vip_in_agent() - if self.is_fip_active: - assert self.verify_fip_in_agent() - self.logger.debug('LB %s: vip %s: verify_in_agent passed' - %(self.uuid, self.vip_id)) - return True - - def is_instance_launched(self, vm_id, vrouter): - if not vm_id or not vrouter: - self.logger.warn('is_instance_launched: si vm_id or vrouter' - ' info not available') - return False - cmd_str = 'ip netns list | grep %s:%s | grep -v grep'%(vm_id,self.uuid) - output = self.inputs.run_cmd_on_server(vrouter, cmd_str) - if not output: - self.logger.debug('netns instance %s:%s not found' - %(vm_id, self.uuid)) - return False - if len(output.strip().split('\n')) > 1: - self.logger.debug('Multiple %s:%s netns instances found' - %(vm_id, self.uuid)) - return False - cmd_str = 'ps ax | grep haproxy | grep %s | grep -v grep' % self.uuid - if not self.inputs.run_cmd_on_server(vrouter, cmd_str): - self.logger.debug('haproxy not found for LB %s'%self.uuid) - return False - if not self.is_custom_attr_in_haproxy_conf(vrouter): - return False - return True - - def is_custom_attr_in_haproxy_conf(self, vrouter): - haproxy_cfg = '/var/lib/contrail/loadbalancer/%s/haproxy.conf'%self.uuid - for key,value in self.custom_attr.iteritems(): - cmd = custom_attributes_dict[key] - if cmd.startswith('option '): - value = '' if value == 'True' else 'no' - cmd_str = 'grep "%s" %s | grep -v grep'%(cmd, haproxy_cfg) - ret = self.inputs.run_cmd_on_server(vrouter, cmd_str) - if not ret or 'No such file or directory' in ret or\ - cmd not in ret or str(value) not in ret: - self.logger.debug('custom attr (%s, %s) not found ' - 'for LB %s @ %s'%(key, 
value, self.uuid, - vrouter)) - return False - return True - - @retry(6, 10) - def verify_netns_instance_launched(self): - active_vr = self.get_active_vrouter() - active_vm = self.get_active_instance() - if not self.is_instance_launched(active_vm, active_vr): - self.logger.warn('Netns launch verification failed on %s'%active_vr) - return False - if len(self.inputs.compute_ips) > 1: - standby_vr = self.get_standby_vrouter() - standby_vm = self.get_standby_instance() - if not self.is_instance_launched(standby_vm, standby_vr): - self.logger.warn('Netns launch verification failed ' - ' on %s'%standby_vr) - return False - self.logger.debug('Netns instances got launched') - return True - - @retry(6, 10) - def verify_fip_in_agent(self): - exp_label = self.get_vip_label() - active_vr = self.get_active_vrouter() - if not active_vr or not exp_label or exp_label < 1: - self.logger.warn('LB(%s): unable to find active vr'%self.uuid) - return False - inspect_h = self.connections.agent_inspect[active_vr] - vn_fq_name = ':'.join(self.id_to_fq_name(self.fip_net_id)) - route = inspect_h.get_vna_active_route(ip=self.fip_ip, - prefix='32', - vn_fq_name=vn_fq_name) - if not route or exp_label != route['path_list'][0]['active_label']: - self.logger.warn('LB: agent: label doesnt match for fip ip %s, ' - 'expected %s: actual %s'%(self.fip_ip, - exp_label, route['path_list'][0]['active_label'] - if route else None)) - return False - return True - # end verify_fip_not_in_agent - - @retry(6, 10) - def verify_vip_in_agent(self): - exp_label = self.get_vip_label() - active_vr = self.get_active_vrouter() - if not active_vr or not exp_label: - self.logger.warn('LB(%s): unable to find active vr'%self.uuid) - return False - inspect_h = self.connections.agent_inspect[active_vr] - vn_fq_name = ':'.join(self.id_to_fq_name(self.vip_net_id)) - route = inspect_h.get_vna_active_route(ip=self.vip_ip, - prefix='32', - vn_fq_name=vn_fq_name) - if not route or exp_label != 
route['path_list'][0]['active_label']: - self.logger.warn('LB: agent: label doesnt match for vip ip %s, ' - 'expected %s: actual %s'%(self.vip_ip, - exp_label, route['path_list'][0]['active_label'] - if route else None)) - return False - return True - - @retry(6, 10) - def verify_fip_not_in_agent(self): - vn_fq_name = ':'.join(self.id_to_fq_name(self.fip_net_id)) - for compute_ip in self.inputs.compute_ips: - inspect_h = self.connections.agent_inspect[compute_ip] - route = inspect_h.get_vna_active_route(ip=self.fip_ip, - prefix='32', - vn_fq_name=vn_fq_name) - if route: - self.logger.warn('FIP %s still present in Agent %s' - %(self.fip_ip, compute_ip)) - return False - self.logger.debug('FIP %s is removed from agent %s' - %(self.fip_ip, compute_ip)) - return True - # end verify_fip_not_in_agent - - @retry(6, 10) - def verify_vip_not_in_agent(self): - active_vr = self.get_active_vrouter() - if not active_vr: - self.logger.warn('LB(%s): unable to find active vr'%self.uuid) - return True - inspect_h = self.connections.agent_inspect[active_vr] - vn_fq_name = ':'.join(self.id_to_fq_name(self.vip_net_id)) - route = inspect_h.get_vna_active_route(ip=self.vip_ip, - prefix='32', - vn_fq_name=vn_fq_name) - if route: - self.logger.warn('LB: vip route %s still found in %s' - %(self.vip_ip, vn_fq_name)) - return False - return True - - @retry(6, 10) - def verify_netns_instance_deleted(self): - active_vr = self.get_active_vrouter() - active_vm = self.get_active_instance() - assert not self.is_instance_launched(active_vm, active_vr) - if len(self.inputs.compute_ips) > 1: - standby_vr = self.get_standby_vrouter() - standby_vm = self.get_standby_instance() - assert not self.is_instance_launched(standby_vm, standby_vr) - self.logger.debug('Netns instance got deleted') - return True - -def setup_test_infra(): - import logging - from common.contrail_test_init import ContrailTestInit - from common.connections import ContrailConnections - from common.log_orig import ContrailLogger - 
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN) - logging.getLogger('paramiko.transport').setLevel(logging.WARN) - logging.getLogger('keystoneclient.session').setLevel(logging.WARN) - logging.getLogger('keystoneclient.httpclient').setLevel(logging.WARN) - logging.getLogger('neutronclient.client').setLevel(logging.WARN) - logger = ContrailLogger('event') - logger.setUp() - mylogger = logger.logger - inputs = ContrailTestInit('./sanity_params.ini', logger=mylogger) - connections = ContrailConnections(inputs=inputs, logger=mylogger) - return connections - -def tracefunc(frame, event, arg, indent=[0]): - if event == "call": - indent[0] += 2 - if frame.f_code.co_name.startswith('verify_'): - print "-" * indent[0] + "> call function", frame.f_code.co_name - elif event == "return": -# if frame.f_code.co_name.startswith('verify_'): -# print "<" + "-" * indent[0], "exit function", frame.f_code.co_name, frame.f_code.co_names - indent[0] -= 2 - return tracefunc - -if __name__ == "__main__": - import sys - from vn_test import VNFixture - from vm_test import VMFixture -# sys.settrace(tracefunc) -# obj = LBaasFixture(api_type='neutron', name='LB', connections=setup_test_infra(), network_id='4b39a2bd-4528-40e8-b848-28084e59c944', members={'vms': ['a72ad607-f1ca-44f2-b31e-e825a3f2d408'], 'address': ['192.168.1.10']}, vip_net_id='4b39a2bd-4528-40e8-b848-28084e59c944', protocol='TCP', port='22', healthmonitors=[{'delay':5, 'timeout':5, 'max_retries':5, 'probe_type':'PING'}]) - conn = setup_test_infra() - vnfix = VNFixture(connections=conn) - vnfix.setUp() - vip_fix = VNFixture(connections=conn) - vip_fix.setUp() - fip_fix = VNFixture(connections=conn, router_external=True) - fip_fix.setUp() - subnet = vnfix.get_cidrs()[0] - vm_fix = VMFixture(connections=conn, vn_obj=vnfix.obj) - vm_fix.setUp() - obj = LBaasFixture(api_type='neutron', name='LB', connections=conn, network_id=vnfix.uuid, - members={'address': [get_random_ip(subnet)], 'vms': [vm_fix.vm_id]}, - 
vip_net_id=vip_fix.uuid, fip_net_id=fip_fix.uuid, protocol='TCP', port='22', - healthmonitors=[{'delay':5, 'timeout':5, 'max_retries':5, 'probe_type':'PING'}], - custom_attr={'max_conn': 100, 'max_sess_rate': 20, 'server_timeout': 50000, 'rate_limit_sessions': 10, 'http_server_close': "True"}) - obj.setUp() -# obj = LBaasFixture(api_type='neutron', uuid='58e5fb2c-ec47-4eb8-b4bf-9c66b0473f78', connections=setup_test_infra()) - obj.verify_on_setup() - obj.delete_custom_attr('max_sess_rate') - obj.add_custom_attr('client_timeout', 20000) - obj.delete_custom_attr('server_timeout') - obj.add_custom_attr('max_sess_rate', 20000) - obj.delete_custom_attr('rate_limit_sessions') - obj.add_custom_attr('rate_limit_sessions', 20) - obj.delete_custom_attr('max_conn') - obj.add_custom_attr('max_conn', 20) - obj.delete_custom_attr('http_server_close') - obj.add_custom_attr('http_server_close', "False") - obj.verify_on_setup() - obj.create_fip_on_vip() - obj.verify_on_setup() - obj.delete_fip_on_vip() - obj.verify_on_setup() - obj.delete_vip() - obj.verify_on_setup() - obj.check_and_create_vip() - obj.verify_on_setup() - obj.delete_member(address=obj.member_ips[1]) - obj.verify_on_setup() - obj.create_member(address=get_random_ip(subnet)) - obj.verify_on_setup() - obj.delete_hmon(obj.hmons.keys()[0]) - obj.verify_on_setup() - obj.create_hmon({'delay': 5, 'max_retries': 5, 'probe_type': 'PING', 'timeout': 10}) - obj.verify_on_setup() - obj.cleanUp() - vm_fix.cleanUp() - vnfix.cleanUp() - vip_fix.cleanUp() - fip_fix.cleanUp() diff --git a/fixtures/lif_fixture.py b/fixtures/lif_fixture.py deleted file mode 100644 index 82a76c454..000000000 --- a/fixtures/lif_fixture.py +++ /dev/null @@ -1,152 +0,0 @@ -import vnc_api_test - -class LogicalInterfaceFixture(vnc_api_test.VncLibFixture): - - '''Fixture to handle Logical Interface object in - a phyiscal port - - Mandatory: - :param name : name of the lif - :param pif_id : Physical interface UUID - One of pif_id or pif_obj is mandatory - 
:param pif_obj : PhysicalInterface object which is to be the parent - of this object - One of pif_id or pif_obj is mandatory - - Optional: - :params vlan_id : Default is 0 - :param vmi_ids : List of vmi ids part of this lif, default is [] - - Inherited parameters: - :param domain : default is default-domain - :param project_name : default is admin - :param cfgm_ip : default is 127.0.0.1 - :param api_port : default is 8082 - :param connections : ContrailConnections object. default is None - :param username : default is admin - :param password : default is contrail123 - :param auth_server_ip : default is 127.0.0.1 - ''' - - def __init__(self, *args, **kwargs): - super(LogicalInterfaceFixture, self).__init__(self, *args, **kwargs) - self.name = args[0] - self.pif_id = kwargs.get('pif_id',None) - self.pif_obj = kwargs.get('pif_obj',None) - if not (self.pif_obj or self.pif_id): - raise TypeError('One of pif_id or pif_obj is mandatory') - vlan_id = kwargs.get('vlan_id', 0) - self.vmi_ids = kwargs.get('vmi_ids', []) - - self.vlan_id = int(vlan_id) - - self.already_present = False - - self.vn_obj = None - # end __init__ - - def setUp(self): - super(LogicalInterfaceFixture, self).setUp() - if self.pif_obj: - self.pif_id = self.pif_obj.uuid - else: - self.pif_obj = self.vnc_api_h.physical_interface_read(id=self.pif_id) - lif_fq_name = self.pif_obj.fq_name[:] - lif_fq_name.append(self.name) - self.fq_name = lif_fq_name - - try: - self.obj = self.vnc_api_h.logical_interface_read( - fq_name=lif_fq_name) - self.already_present = True - self.logger.debug('Logical port %s already present' % ( - lif_fq_name)) - except vnc_api_test.NoIdError: - self.create_lif() - - if self.vmi_ids: - for vmi_id in self.vmi_ids: - vmi_obj = self.vnc_api_h.virtual_machine_interface_read( - id=vmi_id) - self.obj.add_virtual_machine_interface(vmi_obj) - self.vnc_api_h.logical_interface_update(self.obj) - # end setUp - - def create_lif(self): - self.logger.info('Creating Logical port %s' % 
(self.fq_name)) - lif_obj = vnc_api_test.LogicalInterface(name=self.name, - parent_obj=self.pif_obj, - display_name=self.name) - lif_obj.set_logical_interface_vlan_tag(self.vlan_id) - self.uuid = self.vnc_api_h.logical_interface_create(lif_obj) - self.obj = self.vnc_api_h.logical_interface_read(id=self.uuid) - # end create_lif - - def set_vlan_tag(self, vlan_id=0): - self.vlan_id = vlan_id - self.obj = self.vnc_api_h.logical_interface_read(id=self.uuid) - lif_obj.set_logical_interface_vlan_tag(vlan_id) - self.vnc_api_h.logical_interface_update(lif_obj) - # end set_vlan_tag - - def cleanUp(self): - super(LogicalInterfaceFixture, self).cleanUp() - do_cleanup = True - if self.already_present: - do_cleanup = False - self.logger.debug('Skipping deletion of logical port %s' % ( - self.fq_name)) - self.clear_vmi_mapping() - if do_cleanup: - self.delete_lif() - # end cleanUp - - def clear_vmi_mapping(self): - ''' Disassociate all vmis from this lif - ''' - self.logger.debug('Disassociating all vmis from %s' % (self.fq_name)) - self.obj = self.vnc_api_h.logical_interface_read(id=self.uuid) - self.obj.set_virtual_machine_interface_list([]) - self.vnc_api_h.logical_interface_update(self.obj) - # end clear_vmi_mapping - - - def delete_lif(self): - self.clear_vmi_mapping() - self.logger.info('Deleting Logical port %s' % (self.fq_name)) - self.vnc_api_h.logical_interface_delete(id=self.uuid) - # end delete_lif - - - def add_virtual_machine_interface(self, vmi_id): - self.logger.info('Adding VMI %s to logical interface %s' % ( - vmi_id, self.fq_name)) - vmi_obj = self.vnc_api_h.virtual_machine_interface_read(id=vmi_id) - self.obj.add_virtual_machine_interface(vmi_obj) - self.vnc_api_h.logical_interface_update(self.obj) - - def delete_virtual_machine_interface(self, vmi_id): - self.logger.info('Deleting VMI %s from logical interface %s' % ( - vmi_id, self.fq_name)) - vmi_obj = self.vnc_api_h.virtual_machine_interface_read(id=vmi_id) - 
self.obj.del_virtual_machine_interface(vmi_obj) - self.vnc_api_h.logical_interface_update(self.obj) - -# end LogicalInterfaceFixture - -if __name__ == "__main__": - device_id = 'e122f6b2-5d5c-4f2e-b665-d69dba447bdf' - from pif_fixture import PhysicalInterfaceFixture - from port_fixture import PortFixture - pif_obj = PhysicalInterfaceFixture(name='ge-0/0/0', device_id=device_id) - pif_obj.setUp() - - vn_id = '1c83bed1-7d24-4414-9aa2-9d92975bc86f' - subnet_id = '49fea486-57ab-4056-beb3-d311a385814e' - port_fixture = PortFixture( - vn_id=vn_id, api_type='contrail', mac_address="00:00:00:00:00:01", - fixed_ips=[{'subnet_id': subnet_id, 'ip_address': '10.1.1.20'}]) - port_fixture.setUp() - lif_obj = LogicalInterfaceFixture( - name='ge-0/0/0.0', pif_id=pif_obj.uuid, vmi_ids=[port_fixture.uuid]) - lif_obj.setUp() diff --git a/fixtures/mock_generator.py b/fixtures/mock_generator.py deleted file mode 100644 index 3abd23f64..000000000 --- a/fixtures/mock_generator.py +++ /dev/null @@ -1,69 +0,0 @@ -import fixtures -from tcutils.util import * -from contrail_fixtures import * -from fabric.context_managers import settings, hide - - -class MockGeneratorFixture(fixtures.Fixture): - - ''' - Fixture to handle creation, verification and deletion of mock generator. 
- ''' - - def __init__(self, connections, inputs, num_generators, - num_instances_per_generator, num_networks, - num_flows_per_instance): - self.connections = connections - self.inputs = inputs - self.logger = inputs.logger - self.num_generators = num_generators - self.MAX_GENERATORS_PER_PROCESS = 300 - self.num_instances_per_generator = num_instances_per_generator - self.num_networks = num_networks - self.num_flows_per_instance = num_flows_per_instance - # end __init__ - - def setUp(self): - super(MockGeneratorFixture, self).setUp() - ncomputes = len(self.inputs.compute_ips) - ngens_per_host = self.num_generators / ncomputes - nprocess_per_host = ngens_per_host / self.MAX_GENERATORS_PER_PROCESS - if ngens_per_host % self.MAX_GENERATORS_PER_PROCESS: - nprocess_per_host = nprocess_per_host + 1 - for host_ip in self.inputs.compute_ips: - index = self.inputs.compute_ips.index(host_ip) - ncollectors = len(self.inputs.collector_ips) - collector_ip = self.inputs.collector_ips[index % ncollectors] - collector = collector_ip + ':8086' - cmd = "/opt/contrail/vrouter-venv/bin/run_mock_generator --collectors " + \ - collector - username = self.inputs.host_data[host_ip]['username'] - password = self.inputs.host_data[host_ip]['password'] - for num in range(nprocess_per_host): - if num == nprocess_per_host - 1 and ngens_per_host % self.MAX_GENERATORS_PER_PROCESS: - ngens = ngens_per_host % self.MAX_GENERATORS_PER_PROCESS - else: - ngens = self.MAX_GENERATORS_PER_PROCESS - cmd_ngen = " --num_generators " + str(ngens) - cmd_instances = " --num_instances_per_generator " + \ - str(self.num_instances_per_generator) - cmd_networks = " --num_networks " + str(self.num_networks) - cmd_flows = " --num_flows_per_instance " + \ - str(self.num_flows_per_instance) - issue_cmd = cmd + cmd_ngen + \ - cmd_instances + cmd_networks + cmd_flows - self.logger.info('Starting %s in %s' % - (issue_cmd, self.get_node_name(host_ip))) - output = self.inputs.run_cmd_on_server(host_ip, issue_cmd, - 
username, password, False) - # end setUp - - def get_node_name(self, ip): - return self.inputs.host_data[ip]['name'] - # end get_node_name - - def cleanUp(self): - super(MockGeneratorFixture, self).cleanUp() - # end cleanUp - -# end class MockGeneratorFixture diff --git a/fixtures/multiple_vn_vm_test.py b/fixtures/multiple_vn_vm_test.py deleted file mode 100644 index 4d3353df4..000000000 --- a/fixtures/multiple_vn_vm_test.py +++ /dev/null @@ -1,247 +0,0 @@ -# Need to import path to test/fixtures and test/scripts/ -# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/' -# -# n specific tests, -# You can do 'python -m testtools.run -l tests' -# Set the env variable PARAMS_FILE to point to your ini file. Else it will try to pick params.ini in PWD -# -import os -from time import sleep - -from common.openstack_libs import nova_client as mynovaclient -from common.openstack_libs import nova_exception as novaException -import fixtures -from common.contrail_test_init import ContrailTestInit -from vn_test import * -from quantum_test import * -from vnc_api_test import * -from nova_test import * -from vm_test import * -from common.connections import ContrailConnections -from floating_ip import * -from policy_test import * -from contrail_fixtures import * -from tcutils.util import * -import threading -import Queue - - -class create_multiple_vn_and_multiple_vm_fixture(fixtures.Fixture): - -# @classmethod - def __init__(self, connections, inputs, policy_objs=[], subnets=[], project_name=None, image_name='ubuntu', flavor='contrail_flavor_tiny', vn_name='vn', vm_name='vm', vn_count=1, vm_count=2, subnet_count=2, af=None, userdata=None): - """ - creates a dict of the format: {vn_name:{vm_name:vm_obj,...}} - """ - self.connections = connections - self.inputs = inputs - if not project_name: - project_name = self.inputs.project_name - self.project_name = project_name - self.vn_name = vn_name - self.vn_count = vn_count - self.stack = af or self.inputs.get_af() - 
self.subnet_count = subnet_count - self.vm_name = vm_name - self.vm_count = vm_count - self.image_name = image_name - self.flavor = flavor - self.nova_h = self.connections.nova_h - self.q = Queue.Queue() - self.vn_threads = [] - self.vm_threads = [] - self.userdata = userdata - self.nova_h.get_image(self.image_name) - self.random_subnets = [] - - def calculateSubnetAF(self, af): - while True: - network=get_random_cidr(af=af, mask=SUBNET_MASK[af]['min']) - for rand_net in self.random_subnets: - if not cidr_exclude(network, rand_net): - break - else: - break - net, plen = network.split('/') - plen = int(plen) - max_plen = SUBNET_MASK[af]['max'] - reqd_plen = max_plen - (int(self.subnet_count) - 1).bit_length() - if plen > reqd_plen: - max_subnets = 2 ** (max_plen - plen) - raise Exception("Network prefix %s can be subnetted " - "only to maximum of %s subnets" % (network, max_subnets)) - - subnets = list(IPNetwork(network).subnet(plen)) - return map(lambda subnet: subnet.__str__(), subnets[:]) - - def calculateSubnet(self): - self.subnet_list = [] - if 'v4' in self.stack or 'dual' in self.stack: - self.subnet_list.extend(self.calculateSubnetAF(af='v4')) - if 'v6' in self.stack or 'dual' in self.stack: - self.subnet_list.extend(self.calculateSubnetAF(af='v6')) - self.random_subnets.extend(self.subnet_list) - - def createMultipleVN(self): - - self.vn_obj_dict = {} - self.vn_keylist = [] - self.vn_valuelist = [] - for x in range(self.vn_count): - try: - vn_name = self.vn_name - vn_name = vn_name + str(x) - self.calculateSubnet() - vn_obj = VNFixture( - project_name=self.project_name, connections=self.connections, - vn_name=vn_name, inputs=self.inputs, subnets=self.subnet_list, af=self.stack) - vn_obj.setUp() - self.vn_keylist.append(vn_name) - self.vn_valuelist.append(vn_obj) - except Exception as e: - print e - raise - count = 0 - - self.vn_obj_dict = dict(zip(self.vn_keylist, self.vn_valuelist)) - - def createMultipleVM(self): - - self.vm_obj_dict = {} - 
self.vm_keylist = [] - self.vm_valuelist = [] - - self.vm_per_vn_dict = {} - self.vm_per_vn_list = [] - # for each vn, creating the number of vms - start = 0 - count = 0 - try: - for k in self.vn_keylist: - self.vn_obj = self.vn_obj_dict[k].obj - for c in range(self.vm_count): - vm_name = '%s_%s_%s' % (k, self.vm_name, c) - vm_fixture = VMFixture(connections=self.connections, - vn_obj=self.vn_obj, vm_name=vm_name, project_name=self.inputs.project_name, - userdata=self.userdata, image_name=self.image_name, flavor=self.flavor) - t = threading.Thread(target=vm_fixture.setUp, args=()) - self.vm_threads.append(t) - count += 1 - self.vm_keylist.append(vm_name) - self.vm_valuelist.append(vm_fixture) - self.vm_obj_dict = dict( - zip(self.vm_keylist, self.vm_valuelist)) - self.vm_per_vn_list.append(self.vm_obj_dict) - self.vm_per_vn_dict = dict( - zip(self.vn_keylist, self.vm_per_vn_list)) - except Exception as e: - print e - for thread in self.vm_threads: - time.sleep(3) - thread.start() - - for thread in self.vm_threads: - thread.join(5) - - def verify_vns_on_setup(self): - try: - result = True - verify_threads = [] - for vn_name, vn_obj in self.vn_obj_dict.items(): - t = threading.Thread(target=vn_obj.verify_on_setup, args=()) - verify_threads.append(t) - for thread in verify_threads: - time.sleep(0.5) - thread.daemon = True - thread.start() - for thread in verify_threads: - thread.join(10) - for vn_name, vn_obj in self.vn_obj_dict.items(): - if not vn_obj.verify_result: - result = result and False - except Exception as e: - print e - result = result and False - finally: - return result - - def verify_vms_on_setup(self): - try: - result = True - verify_threads = [] - for vm_fix in self.vm_valuelist: - t = threading.Thread(target=vm_fix.verify_on_setup, args=()) - verify_threads.append(t) - for thread in verify_threads: - time.sleep(0.5) - # thread.daemon = True - thread.start() - for thread in verify_threads: - thread.join(60) - for vm_fix in self.vm_valuelist: - if not 
vm_fix.verify_vm_flag: - result = result and False - except Exception as e: - print e - result = result and False - finally: - return result - - def wait_till_vms_are_up(self): - try: - result = True - verify_threads = [] - for vm_fix in self.vm_valuelist: - t = threading.Thread(target=vm_fix.wait_till_vm_is_up, args=()) - verify_threads.append(t) - for thread in verify_threads: - time.sleep(0.5) - # thread.daemon = True - thread.start() - for thread in verify_threads: - thread.join(20) - for vm_fix in self.vm_valuelist: - if not vm_fix.verify_vm_flag: - result = result and False - except Exception as e: - print e - result = result and False - finally: - return result - - def setUp(self): - super(create_multiple_vn_and_multiple_vm_fixture, self).setUp() - self.createMultipleVN() - time.sleep(5) - self.createMultipleVM() - time.sleep(5) - - def cleanUp(self): - super(create_multiple_vn_and_multiple_vm_fixture, self).cleanUp() - vm_thread_to_delete = [] - vn_thread_to_delete = [] - try: - for vm_fix in self.vm_valuelist: - print 'deleteing vm' - t = threading.Thread(target=vm_fix.cleanUp, args=()) - vm_thread_to_delete.append(t) - if vm_thread_to_delete: - for vm_thread in vm_thread_to_delete: - time.sleep(3) - vm_thread.start() - for vm_thread in vm_thread_to_delete: - vm_thread.join() - except Exception as e: - print e - time.sleep(10) - - try: - for vn_name, vn_obj in self.vn_obj_dict.items(): - vn_obj.cleanUp() - except Exception as e: - print e - try: - for vn_name, vn_obj in self.vn_obj_dict.items(): - assert vn_obj.verify_not_in_result - except Exception as e: - print e diff --git a/fixtures/nova_test.py b/fixtures/nova_test.py deleted file mode 100644 index 3729175e9..000000000 --- a/fixtures/nova_test.py +++ /dev/null @@ -1,748 +0,0 @@ -import os -from common.openstack_libs import nova_client as mynovaclient -from common.openstack_libs import nova_exception as novaException -from fabric.context_managers import settings, hide, cd, shell_env -from fabric.api 
import run, local, env -from fabric.operations import get, put -from fabric.contrib.files import exists -from tcutils.util import * -from tcutils.cfgparser import parse_cfg_file -from tcutils.timeout import timeout, TimeoutError -import socket -import time -import re -from common import vcenter_libs - -#from contrail_fixtures import contrail_fix_ext - -#@contrail_fix_ext (ignore_verify=True, ignore_verify_on_setup=True) - - -class NovaHelper(): - - def __init__(self, inputs, - project_name, - key='key1', - username=None, - password=None): - httpclient = None - self.inputs = inputs - self.username = username or inputs.stack_user - self.password = password or inputs.stack_password - self.project_name = project_name - self.cfgm_ip = inputs.cfgm_ip - self.openstack_ip = inputs.openstack_ip - # 1265563 keypair name can only be alphanumeric. Fixed in icehouse - self.key = self.project_name+self.username+key - self.obj = None - if not self.inputs.ha_setup: - self.auth_url = os.getenv('OS_AUTH_URL') or \ - 'http://' + self.openstack_ip + ':5000/v2.0' - else: - self.auth_url = os.getenv('OS_AUTH_URL') or \ - 'http://' + self.inputs.auth_ip + ':5000/v2.0' - self.logger = inputs.logger - self.images_info = parse_cfg_file('configs/images.cfg') - self.flavor_info = parse_cfg_file('configs/flavors.cfg') - self.endpoint_type = inputs.endpoint_type - self._connect_to_openstack() - self.hypervisor_type = os.environ.get('HYPERVISOR_TYPE') \ - if os.environ.has_key('HYPERVISOR_TYPE') \ - else None - # end __init__ - - def _connect_to_openstack(self): - insecure = bool(os.getenv('OS_INSECURE',True)) - self.obj = mynovaclient.Client('2', - username=self.username, - project_id=self.project_name, - api_key=self.password, - auth_url=self.auth_url, - insecure=insecure, - endpoint_type=self.endpoint_type - ) - if 'keypair' not in env: - env.keypair = dict() - if not env.keypair.get(self.key, False): - try: - f = '/tmp/%s'%self.key - lock = Lock(f) - lock.acquire() - env.keypair[self.key] = 
self._create_keypair(self.key) - finally: - lock.release() - self.compute_nodes = self.get_compute_host() - self.zones = self._list_zones() - self.hosts_list = [] - self.hosts_dict = self._list_hosts() - # end setUp - - def get_hosts(self, zone=None): - if zone and self.hosts_dict.has_key(zone): - return self.hosts_dict[zone][:] - else: - return self.hosts_list - - def get_zones(self): - return self.zones[:] - - def _list_hosts(self): - nova_computes = self.obj.hosts.list() - nova_computes = filter(lambda x: x.zone != 'internal', nova_computes) - host_dict = dict() - for compute in nova_computes: - self.hosts_list.append(compute.host_name) - host_list = host_dict.get(compute.zone, None) - if not host_list: host_list = list() - host_list += [compute.host_name] - host_dict[compute.zone] = host_list - return host_dict - - def _list_zones(self): - zones = self.obj.availability_zones.list() - zones = filter(lambda x: x.zoneName != 'internal', zones) - return map(lambda x: x.zoneName, zones) - - def get_handle(self): - return self.obj - # end get_handle - - @retry(delay=5, tries=20) - def check_if_image_active(self, image_id): - ''' Check whether the given image id is in 'active' state ''' - self.logger.debug('Check whether image by uuid %s is active'%image_id) - image = self.obj.images.get(image_id) - if image.status.lower() == 'active': - return (True, image) - self.logger.info('Image %s is not active.'%image.name) - return (False, None) - - def find_image(self, image_name): - got_image = None - images_list = self.obj.images.list() - for image in images_list: - if image.name == image_name: - (rv, got_image) = self.check_if_image_active(image.id) - if rv is True: - return got_image - # end for - if not got_image: - self.logger.debug('Image by name %s either not found or not active'% - (image_name)) - return got_image - # end find_image - - def get_image(self, image_name='ubuntu'): - got_image = self.find_image(image_name) - if not got_image: - 
self._install_image(image_name=image_name) - got_image = self.find_image(image_name) - return got_image - # end get_image - - def get_flavor(self, name): - try: - flavor = self.obj.flavors.find(name=name) - except novaException.NotFound: - self._install_flavor(name=name) - flavor = self.obj.flavors.find(name=name) - return flavor - # end get_flavor - - def get_vm_if_present(self, vm_name=None, project_id=None, vm_id=None): - try: - vm_list = self.obj.servers.list(search_opts={"all_tenants": True}) - for vm in vm_list: - if project_id and vm.tenant_id != self.strip(project_id): - continue - if (vm_name and vm.name == vm_name) or (vm_id and vm.id == vm_id): - return vm - except novaException.NotFound: - return None - except Exception: - self.logger.exception('Exception while finding a VM') - return None - return None - # end get_vm_if_present - - def get_vm_by_id(self, vm_id): - try: - vm = self.obj.servers.find(id=vm_id) - if vm: - return vm - except novaException.NotFound: - return None - except Exception: - self.logger.exception('Exception while finding a VM') - return None - # end get_vm_by_id - - def _install_flavor(self, name): - flavor_info = self.flavor_info[name] - try: - self.obj.flavors.create(name=name, - vcpus=flavor_info['vcpus'], - ram=flavor_info['ram'], - disk=flavor_info['disk']) - except Exception, e: - self.logger.exception('Exception adding flavor %s' % (name)) - raise e - # end _install_flavor - - def _install_image(self, image_name): - result = False - self.logger.debug('Installing image %s'%image_name) - image_info = self.images_info[image_name] - webserver = image_info['webserver'] or \ - os.getenv('IMAGE_WEB_SERVER', '10.204.217.158') - location = image_info['location'] - params = image_info['params'] - image = image_info['name'] - image_type = image_info['type'] - build_path = 'http://%s/%s/%s' % (webserver, location, image) - - #workaround for bug https://bugs.launchpad.net/juniperopenstack/+bug/1447401 [START] - #Can remove this when 
above bug is fixed - if image_type == 'docker': - for host in self.hosts_dict['nova/docker']: - username = self.inputs.host_data[host]['username'] - password = self.inputs.host_data[host]['password'] - ip = self.inputs.host_data[host]['host_ip'] - with settings( - host_string='%s@%s' % (username, ip), - password=password, warn_only=True, abort_on_prompts=False): - self.load_docker_image_on_host(build_path) - #workaround for bug https://bugs.launchpad.net/juniperopenstack/+bug/1447401 [END] - - username = self.inputs.host_data[self.openstack_ip]['username'] - password = self.inputs.host_data[self.openstack_ip]['password'] - build_path = 'http://%s/%s/%s' % (webserver, location, image) - with settings( - host_string='%s@%s' % (username, self.openstack_ip), - password=password, warn_only=True, abort_on_prompts=False): - return self.copy_and_glance(build_path, image_name, image, params, image_type) - # end _install_image - - def load_docker_image_on_host(self, build_path): - run('pwd') - unzip = '' - if '.gz' in build_path: - unzip = ' gunzip | ' - image_gz = build_path.split('/')[-1] - image_tar = image_gz.split('.gz')[0] - image_name = image_tar.split('.tar')[0] - # Add the image to docker - cmd = "wget %s -P /tmp" % build_path - self.execute_cmd_with_proxy(cmd) - cmd = "gunzip /tmp/%s" % image_gz - self.execute_cmd_with_proxy(cmd) - cmd = "docker load -i /tmp/%s" % image_tar - self.execute_cmd_with_proxy(cmd) - - def get_image_account(self, image_name): - ''' - Return the username and password considered for the image name - ''' - return([self.images_info[image_name]['username'], - self.images_info[image_name]['password']]) - # end get_image_account - - def get_default_image_flavor(self, image_name): - return self.images_info[image_name]['flavor'] - - def execute_cmd_with_proxy(self, cmd): - if self.inputs.http_proxy: - with shell_env(http_proxy=self.inputs.http_proxy): - sudo(cmd) - else: - sudo(cmd) - - def copy_and_glance(self, build_path, generic_image_name, 
image_name, params, image_type): - """copies the image to the host and glances. - Requires Image path - """ - run('pwd') - unzip = '' - if '.gz' in build_path: - unzip = ' gunzip | ' - - cmd = '(source /etc/contrail/openstackrc; wget -O - %s | %s glance image-create --name "%s" \ - --is-public True %s)' % (build_path, unzip, generic_image_name, params) - - self.execute_cmd_with_proxy(cmd) - - return True - - def _create_keypair(self, key_name): - username = self.inputs.host_data[self.cfgm_ip]['username'] - password = self.inputs.host_data[self.cfgm_ip]['password'] - try: - # Check whether the rsa.pub and keypair matches - # On pre icehouse novaclient #1223934 observed so get() fails - # keypair = self.obj.keypairs.get(keypair=key_name) - keypairs = [x for x in self.obj.keypairs.list() if x.id == key_name] - if not keypairs: - raise novaException.NotFound('keypair not found') - pkey_in_nova = keypairs[0].public_key.strip() - with settings(host_string='%s@%s' % (username, self.cfgm_ip), - password=password, warn_only=True, abort_on_prompts=True): - if exists('.ssh/id_rsa.pub'): - get('.ssh/id_rsa.pub', '/tmp/') - pkey_in_host = open('/tmp/id_rsa.pub', 'r').read().strip() - if pkey_in_host == pkey_in_nova: - self.logger.debug('keypair exists') - return True - self.logger.error('Keypair and rsa.pub doesnt match.') - raise Exception('Keypair and rsa.pub doesnt match.' - ' Seems rsa keys are updated outside of test env.' - ' Delete nova keypair and restart the test') - except novaException.NotFound: - pass - #with hide('everything'): - if True: - with settings( - host_string='%s@%s' % (username, self.cfgm_ip), - password=password, warn_only=True, abort_on_prompts=True): - rsa_pub_arg = '.ssh/id_rsa' - self.logger.debug('Creating keypair') - if exists('.ssh/id_rsa.pub'): # If file exists on remote m/c - self.logger.debug('Public key exists. 
Getting public key') - get('.ssh/id_rsa.pub', '/tmp/') - else: - self.logger.debug('Making .ssh dir') - run('mkdir -p .ssh') - self.logger.debug('Removing id_rsa*') - run('rm -f .ssh/id_rsa*') - self.logger.debug('Creating key using : ssh-keygen -f -t rsa -N') - run('ssh-keygen -f %s -t rsa -N \'\'' % (rsa_pub_arg)) - self.logger.debug('Getting the created keypair') - get('.ssh/id_rsa.pub', '/tmp/') - self.logger.debug('Reading publick key') - pub_key = open('/tmp/id_rsa.pub', 'r').read() - self.obj.keypairs.create(key_name, public_key=pub_key) - return True - # end _create_keypair - - def get_nova_services(self, **kwargs): - try: - nova_services = self.obj.services.list(**kwargs) - nova_services = filter(lambda x: x.state != 'down' and x.status != 'disabled', - nova_services) - self.logger.info('Servies List from the nova obj: %s' % - nova_services) - return nova_services - except: - self.logger.debug('Unable to retrieve services from nova obj') - self.logger.debug('Using \"nova service-list\" to retrieve' - ' services info') - pass - - service_list = [] - username = self.inputs.host_data[self.openstack_ip]['username'] - password = self.inputs.host_data[self.openstack_ip]['password'] - with hide('everything'): - with settings( - host_string='%s@%s' % (username, self.openstack_ip), - password=password): - services_info = run( - 'source /etc/contrail/openstackrc; nova service-list') - services_info = services_info.split('\r\n') - get_rows = lambda row: map(str.strip, filter(None, row.split('|'))) - columns = services_info[1].split('|') - columns = map(str.strip, filter(None, columns)) - columns = map(str.lower, columns) - columns_no_binary = map(str.lower, columns) - columns_no_binary.remove('binary') - rows = map(get_rows, services_info[3:-1]) - nova_class = type('NovaService', (object,), {}) - for row in rows: - datadict = dict(zip(columns, row)) - for fk, fv in kwargs.items(): - if datadict[fk] != fv: - break - else: - if datadict['status'] == 'disabled' and \ - 
datadict['state'] == 'down': - break - service_obj = nova_class() - for key, value in datadict.items(): - setattr(service_obj, key, value) - - # Append the service into the list. - service_list.append(service_obj) - return service_list - - def create_vm(self, project_uuid, image_name, vm_name, vn_ids, - node_name=None, sg_ids=None, count=1, userdata=None, - flavor=None, port_ids=None, fixed_ips=None, zone=None): - - if node_name == 'disable': - zone = None - elif zone and node_name: - if zone not in self.zones: - raise RuntimeError("Zone %s is not available" % zone) - if node_name not in self.hosts_dict[zone]: - raise RuntimeError("Zone %s doesn't have compute with name %s" - % (zone, node_name)) - elif node_name: - nova_services = self.get_nova_services(binary='nova-compute') - for compute_svc in nova_services: - if compute_svc.host == node_name: - zone = True - break - elif (compute_svc.host in self.inputs.compute_ips and - self.inputs.host_data[node_name]['host_ip'] == compute_svc.host): - zone = True - break - if not zone: - raise RuntimeError( - "Compute host %s is not listed in nova serivce list" % node_name) - - zone = self.get_compute_node_zone(node_name) - else: - zone, node_name = self.lb_node_zone(zone) - - try: - f = '/tmp/%s'%image_name - lock = Lock(f) - lock.acquire() - image_name = self.get_image_name_for_zone(image_name=image_name, zone=zone) - image = self.get_image(image_name=image_name) - if not flavor: - flavor = self.get_default_image_flavor(image_name=image_name) - flavor = self.get_flavor(name=flavor) - finally: - lock.release() - - if userdata: - with open(userdata) as f: - userdata = f.readlines() - userdata = ''.join(userdata) - if fixed_ips: - #ToDo: msenthil - An ugly hack, have to change the logic - af_list = ['v6' if is_v6(x) else 'v4' for x in fixed_ips] - if vn_ids: - nics_list = [{'net-id': x, '%s-fixed-ip'%z: y} - for x, y, z in zip(vn_ids, fixed_ips, af_list)] - elif port_ids: - nics_list = [{'port-id': x, '%s-fixed-ip'%z: y} - 
for x, y, z in zip(port_ids, fixed_ips, af_list)] - elif port_ids: - nics_list = [{'port-id': x} for x in port_ids] - elif vn_ids: - nics_list = [{'net-id': x} for x in vn_ids] - - zone = zone + ":" + node_name - self.obj.servers.create(name=vm_name, image=image, - security_groups=sg_ids, - flavor=flavor, nics=nics_list, - key_name=self.key, availability_zone=zone, - min_count=count, max_count=count, userdata=userdata) - vm_objs = self.get_vm_list(name_pattern=vm_name, - project_id=project_uuid) - [vm_obj.get() for vm_obj in vm_objs] - self.logger.info("VM Object: (%s) Nodename: (%s) Zone: (%s)" % ( - str(vm_objs), node_name, zone)) - return vm_objs - # end create_vm - - def add_security_group(self, vm_id, secgrp): - self.obj.servers.add_security_group(vm_id, secgrp) - - def remove_security_group(self, vm_id, secgrp): - self.obj.servers.remove_security_group(vm_id, secgrp) - - def get_vm_obj(self, vm_obj, wait_time=30): - ''' It has been noticed that sometimes get() takes upto 20-30mins - in error scenarios - This method sets a timeout for the same - ''' - with timeout(seconds=wait_time): - try: - vm_obj.get() - except TimeoutError, e: - self.logger.error('Timed out while getting VM %s detail' % ( - vm_obj.name)) - # end get_vm_obj - - @retry(delay=5, tries=5) - def get_vm_detail(self, vm_obj): - try: - self.get_vm_obj(vm_obj) - if vm_obj.addresses == {} or vm_obj.status == 'BUILD': - self.logger.debug('VM %s : Status=%s, Addresses : %s' % ( - vm_obj.name, vm_obj.status, vm_obj.addresses)) - return False - else: - return True - except novaException.ClientException: - print 'Fatal Nova Exception' - self.logger.exception('Exception while getting vm detail') - return False - # end def - - @retry(tries=1, delay=60) - def _get_vm_ip(self, vm_obj, vn_name=None): - ''' Returns a list of IPs for the VM in VN. 
- - ''' - vm_ip_dict = self.get_vm_ip_dict(vm_obj) - if not vn_name: - address = list() - for ips in vm_ip_dict.itervalues(): - address.extend(ips) - return (True, address) - if vn_name in vm_ip_dict.keys() and vm_ip_dict[vn_name]: - return (True, vm_ip_dict[vn_name]) - self.logger.error('VM does not seem to have got an IP in VN %s' % (vn_name)) - return (False, []) - # end get_vm_ip - - def get_vm_ip(self, vm_obj, vn_name=None): - return self._get_vm_ip(vm_obj, vn_name)[1] - - def get_vm_ip_dict(self, vm_obj): - ''' Returns a dict of all IPs with key being VN name ''' - vm_obj.get() - ip_dict={} - for key,value in vm_obj.addresses.iteritems(): - ip_dict[key] = list() - for dct in value: - ip_dict[key].append(dct['addr']) - return ip_dict - - def strip(self, uuid): - return uuid.replace('-', '') - - def get_vm_list(self, name_pattern='', project_id=None): - ''' Returns a list of VM objects currently present. - - ''' - final_vm_list = [] - vm_list = self.obj.servers.list(search_opts={"all_tenants": True}) - for vm_obj in vm_list: - match_obj = re.match(r'%s' % - name_pattern, vm_obj.name, re.M | re.I) - if project_id: - if match_obj and vm_obj.tenant_id == self.strip(project_id): - final_vm_list.append(vm_obj) - else: - if match_obj: - final_vm_list.append(vm_obj) - # end for - return final_vm_list - - # end get_vm_list - - def get_nova_host_of_vm(self, vm_obj): - for hypervisor in self.get_nova_hypervisor_list(): - if vm_obj.__dict__['OS-EXT-SRV-ATTR:hypervisor_hostname'] is not None: - if vm_obj.__dict__['OS-EXT-SRV-ATTR:hypervisor_hostname']\ - == hypervisor.hypervisor_hostname: - if hypervisor.hypervisor_type == 'QEMU' or hypervisor.hypervisor_type == 'docker': - host_name = vm_obj.__dict__['OS-EXT-SRV-ATTR:host'] - return host_name - if 'VMware' in hypervisor.hypervisor_type: - host_name = vcenter_libs.get_contrail_vm_by_vm_uuid(self.inputs,vm_obj.id) - return host_name - else: - if vm_obj.__dict__['OS-EXT-STS:vm_state'] == "error": - self.logger.error('VM %s 
has failed to come up' %vm_obj.name) - self.logger.error('Fault seen in nova show is: %s' %vm_obj.__dict__['fault']) - else: - self.logger.error('VM %s has failed to come up' %vm_obj.name) - self.logger.error('Nova failed to get host of the VM') - # end get_nova_host_of_vm - - def get_nova_hypervisor_list(self): - #return self.obj.hypervisors.find().hypervisor_type - return self.obj.hypervisors.list() - #end - - def kill_remove_container(self, compute_host_ip, vm_id): - get_container_id_cmd = "docker ps -f name=nova-%s | cut -d ' ' -f1"\ - % vm_id - with settings( - host_string='%s@%s' % - (self.inputs.host_data[compute_host_ip]['username'], - compute_host_ip), - password=self.inputs.host_data[compute_host_ip]['password'], - warn_only=True, abort_on_prompts=False): - output = run(get_container_id_cmd) - container_id = output.split("\n")[-1] - run("docker kill %s" % container_id) - run("docker rm -f %s" % container_id) - - def delete_vm(self, vm_obj): - compute_host = self.get_nova_host_of_vm(vm_obj) - if self.get_compute_node_zone(compute_host) == 'nova/docker': - # Workaround for the bug https://bugs.launchpad.net/nova-docker/+bug/1413371 - self.kill_remove_container(compute_host, - vm_obj.id) - vm_obj.delete() - # end _delete_vm - - def get_key_file(self): - return self.tmp_key_file - - def put_key_file_to_host(self, host_ip): - username = self.inputs.host_data[self.cfgm_ip]['username'] - password = self.inputs.host_data[self.cfgm_ip]['password'] - with hide('everything'): - with settings(host_string='%s@%s' % ( - username, self.cfgm_ip), - password=password, - warn_only=True, abort_on_prompts=False): - get('.ssh/id_rsa', '/tmp/') - get('.ssh/id_rsa.pub', '/tmp/') - with hide('everything'): - with settings( - host_string='%s@%s' % (self.inputs.host_data[host_ip]['username'], - host_ip), password=self.inputs.host_data[ - host_ip]['password'], - warn_only=True, abort_on_prompts=False): - # Put the key only is the test node and cfgm node in which key - # is 
generated is different. - if self.inputs.cfgm_ips[0] != host_ip: - put('/tmp/id_rsa', '/tmp/id_rsa') - put('/tmp/id_rsa.pub', '/tmp/id_rsa.pub') - run('chmod 600 /tmp/id_rsa') - self.tmp_key_file = '/tmp/id_rsa' - - @threadsafe_generator - def get_compute_host(self): - while True: - nova_services = self.get_nova_services(binary='nova-compute') - if not nova_services: - self.logger.info('nova-compute service doesnt exist, check openstack-status') - raise RuntimeError('nova-compute service doesnt exist') - for compute_svc in nova_services: - yield (compute_svc.host, compute_svc.zone) - # end get_compute_host - - def wait_till_vm_is_active(self, vm_obj): - return self.wait_till_vm_status(vm_obj, 'ACTIVE') - # end wait_till_vm_is_active - - @retry(tries=30, delay=5) - def wait_till_vm_status(self, vm_obj, status='ACTIVE'): - try: - vm_obj.get() - if vm_obj.status == 'ACTIVE' or vm_obj.status == 'ERROR': - self.logger.info('VM %s is in %s state now' % - (vm_obj, vm_obj.status)) - return (True,vm_obj.status) - else: - self.logger.debug('VM %s is still in %s state' % - (vm_obj, vm_obj.status)) - return False - except novaException.NotFound: - self.logger.debug('VM console log not formed yet') - return False - except novaException.ClientException: - self.logger.error('Fatal Nova Exception while getting VM detail') - return False - # end wait_till_vm_status - - @retry(tries=40, delay=2) - def wait_till_vm_is_up(self, vm_obj): - try: - vm_obj.get() - - for hyper in self.obj.hypervisors.list(): - if hyper.hypervisor_hostname == getattr(vm_obj, - 'OS-EXT-SRV-ATTR:hypervisor_hostname') and ((u'VMware' in - hyper.hypervisor_type) or (u'docker' in hyper.hypervisor_type)): - # can't get console logs for VM in VMware nodes - # https://bugs.launchpad.net/nova/+bug/1199754 - return self.wait_till_vm_is_active(vm_obj) - - if 'login:' in vm_obj.get_console_output(): - self.logger.info('VM has booted up..') - return True - else: - self.logger.debug('VM not yet booted fully .. 
') - return False - except novaException.NotFound: - self.logger.debug('VM console log not formed yet') - return False - except novaException.ClientException: - self.logger.error('Fatal Nova Exception while getting VM detail') - return False - # end wait_till_vm_is_up - - def get_vm_console_output(self, vm_obj): - try: - vm_obj.get() - return vm_obj.get_console_output() - except novaException.NotFound: - self.logger.debug('VM console log not formed yet') - return None - except novaException.ClientException: - self.logger.error('Fatal Nova Exception while getting VM detail') - return None - # end get_vm_console_output - - - def get_vm_in_nova_db(self, vm_obj, node_ip): - issue_cmd = 'mysql -u root --password=%s -e \'use nova; select vm_state, uuid, task_state from instances where uuid=\"%s\" ; \' ' % ( - self.inputs.get_mysql_token(), vm_obj.id) - username = self.inputs.host_data[node_ip]['username'] - password = self.inputs.host_data[node_ip]['password'] - output = self.inputs.run_cmd_on_server( - server_ip=node_ip, issue_cmd=issue_cmd, username=username, password=password) - return output - # end get_vm_in_nova_db - - @retry(tries=10, delay=5) - def is_vm_deleted_in_nova_db(self, vm_obj, node_ip): - output = self.get_vm_in_nova_db(vm_obj, node_ip) - if 'deleted' in output and 'NULL' in output: - self.logger.info('VM %s is removed in Nova DB' % (vm_obj.name)) - return True - else: - self.logger.warn('VM %s is still found in Nova DB : %s' % - (vm_obj.name, output)) - return False - # end is_vm_in_nova_db - - def get_compute_node_zone(self, node_name): - for zone in self.hosts_dict: - if node_name in self.hosts_dict[zone]: - return zone - - def get_image_name_for_zone(self, image_name='ubuntu', zone='nova'): - image_info = self.images_info[image_name] - if zone == 'nova/docker': - return image_info['name_docker'] - else: - return image_name - - def lb_node_zone(self, zone=None): - if zone or self.hypervisor_type: - if (not zone) and self.hypervisor_type: - if 
self.hypervisor_type == 'docker': - zone = 'nova/docker' - elif self.hypervisor_type == 'qemu': - zone = 'nova' - else: - self.logger.warn("Test on hypervisor type %s not supported yet, \ - running test on qemu hypervisor" - % (self.hypervisor_type)) - zone = 'nova' - if zone not in self.zones: - raise RuntimeError("Zone %s is not available" % zone) - if not len(self.hosts_dict[zone]): - raise RuntimeError("Zone %s doesnt have any computes" % zone) - - while(True): - (node, node_zone) = next(self.compute_nodes) - if node_zone == zone: - node_name = node - break - else: - (node_name, zone) = next(self.compute_nodes) - - return (zone, node_name) - -# end NovaHelper diff --git a/fixtures/openstack.py b/fixtures/openstack.py deleted file mode 100644 index a7e28f3f0..000000000 --- a/fixtures/openstack.py +++ /dev/null @@ -1,372 +0,0 @@ -import os -from orchestrator import Orchestrator, OrchestratorAuth -from contrailapi import ContrailApi -from nova_test import NovaHelper -from quantum_test import QuantumHelper -from keystone_tests import KeystoneCommands -from common.openstack_libs import ks_exceptions -from vcenter import VcenterAuth, VcenterOrchestrator - -class OpenstackOrchestrator(ContrailApi): - - def __init__(self, inputs, username, password, project_name, project_id, - vnclib=None, logger=None, auth_server_ip=None): - self.logger = logger or logging.getLogger(__name__) - super(OpenstackOrchestrator, self).__init__(inputs, vnclib, self.logger) - self.inputs = inputs - self.quantum_h = None - self.nova_h = None - self.username = username - self.password = password - self.project_name = project_name - self.project_id = project_id - self.vnc_lib = vnclib - self.auth_server_ip = auth_server_ip - if not auth_server_ip: - self.auth_server_ip = self.inputs.auth_ip - #for vcenter as compute - self.vcntr_handle = self.get_vcenter_handle() - - def get_vcenter_handle(self): - if self.inputs.vcenter_dc: - vcntr = VcenterOrchestrator(user=self.inputs.vcenter_username, - 
pwd=self.inputs.vcenter_password, - host=self.inputs.vcenter_server, - port=self.inputs.vcenter_port, - dc_name=self.inputs.vcenter_dc, - vnc=self.vnc_lib, - inputs=self.inputs, - logger=self.logger) - else: - vcntr = None - return vcntr - - def get_network_handler(self): - if not self.quantum_h: - self.quantum_h = QuantumHelper(username=self.username, - password=self.password, - project_id=self.project_id, - auth_server_ip=self.auth_server_ip, - logger=self.logger) - self.quantum_h.setUp() - return self.quantum_h - - def get_compute_handler(self): - if not self.nova_h: - self.nova_h = NovaHelper(inputs=self.inputs, - project_name=self.project_name, - username=self.username, - password=self.password) - return self.nova_h - - def get_image_account(self, image_name): - return self.nova_h.get_image_account(image_name) - - def get_image_name_for_zone(self, image_name='ubuntu', zone='nova'): - return self.nova_h.get_image_name_for_zone(image_name, zone) - - def get_hosts(self, zone=None): - if not zone: - return self.nova_h.get_hosts() - else: - return self.nova_h.get_hosts(zone) - - def get_zones(self): - return self.nova_h.get_zones() - - def create_vm(self, vm_name, image_name, vn_objs, **kwargs): - vn_ids = [vn['network']['id'] for vn in vn_objs] - return self.nova_h.create_vm(vm_name=vm_name, image_name=image_name, vn_ids=vn_ids, **kwargs) - - def delete_vm(self, vm_obj, **kwargs): - return self.nova_h.delete_vm(vm_obj) - - def is_vm_deleted(self, vm_obj, **kwargs): - return self.nova_h.is_vm_deleted_in_nova_db(vm_obj, self._inputs.openstack_ip) - - def get_host_of_vm(self, vm_obj, **kwargs): - return self.nova_h.get_nova_host_of_vm(vm_obj) - - def get_networks_of_vm(self, vm_obj, **kwargs): - vm_obj.get() - return vm_obj.networks.keys() - - def wait_till_vm_is_active(self, vm_obj, **kwargs): - return self.nova_h.wait_till_vm_is_active(vm_obj) - - def wait_till_vm_status(self, vm_obj, status, **kwargs): - return self.nova_h.wait_till_vm_status(vm_obj, status) - - 
def get_console_output(self, vm_obj, **kwargs): - return self.nova_h.get_vm_console_output(vm_obj) - - def get_vm_by_id(self, vm_id, **kwargs): - return self.nova_h.get_vm_by_id(vm_id) - - def get_vm_if_present(self, vm_name=None, **kwargs): - return self.nova_h.get_vm_if_present(vm_name=vm_name, **kwargs) - - def get_vm_list(self, name_pattern='', **kwargs): - return self.nova_h.get_vm_list(name_pattern=name_pattern, **kwargs) - - def get_vm_detail(self, vm_obj, **kwargs): - return self.nova_h.get_vm_detail(vm_obj) - - def get_vm_ip(self, vm_obj, vn_name=None, **kwargs): - return self.nova_h.get_vm_ip(vm_obj, vn_name) - - def get_key_file(self): - return self.nova_h.get_key_file() - - def put_key_file_to_host(self, host_ip): - self.nova_h.put_key_file_to_host(host_ip) - - def create_vn(self, name, subnets, option='orch', **kwargs): - return self.quantum_h.create_network(name, subnets, **kwargs) - - def delete_vn(self, vn_obj, option='orch', **kwargs): - return self.quantum_h.delete_vn(vn_obj['network']['id']) - - def get_vn_id(self, vn_obj, option='orch', **kwargs): - return vn_obj['network']['id'] - - def get_vn_name(self, vn_obj, option='orch', **kwargs): - return vn_obj['network']['name'] - - def get_vn_obj_if_present(self, vn_name, option='orch', **kwargs): - return self.quantum_h.get_vn_obj_if_present(vn_name, **kwargs) - - def get_vn_list(self, **kwargs): - return super(OpenstackOrchestrator, self).get_vn_list(**kwargs) - - def get_policy(self, fq_name, option='orch', **kwargs): - if option == 'contrail': - return super(OpenstackOrchestrator, self).get_policy(fq_name=fq_name, **kwargs) - return self.quantum_h.get_policy_if_present(fq_name[1], fq_name[2]) - - def get_floating_ip(self, fip_id, option='orch', **kwargs): - if option == 'contrail': - return super(OpenstackOrchestrator, self).get_floating_ip(fip_id=fip_id, **kwargs) - fip = self.quantum_h.get_floatingip(fip_id) - return fip['floating_ip_address'] - - def create_floating_ip(self, pool_vn_id, 
project_obj, option='orch', **kwargs): - fip_resp = self.quantum_h.create_floatingip( - pool_vn_id, project_obj.uuid) - return (fip_resp['floatingip']['floating_ip_address'], - fip_resp['floatingip']['id']) - - def delete_floating_ip(self, fip_id, option='orch', **kwargs): - if option == 'contrail': - return super(OpenstackOrchestrator, self).delete_floating_ip(fip_id=fip_id, **kwargs) - self.quantum_h.delete_floatingip(fip_id) - - def assoc_floating_ip(self, fip_id, vm_id, option='orch', **kwargs): - if option == 'contrail': - return super(OpenstackOrchestrator, self).assoc_floating_ip(fip_id=fip_id, vm_id=vm_id, **kwargs) - update_dict = {} - update_dict['port_id'] = self.quantum_h.get_port_id(vm_id) - self.logger.debug('Associating FIP ID %s with Port ID %s' %(fip_id, - update_dict['port_id'])) - if update_dict['port_id']: - fip_resp = self.quantum_h.update_floatingip(fip_id, - {'floatingip': update_dict}) - return fip_resp - else: - return None - - def disassoc_floating_ip(self, fip_id, option='orch', **kwargs): - if option == 'contrail': - return super(OpenstackOrchestrator, self).disassoc_floating_ip(fip_id=fip_id, **kwargs) - update_dict = {} - update_dict['port_id'] = None - self.logger.debug('Disassociating port from FIP ID : %s' %(fip_id)) - fip_resp = self.quantum_h.update_floatingip(fip_id, - {'floatingip': update_dict}) - return fip_resp - - def get_image_name_for_zone(self, image_name='ubuntu', zone='nova'): - return self.nova_h.get_image_name_for_zone(image_name, zone) - - def get_vm_tap_interface(self,obj): - return obj['name'] - - def add_security_group(self, vm_id, sg_id, option='orch', **kwargs): - if option == 'contrail': - return super(OpenstackOrchestrator, self).add_security_group(vm_id=vm_id, sg_id=sg_id, **kwargs) - return self.nova_h.add_security_group(vm_id, sg_id) - - def remove_security_group(self, vm_id, sg_id, option='orch', **kwargs): - if option == 'contrail': - return super(OpenstackOrchestrator, 
self).remove_security_group(vm_id=vm_id, sg_id=sg_id, **kwargs) - return self.nova_h.remove_security_group(vm_id, sg_id) - - def create_security_group(self, sg_name, parent_fqname, sg_entries, option='orch', **kwargs): - if option == 'contrail': - return super(OpenstackOrchestrator, self).create_security_group(sg_name=sg_name, - parent_fqname=parent_fqname, sg_entries=sg_entries, **kwargs) - sg = self.quantum_h.create_security_group(sg_name) - if not sg: - self.logger.error("security group creation failed through quantum") - return False - self.quantum_h.delete_default_egress_rule(sg['id']) - self._create_rules_in_quantum(sg['id'],secgrp_rules=sg_entries) - return sg['id'] - - def delete_security_group(self, sg_id, option='orch', **kwargs): - if option == 'contrail': - return super(OpenstackOrchestrator, self).delete_security_group(sg_id=sg_id, **kwargs) - self.quantum_h.delete_security_group(sg_id) - - def get_security_group_rules(self, sg_id, option='orch', **kwargs): - if option == 'contrail': - return super(OpenstackOrchestrator, self).get_security_group_rules(sg_id=sg_id, **kwargs) - sg_info = self.quantum_h.show_security_group(sg_id) - return sg_info['security_group']['security_group_rules'] - - def delete_security_group_rules(self, sg_id, option='orch', **kwargs): - if option == 'contrail': - return super(OpenstackOrchestrator, self).delete_security_group_rules(sg_id=sg_id, **kwargs) - rules = self.quantum_h.list_security_group_rules(tenant_id=self.quantum_h.project_id) - for rule in rules['security_group_rules']: - if rule['security_group_id'] == sg_id: - self.quantum_h.delete_security_group_rule(rule['id']) - - def set_security_group_rules(self, sg_id, sg_entries, option='orch', **kwargs): - if option == 'contrail': - return super(OpenstackOrchestrator, self).set_security_group_rules(sg_id=sg_id, sg_entries=sg_entries, **kwargs) - self.delete_security_group_rules(sg_id, option=option, **kwargs) - return self._create_rules_in_quantum(sg_id, sg_entries) - - 
def _create_rules_in_quantum(self, sg_id, secgrp_rules): - ret = False - for rule in secgrp_rules: - remote_group_id=None;remote_ip_prefix=None - if rule['protocol'] == 'any': - proto = None - else: - proto = rule['protocol'] - if rule['src_addresses'][0].has_key('security_group'): - if rule['src_addresses'][0]['security_group'] == 'local': - direction = 'egress' - port_range_min = rule['src_ports'][0]['start_port'] - port_range_max = rule['src_ports'][0]['end_port'] - else: - if rule['dst_addresses'][0]['security_group'] != None: - remote_group_id = self.get_security_group(sg_id=rule['src_addresses'][0]['security_group'].split(':')).uuid - if rule['dst_addresses'][0].has_key('security_group'): - if rule['dst_addresses'][0]['security_group'] == 'local': - direction = 'ingress' - port_range_min = rule['dst_ports'][0]['start_port'] - port_range_max = rule['dst_ports'][0]['end_port'] - else: - if rule['dst_addresses'][0]['security_group'] != None: - remote_group_id = self.get_security_group(sg_id=rule['dst_addresses'][0]['security_group'].split(':')).uuid - if (port_range_min == 0 and port_range_max == -1) \ - or (port_range_min == 0 and port_range_max == 65535): - port_range_min = None;port_range_max = None - if direction == 'ingress': - try: - for addr in rule['src_addresses']: - if addr.has_key('subnet') and addr['subnet'] != None: - remote_ip_prefix = addr['subnet']['ip_prefix'] + '/' + str(addr['subnet']['ip_prefix_len']) - ret = self.quantum_h.create_security_group_rule( - sg_id,direction=direction, - port_range_min=port_range_min, - port_range_max=port_range_max, - protocol=proto, - remote_group_id=remote_group_id, - remote_ip_prefix=remote_ip_prefix) - except: - self.logger.error("error while creating sg rule through quantum") - return False - if direction == 'egress': - try: - for addr in rule['dst_addresses']: - if addr.has_key('subnet') and addr['subnet'] != None: - remote_ip_prefix = addr['subnet']['ip_prefix'] + '/' + str(addr['subnet']['ip_prefix_len']) 
- ret = self.quantum_h.create_security_group_rule( - sg_id,direction=direction, - port_range_min=port_range_min, - port_range_max=port_range_max, - protocol=proto, - remote_group_id=remote_group_id, - remote_ip_prefix=remote_ip_prefix) - except: - self.logger.error("error while creating sg rule through quantum") - return False - #when remote is security group - if remote_group_id: - if not self.quantum_h.create_security_group_rule( - sg_id,direction=direction, - port_range_min=port_range_min, - port_range_max=port_range_max, - protocol=proto, - remote_group_id=remote_group_id, - remote_ip_prefix=remote_ip_prefix): - return False - return ret - -class OpenstackAuth(OrchestratorAuth): - - def __init__(self, user, passwd, project_name, - inputs=None, logger=None, auth_url=None): - self.inputs = inputs - self.user = user - self.passwd = passwd - self.project = project_name - self.logger = logger or logging.getLogger(__name__) - self.insecure = bool(os.getenv('OS_INSECURE',True)) - if inputs: - self.auth_url = 'http://%s:5000/v2.0' % (self.inputs.openstack_ip) - else: - self.auth_url = auth_url or os.getenv('OS_AUTH_URL') - self.reauth() - - def reauth(self): - self.keystone = KeystoneCommands(username=self.user, - password=self.passwd, - tenant=self.project, - auth_url=self.auth_url, - insecure=self.insecure) - - def get_project_id(self, name=None): - if not name or name == self.project: - return self.keystone.get_id() - return self.keystone.get_project_id(name) - - def create_project(self, name): - return self.keystone.create_project(name) - - def delete_project(self, name): - self.keystone.delete_project(name) - - def delete_user(self, user): - self.keystone.delete_user(user) - - def create_user(self, user, password): - try: - self.keystone.create_user(user,password,email='', - tenant_name=self.inputs.stack_tenant,enabled=True) - except: - self.logger.info("%s user already created"%(self.user)) - - def add_user_to_project(self, user, project, role='admin'): - try: - 
self.keystone.add_user_to_tenant(project, user, role) - except Exception as e: - self.logger.info("%s user already added to project"%(user)) - - def verify_service_enabled(self, service): - try: - for svc in self.keystone.services_list(): - if service in svc.name: - return True - else: - continue - return False - except Exception as e: - return False - - def get_auth_h(self): - return self.keystone diff --git a/fixtures/orchestrator.py b/fixtures/orchestrator.py deleted file mode 100644 index 536c852ba..000000000 --- a/fixtures/orchestrator.py +++ /dev/null @@ -1,205 +0,0 @@ -from abc import ABCMeta, abstractmethod - -class Orchestrator: - """Base class for orchestrator.""" - - __metaclass__ = ABCMeta - - def is_feature_supported(self, feature): - return True - - @abstractmethod - def get_image_account(self, image_name): - '''Returns username, password for the image.''' - pass - - @abstractmethod - def get_image_name_for_zone(self, image_name='ubuntu', zone='nova'): - '''Get image name compatible with zone ''' - pass - - @abstractmethod - def get_hosts(self, zone=None): - '''Returns a list of computes.''' - pass - - @abstractmethod - def get_zones(self): - '''Returns a list of zones/clusters into which computes are grouped.''' - pass - - @abstractmethod - def create_vm(self, vm_name, image_name, vn_objs, count=1, zone=None, node_name=None, **kwargs): - '''Returns a list of VM objects else None.''' - pass - - @abstractmethod - def delete_vm(self, vm_obj, **kwargs): - pass - - @abstractmethod - def get_host_of_vm(self, vm_obj, **kwargs): - '''Returns name of the compute, on which the VM was created.''' - pass - - @abstractmethod - def get_networks_of_vm(self, vm_obj, **kwargs): - '''Returns names of the networks, associated with the VM.''' - pass - - @abstractmethod - def get_vm_if_present(self, vm_name, **kwargs): - pass - - @abstractmethod - def get_vm_by_id(self, vm_id, **kwargs): - pass - - @abstractmethod - def get_vm_list(self, name_pattern='', **kwargs): - 
'''Returns a list of VM object matching pattern.''' - pass - - @abstractmethod - def get_vn_list(self, **kwargs): - '''Returns a list of VM object matching pattern.''' - pass - - @abstractmethod - def get_vm_detail(self, vm_obj, **kwargs): - '''Refreshes VM object.''' - pass - - @abstractmethod - def get_vm_ip(self, vm_obj, vn_name, **kwargs): - '''Returns a list of IP of VM in VN.''' - pass - - @abstractmethod - def is_vm_deleted(self, vm_obj, **kwargs): - pass - - @abstractmethod - def wait_till_vm_is_active(self, vm_obj, **kwargs): - pass - - @abstractmethod - def wait_till_vm_status(self, vm_obj, status, **kwargs): - pass - - @abstractmethod - def get_console_output(self, vm_obj, **kwargs): - pass - - @abstractmethod - def get_key_file(self): - '''Returns the key file path.''' - pass - - @abstractmethod - def put_key_file_to_host(self, host_ip): - '''Copy RSA key to host.''' - pass - - @abstractmethod - def create_vn(self, vn_name, subnets, **kwargs): - pass - - @abstractmethod - def delete_vn(self, vn_obj, **kwargs): - pass - - @abstractmethod - def get_vn_obj_if_present(self, vn_name, **kwargs): - pass - - @abstractmethod - def get_vn_name(self, vn_obj, **kwargs): - pass - - @abstractmethod - def get_vn_id(self, vn_obj, **kwargs): - pass - - @abstractmethod - def get_policy(self, fq_name, **kwargs): - pass - - @abstractmethod - def get_floating_ip(self, fip_id, **kwargs): - pass - - @abstractmethod - def create_floating_ip(self, pool_vn_id, pool_obj, project_obj, **kwargs): - pass - - @abstractmethod - def delete_floating_ip(self, fip_id, **kwargs): - pass - - @abstractmethod - def assoc_floating_ip(self, fip_id, vm_id, **kwargs): - pass - - @abstractmethod - def disassoc_floating_ip(self, fip_id, **kwargs): - pass - - @abstractmethod - def add_security_group(self, vm_id, sg_id, **kwargs): - pass - - @abstractmethod - def remove_security_group(self, vm_id, sg_id, **kwargs): - pass - - @abstractmethod - def create_security_group(self, sg_name, parent_fqname, 
sg_entries, **kwargs): - pass - - @abstractmethod - def delete_security_group(self, sg_id, **kwargs): - pass - - @abstractmethod - def get_security_group_rules(self, sg_id, **kwargs): - pass - - @abstractmethod - def delete_security_group_rules(self, sg_id, **kwargs): - pass - - @abstractmethod - def set_security_group_rules(self, sg_id, **kwargs): - pass - -class OrchestratorAuth: - __metaclass__ = ABCMeta - - @abstractmethod - def reauth(self): - pass - - @abstractmethod - def get_project_id(self, name=None): - pass - - @abstractmethod - def create_project(self, name): - pass - - @abstractmethod - def delete_project(self, name): - pass - - @abstractmethod - def create_user(self, user, passwd): - pass - - @abstractmethod - def delete_user(self, user): - pass - - @abstractmethod - def add_user_to_project(self, user, project): - pass diff --git a/fixtures/physical_device_fixture.py b/fixtures/physical_device_fixture.py deleted file mode 100644 index 7ed8041fb..000000000 --- a/fixtures/physical_device_fixture.py +++ /dev/null @@ -1,177 +0,0 @@ -from netaddr import * - -import vnc_api_test -from pif_fixture import PhysicalInterfaceFixture -from common.device_connection import ConnectionFactory - - -class PhysicalDeviceFixture(vnc_api_test.VncLibFixture): - - '''Fixture to manage Physical device objects - - Mandatory: - :param name : name of the device - :param mgmt_ip : Management IP - - Optional: - :param vendor : juniper - :param model : mx - :param asn : default is 64512 - :param ssh_username : Login username to ssh, default is root - :param ssh_password : Login password, default is Embe1mpls - :param tunnel_ip : Tunnel IP (for vtep) - :param ports : List of Ports which are available to use - - Inherited optional parameters: - :param domain : default is default-domain - :param project_name : default is admin - :param cfgm_ip : default is 127.0.0.1 - :param api_port : default is 8082 - :param connections : ContrailConnections object. 
default is None - :param username : default is admin - :param password : default is contrail123 - :param auth_server_ip : default is 127.0.0.1 - :param logger : logger object - ''' - - def __init__(self, *args, **kwargs): - super(PhysicalDeviceFixture, self).__init__(*args, **kwargs) - self.name = args[0] - self.mgmt_ip = args[1] - self.vendor = kwargs.get('vendor', None) - self.model = kwargs.get('model', None) - self.asn = kwargs.get('asn', '64512') - self.ssh_username = kwargs.get('ssh_username', 'root') - self.ssh_password = kwargs.get('ssh_password', 'Embe1mpls') - self.tunnel_ip = kwargs.get('tunnel_ip', self.mgmt_ip) - self.ports = kwargs.get('ports', []) - self.device_details = {} - - self.phy_device = None - self.nc_handle = None - - self.already_present = False - self.physical_port_fixtures = {} - - # end __init__ - - def _get_ip_fabric_ri_obj(self): - rt_inst_obj = self.vnc_api_h.routing_instance_read( - fq_name=['default-domain', 'default-project', - 'ip-fabric', '__default__']) - return rt_inst_obj - - # end _get_ip_fabric_ri_obj - - def create_physical_device(self): - pr = vnc_api_test.PhysicalRouter(self.name) - pr.physical_router_management_ip = self.mgmt_ip - pr.physical_router_dataplane_ip = self.tunnel_ip - pr.physical_router_vendor_name = self.vendor - pr.physical_router_product_name = self.model - pr.physical_router_vnc_managed = True - uc = vnc_api_test.UserCredentials(self.ssh_username, self.ssh_password) - pr.set_physical_router_user_credentials(uc) - pr_id = self.vnc_api_h.physical_router_create(pr) - self.logger.info('Created Physical device %s with ID %s' % ( - pr.fq_name, pr.uuid)) - return pr - - def delete_device(self): - self.vnc_api_h.physical_router_delete(id=self.phy_device.uuid) - self.logger.info('Deleted physical device : %s, UUID %s' % - (self.phy_device.fq_name, self.phy_device.uuid)) - - def setUp(self): - super(PhysicalDeviceFixture, self).setUp() - pr_fq_name = ['default-global-system-config', self.name] - try: - 
self.phy_device = self.vnc_api_h.physical_router_read( - fq_name=pr_fq_name) - self.already_present = True - self.logger.info('Physical device %s already present' % ( - pr_fq_name)) - except vnc_api_test.NoIdError: - self.phy_device = self.create_physical_device() - if self.inputs: - self.device_details = self.get_device_details( - self.inputs.physical_routers_data) - - def get_device_details(self, physical_routers_data): - ''' - Returns the device dict of the ToR - ''' - for (device_name, device_dict) in physical_routers_data.iteritems(): - if device_name == self.name: - return device_dict - # end get_device_details - - def setup_physical_ports(self): - self.physical_port_fixtures = self.add_physical_ports() - self.addCleanup(self.delete_physical_ports) - - def cleanUp(self): - super(PhysicalDeviceFixture, self).cleanUp() - do_cleanup = True - if self.already_present: - do_cleanup = False - self.logger.info('Skipping deletion of device %s' % ( - self.phy_device.fq_name)) - if do_cleanup: - self.delete_device() - - def add_virtual_network(self, vn_id): - self.logger.debug('Adding VN %s to physical device %s' % ( - vn_id, self.name)) - self.phy_device = self.vnc_api_h.physical_router_read( - id=self.phy_device.uuid) - vn_obj = self.vnc_api_h.virtual_network_read(id=vn_id) - self.phy_device.add_virtual_network(vn_obj) - self.vnc_api_h.physical_router_update(self.phy_device) - - def delete_virtual_network(self, vn_id): - self.logger.debug('Removing VN %s from physical device %s' % ( - vn_id, self.name)) - self.phy_device = self.vnc_api_h.physical_router_read( - id=self.phy_device.uuid) - vn_ref_list = [] - for x in self.phy_device.get_virtual_network_refs(): - if not x['uuid'] == vn_id: - vn_ref_list.append(x) - self.phy_device.set_virtual_network_list(vn_ref_list) - self.vnc_api_h.physical_router_update(self.phy_device) - - def add_physical_port(self, port_name): - pif_fixture = PhysicalInterfaceFixture(port_name, - device_id=self.phy_device.uuid, - 
connections=self.connections) - pif_fixture.setUp() - return pif_fixture - - def delete_physical_port(self, port_name): - self.physical_port_fixtures[port_name].cleanUp() - - def delete_physical_ports(self): - for port in self.ports: - self.delete_physical_port(port) - - def add_physical_ports(self): - physical_port_fixtures = {} - for port in self.ports: - physical_port_fixtures[port] = self.add_physical_port(port) - return physical_port_fixtures - # end add_physical_ports - - def get_connection_obj(self, *args, **kwargs): - self.conn_obj = ConnectionFactory.get_connection_obj( - *args, **kwargs) - self.conn_obj.connect() - return self.conn_obj - # end get_connection_obj - - -# end PhysicalDeviceFixture - -if __name__ == "__main__": - import pdb - pdb.set_trace() diff --git a/fixtures/physical_router_fixture.py b/fixtures/physical_router_fixture.py deleted file mode 100644 index daaa0ab05..000000000 --- a/fixtures/physical_router_fixture.py +++ /dev/null @@ -1,122 +0,0 @@ -from netaddr import * - -import vnc_api_test -from pif_fixture import PhysicalInterfaceFixture -from physical_device_fixture import PhysicalDeviceFixture - -class PhysicalRouterFixture(PhysicalDeviceFixture): - - '''Fixture to manage Physical Router objects - - Mandatory: - :param name : name of the device - :param mgmt_ip : Management IP - - Optional: - :param vendor : juniper - :param model : mx - :param asn : default is 64512 - :param ssh_username : Login username to ssh, default is root - :param ssh_password : Login password, default is Embe1mpls - :param :tunnel_ip : Tunnel IP (for vtep) - :ports : List of Ports which are available to use - - Inherited optional parameters: - :param domain : default is default-domain - :param project_name : default is admin - :param cfgm_ip : default is 127.0.0.1 - :param api_port : default is 8082 - :param connections : ContrailConnections object. 
default is None - :param username : default is admin - :param password : default is contrail123 - :param auth_server_ip : default is 127.0.0.1 - ''' - - def __init__(self, *args, **kwargs): - super(PhysicalRouterFixture, self).__init__(self, *args, **kwargs) - self.name = args[0] - self.mgmt_ip = args[1] - self.vendor = kwargs.get('vendor', 'juniper') - self.model = kwargs.get('model','mx') - self.asn = kwargs.get('asn','64512') - self.tunnel_ip = kwargs.get('tunnel_ip', self.mgmt_ip) - self.ports = kwargs.get('ports', []) - - self.bgp_router = None - self.bgp_router_already_present = False - - # end __init__ - - def create_bgp_router(self): - bgp_router = vnc_api_test.BgpRouter(self.name, parent_obj=self._get_ip_fabric_ri_obj()) - params = vnc_api_test.BgpRouterParams() - params.address = self.tunnel_ip - params.address_families = vnc_api_test.AddressFamilies(['route-target', - 'inet-vpn', 'e-vpn', 'inet6-vpn']) - params.autonomous_system = int(self.asn) - params.vendor = self.vendor - params.identifier = self.mgmt_ip - bgp_router.set_bgp_router_parameters(params) - bgp_router_id = self.vnc_api_h.bgp_router_create(bgp_router) - bgp_router_obj = self.vnc_api_h.bgp_router_read(id=bgp_router_id) - self.logger.info('Created BGP router %s with ID %s' % ( - bgp_router_obj.fq_name, bgp_router_obj.uuid)) - return bgp_router_obj - # end create_bgp_router - - def delete_bgp_router(self): - self.vnc_api_h.bgp_router_delete(id=self.bgp_router.uuid) - self.logger.info('Deleted BGP router : %s' % (self.bgp_router.uuid)) - - def add_bgp_router(self, bgp_router): - self.phy_device = self.vnc_api_h.physical_router_read(id=self.phy_device.uuid) - self.phy_device.add_bgp_router(bgp_router) - self.vnc_api_h.physical_router_update(self.phy_device) - - def delete_device(self): - self.phy_device = self.vnc_api_h.physical_router_read(id=self.phy_device.uuid) - self.phy_device.del_bgp_router(self.bgp_router) - self.vnc_api_h.physical_router_update(self.phy_device) - - 
super(PhysicalRouterFixture, self).delete_device(self) - - def setUp(self): - super(PhysicalRouterFixture, self).setUp() - - bgp_fq_name = ['default-domain', 'default-project', - 'ip-fabric', '__default__', self.name] - try: - self.bgp_router = self.vnc_api_h.bgp_router_read( - fq_name=bgp_fq_name) - self.already_present = True - self.logger.info('BGP router %s already present' % ( - bgp_fq_name)) - self.bgp_router_already_present = True - except vnc_api_test.NoIdError: - self.bgp_router = self.create_bgp_router() - - self.add_bgp_router(self.bgp_router) - self.router_session = self.get_connection_obj(self.vendor, - host=self.mgmt_ip, - username=self.ssh_username, - password=self.ssh_password, - logger=self.logger) - - def cleanUp(self): - super(PhysicalRouterFixture, self).cleanUp() - do_cleanup = True - if self.bgp_router_already_present: - do_cleanup = False - if do_cleanup: - self.delete_bgp_router() - - def get_irb_mac(self): - return self.router_session.get_mac_address('irb') - - def get_virtual_gateway_mac(self, ip_address): - return self.router_session.get_mac_in_arp_table(ip_address) - -# end PhysicalRouterFixture - -if __name__ == "__main__": - pass diff --git a/fixtures/pif_fixture.py b/fixtures/pif_fixture.py deleted file mode 100644 index 09480ba2e..000000000 --- a/fixtures/pif_fixture.py +++ /dev/null @@ -1,97 +0,0 @@ -import vnc_api_test - -class PhysicalInterfaceFixture(vnc_api_test.VncLibFixture): - - '''Fixture to handle Physical Interface object in - a phyiscal device - - Mandatory: - :param name : name of the physical interface - :param device_id : UUID of physical device - One of device_obj and device_id is mandatory - :param device_obj : PhysicalRouter object which would be - the parent_obj of this intf - One of device_obj and device_id is mandatory - - Inherited optional parameters: - :param domain : default is default-domain - :param project_name : default is admin - :param cfgm_ip : default is 127.0.0.1 - :param api_port : default is 8082 - 
:param connections : ContrailConnections object. default is None - :param username : default is admin - :param password : default is contrail123 - :param auth_server_ip : default is 127.0.0.1 - - ''' - - def __init__(self, *args, **kwargs): - super(PhysicalInterfaceFixture, self).__init__(self, *args, **kwargs) - self.name = args[0] - self.device_id = kwargs.get('device_id', None) - self.device_obj = kwargs.get('device_obj', None) - if not (self.device_obj or self.device_id): - raise TypeError('One of device_id or device_obj is mandatory') - - self.already_present = False - - self.vn_obj = None - # end __init__ - - def setUp(self): - super(PhysicalInterfaceFixture, self).setUp() - if self.device_id: - self.device_obj = self.vnc_api_h.physical_router_read( - id=self.device_id) - else: - self.device_id = self.device_obj.uuid - self.device_name = self.device_obj.fq_name[-1] - self.fq_name = self.device_obj.fq_name[:] - self.fq_name.append(self.name) - try: - self.obj = self.vnc_api_h.physical_interface_read( - fq_name=self.fq_name) - self.already_present = True - self.uuid = self.obj.uuid - self.logger.debug('Physical port %s is already present' % ( - self.name)) - except vnc_api_test.NoIdError: - self.create_pif() - - # end setUp - - def create_pif(self): - self.logger.info('Creating physical port %s:' % ( - self.fq_name)) - pif_obj = vnc_api_test.PhysicalInterface(name=self.name, - parent_obj=self.device_obj, - display_name=self.name) - self.uuid = self.vnc_api_h.physical_interface_create(pif_obj) - self.obj = self.vnc_api_h.physical_interface_read(id=self.uuid) - # end create_pif - - - def cleanUp(self): - super(PhysicalInterfaceFixture, self).cleanUp() - do_cleanup = True - if self.already_present: - do_cleanup = False - self.logger.debug('Skipping deletion of physical port %s :' % ( - self.fq_name)) - if do_cleanup: - self.delete_pif() - # end cleanUp - - def delete_pif(self): - self.logger.info('Deleting physical port %s:' % ( - self.fq_name)) - 
self.vnc_api_h.physical_interface_delete(id=self.uuid) - # end delete_pif - -# end PhysicalInterfaceFixture - -if __name__ == "__main__": - device_id = 'e122f6b2-5d5c-4f2e-b665-d69dba447bdf' - pif_obj = PhysicalInterfaceFixture(name='ge-0/0/0', device_id=device_id) - pif_obj.setUp() - pif_obj.cleanUp() diff --git a/fixtures/policy_test.py b/fixtures/policy_test.py deleted file mode 100644 index e3ca543e0..000000000 --- a/fixtures/policy_test.py +++ /dev/null @@ -1,1166 +0,0 @@ -import fixtures -import re -from project_test import * -from tcutils.util import * -import json -from vnc_api.vnc_api import * -from contrail_fixtures import * -import copy -from tcutils.agent.vna_introspect_utils import * -from common.policy import policy_test_utils -import inspect -try: - from webui_test import * -except ImportError: - pass - -#@contrail_fix_ext () - - -class PolicyFixture(fixtures.Fixture): - - def __init__(self, policy_name, rules_list, inputs, connections, api=None, - project_fixture= None): - self.inputs = inputs - self.rules_list = rules_list - self.project_fq_name = self.inputs.project_fq_name - self.connections = connections - self.agent_inspect = self.connections.agent_inspect - self.cn_inspect = self.connections.cn_inspect - self.quantum_h = self.connections.quantum_h - self.api_s_inspect = self.connections.api_server_inspect - self.vnc_lib = self.connections.vnc_lib - self.policy_name = policy_name - self.policy_obj = None - self.logger = self.inputs.logger - self.already_present = False - self.verify_is_run = False - self.project_name = self.inputs.project_name - self.api_flag = api - if self.inputs.orchestrator == 'vcenter': - self.api_flag = True - if self.inputs.verify_thru_gui(): - self.browser = self.connections.browser - self.browser_openstack = self.connections.browser_openstack - self.webui = WebuiTest(self.connections, self.inputs) - self.project_fixture= project_fixture - if self.project_fixture: - self.project_fq_name = 
self.project_fixture.project_fq_name - self.project_name = self.project_fixture.project_name - # end __init__ - - def setUp(self): - super(PolicyFixture, self).setUp() - if self.api_flag is None: - self.policy_obj = self.quantum_h.get_policy_if_present( - self.project_name, self.policy_name) - if not self.policy_obj: - if self.inputs.is_gui_based_config(): - self.webui.create_policy(self) - else: - self._create_policy(self.policy_name, self.rules_list) - else: - self.already_present = True - self.logger.info( - 'Policy %s already present, not creating any policy' % - (self.policy_name)) - - self.policy_fq_name = self.quantum_h.get_policy_fq_name( - self.policy_obj) - else: - try: - self.policy_obj = self.vnc_lib.network_policy_read(fq_name=self.project_fq_name+[unicode(self.policy_name)]) - except: - self.policy_fq_name = self._create_policy_api(self.policy_name, self.rules_list) - else: - self.already_present = True - self.policy_fq_name=self.policy_obj.fq_name - self.logger.info( - 'Policy %s already present, not creating any policy' % - (self.policy_name)) - # end setUp - - def verify_on_setup(self): - # verifications return {'result': result, 'msg': err_msg} - result = True - err_msg = [] - ret = self.verify_policy_in_api_server() - if ret['result'] == False: - err_msg.append(ret['msg']) - ret = self.verify_policy_in_control_nodes() - if ret['result'] == False: - err_msg.append(ret['msg']) - - if err_msg != []: - result = False - self.verify_is_run = True - return {'result': result, 'msg': err_msg} - # end verify_on_setup - - def _create_policy(self, policy_name, rules_list): - ''' Create a policy from the supplied rules - Sample rules_list: - src_ports and dst_ports : can be 'any'/tuple/list as shown below - protocol : 'any' or a string representing a protocol number : ICMP(1), TCP(6), UDP(17) - simple_action : pass/deny - source_network/dest_network : VN name - rules= [ - { - 'direction' : '<>', 'simple_action' : 'pass', - 'protocol' : 'any', - 
'source_network': vn1_name, - 'src_ports' : 'any', - 'src_ports' : (10,100), - 'dest_network' : vn1_name, - 'dst_ports' : [100,10], - }, - { - 'direction' : '<>', - 'simple_action' : 'pass', 'protocol' : '1', - 'source_network': vn1_name, 'src_ports' : (10,100), - 'dest_network' : vn1_name, 'dst_ports' : [100,10], - } - ] - ''' - def serialize(obj): - return_dict = {} - for k, v in obj.__dict__.iteritems(): - return_dict[k] = v - return return_dict - np_rules = [] - for rule_dict in rules_list: - source_vn = None - dest_vn = None - source_policy = None - dest_policy = None - source_subnet_dict = None - dest_subnet_dict = None - - new_rule = { - 'direction': '<>', - 'simple_action': 'pass', - 'protocol': 'any', - 'source_network': None, - 'source_policy': None, - 'source_subnet': None, - 'src_ports': [PortType(-1, -1)], - 'application': None, - 'dest_network': None, - 'dest_policy': None, - 'dest_subnet': None, - 'dst_ports': [PortType(-1, -1)], - 'action_list': {}, - } - for key in rule_dict: - new_rule[key] = rule_dict[key] - # end for - new_rule['action_list'][ - 'simple_action'] = rule_dict['simple_action'] - # Format Source ports - if 'src_ports' in rule_dict: - if type(rule_dict['src_ports']) is tuple or type(rule_dict['src_ports']) is list: - new_rule['src_ports'] = [ - PortType(rule_dict['src_ports'][0], rule_dict['src_ports'][1])] - elif rule_dict['src_ports'] == 'any': - new_rule['src_ports'] = [PortType(-1, -1)] - else: - self.logger.error( - "Error in Source ports arguments, should be (Start port, end port) or any ") - return None - # Format Dest ports - if 'dst_ports' in rule_dict: - if 'dst_ports' in rule_dict and type(rule_dict['dst_ports']) is tuple or type(rule_dict['dst_ports']) is list: - new_rule['dst_ports'] = [ - PortType(rule_dict['dst_ports'][0], rule_dict['dst_ports'][1])] - elif rule_dict['dst_ports'] == 'any': - new_rule['dst_ports'] = [PortType(-1, -1)] - else: - self.logger.error( - "Error in Destination ports arguments, should be (Start 
port, end port) or any ") - return None - if new_rule['source_network'] is not None: - m = re.match(r"(\S+):(\S+):(\S+)", new_rule['source_network']) - if m: - source_vn = new_rule['source_network'] - else: - source_vn = ':'.join(self.project_fq_name) + \ - ':' + new_rule['source_network'] - if new_rule['dest_network'] is not None: - m = re.match(r"(\S+):(\S+):(\S+)", new_rule['dest_network']) - if m: - dest_vn = new_rule['dest_network'] - else: - dest_vn = ':'.join(self.project_fq_name) + \ - ':' + new_rule['dest_network'] - if new_rule['source_policy'] is not None: - m = re.match(r"(\S+):(\S+):(\S+)", new_rule['source_policy']) - if m: - source_policy = new_rule['source_policy'] - else: - source_policy = ':'.join(self.project_fq_name) + \ - ':' + new_rule['source_policy'] - if new_rule['dest_policy'] is not None: - m = re.match(r"(\S+):(\S+):(\S+)", new_rule['dest_policy']) - if m: - dest_policy = new_rule['dest_policy'] - else: - dest_policy = ':'.join(self.project_fq_name) + \ - ':' + new_rule['dest_policy'] - if new_rule['source_subnet'] is not None: - try: - source_subnet_prefix = str(new_rule['source_subnet'].split('/')[0]) - source_subnet_prefix_length = int(new_rule['source_subnet'].split('/')[1]) - source_subnet_dict = {'ip_prefix':source_subnet_prefix, - 'ip_prefix_len':source_subnet_prefix_length} - except: - self.logger.debug("Subnet should be defined as ip/prefix_length \ - where ip = xx.xx.xx.xx and prefix_length is the subnet mask \ - length.") - if new_rule['dest_subnet'] is not None: - try: - dest_subnet_prefix = str(new_rule['dest_subnet'].split('/')[0]) - dest_subnet_prefix_length = int(new_rule['dest_subnet'].split('/')[1]) - dest_subnet_dict = {'ip_prefix':dest_subnet_prefix, - 'ip_prefix_len':dest_subnet_prefix_length} - except: - self.logger.debug("Subnet should be defined as ip/prefix_length \ - where ip = xx.xx.xx.xx and prefix_length is the subnet mask \ - length.") - - # handle 'any' network case - try: - if rule_dict['source_network'] 
== 'any': - source_vn = 'any' - except: - self.logger.debug("No source network defined") - try: - if rule_dict['dest_network'] == 'any': - dest_vn = 'any' - except: - self.logger.debug("No destination network defined") - # end code to handle 'any' network - try: - if source_vn: - new_rule['source_network'] = [ - AddressType(virtual_network=source_vn)] - src_address = new_rule['source_network'] - except: - self.logger.debug("No source vn defined in this rule of %s \ - policy" % (policy_name)) - try: - if dest_vn: - new_rule['dest_network'] = [ - AddressType(virtual_network=dest_vn)] - dest_address = new_rule['dest_network'] - except: - self.logger.debug("No dest vn defined in this rule of %s \ - policy" % (policy_name)) - try: - if source_policy: - new_rule['source_policy'] = [ - AddressType(network_policy=source_policy)] - src_address = new_rule['source_policy'] - except: - self.logger.debug("No source policy defined in this rule of %s \ - policy" % (policy_name)) - try: - if dest_policy: - new_rule['dest_policy'] = [ - AddressType(network_policy=dest_policy)] - dest_address = new_rule['dest_policy'] - except: - self.logger.debug("No dest policy defined in this rule of %s \ - policy" % (policy_name)) - try: - if source_subnet_dict: - new_rule['source_subnet'] = [ - AddressType(subnet=source_subnet_dict)] - src_address = new_rule['source_subnet'] - except: - self.logger.debug("No source subnet defined in this rule of %s \ - policy" % (policy_name)) - try: - if dest_subnet_dict: - new_rule['dest_subnet'] = [ - AddressType(subnet=dest_subnet_dict)] - dest_address = new_rule['dest_subnet'] - except: - self.logger.debug("No destination subnet defined in this rule of %s \ - policy" % (policy_name)) - - np_rules.append(PolicyRuleType(direction=new_rule['direction'], - protocol=new_rule['protocol'], - src_addresses=src_address, - src_ports=new_rule['src_ports'], - application=new_rule[ - 'application'], - dst_addresses=dest_address, - dst_ports=new_rule['dst_ports'], - 
action_list=new_rule['action_list'])) - # end for - self.logger.debug("Policy np_rules : %s" % (np_rules)) - pol_entries = PolicyEntriesType(np_rules) - pol_entries_dict = \ - json.loads(json.dumps(pol_entries, - default=serialize)) - policy_req = {'name': policy_name, - 'entries': pol_entries_dict} - policy_rsp = self.quantum_h.create_policy({'policy': policy_req}) - self.logger.debug("Policy Creation Response " + str(policy_rsp)) - self.policy_obj = policy_rsp - return policy_rsp - # end _create_policy - - def _create_policy_api(self, policy_name, rules_list): - ''' Create a policy from the supplied rules - Sample rules_list: - src_ports and dst_ports : can be 'any'/tuple/list as shown below - protocol : 'any' or a string representing a protocol number : ICMP(1), TCP(6), UDP(17) - simple_action : pass/deny - source_network/dest_network : VN name - rules= [ - { - 'direction' : '<>', 'simple_action' : 'pass', - 'protocol' : 'any', - 'source_network': vn1_name, - 'src_ports' : 'any', - 'src_ports' : (10,100), - 'dest_network' : vn1_name, - 'dst_ports' : [100,10], - }, - { - 'direction' : '<>', - 'simple_action' : 'pass', 'protocol' : 'icmp', - 'source_network': vn1_name, 'src_ports' : (10,100), - 'dest_network' : vn1_name, 'dst_ports' : [100,10], - } - ] - ''' - np_rules = [] - for rule_dict in rules_list: - new_rule = { - 'direction': '<>', - 'simple_action': 'pass', - 'protocol': 'any', - 'source_network': None, - 'source_policy': None, - 'source_subnet': None, - 'src_ports': [PortType(-1, -1)], - 'application': None, - 'dest_network': None, - 'dest_policy': None, - 'dest_subnet': None, - 'dst_ports': [PortType(-1, -1)], - 'action_list': None - } - for key in rule_dict: - new_rule[key] = rule_dict[key] - # end for - # Format Source ports - if 'src_ports' in rule_dict: - if isinstance( - rule_dict['src_ports'], - tuple) or isinstance( - rule_dict['src_ports'], - list): - new_rule['src_ports'] = [ - PortType( - rule_dict['src_ports'][0], - 
rule_dict['src_ports'][1])] - elif rule_dict['src_ports'] == 'any': - new_rule['src_ports'] = [PortType(-1, -1)] - else: - self.logger.error( - "Error in Source ports arguments, should be (Start port, end port) or any ") - return None - # Format Dest ports - if 'dst_ports' in rule_dict: - if 'dst_ports' in rule_dict and isinstance( - rule_dict['dst_ports'], - tuple) or isinstance( - rule_dict['dst_ports'], - list): - new_rule['dst_ports'] = [ - PortType( - rule_dict['dst_ports'][0], - rule_dict['dst_ports'][1])] - elif rule_dict['dst_ports'] == 'any': - new_rule['dst_ports'] = [PortType(-1, -1)] - else: - self.logger.error( - "Error in Destination ports arguments, should be (Start port, end port) or any ") - return None - - if new_rule['source_network'] is not None: - m = re.match(r"(\S+):(\S+):(\S+)", new_rule['source_network']) - if m: - source_vn = new_rule['source_network'] - else: - source_vn = ':'.join(self.project_fq_name) + \ - ':' + new_rule['source_network'] - if new_rule['dest_network'] is not None: - m = re.match(r"(\S+):(\S+):(\S+)", new_rule['dest_network']) - if m: - dest_vn = new_rule['dest_network'] - else: - dest_vn = ':'.join(self.project_fq_name) + \ - ':' + new_rule['dest_network'] - if new_rule['source_policy'] is not None: - m = re.match(r"(\S+):(\S+):(\S+)", new_rule['source_policy']) - if m: - source_policy = new_rule['source_policy'] - else: - source_policy = ':'.join(self.project_fq_name) + \ - ':' + new_rule['source_policy'] - if new_rule['dest_policy'] is not None: - m = re.match(r"(\S+):(\S+):(\S+)", new_rule['dest_policy']) - if m: - dest_policy = new_rule['dest_policy'] - else: - dest_policy = ':'.join(self.project_fq_name) + \ - ':' + new_rule['dest_policy'] - if new_rule['source_subnet'] is not None: - try: - source_subnet_prefix = str(new_rule['source_subnet'].split('/')[0]) - source_subnet_prefix_length = int(new_rule['source_subnet'].split('/')[1]) - source_subnet_dict = {'ip_prefix':source_subnet_prefix, - 
'ip_prefix_len':source_subnet_prefix_length} - except: - self.logger.debug("Subnet should be defined as ip/prefix_length \ - where ip = xx.xx.xx.xx and prefix_length is the subnet mask \ - length.") - if new_rule['dest_subnet'] is not None: - try: - dest_subnet_prefix = str(new_rule['dest_subnet'].split('/')[0]) - dest_subnet_prefix_length = int(new_rule['dest_subnet'].split('/')[1]) - dest_subnet_dict = {'ip_prefix':dest_subnet_prefix, - 'ip_prefix_len':dest_subnet_prefix_length} - except: - self.logger.debug("Subnet should be defined as ip/prefix_length \ - where ip = xx.xx.xx.xx and prefix_length is the subnet mask \ - length.") - - # handle 'any' network case - try: - if rule_dict['source_network'] == 'any': - source_vn = 'any' - except: - self.logger.debug("No source network defined") - try: - if rule_dict['dest_network'] == 'any': - dest_vn = 'any' - except: - self.logger.debug("No destination network defined") - # end code to handle 'any' network - - try: - if source_vn: - new_rule['source_network'] = [ - AddressType(virtual_network=source_vn)] - src_address = new_rule['source_network'] - except: - self.logger.debug("No source vn defined in this rule of %s \ - policy" % (policy_name)) - try: - if dest_vn: - new_rule['dest_network'] = [ - AddressType(virtual_network=dest_vn)] - dest_address = new_rule['dest_network'] - except: - self.logger.debug("No dest vn defined in this rule of %s \ - policy" % (policy_name)) - try: - if source_policy: - new_rule['source_policy'] = [ - AddressType(network_policy=source_policy)] - src_address = new_rule['source_policy'] - except: - self.logger.debug("No source policy defined in this rule of %s \ - policy" % (policy_name)) - try: - if dest_policy: - new_rule['dest_policy'] = [ - AddressType(network_policy=dest_policy)] - dest_address = new_rule['dest_policy'] - except: - self.logger.debug("No dest policy defined in this rule of %s \ - policy" % (policy_name)) - try: - if source_subnet_dict: - new_rule['source_subnet'] = [ - 
AddressType(subnet=source_subnet_dict)] - src_address = new_rule['source_subnet'] - except: - self.logger.debug("No source subnet defined in this rule of %s \ - policy" % (policy_name)) - try: - if dest_subnet_dict: - new_rule['dest_subnet'] = [ - AddressType(subnet=dest_subnet_dict)] - dest_address = new_rule['dest_subnet'] - except: - self.logger.debug("No destination subnet defined in this rule of %s \ - policy" % (policy_name)) - - np_rules.append( - PolicyRuleType(direction=new_rule['direction'], - protocol=new_rule['protocol'], - src_addresses=src_address, - src_ports=new_rule['src_ports'], - application=new_rule['application'], - dst_addresses=dest_address, - dst_ports=new_rule['dst_ports'], - action_list={'simple_action':new_rule['simple_action']})) - - # end for - self.logger.debug("Policy np_rules : %s" % (np_rules)) - pol_entries = PolicyEntriesType(np_rules) - proj = self.vnc_lib.project_read(self.project_fq_name) - self.policy_obj = NetworkPolicy( - policy_name, network_policy_entries=pol_entries, parent_obj=proj) - uid = self.vnc_lib.network_policy_create(self.policy_obj) - self.policy_obj = self.vnc_lib.network_policy_read(id=uid) - return self.policy_obj.fq_name - # end _create_policy_api - - def cleanUp(self): - super(PolicyFixture, self).cleanUp() - do_cleanup = True - if self.inputs.fixture_cleanup == 'no': - do_cleanup = False - if self.already_present: - do_cleanup = False - if self.inputs.fixture_cleanup == 'force': - do_cleanup = True - if do_cleanup: - self._delete_policy() - if self.verify_is_run: - assert self.verify_policy_not_in_api_server() - else: - self.logger.info('Skipping deletion of policy %s' % - (self.policy_name)) - # end cleanUp - - def get_id(self): - if isinstance(self.policy_obj, NetworkPolicy): - return self.policy_obj.uuid - else: - return self.policy_obj['policy']['id'] - - def _delete_policy(self): - if self.api_flag: - self.vnc_lib.network_policy_delete(id=self.policy_obj.uuid) - self.logger.info("Deleted policy %s" % 
(self.policy_name)) - return - if self.inputs.is_gui_based_config(): - self.webui.delete_policy(self) - self.logger.info("Deleted policy %s" % (self.policy_name)) - elif self.quantum_h.get_policy_if_present( - project_name=self.project_name, - policy_name=self.policy_name): - self.quantum_h.delete_policy(self.policy_obj['policy']['id']) - self.logger.info("Deleted policy %s" % (self.policy_name)) - else: - self.logger.info("No Policy present, to be deleted.") - # end _delete_policy - - def update_policy(self, policy_id, policy_data): - # policy_data format {'policy': {'entries': new_policy_entries}} - policy_rsp = self.quantum_h.update_policy(policy_id, policy_data) - self.logger.debug("Policy Update Response " + str(policy_rsp)) - self.policy_obj = policy_rsp - return policy_rsp - # end update_policy - - def tx_policy_to_vn(self, rules, vn_policy_dict): - """ - Return rules that have source and destination vn names in place of - source and destination policy. - """ - tx_rule_list = [] - src_pol = 'Null' - dest_pol = 'Null' - for rule in rules: - if ((not 'source_policy' in rule) and - (not 'dest_policy' in rule)): - tx_rule_list.append(rule) - continue - if 'source_policy' in rule: - src_pol = rule['source_policy'] - if 'dest_policy' in rule: - dest_pol = rule['dest_policy'] - src_pol_vns = [] - dest_pol_vns= [] - for each_vn in vn_policy_dict: - if src_pol in vn_policy_dict[each_vn]: - src_pol_vns.append(each_vn) - if dest_pol in vn_policy_dict[each_vn]: - dest_pol_vns.append(each_vn) - if (src_pol_vns and dest_pol_vns): - for eachvn in src_pol_vns: - new_rule = copy.deepcopy(rule) - del new_rule['source_policy'] - new_rule['source_network'] = eachvn - for eachvn2 in dest_pol_vns: - new_rule2 = copy.deepcopy(new_rule) - del new_rule2['dest_policy'] - new_rule2['dest_network'] = eachvn2 - tx_rule_list.append(new_rule) - - if (src_pol_vns and (not dest_pol_vns)): - for eachvn in src_pol_vns: - new_rule = copy.deepcopy(rule) - del new_rule['source_policy'] - 
new_rule['source_network'] = eachvn - tx_rule_list.append(new_rule) - - if (dest_pol_vns and (not src_pol_vns)): - for eachvn in dest_pol_vns: - new_rule = copy.deepcopy(rule) - del new_rule['dest_policy'] - new_rule['dest_network'] = eachvn - tx_rule_list.append(new_rule) - - return tx_rule_list - # end tx_policy_to_vn - - def tx_user_def_rule_to_aces(self, test_vn, rules): - """ - Return user defined rules to expected ACL entries, each rule as dictionary, a - list of dicts returned. - 1. translate keys rules-> ace - 2. translate 'any' value for port to range - 3. translate 'any' value for protocol to range - 4. expand bi-directional rules - 5. update 'action_l' as simple_action will not be used going forward - """ - - # step 1: key translation, update port/protocol values to system format - translator = { - 'direction': 'direction', 'simple_action': 'simple_action', - 'protocol': 'proto_l', 'source_network': 'src', 'src_ports': - 'src_port_l', 'dest_network': 'dst', 'dst_ports': 'dst_port_l'} - user_rules_tx = [] - configd_rules = len(user_rules_tx) - for rule in rules: - user_rule_tx = dict((translator[k], v) for (k, v) in rule.items()) - user_rules_tx.append(user_rule_tx) - for rule in user_rules_tx: - # port value mapping - for port in ['src_port_l', 'dst_port_l']: - if rule[port] == 'any': - rule[port] = {'max': '65535', 'min': '0'} - else: # only handling single or continuous range for port - if len(rule[port]) == 2: - rule[port] = {'max': str(rule[port][1]), - 'min': str(rule[port][0])} - else: - self.logger.error( - "user input port_list not handled by verification") - # protocol value mapping - if rule['proto_l'] == 'any': - rule['proto_l'] = {'max': '255', 'min': '0'} - else: - rule['proto_l'] = {'max': str(rule['proto_l']), - 'min': str(rule['proto_l'])} - - # step 2: expanding rules if bidir rule - final_rule_l = [] - for rule in user_rules_tx: - if rule['direction'] == '<>': - rule['direction'] = '>' - pos = user_rules_tx.index(rule) - new_rule = 
copy.deepcopy(rule) - # update newly copied rule: swap address/ports & insert - new_rule['src'], new_rule['dst'] = new_rule[ - 'dst'], new_rule['src'] - new_rule['src_port_l'], new_rule['dst_port_l'] = new_rule[ - 'dst_port_l'], new_rule['src_port_l'], - user_rules_tx.insert(pos + 1, new_rule) - - # step 3: update action - for rule in user_rules_tx: - rule['action_l'] = [rule['simple_action']] - - return user_rules_tx - - def tx_user_def_aces_to_system(self, test_vn, user_rules_tx): - '''convert ACEs derived from user rules to system format: - 1. For every user rule, add deny rule; skip adding duplicates - 2. For non-empty policy, add permit-all at the end - 3. add ace_id, rule_type - 4. Update VN to FQDN format - 5. remove direction and simple_action fields @end.. - ''' - if user_rules_tx == []: - return user_rules_tx - any_proto_port_rule = { - 'direction': '>', 'proto_l': {'max': '255', 'min': '0'}, 'src_port_l': {'max': '65535', 'min': '0'}, - 'dst_port_l': {'max': '65535', 'min': '0'}} - - # step 0: check & build allow_all for local VN if rules are defined in - # policy - test_vn_allow_all_rule = copy.copy(any_proto_port_rule) - test_vn_allow_all_rule['simple_action'] = 'pass' - test_vn_allow_all_rule['action_l'] = ['pass'] - test_vn_allow_all_rule['src'], test_vn_allow_all_rule[ - 'dst'] = test_vn, test_vn - - # check the rule for any protocol with same network exist and for deny - # rule - test_vn_deny_all_rule = copy.copy(any_proto_port_rule) - test_vn_deny_all_rule['simple_action'] = 'deny' - test_vn_deny_all_rule['action_l'] = ['deny'] - test_vn_deny_all_rule['src'], test_vn_deny_all_rule[ - 'dst'] = test_vn, test_vn - - # step 1: check & add permit-all rule for same VN but not for 'any' - # network - last_rule = copy.copy(any_proto_port_rule) - last_rule['simple_action'], last_rule['action_l'] = 'pass', ['pass'] - last_rule['src'], last_rule['dst'] = 'any', 'any' - - # check any rule exist in policy : - final_user_rule = 
self.get_any_rule_if_exist(last_rule, user_rules_tx) - - # step 2: check & add deny_all for every user-created rule - system_added_rules = [] - for rule in user_rules_tx: - pos = len(user_rules_tx) - new_rule = copy.deepcopy(rule) - new_rule['proto_l'] = {'max': '255', 'min': - '0'} - new_rule['direction'] = '>' - new_rule['src_port_l'], new_rule['dst_port_l'] = { - 'max': '65535', 'min': '0'}, {'max': '65535', 'min': '0'} - new_rule['simple_action'] = 'deny' - new_rule['action_l'] = ['deny'] - system_added_rules.append(new_rule) - - # step to check any one of the rule is any protocol and source and dst - # ntw is test vn then check for the duplicate rules - final_any_rules = self.get_any_rule_if_src_dst_same_ntw_exist( - test_vn_allow_all_rule, test_vn_deny_all_rule, user_rules_tx) - if final_any_rules: - user_rules_tx = final_any_rules - else: - pass - - # Skip adding rules if they already exist... - print json.dumps(system_added_rules, sort_keys=True) - if not policy_test_utils.check_rule_in_rules(test_vn_allow_all_rule, user_rules_tx): - user_rules_tx.append(test_vn_allow_all_rule) - for rule in system_added_rules: - if not policy_test_utils.check_rule_in_rules(rule, user_rules_tx): - user_rules_tx.append(rule) - - # step 3: check & add permit-all rule for same VN but not for 'any' - # network - last_rule = copy.copy(any_proto_port_rule) - last_rule['simple_action'], last_rule['action_l'] = 'pass', ['pass'] - last_rule['src'], last_rule['dst'] = 'any', 'any' - - # if the first rule is not 'any rule ' then append the last rule - # defined above. 
- for rule in user_rules_tx: - any_rule_flag = True - if ((rule['src'] == 'any') and (rule['dst'] == 'any')): - any_rule_flag = False - else: - pass - if any_rule_flag: - user_rules_tx.append(last_rule) - else: - pass - - # triming the duplicate rules - user_rules_tx = policy_test_utils.remove_dup_rules(user_rules_tx) - # triming the protocol with any option for rest of the fileds - tcp_any_rule = { - 'proto_l': {'max': 'tcp', 'min': 'tcp'}, 'src': 'any', 'dst': 'any', - 'src_port_l': {'max': '65535', 'min': '0'}, 'dst_port_l': {'max': '65535', 'min': '0'}} - udp_any_rule = { - 'proto_l': {'max': 'udp', 'min': 'udp'}, 'src': 'any', 'dst': 'any', - 'src_port_l': {'max': '65535', 'min': '0'}, 'dst_port_l': {'max': '65535', 'min': '0'}} - icmp_any_rule = { - 'proto_l': {'max': 'icmp', 'min': 'icmp'}, 'src': 'any', 'dst': 'any', - 'src_port_l': {'max': '65535', 'min': '0'}, 'dst_port_l': {'max': '65535', 'min': '0'}} - icmp_match, index_icmp = self.check_5tuple_in_rules( - icmp_any_rule, user_rules_tx) - tcp_match, index_tcp = self.check_5tuple_in_rules( - tcp_any_rule, user_rules_tx) - udp_match, index_udp = self.check_5tuple_in_rules( - udp_any_rule, user_rules_tx) - if icmp_match: - for rule in user_rules_tx[index_icmp + 1:len(user_rules_tx)]: - if rule['proto_l'] == {'max': 'icmp', 'min': 'icmp'}: - user_rules_tx.remove(rule) - else: - pass - if tcp_match: - for rule in user_rules_tx[index_tcp + 1:len(user_rules_tx)]: - if rule['proto_l'] == {'max': 'tcp', 'min': 'tcp'}: - user_rules_tx.remove(rule) - else: - pass - if udp_match: - for rule in user_rules_tx[index_udp + 1:len(user_rules_tx)]: - if rule['proto_l'] == {'max': 'udp', 'min': 'udp'}: - user_rules_tx.remove(rule) - else: - pass - # if any rule is exist the it will execute - if final_user_rule: - user_rules_tx = final_user_rule - else: - pass - # step 4: add ace_id, type, src to all rules - for rule in user_rules_tx: - rule['ace_id'] = str(user_rules_tx.index(rule) + 1) - # currently checking policy aces 
only - rule['rule_type'] = 'Terminal' - if rule['src'] != 'any': - m = re.match(r"(\S+):(\S+):(\S+)", rule['src']) - if not m: - rule['src'] = ':'.join( - self.project_fq_name) + ':' + rule['src'] - if rule['dst'] != 'any': - m = re.match(r"(\S+):(\S+):(\S+)", rule['dst']) - if not m: - rule['dst'] = ':'.join( - self.project_fq_name) + ':' + rule['dst'] - try: - del rule['direction'] - except: - continue - try: - del rule['simple_action'] - except: - continue - - return user_rules_tx - - # end tx_user_def_aces_to_system - - def get_any_rule_if_exist(self, all_rule, user_rules_tx): - final_rules = [] - if policy_test_utils.check_rule_in_rules(all_rule, user_rules_tx): - for rule in user_rules_tx: - if rule == all_rule: - final_rules.append(rule) - break - else: - final_rules.append(rule) - else: - pass - return final_rules - # end get_any_rule_if_exist - - def get_any_rule_if_src_dst_same_ntw_exist(self, test_vn_allow_all_rule, test_vn_deny_all_rule, user_rules_tx): - final_any_rules = [] - if (policy_test_utils.check_rule_in_rules(test_vn_allow_all_rule, user_rules_tx) or policy_test_utils.check_rule_in_rules(test_vn_deny_all_rule, user_rules_tx)): - for rule in user_rules_tx: - if ((rule == test_vn_allow_all_rule) or (rule == test_vn_deny_all_rule)): - final_any_rules.append(rule) - break - else: - final_any_rules.append(rule) - else: - pass - return final_any_rules - # end get_any_rule_if_src_dst_same_ntw_exist - - def check_5tuple_in_rules(self, rule, rules): - '''check if 5-tuple of given rule exists in given rule-set..Return True if rule exists; else False''' - #print ("check rule %s in rules" %(json.dumps(rule, sort_keys=True))) - match_keys = ['proto_l', 'src', 'dst', 'src_port_l', 'dst_port_l'] - for r in rules: - match = True - for k in match_keys: - if r[k] != rule[k]: - # print ("current rule not matching due to key %s, move on.." 
%k) - match = False - break - if match == True: - break - return (match, rules.index(r)) - # end check_5tuple_in_rules - - def verify_policy_in_vna(self, scn, policy_attch_to_vn=None): - ''' - Policies attached to VN will be pushed to VNA [in Compute node] once - a VM is spawned in a VN. - Input: Test scenario object is passed as input [defined in policy_test_input]. - Return: returns a dictionary with keys as result & msg. - For success, return is empty. - For failure, result is set to False & msg has the error info. - Steps: for each vn present in compute [vn has vm in compute] - -whats the expected policy list for the vn - -derive expected system rules for vn in vna - -get actual system rules for vn in vna - -compare - ''' - print "Starting verify_policy_in_vna" - result = True - # expected data: translate user rules to system format for verification - # Step 1: Translate user rules to ACEs - user_rules_tx = {} - if policy_attch_to_vn is None: - policy_attch_to_vn = scn.vn_policy - for policy in scn.policy_list: - flag_policy_inheritance = 0 - policy_rules = scn.rules[policy] - for rule in scn.rules[policy]: - if (('dest_policy' in rule) or - ('source_policy' in rule)): - flag_policy_inheritance = 1 - if flag_policy_inheritance == 1: - policy_rules = self.tx_policy_to_vn(scn.rules[policy], - policy_attch_to_vn) - for test_vn in scn.policy_vn[policy]: - user_rules_tx[policy] = self.tx_user_def_rule_to_aces( - test_vn, policy_rules) - - # Step 2: Aggregate rules by network - rules_by_vn = {} - for vn in scn.vnet_list: - tmp_vn_rules = [] - rules_by_vn[vn] = [] - print "vn is %s, scn.vn_policy is %s" % (vn, scn.vn_policy[vn]) - for policy in scn.vn_policy[vn]: - rules_by_vn[vn] += user_rules_tx[policy] - - # remove duplicate rules after adding policies - rules_by_vn[vn] = policy_test_utils.trim_realign_rules( - rules_by_vn[vn]) - - # Step 3: Translate user-rules-> ACEs to system format and update ACE - # IDs - for vn in scn.vnet_list: - if rules_by_vn[vn] != []: - 
rules_by_vn[vn] = self.tx_user_def_aces_to_system( - vn, rules_by_vn[vn]) - rules_by_vn[vn] = policy_test_utils.update_rule_ace_id( - rules_by_vn[vn]) - - self.logger.debug("VN: %s, expected ACE's is " % (vn)) - for r in rules_by_vn[vn]: - self.logger.info("%s" % (json.dumps(r, sort_keys=True))) - # end building VN ACE's from user rules - - # Get actual from vna in compute nodes [referred as cn] - vn_of_cn = scn.vn_of_cn # {'cn1': ['vn1', 'vn2'], 'cn2': 'vn2'} - cn_vna_rules_by_vn = {} # {'vn1':[{...}, {..}], 'vn2': [{..}]} - err_msg = {} # To capture error {compute: {vn: error_msg}} - for compNode in self.inputs.compute_ips: - self.logger.info("Verify rules expected in CN if VN-VM in CN") - self.logger.info("CN: %s, Check for expected data" % (compNode)) - inspect_h = self.agent_inspect[compNode] - vnCn = (vn for vn in vn_of_cn[compNode] if vn_of_cn[compNode]) - for vn in vnCn: - print "checking for vn %s in compute %s" % (vn, compNode) - vn_fq_name = inspect_h.get_vna_vn('default-domain', self.project_name, vn)['name'] - vna_acl = inspect_h.get_vna_acl_by_vn(vn_fq_name) - if vna_acl: - # system_rules - cn_vna_rules_by_vn[vn] = vna_acl['entries'] - else: - cn_vna_rules_by_vn[vn] = [] - # compare with test input & assert on failure - ret = policy_test_utils.compare_rules_list( - rules_by_vn[vn], cn_vna_rules_by_vn[vn]) - if ret: - result = ret['state'] - msg = ret['msg'] - err_msg[compNode] = {vn: msg} - self.logger.error("CN: %s, VN: %s, test result not expected, \ - msg: %s" % (compNode, vn, msg)) - self.logger.debug("expected rules: ") - for r in rules_by_vn[vn]: - self.logger.debug(r) - self.logger.debug("actual rules from system: ") - for r in cn_vna_rules_by_vn[vn]: - self.logger.debug(r) - else: - self.logger.info( - "CN: %s, VN: %s, result of expected rules check passed" % (compNode, vn)) - self.logger.info( - "Verify rules not expected to be in CN if no VN-VM in CN") - self.logger.info("CN: %s, Check for unexpected data" % (compNode)) - vn_not_of_cn = [] - 
skip_vn_not_of_cn = 0 - vn_not_of_cn = list(set(scn.vnet_list) - set(vn_of_cn[compNode])) - if vn_not_of_cn == []: - self.logger.info("CN: %s, no extra VN's to check" % (compNode)) - skip_vn_not_of_cn = 1 - for vn in vn_not_of_cn: - if skip_vn_not_of_cn == 1: - break - # VN & its rules should not be present in this Compute - vn_exists = inspect_h.get_vna_vn('default-domain', self.project_name, vn) - if vn_exists: - vn_fq_name = vn_exists['name'] - vna_acl = inspect_h.get_vna_acl_by_vn(vn_fq_name) - # system_rules - cn_vna_rules_by_vn[vn] = vna_acl['entries'] - result = False - msg = "CN: " + str(compNode) + ", VN: " + str(vn) + \ - " seeing unexpected rules in VNA" + \ - str(cn_vna_rules_by_vn[vn]) - err_msg[compNode] = {vn: msg} - else: - self.logger.info("CN: %s, VN: %s, result of unexpected rules check \ - passed" % (compNode, vn)) - return {'result': result, 'msg': err_msg} - # end verify_policy_in_vna - - def refresh_quantum_policy_obj(self): - # Rebuild the policy object to take care of cases where it takes time to update after instantiating the object - if self.api_flag: - return self - self.policy_obj=self.quantum_h.get_policy_if_present(self.project_name, self.policy_name) - return self - - def verify_policy_in_api_server(self): - '''Validate policy information in API-Server. Compare data with quantum based policy fixture data. 
- Check specifically for following: - api_server_keys: 1> fq_name, 2> uuid, 3> rules - quantum_h_keys: 1> policy_fq_name, 2> id in policy_obj, 3> policy_obj [for rules] - ''' - self.refresh_quantum_policy_obj() - me = inspect.getframeinfo(inspect.currentframe())[2] - result = True - err_msg = [] - out = None - self.logger.info("====Verifying data for %s in API_Server ======" % - (self.policy_name)) - self.api_s_policy_obj = self.api_s_inspect.get_cs_policy( - domain=self.project_fq_name[0], project=self.project_fq_name[1], policy=self.policy_name, refresh=True) - self.api_s_policy_obj_x = self.api_s_policy_obj['network-policy'] - - # compare policy_fq_name - out = policy_test_utils.compare_args( - 'policy_fq_name', self.api_s_policy_obj_x['fq_name'], self.policy_fq_name) - if out: - err_msg.append(out) - # compare policy_uuid - if isinstance(self.policy_obj, NetworkPolicy): - uuid = self.policy_obj.uuid - rules = self.policy_obj.network_policy_entries.exportDict()['PolicyEntriesType'] - else: - uuid = self.policy_obj['policy']['id'] - rules = self.policy_obj['policy']['entries'] - - out = policy_test_utils.compare_args( - 'policy_uuid', self.api_s_policy_obj_x['uuid'], uuid) - if out: - err_msg.append(out) - # compare policy_rules - out = policy_test_utils.compare_args( - 'policy_rules', self.api_s_policy_obj_x[ - 'network_policy_entries']['policy_rule'], rules['policy_rule']) - if out: - err_msg.append(out) - - if err_msg != []: - result = False - err_msg.insert(0, me + ":" + self.policy_name) - self.logger.info("verification: %s, status: %s" % (me, result)) - return {'result': result, 'msg': err_msg} - # end verify_policy_in_api_server - - @retry(delay=5, tries=3) - def verify_policy_not_in_api_server(self): - '''Verify that policy is removed in API Server. 
- - ''' - self.logger.info("====Verifying data for %s in API_Server ======" % - (self.policy_name)) - pol_found = False - - proj = self.vnc_lib.project_read(self.project_fq_name) - pol_dict = self.vnc_lib.network_policys_list( - parent_id=proj, parent_fq_name=proj.fq_name) - # pol_dict has policys from all projects, o/p is not filtered - # This needs to be debugged as vnc_lib.network_policys_list should return policys of requested project only... - policy_by_proj = [] - for p in pol_dict['network-policys']: - proj_of_policy = p['fq_name'][1] - if (proj_of_policy == proj.fq_name[1]): - policy_by_proj.append(p) - pol_dict = {'network-policys':policy_by_proj} - pol_list = pol_dict.get('network-policys') - for policy in pol_list: - if (policy['fq_name'][2] == self.policy_name): - pol_found = True - self.logger.info("policy %s is still found in API-Server" % - (self.policy_name)) - break - if not pol_found: - self.logger.info("policy %s is not found in API Server" % - (self.policy_name)) - return pol_found == False - # end verify_policy_not_in_api_server - - @retry(delay=3, tries=5) - def verify_policy_in_control_nodes(self): - """ Checks for policy details in Control-nodes. - Validate control-node data against quantum and return False if any mismatch is found. 
- """ - # Refresh quantum policy object - self.policy_obj - return {'result':True ,'msg':'Skipping control node verification'} - self.refresh_quantum_policy_obj() - me = inspect.getframeinfo(inspect.currentframe())[2] - result = True - err_msg = [] - out = None - for cn in self.inputs.bgp_ips: - # check if policy exists: - cn_config_policy_obj = self.cn_inspect[cn].get_cn_config_policy( - domain=self.project_fq_name[0], project=self.project_fq_name[1], policy=self.policy_name) - if not cn_config_policy_obj: - msg = "IFMAP View of Control-node %s is missing policy %s" % (cn, - self.policy_fq_name) - err_msg.append(msg) - self.logger.info(msg) - return {'result': False, 'msg': err_msg} - # compare policy_fq_name - self.logger.debug("Control-node %s : Policy object is : %s" % - (cn, cn_config_policy_obj)) - policy_fqn = ':'.join(self.policy_fq_name) - if policy_fqn not in cn_config_policy_obj['node_name']: - msg = "IFMAP View of Control-node %s is not having the policy detail of %s" % ( - cn, self.policy_fq_name) - err_msg.append(msg) - # compare policy_rules - if cn_config_policy_obj['obj_info']: - cn_rules = cn_config_policy_obj['obj_info'][ - 0]['data']['network-policy-entries'] - else: - # policy not attached to any network - cn_rules = [] - # translate control data in quantum data format for verification: - if cn_rules: - cn_rules = policy_test_utils.xlate_cn_rules(cn_rules) - else: - cn_rules = [] - self.logger.info("policy info in control node: %s" % cn_rules) - if isinstance(self.policy_obj, NetworkPolicy): - policy_info = self.policy_obj.network_policy_entries.exportDict()['PolicyEntriesType']['policy_rule'] - else: - policy_info = self.policy_obj['policy']['entries']['policy_rule'] - self.logger.info("policy info in quantum: %s" % policy_info) - out = policy_test_utils.compare_args('policy_rules', cn_rules, policy_info, - exp_name='cn_rules', act_name='quantum_rules') - if out: - msg = "Rules view in control-node %s is not matching, detailed msg follows %s" 
% ( - cn, out) - err_msg.append(msg) - - if err_msg != []: - result = False - err_msg.insert(0, me + ":" + self.policy_name) - self.logger.info("verification: %s, status: %s" % (me, result)) - return {'result': result, 'msg': err_msg} - # end verify_policy_in_control_node -# end PolicyFixture diff --git a/fixtures/port_fixture.py b/fixtures/port_fixture.py deleted file mode 100644 index 5a565f3dc..000000000 --- a/fixtures/port_fixture.py +++ /dev/null @@ -1,164 +0,0 @@ -import vnc_api_test - -class PortFixture(vnc_api_test.VncLibFixture): - - '''Fixture to handle Port/VMI objects - - Mandatory: - :param vn_id : UUID of the VN - - Optional: - :param fixed_ips : list of fixed ip dict - :param mac_address - :param security_groups - :params extra_dhcp_opts - :param api_type : one of 'neutron'(default) or 'contrail' - :param project_obj : Project object which is to be the parent - object of this port - - Inherited parameters: - :param domain : default is default-domain - :param project_name : default is admin - :param cfgm_ip : default is 127.0.0.1 - :param api_port : default is 8082 - :param connections : ContrailConnections object. 
default is None - :param username : default is admin - :param password : default is contrail123 - :param auth_server_ip : default is 127.0.0.1 - ''' - - def __init__(self, *args, **kwargs): - super(PortFixture, self).__init__(self, *args, **kwargs) - self.vn_id = args[0] - self.fixed_ips = kwargs.get('fixed_ips', []) - self.mac_address = kwargs.get('mac_address', []) - self.security_groups = kwargs.get('security_groups', []) - self.extra_dhcp_opts = kwargs.get('extra_dhcp_opts', []) - self.api_type = kwargs.get('api_type', 'neutron') - self.project_obj = kwargs.get('project_obj', None) - - self.vn_obj = None - # end __init__ - - def setUp(self): - super(PortFixture, self).setUp() - self.vn_obj = self.vnc_api_h.virtual_network_read(id=self.vn_id) - - if self.api_type == 'neutron': - self._neutron_create_port() - else: - self._contrail_create_port() - self.neutron_handle = self.get_neutron_handle() - self.obj = self.neutron_handle.get_port(self.uuid) - self.mac_address = self.obj['mac_address'] - self.vmi_obj = self.vnc_api_h.virtual_machine_interface_read( - id=self.uuid) - self.logger.info('Created port %s' % (self.uuid)) - - def _neutron_create_port(self): - neutron_obj = self.neutron_handle.create_port( - self.vn_id, - fixed_ips=self.fixed_ips, - mac_address=self.mac_address, - security_groups=self.security_groups, - extra_dhcp_opts=self.extra_dhcp_opts) - self.neutron_obj = neutron_obj - self.uuid = neutron_obj['id'] - - def _contrail_create_port(self): - if not self.project_obj: - self.project_obj = self.vnc_api_h.project_read(id=self.project_id) - vmi_id = str(uuid.uuid4()) - vmi_obj = vnc_api_test.VirtualMachineInterface(name=vmi_id, - parent_obj=self.project_obj) - if self.mac_address: - mac_address_obj = vnc_api_test.MacAddressesType() - mac_address_obj.set_mac_address([str(EUI(self.mac_address))]) - vmi_obj.set_virtual_machine_interface_mac_addresses( - mac_address_obj) - vmi_obj.uuid = vmi_id - vmi_obj.add_virtual_network(self.vn_obj) - - if 
self.security_groups: - for sg_id in self.security_groups: - sg_obj = self.vnc_api_h.security_group_read(id=sg_id) - vmi_obj.add_security_group(sg_obj) - else: - # Associate default SG - default_sg_fq_name = self.project_obj.fq_name[:] - default_sg_fq_name.append('default') - sg_obj = self.vnc_api_h.security_group_read( - fq_name=default_sg_fq_name) - vmi_obj.add_security_group(sg_obj) - - if self.extra_dhcp_opts: - # TODO - pass - - self.vmi_obj = self.vnc_api_h.virtual_machine_interface_create(vmi_obj) - self.uuid = vmi_id - - if self.fixed_ips: - for fixed_ip in self.fixed_ips: - iip_id = str(uuid.uuid4()) - iip_obj = vnc_api_test.InstanceIp(name=iip_id, - subnet_id=fixed_ip['subnet_id']) - iip_obj.uuid = iip_id - iip_obj.add_virtual_machine_interface(vmi_obj) - iip_obj.add_virtual_network(self.vn_obj) - iip_obj.set_instance_ip_address(fixed_ip['ip_address']) - self.vnc_api_h.instance_ip_create(iip_obj) - else: - iip_id = str(uuid.uuid4()) - iip_obj = vnc_api_test.InstanceIp(name=iip_id) - iip_obj.uuid = iip_id - iip_obj.add_virtual_machine_interface(vmi_obj) - iip_obj.add_virtual_network(self.vn_obj) - self.vnc_api_h.instance_ip_create(iip_obj) - # end _contrail_create_port - - def cleanUp(self): - super(PortFixture, self).cleanUp() - if self.api_type == 'neutron': - self._neutron_delete_port() - else: - self._contrail_delete_port() - self.logger.info('Deleted port %s' % (self.uuid)) - - def _neutron_delete_port(self): - self.neutron_handle.delete_port(self.uuid) - - def _contrail_delete_port(self): - vmi_iips = self.vmi_obj.get_instance_ip_back_refs() - for vmi_iip in vmi_iips: - vmi_iip_uuid = vmi_iip['uuid'] - self.vnc_api_h.instance_ip_delete(id=vmi_iip_uuid) - self.vnc_api_h.virtual_machine_interface_delete(id=self.uuid) - - def verify_port_in_api_server(self): - pass - - def verify_port_in_control_node_ifmap(self): - pass - - def verify_port_in_control_node(self): - pass - - def verify_port_in_agent(self): - pass - - def verify_port_in_agent_ifmap(self): 
- pass - -# end PortFixture - -if __name__ == "__main__": - vn_id = '1c83bed1-7d24-4414-9aa2-9d92975bc86f' - subnet_id = '49fea486-57ab-4056-beb3-d311a385814e' - port_fixture = PortFixture(vn_id=vn_id) -# port_fixture.setUp() - port_fixture1 = PortFixture(vn_id=vn_id, api_type='contrail') -# port_fixture1.setUp() - port_fixture2 = PortFixture(vn_id=vn_id, api_type='contrail', fixed_ips=[ - {'subnet_id': subnet_id, 'ip_address': '10.1.1.20'}]) - port_fixture2.setUp() diff --git a/fixtures/project_test.py b/fixtures/project_test.py deleted file mode 100644 index 66886dd65..000000000 --- a/fixtures/project_test.py +++ /dev/null @@ -1,285 +0,0 @@ -import os -import fixtures -from vnc_api.vnc_api import * -import uuid -import fixtures - -from quantum_test import * -from vnc_api_test import * -from contrail_fixtures import * -from common.connections import ContrailConnections -from tcutils.util import retry -from time import sleep -from openstack import OpenstackAuth -from vcenter import VcenterAuth - - -class ProjectFixture(fixtures.Fixture): - - def __init__(self, vnc_lib_h, connections, auth=None, project_name=None, - username=None, password=None, role='admin', - domain_name=None, uuid=None): - self.inputs = connections.inputs - self.vnc_lib_h = connections.get_vnc_lib_h() - self.logger = connections.logger - self.connections = connections - self.auth = auth - self.project_name = project_name or self.inputs.stack_tenant - self.domain_name = domain_name or 'default-domain' - self.uuid = uuid - self.project_obj = None - self.already_present = False - self.project_fq_name = [self.domain_name, self.project_name] - self.username = username - self.password = password - self.role = role - self.user_dict = {} - self._create_user_set = {} - self.project_connections = None - self.api_server_inspects = self.connections.api_server_inspects - self.verify_is_run = False - if not self.auth: - if self.inputs.orchestrator == 'openstack': - self.auth = 
OpenstackAuth(self.inputs.stack_user, - self.inputs.stack_password, - self.inputs.project_name, self.inputs, self.logger) - else: # vcenter - self.auth = VcenterAuth(self.inputs.stack_user, - self.inputs.stack_password, - self.inputs.project_name, self.inputs) - # end __init__ - - def read(self): - if self.uuid: - self.project_obj = self.vnc_lib_h.project_read(id=self.uuid) - self.project_name = self.project_obj.name - self.project_fq_name = self.project_obj.get_fq_name() - self.already_present = True - - def _create_project(self): - self.logger.info('Proceed with creation of new project.') - self.uuid = self.auth.create_project(self.project_name) - self.project_obj = self.vnc_lib_h.project_read(id=self.uuid) - self.logger.info('Created Project:%s, ID : %s ' % (self.project_name, - self.uuid)) - # end _create_project - - def _delete_project(self): - self.logger.info('Deleting Project %s' % self.project_fq_name) - self.auth.delete_project(self.project_name) - # end _delete_project - - def setUp(self): - super(ProjectFixture, self).setUp() - self.create() - - def create(self): - self.uuid = self.uuid or self.auth.get_project_id(self.project_name) - if self.uuid: - self.read() - self.logger.debug( - 'Project %s(%s) already present. 
Not creating it'%( - self.project_fq_name, self.uuid)) - elif self.project_name == self.inputs.stack_tenant: - raise Exception('Project %s not found' % (self.project_name)) - else: - self.logger.info('Project %s not found, creating it' % ( - self.project_name)) - self._create_project() -# time.sleep(2) - - def get_uuid(self): - return self.uuid - - def get_fq_name(self): - return self.project_fq_name - - def getObj(self): - return self.project_obj - - def cleanUp(self): - super(ProjectFixture, self).cleanUp() - self.delete() - - def delete(self, verify=False): - if self.inputs.orchestrator == 'vcenter': - self.logger.debug('No need to verify projects in case of vcenter') - return - do_cleanup = True - if self.inputs.fixture_cleanup == 'no': - do_cleanup = False - if self.already_present: - do_cleanup = False - if self.inputs.fixture_cleanup == 'force': - do_cleanup = True - if do_cleanup: - if not self.check_no_project_references(): - self.logger.warn('One or more references still present' - ', will not delete the project %s' % (self.project_name)) - return - self.auth.reauth() - self._delete_project() - if self.verify_is_run or verify: - assert self.verify_on_cleanup() - else: - self.logger.debug('Skipping the deletion of Project %s' % - self.project_fq_name) - - # end cleanUp - - @retry(delay=2, tries=10) - def check_no_project_references(self): - vnc_project_obj = self.vnc_lib_h.project_read(id=self.uuid) - vns = vnc_project_obj.get_virtual_networks() - if vns: - self.logger.warn('Project %s still has VNs %s before deletion' %( - self.project_name, vns)) - return False - vmis = vnc_project_obj.get_virtual_machine_interfaces() - if vmis: - self.logger.warn('Project %s still has VMIs %s before deletion' %( - self.project_name, vmis)) - return False - sgs = vnc_project_obj.get_security_groups() - if len(sgs) > 1: - self.logger.warn('Project %s still has SGs %s before deletion' %( - self.project_name, sgs)) - return False - return True - # end 
check_no_project_references - - def get_project_connections(self, username=None, password=None): - username = username or self.username or self.inputs.stack_user - password = password or self.password or self.inputs.stack_password - if not self.project_connections: - self.project_connections = ContrailConnections( - inputs=self.inputs, - logger=self.logger, - project_name=self.project_name, - username=username, - password=password, - domain_name=self.domain_name) - return self.project_connections - # end get_project_connections - - def verify_on_setup(self): - result = True - if not self.verify_project_in_api_server(): - result &= False - self.logger.error('Verification of project %s in APIServer ' - 'failed!! ' % (self.project_name)) - self.verify_is_run = True - return result - # end verify_on_setup - - @retry(delay=2, tries=6) - def verify_project_in_api_server(self): - if self.inputs.orchestrator == 'vcenter': - self.logger.debug('No need to verify projects in case of vcenter') - return True - result = True - for cfgm_ip in self.inputs.cfgm_ips: - api_s_inspect = self.api_server_inspects[cfgm_ip] - cs_project_obj = api_s_inspect.get_cs_project( - self.domain_name, - self.project_name) - if not cs_project_obj: - self.logger.warn('Project %s not found in API Server %s' - ' ' % (self.project_name, api_s_inspect._ip)) - result &= False - return result - if cs_project_obj['project']['uuid'] != self.uuid: - self.logger.warn('Project id %s got from API Server is' - ' not matching expected ID %s' % ( - cs_project_obj['project']['uuid'], self.uuid)) - result &= False - if result: - self.logger.info('Verification of project %s in API Server %s' - ' passed ' % (self.project_name, api_s_inspect._ip)) - return result - # end verify_project_in_api_server - - @retry(delay=10, tries=12) - def verify_project_not_in_api_server(self): - if self.inputs.orchestrator == 'vcenter': - self.logger.debug('No need to verify projects in case of vcenter') - return True - result = True - 
for cfgm_ip in self.inputs.cfgm_ips: - api_s_inspect = self.api_server_inspects[cfgm_ip] - cs_project_obj = api_s_inspect.get_cs_project( - self.domain_name, - self.project_name) - self.logger.info("Check for project %s after deletion, got cs_project_obj %s" % - (self.project_name, cs_project_obj)) - if cs_project_obj: - self.logger.warn('Project %s is still found in API Server %s' - 'with ID %s ' % (self.project_name, api_s_inspect._ip, - cs_project_obj['project']['uuid'])) - result &= False - if result: - self.logger.info('Verification of project %s removal in API Server ' - ' %s passed ' % (self.project_name, api_s_inspect._ip)) - return result - # end verify_project_not_in_api_server - - def set_sec_group_for_allow_all(self, project_name=None, sg_name='default'): - project_name = project_name or self.project_name - uuid_1 = uuid.uuid1().urn.split(':')[2] - uuid_2 = uuid.uuid1().urn.split(':')[2] - rule1 = [{'direction': '>', - 'protocol': 'any', - 'dst_addresses': [{'security_group': 'local', 'subnet': None}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'rule_uuid': uuid_1 - }, - {'direction': '>', - 'protocol': 'any', - 'src_addresses': [{'security_group': 'local', 'subnet': None}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'rule_uuid': uuid_2 - }, - ] - self.update_sec_group(project_name, sg_name, rule1) - # end set_sec_group_for_allow_all - - def update_sec_group(self, project_name, sec_group_name, rules): - def_sec_grp = self.vnc_lib_h.security_group_read( - fq_name=[u'default-domain', project_name, sec_group_name]) - try: - old_rules = def_sec_grp.get_security_group_entries( - ).get_policy_rule() - except AttributeError: - old_rules = [] - self.logger.info( - 
"Adding rules to the %s security group in Project %s" % - (sec_group_name, project_name)) - self.set_sec_group(project_name, sec_group_name, rules) - self.addCleanup(self.set_sec_group, project_name, - sec_group_name, old_rules) - - def set_sec_group(self, project_name, sec_group_name, rules): - rule_list = PolicyEntriesType(policy_rule=rules) - project_fq_name = [u'default-domain', project_name] - sg_fq_name = [u'default-domain', project_name, sec_group_name] - project = self.vnc_lib_h.project_read(fq_name=project_fq_name) - def_sec_grp = self.vnc_lib_h.security_group_read(fq_name=sg_fq_name) - def_sec_grp = SecurityGroup( - name=sec_group_name, parent_obj=project, security_group_entries=rule_list) - def_sec_grp.set_security_group_entries(rule_list) - self.vnc_lib_h.security_group_update(def_sec_grp) - - @retry(delay=2, tries=10) - def verify_on_cleanup(self): - result = True - if not self.verify_project_not_in_api_server(): - result &= False - self.logger.error('Project %s is still present in API Server' % ( - self.project_name)) - return result - # end verify_on_cleanup -# end ProjectFixture diff --git a/fixtures/quantum_fixture.py b/fixtures/quantum_fixture.py deleted file mode 100644 index 042fe59ff..000000000 --- a/fixtures/quantum_fixture.py +++ /dev/null @@ -1,169 +0,0 @@ - -import fixtures -import time -import uuid -from common.openstack_libs import quantum_client as client -from common.openstack_libs import quantum_http_client as HTTPClient -from common.openstack_libs import quantum_exception as exceptions -from contrail_fixtures import * -cmt = lambda: int(round(time.time() * 1000)) - - -class QuantumFixture(fixtures.Fixture): - - def __init__(self, connections, tid, inputs, vn_count, vms_per_vn): - self._vn_count = vn_count - self._vms_per_vn = vms_per_vn - self._vns = [] - self._vms = [] - self._subs = [] - self.obj = connections.quantum_h.obj - self.logger = inputs.logger - self.inputs = inputs - self.tid = tid - - def setUp(self): - 
super(QuantumFixture, self).setUp() - self.tm = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - for idx in range(self._vn_count): - net_req = {'name': 'vnbzz-%s' % idx} - - try: - vn_obj = self.obj.show_network(network=net_req['name']) - except: - net_req["tenant_id"] = self.tid - vn_obj = self.obj.create_network( - {'network': net_req})['network'] - self._vns.append(vn_obj) - - snet = {"network_id": vn_obj['id']} - snet["cidr"] = "10.%d.0.0/16" % idx - snet["tenant_id"] = self.tid.replace('-', '') - snet["ip_version"] = 4 - - sub_obj = self.obj.create_subnet({'subnet': snet})['subnet'] - self._subs.append(sub_obj) - - self.logger.info("Created VN %s, tid %s, id %s" % - (net_req['name'], self.tid, vn_obj['id'])) - for jdx in range(self._vms_per_vn): - #import pdb; pdb.set_trace() - #port_obj = self.q_create_port( 'vmybz-%d-%d' % (idx,jdx), vn_obj['id']) - port_obj = self.n_create_port( - 'vmybz-%d-%d' % (idx, jdx), vn_obj['id'], self.tid) - self._vms.append(port_obj) - #import pdb; pdb.set_trace() - - self.logger.info("VM count %d" % len(self._vms)) - - def n_create_port(self, name, netid, tid): - try: - ii = 0 - ct = cmt() - ten_id = tid.replace('-', '') - self.obj.list_networks(id=[netid], tenant_id=ten_id) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - self.obj.list_networks(id=[netid], shared=True) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - uuu = str(uuid.uuid4()) - self.obj.list_ports(device_id=uuu) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - self.obj.list_security_groups() - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - self.obj.list_networks(id=[netid], tenant_id=ten_id) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - self.obj.list_networks(id=[netid], shared=True) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - self.obj.list_security_groups(tenant_id=ten_id) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - self.obj.show_network(network=netid) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - 
self.obj.show_network(network=netid, fields=['tenant_id']) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - rport = self.q_create_port(name, netid, uuu) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - pt = self.obj.list_ports(device_id=uuu, tenant_id=ten_id) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - self.obj.show_network(network=netid, fields=['tenant_id']) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - self.obj.list_floatingips() - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - sid = pt['ports'][0]['fixed_ips'][0]['subnet_id'] - self.obj.list_subnets(id=sid) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - self.obj.show_network(network=netid, fields=['tenant_id']) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - self.obj.list_ports( - device_owner=['network:dhcp'], network_id=netid) - self.tm[ii] += cmt() - ct - ii += 1 - ct = cmt() - return rport - - except Exception as e: - print (str(e)) - - pass - - def q_create_port(self, name, netid, uuu=None): - if uuu == None: - uuu = str(uuid.uuid4()) - port_req = {"device_id": uuu} - port_req["name"] = name - port_req["network_id"] = netid - port_req["tenant_id"] = self.tid.replace('-', '') - try: - port_obj = self.obj.create_port({'port': port_req})['port'] - except: - port_obj = None - return port_obj - - def q_delete_port(self, port_obj): - self.obj.delete_port(port_obj['id']) - return - - def cleanUp(self): - super(QuantumFixture, self).cleanUp() - print ("Time taken:") - for jj in self.tm: - print (jj) - for idx in self._vms: - self.q_delete_port(idx) - for idx in self._subs: - self.obj.delete_subnet(idx['id']) - for idx in self._vns: - self.obj.delete_network(idx['id']) - - def topVN(self): - return self._vns[0]['id'] - # return self.obj.show_network(self._vns[0]['id']) diff --git a/fixtures/quantum_test.py b/fixtures/quantum_test.py deleted file mode 100644 index a256aa867..000000000 --- a/fixtures/quantum_test.py +++ /dev/null @@ -1,885 +0,0 @@ -import os -from tcutils.util 
import * -import logging -from common.openstack_libs import network_client as client -from common.openstack_libs import network_http_client as HTTPClient -from common.openstack_libs import network_client_exception as CommonNetworkClientException - - -class NetworkClientException(CommonNetworkClientException): - - def __init__(self, **kwargs): - message = kwargs.get('message') - self.status_code = kwargs.get('status_code', 0) - if message: - self.message = message - super(NetworkClientException, self).__init__(**kwargs) - - def __str__(self): - return repr(self.message) - - -class QuantumHelper(): - - def __init__( - self, - username, - password, - project_id, - auth_server_ip, - cfgm_ip=None, - logger=None): - httpclient = None - self.quantum_port = '9696' - self.username = username - self.password = password - self.project_id = get_plain_uuid(project_id) - self.cfgm_ip = cfgm_ip - self.auth_server_ip = auth_server_ip - self.obj = None - self.logger = logger or logging.getLogger(__name__) - - self.auth_url = os.getenv('OS_AUTH_URL') or \ - 'http://' + auth_server_ip + ':5000/v2.0' - # end __init__ - - def setUp(self): - insecure = bool(os.getenv('OS_INSECURE', True)) - # Quantum Client class does not have tenant_id as argument - # So, do quantum auth differently - if 'quantum' in client.__name__: - self._do_quantum_authentication() - else: - self.obj = client.Client('2.0', username=self.username, - password=self.password, - tenant_id=self.project_id, - auth_url=self.auth_url) - # end __init__ - - def _do_quantum_authentication(self): - try: - httpclient = HTTPClient(username=self.username, - tenant_id= self.project_id, - password=self.password, - auth_url=self.auth_url) - httpclient.authenticate() - except CommonNetworkClientException, e: - self.logger.exception('Exception while connection to Quantum') - raise e - OS_URL = 'http://%s:%s/' % (self.cfgm_ip, self.quantum_port) - OS_TOKEN = httpclient.auth_token - self.obj = client.Client('2.0', endpoint_url=OS_URL, 
token=OS_TOKEN) - # end _do_quantum_authentication - - def get_handle(self): - return self.obj - # end get_handle - - def create_network( - self, - vn_name, - vn_subnets=None, - ipam_fq_name=None, - shared=False, - router_external=False, - enable_dhcp = True, - sriov_enable = False, - sriov_vlan = None, - sriov_provider_network = None, - disable_gateway=False): - """Create network given a name and a list of subnets. - """ - try: - net_req = {} - net_req['name'] = vn_name - if shared: - net_req['shared'] = shared - if router_external: - net_req['router:external'] = router_external - if sriov_enable: - net_req['provider:physical_network'] = sriov_provider_network - net_req['provider:segmentation_id'] = sriov_vlan - net_rsp = self.obj.create_network({'network': net_req}) - self.logger.debug('Response for create_network : ' + repr(net_rsp)) - - vn_id = net_rsp['network']['id'] - net_id = net_rsp['network']['id'] - if vn_subnets: - for subnet in vn_subnets: - net_rsp = self.create_subnet( - subnet, net_id, ipam_fq_name, enable_dhcp, disable_gateway) - # end for - return self.obj.show_network(network=net_id) - except CommonNetworkClientException as e: - self.logger.exception( - 'Quantum Exception while creating network %s' % (vn_name)) - return None - - def create_subnet(self, subnet, net_id, ipam_fq_name=None, enable_dhcp=True, disable_gateway=False): - subnet_req = subnet - subnet_req['network_id'] = net_id - subnet_req['enable_dhcp'] = enable_dhcp - subnet_req['ip_version'] = '6' if is_v6(subnet['cidr']) else '4' - subnet_req['cidr'] = unicode(subnet_req['cidr']) - subnet_req['contrail:ipam_fq_name'] = ipam_fq_name - if disable_gateway: - subnet_req['gateway_ip'] = None - try: - subnet_rsp = self.obj.create_subnet({'subnet': subnet_req}) - self.logger.debug( - 'Response for create_subnet : ' + - repr(subnet_rsp)) - return subnet_rsp - except CommonNetworkClientException as e: - self.logger.exception( - 'Quantum Exception while creating subnet for vn with id %s' % - 
(net_id)) - return None - # end _create_subnet - - def create_port(self, net_id, fixed_ips=[], - mac_address=None, no_security_group=False, - security_groups=[], extra_dhcp_opts=None,sriov=False): - port_req_dict = { - 'network_id': net_id, - } - if mac_address: - port_req_dict['mac_address'] = mac_address - if no_security_group: - port_req_dict['security_groups'] = None - if security_groups: - port_req_dict['security_groups'] = security_groups - if extra_dhcp_opts: - port_req_dict['extra_dhcp_opts'] = extra_dhcp_opts - - if fixed_ips: - port_req_dict['fixed_ips'] = fixed_ips - if sriov: - port_req_dict['binding:vnic_type'] = 'direct' - try: - port_rsp = self.obj.create_port({'port': port_req_dict}) - self.logger.debug('Response for create_port : ' + repr(port_rsp)) - return port_rsp['port'] - except CommonNetworkClientException as e: - self.logger.exception( - 'Quantum Exception while creating port in vn with id %s' % - (net_id)) - return None - - # end create_port - - def get_port(self, port_id, field=''): - try: - port_obj = self.obj.show_port(port_id, fields=field)['port'] - return port_obj[field] if field else port_obj - except CommonNetworkClientException as e: - self.logger.debug('Get port on %s failed' % (port_id)) - # end get_port - - def get_port_ips(self, port_id): - port_obj = self.get_port(port_id, field='fixed_ips') - return [x['ip_address'] for x in port_obj] - - def create_security_group(self, name): - sg_dict = {'name': name, 'description': 'SG-' + name} - try: - sg_resp = self.obj.create_security_group( - {'security_group': sg_dict}) - return sg_resp['security_group'] - except CommonNetworkClientException as e: - self.logger.exception( - 'Quantum Exception while creating security group %s' % (name)) - return None - - # end create_security_group - - def delete_security_group(self, sg_id): - self.obj.delete_security_group(sg_id) - # end delete_security_group - - def create_security_group_rule(self, sg_id, direction='ingress', - port_range_min=None, 
port_range_max=None, - protocol=None, remote_group_id=None, - remote_ip_prefix=None): - sg_rule = None - sg_rule_dict = {'security_group_id': sg_id} - if direction: - sg_rule_dict['direction'] = direction - if port_range_min != None: - sg_rule_dict['port_range_min'] = port_range_min - if port_range_max != None: - sg_rule_dict['port_range_max'] = port_range_max - if protocol: - sg_rule_dict['protocol'] = protocol - if remote_group_id: - sg_rule_dict['remote_group_id'] = remote_group_id - if remote_ip_prefix: - sg_rule_dict['remote_ip_prefix'] = remote_ip_prefix - try: - sg_rule = self.obj.create_security_group_rule( - {'security_group_rule': sg_rule_dict}) - except CommonNetworkClientException as e: - self.logger.exception( - 'Quantum Exception while creating SG Rule %s' % (sg_rule_dict)) - return sg_rule - # end create_security_group_rule - - def delete_default_egress_rule(self, sg_id): - #currently this method can be only used before adding any custom rule to sg - rules = self.list_security_group_rules(tenant_id=self.project_id) - for rule in rules['security_group_rules']: - if rule['security_group_id'] == sg_id and rule['remote_ip_prefix'] == '0.0.0.0/0': - self.delete_security_group_rule(rule['id']) - break - - def delete_security_group_rule(self, rule_id): - self.obj.delete_security_group_rule(rule_id) - # end delete_security_group_rule - - def delete_port(self, port_id,): - port_rsp = self.obj.delete_port(port_id) - self.logger.debug('Response for delete_port : ' + repr(port_rsp)) - return port_rsp - # end delete_port - - def get_vn_obj_if_present(self, vn_name, project_id=None): - project_id = project_id if project_id else self.project_id - try: - net_rsp = self.obj.list_networks(tenant_id=project_id, name=vn_name)['networks'] - if net_rsp: - return self.obj.show_network(network=net_rsp[0]['id']) - except CommonNetworkClientException as e: - self.logger.exception( - "Some exception while doing Quantum net-list") - raise NetworkClientException(message=str(e)) 
- return None - # end get_vn_obj_if_present - - def get_vn_obj_from_id(self, uuid): - try: - return self.obj.show_network(network=uuid) - except CommonNetworkClientException as e: - self.logger.exception( - "Some exception while doing Quantum net-list") - return None - return None - - def delete_vn(self, vn_id): - result = True - try: - net_rsp = self.obj.delete_network(vn_id) - self.logger.debug('Response for deleting network %s' % - (str(net_rsp))) - except CommonNetworkClientException as e: - self.logger.exception( - 'Quantum exception while deleting a VN %s' % (vn_id)) - result = False - - return result - # end _delete_vn - - def delete_quota(self, project_id): - result = True - try: - net_rsp = self.obj.delete_quota(project_id) - self.logger.debug('Response for deleting quota %s' % - (str(net_rsp))) - except CommonNetworkClientException as e: - self.logger.exception( - 'Quantum exception while quota delete for project %s' % (project_id)) - result = False - - return result - # end delete_quota - - def list_networks(self, args): - try: - net_rsp = self.obj.list_networks(args) - return net_rsp - except CommonNetworkClientException as e: - self.logger.debug("Exception while viewing Network list") - return [] - # end list_networks - - def create_floatingip(self, fip_pool_vn_id, project_id=None, port_id=None): - if not project_id: - project_id = self.project_id - fip_req = {'floatingip': {'floating_network_id': fip_pool_vn_id, - 'tenant_id': project_id}} - if port_id: - fip_req['floatingip']['port_id'] = port_id - try: - fip_resp = self.obj.create_floatingip(fip_req) - return fip_resp - except CommonNetworkClientException as e: - self.logger.exception( - 'Quantum Exception while creating floatingip for tenant %s with fip_pool_vn_id %s' % - (project_id, fip_pool_vn_id)) - return None - - # end create_floatingip - - def delete_floatingip(self, fip_id): - fip_resp = self.obj.delete_floatingip(fip_id) - return fip_resp - # end delete_floatingip - - def 
list_floatingips(self, tenant_id=None, port_id=None): - if not tenant_id: - tenant_id = self.project_id - if port_id: - tenant_id = None # workaround for api-server bug with multiple filters - return self.obj.list_floatingips(tenant_id=tenant_id, port_id=port_id)['floatingips'] - # end - - def get_floatingip(self, fip_id, fields=''): - fip_resp = self.obj.show_floatingip(fip_id, fields='')['floatingip'] - return fip_resp[fields] if fields else fip_resp - # end get_floatingip - - def get_port_id(self, vm_id): - ''' Returns the Quantum port-id of a VM. - - ''' - try: - port_rsp = self.obj.list_ports(device_id=[vm_id]) - port_id = port_rsp['ports'][0]['id'] - return port_id - except Exception as e: - self.logger.error('Error occured while getting port-id of a VM ') - return None - # end - - def assoc_floatingip(self, fip_id, port_id): - fip_dict = {'floatingip': {'port_id': port_id}} - return self.update_floatingip(fip_id, fip_dict) - - def update_floatingip(self, fip_id, update_dict): - return self.obj.update_floatingip(fip_id, update_dict) - # end update_floatingip - - def get_vn_id(self, vn_name): - net_id = None - net_rsp = self.obj.list_networks() - for ( - x, - y, - z) in [ - (network['name'], - network['id'], - network['tenant_id']) for network in net_rsp['networks']]: - if vn_name == x and self.project_id in z: - net_id = y - break - return net_id - # end get_vn_id - - def create_policy(self, policy_dict): - policy_rsp = None - try: - policy_rsp = self.obj.create_policy(policy_dict) - except CommonNetworkClientException as e: - self.logger.error( - "Quantum Exception while creating policy" + str(e)) - return policy_rsp - # end create_policy - - def update_policy(self, policy_id, policy_entries): - '''policy_data format {'policy': {'entries': new_policy_entries}}''' - policy_rsp = None - try: - policy_rsp = self.obj.update_policy(policy_id, policy_entries) - except CommonNetworkClientException as e: - self.logger.error( - "Quantum Exception while creating 
policy" + str(e)) - self.logger.info("policy_rsp for policy_id %s after update is %s" % - (policy_id, policy_rsp)) - return policy_rsp - # end update_policy - - def get_policy_if_present(self, project_name=None, policy_name=None): - policy_rsp = None - try: - policy_rsp = self.list_policys() - for ( - x, - y, - z) in [ - (policy['name'], - policy['id'], - policy['fq_name']) for policy in policy_rsp['policys']]: - if policy_name == x: - if project_name: - if project_name in z: - policy_id = y - return self.obj.show_policy(policy=policy_id) - else: - policy_id = y - return self.obj.show_policy(policy=policy_id) - except CommonNetworkClientException as e: - self.logger.exception( - "Some exception while doing Quantum policy-listing") - return None - - # end get_policy_if_present - - def list_policys(self): - policy_list = None - try: - policy_list = self.obj.list_policys(tenant_id=self.project_id) - except CommonNetworkClientException as e: - self.logger.error( - "Quantum Exception while listing policies" + str(e)) - return policy_list - # end list_policys - - def delete_policy(self, policy_id): - result = True - try: - self.obj.delete_policy(policy_id) - except CommonNetworkClientException as e: - result = False - self.logger.error( - "Quantum Exception while deleting policy" + str(e)) - return result - # end delete_policy - - def get_policy_fq_name(self, policy_obj): - return policy_obj['policy']['fq_name'] - # end get_policy_fq_name - - def update_network(self, vn_id, network_dict): - net_rsp = None - try: - net_rsp = self.obj.update_network(vn_id, network_dict) - except CommonNetworkClientException as e: - self.logger.error( - "Quantum Exception while updating network" + str(e)) - return net_rsp - # end update_network - - def list_security_groups(self, *args, **kwargs): - return self.obj.list_security_groups(*args, **kwargs) - # end list_security_groups - - def show_security_group(self, sg_id): - return self.obj.show_security_group(sg_id) - # end 
show_security_group - - def list_security_group_rules(self, *args, **kwargs): - return self.obj.list_security_group_rules(*args, **kwargs) - # end list_security_group_rules - - def create_router(self, router_name, tenant_id=None): - router_body = {} - router_body['router'] = {} - router_body['router']['name'] = router_name - if tenant_id: - router_body['router']['tenant_id'] = tenant_id - try: - return self.obj.create_router(router_body)['router'] - except CommonNetworkClientException as e: - self.logger.exception( - 'Quantum Exception while creating Router %s' % (router_name)) - return None - - def get_router(self, uuid=None, name=None): - if uuid: - return self.obj.show_router(uuid)['router'] - if name: - return self.obj.list_routers(name=name, tenant_id=self.project_id)['routers'][0] - - def delete_router(self, router_id=None): - return self.obj.delete_router(router_id) - - def get_subnet_ids(self, vn_id): - return self.obj.show_network(vn_id, fields='subnets')['network']['subnets'] - - def get_subnets_of_vn(self, vn_id): - subnets = [] - try: - for subnet_id in self.get_subnet_ids(vn_id): - subnets.append(self.obj.show_subnet(subnet_id)['subnet']) - except CommonNetworkClientException as e: - self.logger.exception( - 'Exception while reading network details%s' % (vn_id)) - return None - return subnets - # end get_subnets_of_vn - - def get_subnet(self, subnet_id, field=''): - resp = self.obj.show_subnet(subnet_id, fields=field)['subnet'] - return resp[field] if field else resp - - def get_vn_of_subnet(self, subnet_id): - return self.get_subnet(subnet_id, field='network_id') - - def get_vn_of_port(self, port_id): - return self.get_port(port_id, field='network_id') - - def add_router_interface(self, router_id, subnet_id=None, port_id=None, vn_id=None): - ''' Add an interface to router. 
- Result will be of form - {u'subnet_id': u'd5ae735b-4df2-473f-9d6c-ca9ddb263fdc', u'tenant_id': u'509a5c7a23474f15a456905adcd9fc8d', u'port_id': u'f2d4cb13-2401-4830-b8cc-c23d544bb1d6', u'id': u'da7e4878-04fa-4d1a-8def-4b11c2eaf569'} - ''' - body = {} - if vn_id: - subnet_id = self.get_subnet_ids(vn_id)[0] - if subnet_id: - body['subnet_id'] = subnet_id - if port_id: - body['port_id'] = port_id - self.logger.info('Adding interface with subnet_id %s, port_id %s' - ' to router %s' % (subnet_id, port_id, router_id)) - result = self.obj.add_interface_router(router_id, body) - return result - # end add_router_interface - - def delete_router_interface(self, router_id, subnet_id=None, port_id=None, vn_id=None): - ''' Remove an interface from router - ''' - body = {} - if vn_id: - subnet_id = self.get_subnet_ids(vn_id)[0] - if subnet_id: - body['subnet_id'] = subnet_id - if port_id: - body['port_id'] = port_id - self.logger.info('Deleting interface with subnet_id %s, port_id %s' - ' from router %s' % (subnet_id, port_id, router_id)) - try: - result = self.obj.remove_interface_router(router_id, body) - return result - except NetworkClientException as e: - self.logger.exception(e) - raise NetworkClientException(message=str(e)) - # end delete_router_interface - - def router_gateway_set(self, router_id, ex_net_id): - '''Set gateway for router - ''' - body = {} - body['network_id'] = ex_net_id - self.logger.info('Setting gateway for router %s to network %s ' - % (router_id, ex_net_id)) - try: - result = self.obj.add_gateway_router(router_id, body) - return result - except NetworkClientException as e: - self.logger.exception(e) - raise NetworkClientException(message=str(e)) - # end router_gateway_set - - def router_gateway_clear(self, router_id): - self.logger.info('clear gateway of router %s' %router_id) - try: - result = self.obj.remove_gateway_router(router_id) - return result - except NetworkClientException as e: - self.logger.exception(e) - raise 
NetworkClientException(message=str(e)) - - def update_router(self, router_id, router_dict): - router_rsp = None - body = {} - body['router'] = router_dict - try: - router_rsp = self.obj.update_router(router_id, body) - except CommonNetworkClientException as e: - self.logger.error( - "Quantum Exception while updating router " + str(e)) - raise e - return router_rsp - # end update_router - - def update_security_group(self, sg_id, sg_dict): - sg_rsp = None - body = {} - body['security_group'] = sg_dict - try: - sg_rsp = self.obj.update_security_group(sg_id, body) - except CommonNetworkClientException as e: - self.logger.error( - "Quantum Exception while updating security group " + str(e)) - raise e - return sg_rsp - # end update_security_group - - def get_router_interfaces(self, router_id): - ports_obj = self.obj.list_ports(device_id=router_id)['ports'] - return ports_obj - - def update_subnet(self, subnet_id, subnet_dict): - subnet_rsp = None - body = {} - body['subnet'] = subnet_dict - try: - subnet_rsp = self.obj.update_subnet(subnet_id, body) - except CommonNetworkClientException as e: - self.logger.error( - "Quantum Exception while updating subnet" + str(e)) - raise e - return subnet_rsp - # end update_subnet - - def update_port(self, port_id, port_dict): - port_rsp = None - body = {} - body['port'] = port_dict - self.logger.debug('Updating port %s with body : %s' % ( - port_id, body)) - try: - port_rsp = self.obj.update_port(port_id, body) - except CommonNetworkClientException as e: - self.logger.error( - "Quantum Exception while updating port" + str(e)) - raise e - return port_rsp - # end update_port - - def show_quota(self, tenant_id): - quota_rsp = None - try: - quota_rsp = self.obj.show_quota(tenant_id) - except CommonNetworkClientException as e: - self.logger.error( - "Quantum Exception while running show quota" + str(e)) - raise e - return quota_rsp - # end show_quota - - def update_quota(self, tenant_id, quota_dict): - quota_rsp = None - body = {} - 
body['quota'] = quota_dict - try: - quota_rsp = self.obj.update_quota(tenant_id, body) - except CommonNetworkClientException as e: - self.logger.error( - "Quantum Exception while running quota update " + str(e)) - return quota_rsp - # end update_quota - - def create_lb_pool(self, name, lb_method, protocol, - subnet_id=None, network_id=None, custom_attr={}): - '''Create lb pool. Returns the lb object created''' - if network_id and not subnet_id: - subnet_id = self.get_subnet_ids(network_id)[0] - pool_dict = {'name': name, 'lb_method': lb_method, - 'protocol': protocol, 'subnet_id': subnet_id, - 'custom_attributes': [custom_attr]} - try: - pool_resp = self.obj.create_pool({'pool': pool_dict}) - return pool_resp['pool'] - except CommonNetworkClientException as e: - self.logger.exception( - 'Network Exception while creating LB Pool %s' % (name)) - return None - # end create_lb_pool - - def delete_lb_pool(self, pool_id): - '''Delete the lb''' - pool_rsp = self.obj.delete_pool(pool_id) - self.logger.debug('Response for delete_pool : ' + repr(pool_rsp)) - # end delete_lb_pool - - def update_lb_pool(self, pool_id, pool_dict={}): - pool_rsp = None - try: - pool_rsp = self.obj.update_pool(pool_id, {'pool': pool_dict}) - except CommonNetworkClientException as e: - self.logger.error( - "NetworkClient Exception while updating pool" + str(e)) - return pool_rsp - # end update_lb_pool - - def get_lb_pool(self, pool_id=None, name=None): - ''' Returns the pool dict - If pool_id is not found , returns None''' - try: - if pool_id: - return self.obj.show_pool(pool_id)['pool'] - elif name: - return self.obj.list_pools(name=name, tenant_id=self.project_id)['pools'][0] - except: - self.logger.debug('Get pool on %s failed' % (pool_id)) - return None - # end get_pool - - def list_lb_pools(self): - ''' Returns the LB pools in this tenant''' - try: - pools_list = self.obj.list_pools() - except CommonNetworkClientException as e: - self.logger.debug('List pools failed') - return None - return 
pools_list['pools'] - - def create_health_monitor(self, delay, max_retries, probe_type, timeout): - '''Returns the neutron health monitor dict created ''' - hm_dict = {'delay': delay, 'max_retries': max_retries, - 'type': probe_type, 'timeout': timeout} - try: - hm_resp = self.obj.create_health_monitor( - {'health_monitor': hm_dict}) - return hm_resp['health_monitor'] - except CommonNetworkClientException as e: - self.logger.exception( - 'Network Exception while creating Health monitor') - return None - # end create_health_monitor - - def delete_health_monitor(self, hm_id): - ''' Delete the Health monitor ''' - hm_rsp = self.obj.delete_health_monitor(hm_id) - self.logger.debug( - 'Response for delete_health_monitor : ' + repr(hm_rsp)) - - def update_health_monitor(self, hm_id, hm_dict): - '''Update Health monitor object''' - hm_rsp = None - try: - hm_rsp = self.obj.update_health_monitor(hm_id, hm_dict) - except CommonNetworkClientException as e: - self.logger.error( - "NetworkClient Exception while updating Health monitr" + str(e)) - return hm_rsp - # end update_health_monitor - - def get_health_monitor(self, hm_id): - ''' Returns Health monitor object as dict. - If not found, returns None - ''' - try: - hm_obj = self.obj.show_health_monitor(hm_id) - return hm_obj['health_monitor'] - except CommonNetworkClientException as e: - self.logger.debug('Get health-monitor on %s failed' % (hm_id)) - - def list_health_monitors(self): - ''' Returns a list of health monitor objects(dicts) in a tenant ''' - try: - hm_list = self.obj.list_health_monitors() - except CommonNetworkClientException as e: - self.logger.error('List health-monitors failed') - return None - return hm_list['health_monitors'] - - def associate_health_monitor(self, pool_id, hm_id): - ''' Associate Health monitor to the pool. Returns True on success. 
- Returns False if it fails - ''' - body = {'health_monitor': {'id' : hm_id}} - try: - hm_list = self.obj.associate_health_monitor(pool_id, body) - except CommonNetworkClientException as e: - self.logger.error('Associating HM %s to Pool %s failed' % ( - hm_id, pool_id)) - return None - return hm_list['health_monitor'] - - def disassociate_health_monitor(self, pool_id, hm_id): - '''Disassociate health monitor from the pool - ''' - self.obj.disassociate_health_monitor(pool_id, hm_id) - - def create_vip(self, name, protocol, protocol_port, pool_id, - subnet_id=None, network_id=None): - ''' Create vip in the pool. Returns the vip object as dict - ''' - if network_id and not subnet_id: - subnet_id = self.get_subnet_ids(network_id)[0] - vip_dict = {'name': name, - 'protocol': protocol, - 'protocol_port': protocol_port, - 'subnet_id': subnet_id, - 'pool_id': pool_id} - try: - vip_resp = self.obj.create_vip( - {'vip': vip_dict}) - return vip_resp['vip'] - except CommonNetworkClientException as e: - self.logger.exception( - 'Network Exception while creating vip %s' % (name)) - return None - # end create_vip - - def delete_vip(self, vip_id): - '''Delete the vip''' - self.obj.delete_vip(vip_id) - - def update_vip(self, vip_id, vip_dict): - '''Update vip usign vip_dict. Returns the updated object as dict''' - vip_resp = None - try: - vip_resp = self.obj.update_vip(hm_id, vip_dict) - except CommonNetworkClientException as e: - self.logger.error( - "NetworkClient Exception while updating vip" + str(e)) - return vip_resp - # end update_vip_resp - - def show_vip(self, vip_id=None, name=None): - '''Returns the vip object using id. 
If not found, returns None''' - try: - if vip_id: - return self.obj.show_vip(vip_id)['vip'] - elif name: - return self.obj.list_vips(name=name, tenant_id=self.project_id)['vips'][0] - except: - self.logger.debug('Get vip on %s/%s failed' % (vip_id, name)) - return None - # end show_vip - - def list_vips(self): - '''List the vips in this tenant''' - try: - vip_list = self.obj.list_vips() - except CommonNetworkClientException as e: - self.logger.error('List vips failed') - return None - return vip_list['vips'] - - def create_lb_member(self, ip_address, protocol_port, pool_id): - '''Create lb member. Returns the created lb member as dict''' - member_dict = {'address':ip_address, - 'protocol_port':protocol_port, - 'pool_id':pool_id} - try: - member_resp = self.obj.create_member({'member': member_dict}) - return member_resp['member'] - except CommonNetworkClientException as e: - self.logger.exception('Network Exception while creating LB member with address %s' % (ip_address)) - return None - - def delete_lb_member(self, lb_member_id): - '''Delete the lb member''' - member_resp = self.obj.delete_member(lb_member_id) - self.logger.debug('Response for delete_member : ' + repr(member_resp)) - # end delete_lb_member - - def update_lb_member(self, lb_member_id, lb_member_dict): - '''Update lb member using lb_member_dict. 
- Returns the updated object ''' - pass - - def list_lb_members(self, **kwargs): - '''Returns a list of lb member objects in the tenant''' - try: - member_list = self.obj.list_members(**kwargs) - except CommonNetworkClientException as e: - self.logger.error('List member failed') - return None - return member_list['members'] - - def show_lb_member(self, lb_member_id, **kwargs): - '''Returns the lb member dict ''' - try: - member_obj = self.obj.show_member(lb_member_id, **kwargs) - return member_obj['member'] - except CommonNetworkClientException as e: - self.logger.debug('show member on %s failed' % (lb_member_id)) - return None - # end show_lb_member - -# end QuantumHelper diff --git a/fixtures/router_fixture.py b/fixtures/router_fixture.py deleted file mode 100644 index 7b566ff37..000000000 --- a/fixtures/router_fixture.py +++ /dev/null @@ -1,615 +0,0 @@ -import vnc_api_test -from compute_node_test import ComputeNodeFixture -from tcutils.util import get_random_name, retry - -class LogicalRouterFixture(vnc_api_test.VncLibFixture): - - '''Fixture to handle Logical Router object - - Optional: - :param name : name of the logical router - :param uuid : UUID of the logical router - One of router name or router id is mandatory - :param public_vn_id : uuid of the public gateway network - :param private: dict of list of private vn_ids or subnet_ids or port_ids - {'vns': ['...', '...'], 'subnets': ['...'], 'ports':['...']} - :param api_type : one of 'neutron'(default) or 'contrail' - - Inherited optional parameters: - :param domain : default is default-domain - :param project_name : default is admin - :param cfgm_ip : default is 127.0.0.1 - :param api_port : default is 8082 - :param connections : ContrailConnections object. 
default is None - :param username : default is admin - :param password : default is contrail123 - :param auth_server_ip : default is 127.0.0.1 - - - @staticmethod - def __new__ (cls, api_type='neutron', **kwargs): - cls.__bases__ = (eval(api_type), ) + cls.__bases__ - return super(LogicalRouterFixture, cls).__new__(cls, **kwargs) - ''' - - def __init__(self, **kwargs): - super(LogicalRouterFixture, self).__init__(self, **kwargs) - self.name = kwargs.get('name', get_random_name('Router')) - self.uuid = kwargs.get('uuid', None) - self.public_vn_id = kwargs.get('public_vn_id', None) - self.private = kwargs.get('private', None) - self.api_type = kwargs.get('api_type', 'neutron') - self.already_present = False - self.ports = []; self.vns = []; self.subnets = [] - self.deleted_vn_ids = [] - self.is_gw_active = False - - # temporary place till vnc_api_test is re-done - super(LogicalRouterFixture, self).setUp() - self.network_h = self.get_network_handle() - self.vnc_api_h = self.get_handle() - - if self.uuid: - self.read(self.uuid) - self.parent_fq_name = [self.domain, self.project_name] - self.fq_name = self.parent_fq_name + [self.name] - self.parent_type = 'project' - - def setUp(self): - super(LogicalRouterFixture, self).setUp() - self.create() - - def cleanUp(self): - super(LogicalRouterFixture, self).cleanUp() - if (self.already_present or self.inputs.fixture_cleanup == 'no') and\ - self.inputs.fixture_cleanup != 'force': - self.logger.info('Skipping deletion of Logical Router %s :' - %(self.fq_name)) - else: - self.delete() - - def get_network_handle(self): - if self.api_type == 'contrail': - return self.get_handle() - else: - return self.get_neutron_handle() - - def read(self, uuid): - self.logger.debug('Fetching information about Logical Router %s'%uuid) - self.obj = self.network_h.get_router(uuid) - self.uuid = self.obj.get('id', None) or getattr(self.obj, 'uuid', None) - self.name = self.obj.get('name', None) or getattr(self.obj, 'name',None) - public_vn_id = 
self.obj.get('external_gateway_info', None) and \ - self.obj['external_gateway_info'].get('network_id', None) - if public_vn_id: - self.public_vn_id = public_vn_id - ports = self.network_h.get_router_interfaces(self.uuid) - self.vn_ids = [self.network_h.get_vn_of_port(port['id']) - for port in ports] - self.logger.info('LR: %s, members: %s, gw: %s'%(self.name, - self.get_vn_ids(), self.public_vn_id)) - - def create(self): - try: - self.obj = self.network_h.get_router(name=self.name) - self.uuid = self.obj.get('id') - self.read(self.uuid) - self.already_present = True - self.logger.info('Logical router %s is already present'%self.name) - except: - self.logger.info('Creating Logical router %s'%self.name) - self.obj = self.network_h.create_router(self.name) - self.uuid = self.obj.get('id', None) or getattr(self, 'id', None) - pre_vn_ids = self.get_vn_ids() - if self.private: - for vn_id in self.private.get('vns', []): - if not (pre_vn_ids and vn_id in pre_vn_ids): - self.add_interface(vn_id=vn_id) - for port_id in self.private.get('ports', []): - if not (pre_vn_ids and self.network_h.get_vn_of_port(port_id)\ - in pre_vn_ids): - self.add_interface(port_id=port_id) - for subnet_id in self.private.get('subnets', []): - if not (pre_vn_ids and \ - self.network_h.get_vn_of_subnet(subnet_id) in pre_vn_ids): - self.add_interface(subnet_id=subnet_id) - if self.public_vn_id: - self.set_gw() - self.logger.info('LR: %s, members: %s, gw: %s'%(self.name, - self.get_vn_ids(), self.public_vn_id)) - - def add_interface(self, port_id=None, vn_id=None, subnet_id=None): - self.network_h.add_router_interface(self.uuid, port_id=port_id, - vn_id=vn_id, subnet_id=subnet_id) - if port_id: - vn_id = self.network_h.get_vn_of_port(port_id) - if subnet_id: - vn_id = self.network_h.get_vn_of_subnet(subnet_id) - self.vn_ids.append(vn_id) - - def remove_interface(self, port_id=None, vn_id=None, subnet_id=None): - self.network_h.delete_router_interface(self.uuid, port_id=port_id, - vn_id=vn_id, 
subnet_id=subnet_id) - if port_id: - vn_id = self.network_h.get_vn_of_port(port_id) - if subnet_id: - vn_id = self.network_h.get_vn_of_subnet(subnet_id) - self.vn_ids.remove(vn_id) - - def set_gw(self, gw=None): - self.public_vn_id = gw or self.public_vn_id - self.network_h.router_gateway_set(self.uuid, self.public_vn_id) - self.dyn_ri_on_left = None - self.active_vm = None - self.snat_ip = None - self.label = None - self.is_gw_active = True - - def clear_gw(self): - self.network_h.router_gateway_clear(self.uuid) - self.is_gw_active = False - - def reset_gw(self, gw=None): - self.clear_gw() - self.set_gw(gw) - - def delete(self, verify=False): - self.logger.info('Deleting LogicalRouter %s(%s)'%(self.name, self.uuid)) - self.deleted_vn_ids = list(self.get_vn_ids()) - for vn_id in list(self.vn_ids): - self.remove_interface(vn_id=vn_id) - self.network_h.delete_router(self.uuid) - if getattr(self, 'verify_is_run', None) or verify: - assert self.verify_on_cleanup() - self.uuid = None - - @retry(6, 10) - def verify_lr_rt_not_in_vns_in_api_server(self): - self.api_h = self.connections.api_server_inspect - for vn in self.deleted_vn_ids: - rt = list() - for ri in self.api_h.get_cs_routing_instances(vn, - refresh=True)['routing_instances']: - rt.extend([x['to'][0] for x in ri['routing-instance']['route_target_refs']]) - if self.route_target in rt: - self.logger.warn('RT(%s) of LR is not yet deleted from' - ' VN %s'%(self.route_target, vn)) - return False - self.logger.debug('LR rt refs is removed from all private RIs') - return True - - @retry(6, 10) - def verify_auto_vn_deleted_in_api_server(self): - self.api_h = self.connections.api_server_inspect - if self.api_h.get_cs_vn(self.parent_fq_name[0], - self.parent_fq_name[1], - self.get_auto_vn_name(), - refresh=True): - self.logger.warn('auto vn(%s) is not deleted yet' - %self.get_auto_vn_name()) - return False - self.logger.debug('auto vn %s got deleted'%self.get_auto_vn_name()) - return True - - @retry(6, 10) - def 
verify_dyn_ri_on_public_deleted_in_api_server(self): - self.api_h = self.connections.api_server_inspect - vn_obj = self.api_h.get_cs_vn_by_id(self.public_vn_id, refresh=True) - ris = [':'.join(ri) for ri in vn_obj.ri_refs() - if ri[-1].endswith('si_%s'%self.uuid)] - if ris: - self.logger.warn('Dynamic RI on gateway vn is not deleted') - return False - self.logger.debug('Dynamic RI on gateway vn is deleted') - return True - - @retry(6, 10) - def verify_lr_got_deleted_in_api_server(self): - self.api_h = self.connections.api_server_inspect - if self.api_h.get_lr(uuid=self.uuid, refresh=True): - self.logger.warn('LR is still not deleted') - return False - else: - self.logger.info('config: LR got deleted') - return True - - def verify_not_in_api_server(self): - self.logger.info('Verify LR(%s) not in api server'%self.uuid) - if self.deleted_vn_ids: - assert self.verify_lr_rt_not_in_vns_in_api_server() - if self.public_vn_id: - assert self.verify_auto_vn_deleted_in_api_server() - assert self.verify_dyn_ri_on_public_deleted_in_api_server() - assert self.verify_lr_got_deleted_in_api_server() - return True - - @retry(6, 10) - def verify_snat_ip_got_deleted_in_agent(self): - vn_fqname = self.id_to_fq_name(self.public_vn_id) - active_vr = self.get_active_vrouter() - inspect_h = self.connections.agent_inspect[active_vr] - route = inspect_h.get_vna_active_route(ip=self.get_snat_ip(), - prefix='32', - vn_fq_name=':'.join(vn_fqname)) - if route: - self.logger.warn('snat gw ip is still found on public net') - return False - self.logger.debug('snat gw ip %s deleted on public'%self.get_snat_ip()) - return True - - def verify_not_in_agent(self): - assert self.verify_snat_ip_got_deleted_in_agent() - return True - - @retry(6, 10) - def verify_not_in_control_node(self): - vn_fqname = self.id_to_fq_name(self.public_vn_id) - ri_fqname = vn_fqname + vn_fqname[-1:] - for ctrl_node in self.inputs.bgp_ips: - cn_inspect = self.connections.cn_inspect[ctrl_node] - routes = 
cn_inspect.get_cn_route_table_entry( - prefix=self.get_snat_ip(), - ri_name=':'.join(ri_fqname)) - if routes: - self.logger.warn('ctrl node %s: gw ip %s not deleted in RI %s' - %(ctrl_node, self.get_snat_ip(), ri_fqname)) - return False - return True - - def verify_on_cleanup(self): - assert self.verify_not_in_api_server() - if self.public_vn_id: - assert self.verify_not_in_agent() - assert self.verify_not_in_control_node() - self.logger.info('LR(%s): verify_on_cleanup passed'%self.uuid) - return True - - def verify_on_setup(self): - assert self.verify_in_api_server() - if self.is_gw_active: - assert self.verify_in_agent() - assert self.verify_in_control_node() - self.logger.info('LR(%s): verify_on_setup passed'%self.uuid) - self.verify_is_run = True - return True - - def get_si_name(self): - return 'si_'+self.uuid - - def get_table_name(self): - return 'rt_'+self.uuid - - def get_auto_vn_name(self): - return 'snat-si-left_'+self.get_si_name() - - def get_rt(self): - if not getattr(self, 'route_target', None): - self.api_h = self.connections.api_server_inspect - rts = self.api_h.get_lr(uuid=self.uuid, refresh=True).get_rt() - self.route_target = rts[0] if rts else None - return self.route_target - - def get_vms(self): - self.api_h = self.connections.api_server_inspect - return self.api_h.get_cs_si(self.parent_fq_name[0], - self.parent_fq_name[1], - self.get_si_name(), True).get_vms() - - def get_active_standby_instance(self): - self.active_vm = None; self.standby_vm = None - self.api_h = self.connections.api_server_inspect - for vm_id in self.get_vms(): - vmis = self.api_h.get_cs_vmi_of_vm(vm_id, refresh=True) - pref = vmis[0].properties('local_preference') - if pref == 200: - self.active_vm = vm_id - else: - self.standby_vm = vm_id - return (self.active_vm, self.standby_vm) - - def get_standby_instance(self): - if not getattr(self, 'standby_vm', None): - self.get_active_standby_instance() - if not self.standby_vm: - self.logger.warn('Unable to get standby vm for 
LR %s'%self.uuid) - return self.standby_vm - - def get_active_instance(self): - if not getattr(self, 'active_vm', None): - self.get_active_standby_instance() - if not self.active_vm: - self.logger.warn('Unable to get active vm for LR %s'%self.uuid) - return self.active_vm - - def get_vn_ids(self, refresh=False): - return self.vn_ids - - def get_snat_ip(self): - if not getattr(self, 'snat_ip', None): - self.snat_ip = None - self.api_h = self.connections.api_server_inspect - active_vm = self.get_active_instance() - if active_vm: - for iip in self.api_h.get_cs_instance_ips_of_vm(active_vm): - if iip.vn_uuid == self.public_vn_id: - self.snat_ip = iip.ip - break - if not self.snat_ip: - self.logger.warn('Unable to get gw ip for LR %s'%self.uuid) - return self.snat_ip - - @retry(6, 10) - def verify_rt_import_on_private_vns_in_api_server(self): - self.api_h = self.connections.api_server_inspect - for vn in self.get_vn_ids(): - rt = list() - for ri in self.api_h.get_cs_routing_instances(vn)['routing_instances']: - rt.extend([x['to'][0] - for x in ri['routing-instance']['route_target_refs']]) - if self.get_rt() not in rt: - self.logger.warn('RT of LR is not imported by VN %s'%vn) - self.logger.warn('Expected: %s, actual: %s'%(self.get_rt(), rt)) - return False - self.logger.debug('Logical router rt is imported by all private RIs') - return True - - @retry(6, 10) - def verify_dyn_ri_on_public_in_api_server(self): - self.api_h = self.connections.api_server_inspect - vn_obj = self.api_h.get_cs_vn_by_id(self.public_vn_id) - ris = [':'.join(ri) for ri in vn_obj.ri_refs() - if ri[-1].endswith('si_%s'%self.uuid)] - if not ris: - self.logger.warn('Dynamic RI on gateway vn is not available') - return False - self.logger.debug('Dynamic RI on gateway vn is created') - return True - - def get_dyn_ri_on_auto_vn(self): - if not getattr(self, 'dyn_ri_on_left', None): - self.api_h = self.connections.api_server_inspect - vn_obj = self.api_h.get_cs_vn(self.parent_fq_name[0], - 
self.parent_fq_name[1], - self.get_auto_vn_name(), - refresh=True) - ris = [':'.join(ri) for ri in vn_obj.ri_refs() - if ri[-1].endswith('si_%s'%self.uuid)] - self.dyn_ri_on_left = ris[0] - return self.dyn_ri_on_left - - @retry(6, 10) - def verify_dyn_ri_on_auto_vn_in_api_server(self): - try: - ri = self.get_dyn_ri_on_auto_vn() - self.logger.debug('Dynamic RI on auto vn is %s'%ri) - except: - self.logger.warn('Dynamic RI on auto vn is not available') - return False - return True if ri else False - - @retry(6, 10) - def verify_route_tables_in_api_server(self): - self.api_h = self.connections.api_server_inspect - self.route_table_fqname = self.parent_fq_name + [self.get_table_name()] - if not self.get_vn_ids(): - return True - for vn_id in self.get_vn_ids(): - route_table = self.api_h.get_cs_vn_by_id(vn_id, - refresh=True).route_table() - if self.route_table_fqname != route_table['to']: - self.logger.warn('Route tables attached to VN %s is not as ' - 'expected. Expecting %s: Got %s'%(vn_id, - self.route_table_fqname, route_table['to'])) - return False - rt_uuid = route_table['uuid'] - for route in self.api_h.get_route_table(rt_uuid).get_route(): - if route['prefix'] != "0.0.0.0/0": - self.logger.warn('Dont see a static route under route_table %s' - %(self.route_table_fqname)) - return False - self.logger.debug('RouteTable with static route is imported by all VNs') - return True - - def verify_in_api_server(self): - self.logger.debug('LR: Started verify on api server for %s'%self.name) - if self.get_vn_ids(): - assert self.verify_rt_import_on_private_vns_in_api_server() - if self.is_gw_active: - assert self.verify_route_tables_in_api_server() - assert self.verify_dyn_ri_on_auto_vn_in_api_server() - assert self.verify_dyn_ri_on_public_in_api_server() - assert self.verify_instance_launched() - self.logger.debug('LR: passed verification in api server') - return True - - @retry(6, 10) - def verify_instance_launched(self, refresh=False): - svc_mon_h = 
self.connections.get_svc_mon_h(refresh) - if svc_mon_h.get_service_instance(name=self.get_si_name(), - refresh=refresh).is_launched(): - self.logger.debug('LR: SI got launched') - return True - self.logger.warn('LR(%s): SI status is not active in svc-mon'%self.uuid) - return False - - def get_active_vrouter(self, refresh=False): - if not getattr(self, 'active_vr', None) or refresh: - svc_mon_h = self.connections.get_svc_mon_h(refresh) - try: - self.active_vr = self.inputs.get_host_ip( - svc_mon_h.get_service_instance( - name=self.get_si_name(), - refresh=refresh).active_vrouter()) - if self.active_vr.lower() == 'none': - self.active_vr = None - except: - self.logger.warn('Fail to get vrouter for active snat') - self.active_vr = None - return self.active_vr - - def get_standby_vrouter(self, refresh=False): - if not getattr(self, 'standby_vr', None) or refresh: - svc_mon_h = self.connections.get_svc_mon_h(refresh) - try: - self.standby_vr = self.inputs.get_host_ip( - svc_mon_h.get_service_instance( - name=self.get_si_name(), - refresh=refresh).standby_vrouter()) - if self.standby_vr.lower() == 'none': - self.standby_vr = None - except: - self.logger.warn('Fail to get vrouter for standby snat') - self.standby_vr = None - return self.standby_vr - - @retry(6, 10) - def verify_default_route_on_auto_vn_in_agent(self): - active_vr = self.get_active_vrouter(refresh=True) - if not active_vr: - self.logger.warn('LR(%s): unable to find active vr'%self.uuid) - return False - inspect_h = self.connections.agent_inspect[active_vr] - route = inspect_h.get_vna_active_route(ip='0.0.0.0', prefix='0', - vn_fq_name=':'.join(self.parent_fq_name + - [self.get_auto_vn_name()])) - if not route: - self.logger.warn('LR: Agent: Unable to find default' - ' route in auto created VN') - return False - self.logger.debug('Agent: Auto VN has default route') - return True - - def get_snat_label(self, intf='right'): - if not getattr(self, 'label', None) or \ - not self.label.get(intf, None): - 
self.label = dict() - right_ip = self.get_snat_ip(); left_ip = '100.64.0.4' - vm_id = self.get_active_instance() - active_vr = self.get_active_vrouter() - if not (right_ip and active_vr and vm_id): - self.logger.warn('LR: Unable to fetch either of gw_ip ' - ' or active vm/vrouter info') - return None - inspect_h = self.connections.agent_inspect[active_vr] - if vm_id: - vmis = inspect_h.get_vna_tap_interface_by_vm(vm_id) - if vmis: - self.label['right'] = [vmi['label'] for vmi in vmis - if vmi['ip_addr'] ==right_ip][0] - self.label['left'] = [vmi['label'] for vmi in vmis - if vmi['ip_addr'] == left_ip][0] - if not self.label[intf]: - self.logger.warn('LR: Unable to fetch label of %s intf'%intf) - return self.label[intf] - - @retry(6, 10) - def verify_snat_ip_on_public_in_agent(self): - vn_fqname = self.id_to_fq_name(self.public_vn_id) - label = self.get_snat_label() - active_vr = self.get_active_vrouter() - if not (active_vr and label): - self.logger.warn('LR: Unable to fetch either of label ' - ' or active vrouter info') - return None - inspect_h = self.connections.agent_inspect[active_vr] - route = inspect_h.get_vna_active_route(ip=self.get_snat_ip(), - prefix='32', - vn_fq_name=':'.join(vn_fqname)) - if not route or label != route['path_list'][0]['active_label']: - self.logger.warn('LR: agent: label doesnt match for gw ip %s, ' - 'expected %s: actual %s'%(self.get_snat_ip(), - label, route['path_list'][0]['active_label'] - if route else None)) - return False - return True - - def verify_in_agent(self): - self.logger.debug('Verifying LR(%s) on agent'%(self.uuid)) - assert self.verify_default_route_on_auto_vn_in_agent() - assert self.verify_snat_ip_on_public_in_agent() - self.logger.debug('LR(%s) verfication on agent passed'%(self.uuid)) - return True - - def get_ctrl_nodes(self, ri_name): - rt_list = [] - peer_list = [] - ri = self.vnc_api_h.routing_instance_read(fq_name=ri_name) - rt_list = [rt['to'][0] for rt in ri.get_route_target_refs()] - ctrl_node = 
ComputeNodeFixture(self.connections, - self.get_active_vrouter() - ).get_active_controller() - cn_inspect = self.connections.cn_inspect[ctrl_node] - peer_list.append(ctrl_node) - for rt in rt_list: - rt_group_entry = cn_inspect.get_cn_rtarget_group(rt) - if rt_group_entry['peers_interested'] is not None: - for peer in rt_group_entry['peers_interested']: - if peer in self.inputs.host_names: - peer = self.inputs.get_host_ip(peer) - peer_list.append(peer) - else: - self.logger.info('%s is not defined as a control node' - ' in the topology' % peer) - return list(set(peer_list)) - - @retry(6, 10) - def verify_static_route_in_cn(self): - ri_fqname = self.parent_fq_name + [self.get_auto_vn_name(), - self.get_auto_vn_name()] - exp_label = self.get_snat_label(intf='left') - if not exp_label: - self.logger.warn('LR: Unable to fetch left intf label') - return False - for ctrl_node in self.get_ctrl_nodes(ri_fqname): - cn_inspect = self.connections.cn_inspect[ctrl_node] - routes = cn_inspect.get_cn_route_table_entry(prefix='0.0.0.0/0', - ri_name=':'.join(ri_fqname)) - if not routes: - self.logger.warn('LR: Unable to find static route on auto VN') - return False - for route in routes: - if route['label'] != exp_label: - self.logger.warn('label(%s) doesnt match expected(%s)' - %(route['label'], exp_label)) - return False - return True - - def verify_in_control_node(self): - self.logger.debug('Verifying LR(%s) in control node'%(self.uuid)) - assert self.verify_static_route_in_cn() - self.logger.debug('LR(%s) verfication in ctrl node passed'%(self.uuid)) - return True - -def setup_test_infra(): - import logging - from common.contrail_test_init import ContrailTestInit - from common.connections import ContrailConnections - from common.log_orig import ContrailLogger - logging.getLogger('urllib3.connectionpool').setLevel(logging.WARN) - logging.getLogger('paramiko.transport').setLevel(logging.WARN) - logging.getLogger('keystoneclient.session').setLevel(logging.WARN) - 
logging.getLogger('keystoneclient.httpclient').setLevel(logging.WARN) - logging.getLogger('neutronclient.client').setLevel(logging.WARN) - logger = ContrailLogger('event') - logger.setUp() - mylogger = logger.logger - inputs = ContrailTestInit('./sanity_params.ini', logger=mylogger) - connections = ContrailConnections(inputs=inputs, logger=mylogger) - return connections - -if __name__ == "__main__": - obj = LogicalRouterFixture(api_type='neutron', name='Router', connections=setup_test_infra(), public_vn_id='ed8b6b51-1259-4437-a6ab-bf26f5f0276d', private={'vns': ['4b39a2bd-4528-40e8-b848-28084e59c944', 'c92957fb-22df-49ed-a1ea-d766ebbf05ae']}) - obj.setUp() - #obj = LogicalRouterFixture(api_type='neutron', uuid='a8395987-8882-41b4-898f-e43085c0f889', connections=setup_test_infra()) - obj.verify_on_setup() - obj.clear_gw() - obj.verify_on_setup() - obj.set_gw() - obj.verify_on_setup() - obj.reset_gw() - obj.verify_on_setup() - obj.remove_interface(vn_id='4b39a2bd-4528-40e8-b848-28084e59c944') - obj.verify_on_setup() - obj.add_interface(vn_id='4b39a2bd-4528-40e8-b848-28084e59c944') - obj.verify_on_setup() - obj.cleanUp() diff --git a/fixtures/sdn_ui_topo_setup.py b/fixtures/sdn_ui_topo_setup.py deleted file mode 100644 index 785c76110..000000000 --- a/fixtures/sdn_ui_topo_setup.py +++ /dev/null @@ -1,156 +0,0 @@ -import os -import copy -from common.openstack_libs import nova_client as mynovaclient -from common.openstack_libs import nova_exception as novaException -import fixtures -import testtools -from tcutils.topo import topo_steps -from common.contrail_test_init import ContrailTestInit -from vn_test import * -from vn_policy_test import * -from quantum_test import * -from vnc_api_test import * -from nova_test import * -from vm_test import * -from common.connections import ContrailConnections -from floating_ip import * -from policy_test import * -from contrail_fixtures import * -from tcutils.agent.vna_introspect_utils import * -from tcutils.topo.topo_helper import * 
-from vnc_api import vnc_api -from vnc_api.gen.resource_test import * -try: - from webui_test import * - from tcutils.topo import ui_topo_steps -except ImportError: - pass - - -class sdnUiTopoSetupFixture(fixtures.Fixture): - - def __init__(self, connections, topo): - self.ini_file = os.environ.get('TEST_CONFIG_FILE') - self.connections = connections - self.inputs = self.connections.inputs - self.quantum_h = self.connections.quantum_h - self.nova_h = self.connections.nova_h - self.vnc_lib = self.connections.vnc_lib - self.logger = self.inputs.logger - self.topo = topo - if self.inputs.verify_thru_gui(): - self.browser = self.connections.browser - self.browser_openstack = self.connections.browser_openstack - self.webui = WebuiTest(self.connections, self.inputs) - # end __init__ - - def setUp(self): - super(sdnUiTopoSetupFixture, self).setUp() - # end setUp - - def topo_setup( - self, - config_option='openstack', - skip_verify='no', - flavor='contrail_flavor_small', - vms_on_single_compute=False, - VmToNodeMapping=None): - self.result = True - self.err_msg = [] - self.flavor = flavor - self.skip_verify = skip_verify - self.public_vn_present = False - self.fvn_vm_map = False - self.fvn_fixture = None - self.fip_fixture = None - self.si_fixture = {} - self.fip_fixture_dict = { - } - self.config_option = config_option - self.secgrp_fixture = None - topo_helper_obj = topology_helper(self.topo) - self.topo.vmc_list = topo_helper_obj.get_vmc_list() - self.topo.policy_vn = topo_helper_obj.get_policy_vn() - self.logger.info("Starting setup") - topo_steps.createUser(self) - topo_steps.createProject(self) - # end topo_setup - - def create_security_group(self): - assert topo_steps.createSec_group(self) - return True - # end create_security_group - - def create_svc_instance(self): - assert topo_steps.createServiceInstance(self) - return True - # end create_svc_instance - - def create_policy(self): - assert topo_steps.createPolicy(self) - return True - # end create_policy - - def 
attach_policy_to_vn(self, option='openstack'): - assert topo_steps.attachPolicytoVN(self, option) - return True - # end attach_policy_to_vn - - def create_vm(self): - assert topo_steps.createVMNova(self) - return True - # end create_vm - - def create_svc_template(self): - assert topo_steps.createServiceTemplate(self) - return True - # end create_svc_template - - def create_dns_server(self): - assert ui_topo_steps.createDnsServer(self) - return True - # end create_dns_server - - def create_dns_record(self): - assert ui_topo_steps.createDnsRecord(self) - return True - # end create_dns_record - - def create_ipam(self): - assert topo_steps.createIPAM(self) - return True - # end create_ipam - - def create_vn(self): - assert topo_steps.createVN(self) - return True - # end create_vn - - def create_floating_ip(self): - assert topo_steps.allocNassocFIP(self) - return True - # end create_floating_ip - - def create_port(self): - assert ui_topo_steps.createPort(self) - return True - # end create_port - - def create_router(self): - assert ui_topo_steps.createRouter(self) - return True - # end create_router - - def create_security_group(self, option='contrail'): - assert topo_steps.createSec_group(self, option) - return True - # end create_security_group - - def cleanUp(self): - if self.inputs.fixture_cleanup == 'yes': - super(sdnUiTopoSetupFixture, self).cleanUp() - else: - self.logger.info('Skipping sdn topology config cleanup') - # end cleanUp - -# end sdnSetupFixture diff --git a/fixtures/security_group.py b/fixtures/security_group.py deleted file mode 100644 index ab2d073a3..000000000 --- a/fixtures/security_group.py +++ /dev/null @@ -1,329 +0,0 @@ -import fixtures - -from vnc_api.vnc_api import NoIdError -from vnc_api.gen.cfixture import ContrailFixture -from vnc_api.gen.resource_xsd import PolicyEntriesType -from vnc_api.gen.resource_test import SecurityGroupTestFixtureGen,\ - ProjectTestFixtureGen, DomainTestFixtureGen - -from tcutils.util import retry -try: - from 
webui_test import * -except ImportError: - pass - -class SecurityGroupFixture(ContrailFixture): - - def __init__( - self, inputs, connections, domain_name=None, project_name=None, secgrp_name=None, - uuid=None, secgrp_entries=None,option='orch'): - #option <'orch' or 'contrail'> - self.connections = connections - self.inputs = connections.inputs - self.logger = connections.logger - self.vnc_lib_h = connections.get_vnc_lib_h() - self.api_s_inspect = connections.api_server_inspect - self.domain_name = self.inputs.domain_name - self.project_name = self.inputs.project_name - self.secgrp_name = secgrp_name - self.secgrp_id = uuid - self.secgrp_entries = secgrp_entries - self.already_present = True - self.domain_fq_name = [self.domain_name] - self.project_fq_name = [self.domain_name, self.project_name] - self.project_id = self.connections.get_project_id() - self.secgrp_fq_name = [self.domain_name, - self.project_name, self.secgrp_name] - self.cn_inspect = self.connections.cn_inspect - self.orch = self.connections.orch - self.option = option - self.verify_is_run = False - if self.inputs.verify_thru_gui(): - self.webui = WebuiTest(self.connections, self.inputs) - - def read(self): - if self.secgrp_id: - obj = self.orch.get_security_group(self.secgrp_id) - self.secgrp_fq_name = obj.get_fq_name() - self.secgrp_name = obj.name - - def setUp(self): - super(SecurityGroupFixture, self).setUp() - self.create() - - def create(self): - self.secgrp_id = self.secgrp_id or self.get_sg_id() - if self.secgrp_id: - self.read() - self.logger.info('SG %s(%s) already present, not creating SG'% - (self.secgrp_name, self.secgrp_id)) - else: - self.logger.debug("Creating Security group: %s"%self.secgrp_fq_name) - self.already_present = False - if self.inputs.is_gui_based_config(): - self.webui.create_security_group(self) - else: - self.secgrp_id = self.orch.create_security_group( - sg_name=self.secgrp_name, - parent_fqname=self.project_fq_name, - sg_entries=self.secgrp_entries, - 
option=self.option) - self.logger.info("Created security-group name:%s" % - self.secgrp_name) - - def get_uuid(self): - return self.secgrp_id - - def get_fq_name(self): - return self.secgrp_fq_name - - def delete_all_rules(self, sg_id): - #deletes all the rules of the sg sg_id - self.orch.delete_security_group_rules(sg_id=sg_id, project_id=self.project_id, option=self.option) - - def create_sg_rule(self, sg_id, secgrp_rules=None): - return self.orch.set_security_group_rules(sg_id=sg_id, sg_entries=secgrp_rules, option=self.option) - - def cleanUp(self): - super(SecurityGroupFixture, self).cleanUp() - self.delete() - - def delete(self, verify=False): - self.logger.debug("Deleting Security group: %s", self.secgrp_fq_name) - do_cleanup = True - if self.inputs.fixture_cleanup == 'no': - do_cleanup = False - if self.already_present: - do_cleanup = False - if self.inputs.fixture_cleanup == 'force': - do_cleanup = True - if do_cleanup: - if self.inputs.is_gui_based_config(): - self.webui.delete_security_group(self) - else: - self.orch.delete_security_group(sg_id=self.secgrp_id, option=self.option) - if self.verify_is_run or verify: - result, msg = self.verify_on_cleanup() - assert result, msg - else: - self.logger.info('Skipping deletion of security_group %s' % - (self.secgrp_fq_name)) - - def add_rule(self, rule): - """Add a rule to this security group.""" - pass - - def delete_rule(self, rule): - """Remove a rule from this security group.""" - pass - - def replace_rules(self, rules,exp='pass'): - """Replace all the rules of this security group with the rules list.""" - self.logger.info( - "Replace all the rules of this security group %s with the new rules" % - self.secgrp_name) - self.logger.debug(rules) - self.orch.set_security_group_rules(sg_id=self.secgrp_id, sg_entries=rules, option=self.option) - - @retry(delay=2, tries=5) - def verify_secgrp_in_api_server(self): - """Validate security group information in API-Server.""" - #verify if sg present in api - 
self.api_s_secgrp_obj = self.api_s_inspect.get_cs_secgrp( - domain=self.domain_name, project=self.project_name, - secgrp=self.secgrp_name, refresh=True) - if not self.api_s_secgrp_obj: - errmsg = "Security group %s not found in the API Server" % self.secgrp_name - self.logger.warn(errmsg) - return False, errmsg - else: - self.logger.info( - "Security group %s found in the API Server", self.secgrp_name) - - #verify if sg acls present in api - self.api_s_acls = self.api_s_inspect.get_secgrp_acls_href( - domain=self.domain_name, project=self.project_name, - secgrp=self.secgrp_name, refresh=True) - if not self.api_s_acls: - errmsg = "ACLs for Security group %s not found in the API Server" % self.secgrp_name - self.logger.warn(errmsg) - return False, errmsg - else: - self.logger.info( - "ACLs for Security group %s found in the API Server", self.secgrp_name) - - return True, None - - def verify_on_setup(self): - self.verify_is_run = True - try: - secgrp = self.vnc_lib_h.security_group_read( - fq_name=self.secgrp_fq_name) - self.logger.debug( - "Security group: %s created succesfully", self.secgrp_fq_name) - except NoIdError: - errmsg = "Security group: %s not created." % self.secgrp_fq_name - self.logger.warn(errmsg) - return False, errmsg - - retval, errmsg = self.verify_secgrp_in_api_server() - if not retval: - return False, errmsg - retval = self.verify_secgrp_in_control_nodes() - if not retval: - errmsg = "Security group: %s not found in control node." 
% self.secgrp_fq_name - return False, errmsg - - return True, None - - @retry(delay=2, tries=5) - def verify_secgrp_not_in_api_server(self): - """Validate security group information in API-Server.""" - #verify if sg is removed from api - self.api_s_secgrp_obj = self.api_s_inspect.get_cs_secgrp( - domain=self.domain_name, project=self.project_name, - secgrp=self.secgrp_name, refresh=True) - if self.api_s_secgrp_obj: - errmsg = "Security group %s still found in the API Server" % self.secgrp_name - self.logger.warn(errmsg) - return False, errmsg - else: - self.logger.info( - "Security group %s removed from the API Server", self.secgrp_name) - - #verify if sg acls removed from api - self.api_s_acls = self.api_s_inspect.get_secgrp_acls_href( - domain=self.domain_name, project=self.project_name, - secgrp=self.secgrp_name, refresh=True) - if self.api_s_acls: - errmsg = "ACLs for Security group %s still found in the API Server" % self.secgrp_name - self.logger.warn(errmsg) - self.logger.debug("ACLs found for SG %s are: %s" %(self.secgrp_name, self.api_s_acls)) - return False, errmsg - else: - self.logger.info( - "ACLs for Security group %s removed from the API Server", self.secgrp_name) - - return True, None - - def verify_on_cleanup(self): - try: - secgroup = self.vnc_lib_h.security_group_read( - fq_name=self.secgrp_fq_name) - errmsg = "Security group: %s still not removed" % self.secgrp_fq_name - self.logger.warn(errmsg) - return False, errmsg - except NoIdError: - self.logger.info("Security group: %s deleted successfully." 
% - self.secgrp_fq_name) - - errmsg = "Security group: %s still not removed" % self.secgrp_fq_name - retval, msg = self.verify_secgrp_not_in_api_server() - if not retval: - return False, errmsg - retval = self.verify_secgrp_not_in_control_nodes() - if not retval: - return False, errmsg - - return True, None - - def get_sg_id(self): - try: - secgroup = self.vnc_lib_h.security_group_read( - fq_name=self.secgrp_fq_name) - self.secgrp_id = secgroup.uuid - except NoIdError: - return None - return self.secgrp_id - - @retry(delay=2, tries=5) - def verify_secgrp_in_control_nodes(self): - """Validate security group information in control nodes.""" - - for cn in self.inputs.bgp_ips: - #verify if sg present in control nodes - cn_secgrp_obj = self.cn_inspect[cn].get_cn_sec_grp( - domain=self.domain_name, - project=self.project_name, - secgrp=self.secgrp_name) - if not cn_secgrp_obj: - self.logger.warn( - 'security group %s not present in Control-node %s' % - (self.secgrp_name, cn)) - return False - else: - self.logger.info( - "Security group %s found in the control node %s" % (self.secgrp_name, cn)) - - #verify if sg acls present in control nodes - cn_secgrp_obj = self.cn_inspect[cn].get_cn_sec_grp_acls( - domain=self.domain_name, - project=self.project_name, - secgrp=self.secgrp_name) - if not cn_secgrp_obj: - self.logger.warn( - 'security group %s ACLs not present in Control-node %s' % - (self.secgrp_name, cn)) - return False - else: - self.logger.info( - "Security group %s ACLs found in the control node %s" % (self.secgrp_name, cn)) - - - return True - - - @retry(delay=2, tries=15) - def verify_secgrp_not_in_control_nodes(self): - """Validate security group not present in control nodes.""" - #verify if sg present in control nodes - for cn in self.inputs.bgp_ips: - cn_secgrp_obj = self.cn_inspect[cn].get_cn_sec_grp( - domain=self.domain_name, - project=self.project_name, - secgrp=self.secgrp_name) - if cn_secgrp_obj: - self.logger.warn( - 'security group %s present in 
Control-node %s' % - (self.secgrp_name, cn)) - return False - else: - self.logger.info( - 'security group %s removed from Control-node %s' % - (self.secgrp_name, cn)) - - #verify if sg acls removed from control nodes - cn_secgrp_obj = self.cn_inspect[cn].get_cn_sec_grp_acls( - domain=self.domain_name, - project=self.project_name, - secgrp=self.secgrp_name) - if cn_secgrp_obj: - self.logger.warn( - 'security group %s ACLs still present in Control-node %s' % - (self.secgrp_name, cn)) - return False - else: - self.logger.info( - "Security group %s ACLs removed from the control node %s" % (self.secgrp_name, cn)) - - return True - -def get_secgrp_id_from_name(connections,secgrp_fq_name): - fq_name_list = secgrp_fq_name.split(':') - try: - secgroup = connections.vnc_lib.security_group_read( - fq_name=fq_name_list) - secgrp_id = secgroup.uuid - except NoIdError: - return False - return secgrp_id - -def list_sg_rules(connections,sg_id): - sg_info = show_secgrp(connections,sg_id) - - return sg_info['security_group']['security_group_rules'] - -def show_secgrp(connections,sg_id): - sg_info = connections.quantum_h.show_security_group(sg_id) - - return sg_info diff --git a/fixtures/smgr_common.py b/fixtures/smgr_common.py deleted file mode 100644 index 7316bef09..000000000 --- a/fixtures/smgr_common.py +++ /dev/null @@ -1,1492 +0,0 @@ -import fixtures -from contrail_fixtures import * -import sys -import pdb -import json -import string -import textwrap -import tempfile -import os -import re -import fabric -import ConfigParser -import argparse -import sys -from datetime import datetime as dt -from fabric.api import settings, run -from fabric.api import hosts, env, task -from fabric.api import local, put, get -from fabric.tasks import execute -from os.path import expanduser -import imp -from fabric.state import connections -from time import sleep -from common.contrail_test_init import ContrailTestInit - -from common import log_orig as logging -import logging as std_logging -import 
time - - - -REIMAGE_WAIT=700 -SERVER_RETRY_TIME=150 -PROVISION_TIME = 1800 -RESTART_WAIT=300 -RESTART_MESSAGE = "IPMI reboot operation initiated" -RESTART_OK = "restart issued" -REIMAGE_OK = "reimage queued" -PROVISION_OK = "provision issued" - - - - -class SmgrFixture(fixtures.Fixture): - - ''' Fixture to bring up a vns cluster using server manager . - - ''' - - def __init__(self, inputs, testbed_py="./testbed.py", - smgr_config_ini="./smgr_input.ini", - test_local=False,logger = None): - self.testbed_py = testbed_py - self.testbed = self.get_testbed() - self.smgr_config_ini = smgr_config_ini - self.test_local = test_local - self.params = self.read_ini_file(smgr_config_ini) - self.svrmgr = self.params['svrmgr'] - self.svrmgr_password = self.params['smgr_password'] - self.logger = logger - # end __init__ - - def svrmgr_add_all(self): - self.add_cluster() - self.add_image() - self.add_pkg() - self.add_server() - # end svrmgr_add_all - - - def create_json(self): - self.modify_server_json() - self.modify_cluster_json() - # end create_json - - def modify_server_json(self): - params=self.params - if not params: - return None - if not params.has_key('server_file'): - return None - server_file = params['server_file'] - - timestamp = dt.now().strftime("%Y_%m_%d_%H_%M_%S") - local('cp %s %s.org.%s' %(server_file, server_file, timestamp)) - - in_file = open( server_file, 'r' ) - in_data = in_file.read() - server_dict = json.loads(in_data) - self.update_roles_from_testbed_py(server_dict) - self.update_bond_from_testbed_py(server_dict) - self.update_multi_if_from_testbed_py(server_dict) - - out_file = open(server_file, 'w') - out_data = json.dumps(server_dict, indent=4) - out_file.write(out_data) - out_file.close() - - return server_dict - # end modify_server_json - - def update_roles_from_testbed_py(self, server_dict): - ''' This will update the dict corresponding to server.json with - the roles mentioned in testbed.roledefs. 
It will seamlessly integrate - Server Manager with legacy method where a user has to edit testbed.py only. ''' - - testbed = self.testbed - if not testbed.env.has_key('roledefs'): - return server_dict - for node in server_dict['server']: - roles = [] - for key in testbed.env.roledefs: - if key == 'all' or key == 'build' : - continue - for host_string in testbed.env.roledefs[key]: - ip = getIp(host_string) - if node['ip_address'] == ip: - if key == 'cfgm': - roles.append("config") - else: - roles.append(key) - if not len(roles): - node['roles'] = [ "compute" ] - else: - node['roles'] = roles - - for node in server_dict['server']: - node['cluster_id'] = self.get_pref_cluster_id() - - return server_dict - # end update_roles_from_testbed_py - - def get_compute_node_from_testbed_py(self): - testbed = self.testbed - if not testbed.env.has_key('roledefs'): - return None - return testbed.env.roledefs['compute'] - # end get_compute_node_from_testbed_py - - def get_remaining_node_from_testbed_py(self, test_node): - testbed = self.testbed - remaining_node = ' ' - for node in testbed.env.roledefs['all']: - if node not in test_node: - remaining_node += node - return remaining_node - # end get_remaining_node_from_testbed_py - - def delete_cluster_id_based(self, test_cluster_id=None): - if test_cluster_id is None: - return False - if self.test_local: - local('server-manager delete cluster --cluster_id %s' %test_cluster_id) - else: - with settings(host_string=self.svrmgr, password=self.svrmgr_password, warn_only=True): - run('server-manager delete cluster --cluster_id %s' %test_cluster_id) - run('server-manager show server') - #end delete_cluster_id_based - - def delete_server_id_based(self, test_node_id=None): - if test_node_id is None: - return False - if self.test_local: - local('server-manager delete server --server_id %s' %test_node_id) - else: - with settings(host_string=self.svrmgr, password=self.svrmgr_password, warn_only=True): - run('server-manager delete server 
--server_id %s' %test_node_id) - run('server-manager show server') - #end delete_server_id_based - - def delete_server(self, test_node): - ip = test_node.split('@')[1] - server_dict = self.get_server_with_ip_from_db(ip) - server_id = server_dict['server'][0]['id'] - self.delete_server_id_based(server_id) - - # end delete_server - - def provision_server(self, node): - result = True - svrmgr = self.get_svrmgr() - svrmgr_password = self.svrmgr_password - ip = node.split('@')[1] - server_dict = self.get_server_with_ip_from_db(ip) - server_id = server_dict['server'][0]['id'] - pkg_id = self.get_pkg_id() - with settings(host_string=svrmgr, password=svrmgr_password, warn_only=True): - output = run('server-manager provision -F --server_id %s %s' %(server_id,pkg_id) ) - if PROVISION_OK not in output: - self.logger.error("provision command was not successfull") - result = result and False - run('server-manager status server --server_id %s' %server_id) - return result - - # end provision_server - - def delete_compute_node(self): - cn_list = self.get_compute_node_from_testbed_py() - if cn_list == None: - return None - if len(cn_list) == 1: - return None - if len(cn_list) > 1: - test_node = cn_list[-1] - - self.delete_server(test_node) - return test_node - # end delete_compute_node - - - def update_bond_from_testbed_py(self, server_dict): - testbed = self.testbed - if 'control_data' in dir(testbed): - - for node in server_dict['server']: - for key in testbed.bond: - ip = getIp(key) - if node['ip_address'] == ip: - node['parameters']['setup_interface'] = "Yes" - - name = testbed.bond[key]['name'] - mode = testbed.bond[key]['mode'] - member = testbed.bond[key]['member'] - option = {} - option['miimon'] = '100' - option['mode'] = mode - option['xmit_hash_policy'] = 'layer3+4' - - node['bond']={} - node['bond'][name]={} - node['bond'][name]['bond_options'] = "%s"%option - node['bond'][name]['member'] = "%s"%member - return server_dict - #End 
update_bond_from_testbed_py(server_dict): - - def update_multi_if_from_testbed_py(self, server_dict): - - testbed = self.testbed - if 'control_data' in dir(testbed): - - for node in server_dict['server']: - for key in testbed.control_data: - ip = getIp(key) - if node['ip_address'] == ip: - node['parameters']['setup_interface'] = "Yes" - - ip = testbed.control_data[key]['ip'] - gw = testbed.control_data[key]['gw'] - device = testbed.control_data[key]['device'] - - node['control_data_network']={} - node['control_data_network'][device] = {} - node['control_data_network'][device]['ip_address'] = ip - node['control_data_network'][device]['gateway'] = gw - - return server_dict - - #End update_multi_if_from_testbed_py(server_dict): - - - def get_image_id(self) : - params=self.params - image_file = params['image_file'] - - image_file = open( image_file, 'r' ) - image_data = image_file.read() - image_json = json.loads(image_data) - image_id = image_json['image'][0]['id'] - image_file.close() - return image_id - # end get_image_id() - - def get_pkg_id(self) : - params=self.params - pkg_file = params['pkg_file'] - pkg_file = open( pkg_file, 'r' ) - pkg_data = pkg_file.read() - pkg_json = json.loads(pkg_data) - pkg_id = pkg_json['image'][0]['id'] - pkg_file.close() - return pkg_id - # end get_pkg_id() - - def get_cluster_id(self) : - cluster_id = None - params=self.params - cluster_file = params['cluster_file'] - - cluster_file = open( cluster_file, 'r' ) - cluster_data = cluster_file.read() - cluster_json = json.loads(cluster_data) - cluster_id = cluster_json['cluster'][0]['id'] - if params.has_key('cluster_id'): - cluster_id = params['cluster_id'] - cluster_file.close() - return cluster_id - # end get_cluster_id() - - - def add_cluster(self): - cluster_file = None - params=self.params - if params.has_key('cluster_file'): - cluster_file = params['cluster_file'] - - cluster_id = self.get_pref_cluster_id() - if not cluster_file: - cluster_dict = 
self.get_cluster_with_cluster_id_from_db() - if not len(cluster_dict['cluster']): - cluster_dict = new_cluster() - else: - cluster_dict = { - "cluster" : [ - { - "id" : "", - "parameters" : { - - } - } - ] - } - - cluster_dict['cluster'][0]['id'] = cluster_id - self.modify_cluster_from_testbed_py(cluster_dict) - temp_dir= expanduser("~") - cluster_file = '%s/cluster.json' %temp_dir - local('touch %s' %cluster_file) - out_file = open(cluster_file, 'w') - out_data = json.dumps(cluster_dict, indent=4) - - out_file.write(out_data) - out_file.close() - else : - timestamp = dt.now().strftime("%Y_%m_%d_%H_%M_%S") - local('cp %s %s.org.%s' %(cluster_file, cluster_file, timestamp)) - with open(cluster_file, 'r') as clf: data=json.load(clf) - clf.close() - data['cluster'][0]['id'] = cluster_id - with open(cluster_file, 'w') as clf: json.dump(data, clf) - clf.close() - - if self.test_local: - local('server-manager add cluster -f %s' %(cluster_file)) - else: - svrmgr = self.svrmgr - svrmgr_password = self.svrmgr_password - with settings(host_string=svrmgr, password=svrmgr_password, warn_only=True): - file_name = os.path.basename(cluster_file) - temp_dir= tempfile.mkdtemp() - run('mkdir -p %s' % temp_dir) - put(cluster_file, '%s/%s' % (temp_dir, file_name)) - run('server-manager add cluster -f %s/%s' %(temp_dir, file_name) ) - run('server-manager show cluster') - # end add_cluster() - - def add_server(self): - self.add_server_using_json() - self.update_server_in_db_with_testbed_py() - #end add_server - - def add_image(self): - params=self.params - if not params: - return None - if not params.has_key('image_file'): - return None - image_file = params['image_file'] - - if self.test_local: - local('server-manager add image -f %s' %(image_file)) - local('server-manager show image') - else: - svrmgr = self.svrmgr - svrmgr_password = self.svrmgr_password - with settings(host_string=svrmgr, password=svrmgr_password, warn_only=True): - file_name = os.path.basename(image_file) - 
temp_dir = tempfile.mkdtemp() - run('mkdir -p %s' % temp_dir) - put(image_file, '%s/%s' % (temp_dir, file_name)) - - run('server-manager add image -f %s/%s' %(temp_dir, file_name) ) - run('server-manager show image') - #end add_image - - def add_pkg(self): - params=self.params - if not params: - return None - if not params.has_key('pkg_file'): - return None - pkg_file = params['pkg_file'] - - if self.test_local: - local('server-manager add image -f %s' %(pkg_file)) - local('server-manager show image ') - else: - svrmgr = self.svrmgr - svrmgr_password = self.svrmgr_password - with settings(host_string=svrmgr, password=svrmgr_password, warn_only=True): - file_name = os.path.basename(pkg_file) - temp_dir= tempfile.mkdtemp() - run('mkdir -p %s' % temp_dir) - put(pkg_file, '%s/%s' % (temp_dir, file_name)) - - run('server-manager add image -f %s/%s' %(temp_dir, file_name) ) - run('server-manager show image') - #end add_pkg - - def add_server_using_json(self): - params=self.params - if not params: - return None - - if not params.has_key('server_file'): - return None - server_file = params['server_file'] - - if self.test_local: - print "line 408" - local('server-manager add server -f %s' %(server_file)) - local('server-manager show server') - else: - svrmgr = self.svrmgr - svrmgr_password = self.svrmgr_password - with settings(host_string=svrmgr, password=svrmgr_password, warn_only=True): - file_name = os.path.basename(server_file) - temp_dir= tempfile.mkdtemp() - run('mkdir -p %s' % temp_dir) - put(server_file, '%s/%s' % (temp_dir, file_name)) - print "line 420" - run('server-manager add server -f %s/%s' %(temp_dir, file_name) ) - run('server-manager show server') - #end add_server_using_json - - def modify_cluster_json(self): - params=self.params - if not params: - return None - if not params.has_key('cluster_file'): - return None - cluster_file = params['cluster_file'] - - timestamp = dt.now().strftime("%Y_%m_%d_%H_%M_%S") - local('cp %s %s.org.%s' %(cluster_file, 
cluster_file, timestamp)) - - in_file = open( cluster_file, 'r' ) - in_data = in_file.read() - cluster_dict = json.loads(in_data) - - self.modify_cluster_from_testbed_py(cluster_dict) - - out_file = open(cluster_file, 'w') - out_data = json.dumps(cluster_dict, indent=4) - out_file.write(out_data) - out_file.close() - #end modify_cluster_json - - - def modify_cluster_from_testbed_py(self, cluster_dict): - testbed = self.testbed - if testbed.env.has_key('mail_to'): - cluster_dict['cluster'][0]['email'] = testbed.env.mail_to - if testbed.env.has_key('encap_priority'): - cluster_dict['cluster'][0]['parameters']['encapsulation_priority'] = testbed.env.encap_priority - if 'multi_tenancy' in dir(testbed): - if testbed.multi_tenancy == True : - cluster_dict['cluster'][0]['parameters']['multi_tenancy'] = "True" - elif testbed.multi_tenancy == False : - cluster_dict['cluster'][0]['parameters']['multi_tenancy'] = "False" - else: - cluster_dict['cluster'][0]['parameters']['multi_tenancy'] = "False" - if 'os_username' in dir(testbed): - cluster_dict['cluster'][0]['parameters']['keystone_username'] = testbed.os_username - if 'os_password' in dir(testbed): - cluster_dict['cluster'][0]['parameters']['keystone_password'] = testbed.os_password - if 'os_tenant_name' in dir(testbed): - cluster_dict['cluster'][0]['parameters']['keystone_tenant'] = testbed.os_tenant_name - if 'router_asn' in dir(testbed): - cluster_dict['cluster'][0]['parameters']['router_asn'] = testbed.router_asn - #end modify_cluster_from_testbed_py - - - def new_cluster(self): - params=self.params - cluster_id = params['cluster'] - cluster_dict = { - "cluster" : [ - { - "id" : cluster_id, - "parameters" : { - "router_asn": "64512", - "database_dir": "/home/cassandra", - "db_initial_token": "", - "openstack_mgmt_ip": "", - "use_certs": "False", - "multi_tenancy": "False", - "encapsulation_priority": "'MPLSoUDP','MPLSoGRE','VXLAN'", - "service_token": "contrail123", - "keystone_user": "admin", - "keystone_password": 
"contrail123", - "keystone_tenant": "admin", - "openstack_password": "contrail123", - "analytics_data_ttl": "168", - "subnet_mask": "255.255.255.0", - "gateway": "1.1.1.254", - "password": "c0ntrail123", - "domain": "contrail.juniper.net", - "haproxy": "disable" - } - } - ] - } - return cluster_dict - # End new_cluster() - - def read_ini_file(self, config_ini): - try: - config = ConfigParser.SafeConfigParser() - config.read([config_ini]) - smgr_config = dict(config.items("SERVER-MANAGER")) - return smgr_config - except: - sys.exit("Error reading config file %s" %config_ini) - - return smgr_config - #end read_ini_file - - - def get_server_with_cluster_id_from_db(self): - cluster_id = self.get_pref_cluster_id() - - temp_dir= expanduser("~") - file_name = '%s/server_with_cluster_id_from_db.json' %(temp_dir) - - if self.test_local: - local('server-manager show server --cluster_id %s --detail \ - | tr -d "\n" \ - | sed "s/[^{]*//" \ - > %s' %(cluster_id, file_name)) - - else: - svrmgr = self.params - svrmgr_password = self.svrmgr_password - with settings(host_string=svrmgr, password=svrmgr_password, warn_only=True): - temp_dir= tempfile.mkdtemp() - run('mkdir -p %s' % temp_dir) - file_name = '%s/server_list.json' %(temp_dir) - - run('server-manager show server --cluster_id %s --detail \ - | tr -d "\n" \ - | sed \'s/[^{]*//\' \ - > %s' %(cluster_id, file_name) ) - - local('mkdir -p %s' % temp_dir) - - in_file = open( file_name, 'r' ) - in_data = in_file.read() - server_dict = json.loads(in_data) - return server_dict - #end get_server_with_cluster_id_from_db - - def get_cluster_with_cluster_id_from_db(self): - params=self.params - cluster_id = params['cluster_id'] - - cluster_dict = {"cluster": []} - - temp_dir= expanduser("~") - - file_name = '%s/cluster.json' %(temp_dir) - - if self.test_local: - local('server-manager show cluster --cluster_id %s --detail \ - | tr -d "\n" \ - | sed "s/[^{]*//" \ - > %s' %(cluster_id, file_name)) - else: - svrmgr = self.svrmgr - 
svrmgr_password = self.svrmgr_password - with settings(host_string=svrmgr, password=svrmgr_password, warn_only=True): - temp_dir= tempfile.mkdtemp() - run('mkdir -p %s' % temp_dir) - file_name = '%s/cluster.json' %(temp_dir) - run('server-manager show cluster --cluster_id %s --detail\ - | tr -d "\n" \ - | sed \'s/[^{]*//\' \ - > %s' %(cluster_id, file_name) ) - local('mkdir -p %s' % temp_dir) - - in_file = open( file_name, 'r' ) - in_data = in_file.read() - - cluster_dict = json.loads(in_data) - return cluster_dict - #end get_cluster_with_cluster_id_from_db(self): - - - def get_server_with_ip_from_db(self, ip=None): - params=self.params - - server_dict={} - if not ip: - print "Please provide an ip as input arg" - return ip - - temp_dir= expanduser("~") - - file_name = '%s/server.json' %(temp_dir) - - if self.test_local: - local('server-manager show server --ip %s --detail \ - | tr -d "\n" \ - | sed "s/[^{]*//" \ - > %s' %(ip, file_name)) - else: - svrmgr = self.svrmgr - svrmgr_password = self.svrmgr_password - with settings(host_string=svrmgr, password=svrmgr_password, warn_only=True): - temp_dir= tempfile.mkdtemp() - run('mkdir -p %s' % temp_dir) - file_name = '%s/server.json' %(temp_dir) - run('server-manager show server --ip %s --detail \ - | tr -d "\n" \ - | sed \'s/[^{]*//\' \ - > %s' %(ip, file_name) ) - local('mkdir -p %s' % temp_dir) - - in_file = open( file_name, 'r' ) - in_data = in_file.read() - server_dict = json.loads(in_data) - return server_dict - #end get_server_with_ip_from_db(self, ip=None): - - def get_host_roles_from_testbed_py(self): - testbed = self.testbed - node = {} - if not testbed.env.has_key('roledefs'): - return node - for key in testbed.env.roledefs: - if key == 'all' or key == 'build': - continue - for host_string in testbed.env.roledefs[key]: - ip = getIp(host_string) - if not node.has_key(ip): - node[ip] = [] - if key == 'cfgm': - node[ip].append('config') - else: - node[ip].append(key) - return node - # end 
get_host_roles_from_testbed_py - - def update_server_in_db_with_testbed_py(self): - cluster_id = self.get_pref_cluster_id() - node = self.get_host_roles_from_testbed_py() - if not node: - return - u_server_dict = {} - u_server_dict['server'] = [] - for key in node: - server_dict = {} - server_dict = self.get_server_with_ip_from_db(key) - if not server_dict or not server_dict['server']: - self.logger.error("Server with ip %s not present in Server Manager" % key) - continue - server_id = server_dict['server'][0]['id'] - u_server = {} - u_server['id'] = server_id - u_server['cluster_id'] = cluster_id - u_server['roles'] = node[key] - u_server_dict['server'].append(u_server) - - temp_dir= expanduser("~") - server_file = '%s/server.json' %temp_dir - local('touch %s' %server_file) - out_file = open(server_file, 'w') - out_data = json.dumps(u_server_dict, indent=4) - out_file.write(out_data) - out_file.close() - - if self.test_local: - local('server-manager add server -f %s' %(server_file) ) - for u_server in u_server_dict['server']: - local('server-manager show server --server_id %s --detail' \ - % u_server['id'] ) - else: - svrmgr = self.svrmgr - svrmgr_password = self.svrmgr_password - with settings(host_string=svrmgr, password=svrmgr_password, warn_only=True): - file_name = os.path.basename(server_file) - temp_dir= tempfile.mkdtemp() - run('mkdir -p %s' % temp_dir) - put(server_file, '%s/%s' % (temp_dir, file_name)) - run('server-manager add server -f %s/%s' %(temp_dir, file_name) ) - run('server-manager show server --server_id %s --detail' %server_id) - #end update_server_in_db_with_cluster_id - - def get_pref_cluster_id(self): - cluster_id = None - params=self.read_ini_file(self.smgr_config_ini) - if params.has_key('cluster_id'): - cluster_id = params['cluster_id'] - else: - cluster_id = self.get_cluster_id() - return cluster_id - #end get_pref_cluster_id(self): - - def get_svrmgr(self): - svrmgr = None - params=self.params - if params.has_key('svrmgr'): - svrmgr = 
params['svrmgr'] - return params['svrmgr'] - #end get_svrmgr(self): - - def get_server_file(self): - params=self.params - if not params: - return None - if not params.has_key('server_file'): - return None - server_file = params['server_file'] - return server_file - #end get_server_file(self): - - def get_testbed(self): - filepath = self.testbed_py - if not filepath: - sys.exit("tesbed.py missing in args ") - mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1]) - - if file_ext.lower() == '.py': - py_mod = imp.load_source(mod_name, filepath) - return py_mod - #end get_testbed(self): - - def verify_roles(self): - result = True - for node in env.roledefs['database']: - with settings(host_string=node, warn_only=True): - try: - verify.verify_database() - except SystemExit: - self.logger.error('verify_database has Failed') - result = result and False - for node in env.roledefs['cfgm']: - with settings(host_string=node, warn_only=True): - try: - verify.verify_cfgm() - except SystemExit: - self.logger.error('verify_cfgm has Failed') - result = result and False - for node in env.roledefs['control']: - with settings(host_string=node, warn_only=True): - try: - verify.verify_control() - except SystemExit: - self.logger.error('verify_control has Failed') - result = result and False - for node in env.roledefs['collector']: - with settings(host_string=node, warn_only=True): - try: - verify.verify_collector() - except SystemExit: - self.logger.error('verify_collector has Failed') - result = result and False - for node in env.roledefs['webui']: - with settings(host_string=node, warn_only=True): - try: - verify.verify_webui() - except SystemExit: - self.logger.error('verify_webui has Failed') - result = result and False - for node in env.roledefs['compute']: - with settings(host_string=node, warn_only=True): - try: - verify.verify_compute() - except SystemExit: - self.logger.error('verify_compute has Failed') - result = result and False - for node in 
env.roledefs['openstack']: - with settings(host_string=node, warn_only=True): - try: - verify.verify_openstack() - except SystemExit: - self.logger.error('verify_openstack has Failed') - result = result and False - return result - #end verify_roles(self): - - def verify_contrail_status(self, skip_node=None): - result = True - if not self.verify_database(skip_node): - result = result and False - if not self.verify_cfgm(skip_node): - result = result and False - if not self.verify_control(skip_node): - result = result and False - if not self.verify_collector(skip_node): - result = result and False - if not self.verify_webui(skip_node): - result = result and False - if not self.verify_compute(skip_node): - result = result and False - if not self.verify_openstack(skip_node): - result = result and False - return result - #end verify_contrail_status - - def verify_openstack(self, skip_node): - result = True - for node in env.roledefs['openstack']: - if skip_node: - if node in skip_node: - continue - with settings(host_string=node, warn_only=True): - output = run('source /etc/contrail/keystonerc') - output = run('openstack-status') - pattern = ["openstack-nova-api: active", - "openstack-nova-network: inactive (disabled on boot)", - "openstack-nova-scheduler: active", - "openstack-nova-volume: inactive (disabled on boot)", - "openstack-nova-conductor: active", - "openstack-glance-api: active", - "openstack-glance-registry: active", - "openstack-keystone: active", - "openstack-cinder-api: active", - "openstack-cinder-scheduler: active", - "openstack-cinder-volume: inactive (disabled on boot)", - "mysql: inactive (disabled on boot)", - "rabbitmq-server: active", - "memcached: inactive (disabled on boot)"] - for line in pattern: - if line not in output: - self.logger.error('verify %s has Failed' %line) - result = result and False - return result - #end verify_openstack(self): - - def verify_compute(self, skip_node): - result = True - for node in env.roledefs['compute']: - if 
skip_node: - if node in skip_node: - continue - with settings(host_string=node, warn_only=True): - output = run('contrail-status') - pattern = ["supervisor-vrouter: active", - "contrail-vrouter-agent active", - "contrail-vrouter-nodemgr active"] - for line in pattern: - if line not in output: - self.logger.error('verify %s has Failed' %line) - result = result and False - return result - #end verify_compute(self): - - def verify_webui(self, skip_node): - result = True - for node in env.roledefs['webui']: - if skip_node: - if node in skip_node: - continue - with settings(host_string=node, warn_only=True): - output = run('contrail-status') - pattern = ["supervisor-webui: active", - "contrail-webui active", - "contrail-webui-middleware active"] - for line in pattern: - if line not in output: - self.logger.error('verify %s has Failed' %line) - result = result and False - return result - #end verify_webui(self): - - def verify_collector(self, skip_node=None): - result = True - for node in env.roledefs['collector']: - if skip_node: - if node in skip_node: - continue - with settings(host_string=node, warn_only=True): - output = run('contrail-status') - pattern = ["supervisor-analytics: active", - "contrail-analytics-api active", - "contrail-analytics-nodemgr active", - "contrail-collector active", - "contrail-query-engine active", - "contrail-snmp-collector active", - "contrail-topology active"] - for line in pattern: - if line not in output: - self.logger.error('verify %s has Failed' %line) - result = result and False - return result - #end verify_collector(self): - - def verify_database(self, skip_node=None): - result = True - for node in env.roledefs['database']: - if skip_node: - if node in skip_node: - continue - with settings(host_string=node, warn_only=True): - output = run('contrail-status') - pattern = ["supervisor-database: active", - "contrail-database active", - "contrail-database-nodemgr active"] - for line in pattern: - if line not in output: - 
self.logger.error('verify %s has Failed' %line) - result = result and False - return result - #end verify_database(self): - - def verify_cfgm(self, skip_node=None): - result = True - for node in env.roledefs['cfgm']: - if skip_node: - if node in skip_node: - continue - with settings(host_string=node, warn_only=True): - output = run('contrail-status') - pattern = ["supervisor-config: active", - "contrail-api:0 active", - "contrail-config-nodemgr active", - "contrail-discovery:0 active", - "ifmap active", - "supervisor-support-service: active", - "rabbitmq-server active"] - - for line in pattern: - if line not in output: - self.logger.error('verify %s has Failed' %line) - result = result and False - return result - #end verify_cfgm(self): - - def verify_control(self, skip_node=None): - result = True - for node in env.roledefs['control']: - if skip_node: - if node in skip_node: - continue - with settings(host_string=node, warn_only=True): - output = run('contrail-status') - pattern = ["supervisor-control: active", - "contrail-control active", - "contrail-control-nodemgr active", - "contrail-dns active", - "contrail-named active"] - for line in pattern: - if line not in output: - self.logger.error('verify %s has Failed' %line) - result = result and False - return result - #end verify_control(self): - - def check_server_status_with_tag(self, tag=None, tag_server_ids=None): - if ((tag is not None) and (tag_server_ids is not None)): - flag_reimage_started=0 - for index in range(30): - sleep(10) - with settings(host_string=self.svrmgr, password=self.svrmgr_password, warn_only=True): - states=run('server-manager status server --tag %s | grep status' % tag) - if len(states.splitlines()) == len(tag_server_ids): - flag_reimage_started=len(tag_server_ids) - for each_state in states.splitlines(): - if (('restart_issued' in each_state.split(':')[1]) - or ('reimage_started' in each_state.split(':')[1])): - flag_reimage_started=flag_reimage_started-1 - if flag_reimage_started == 0: 
- self.logger.info('All the servers with tag %s have started reimaging' % tag) - flag_reimage_started='true' - break - else: - self.logger.error('No of servers with tag %s and servers listed are not matching.' % tag) - - if flag_reimage_started == 'true': - for index in range(24): - sleep(10) - with settings(host_string=self.svrmgr, password=self.svrmgr_password, warn_only=True): - states=run('server-manager status server --tag %s | grep status' % tag) - if len(states.splitlines()) == len(tag_server_ids): - flag_reimage_started=len(tag_server_ids) - for each_state in states.splitlines(): - if ('reimage_completed' in each_state.split(':')[1]): - flag_reimage_started=flag_reimage_started-1 - if flag_reimage_started == 0: - self.logger.info('All the servers with tag %s have reimaged successfully' % tag) - return True - else: - self.logger.error('No of servers with tag %s and servers listed are not matching.' % tag) - else: - self.logger.error('The servers did not move through restart_issued and reimage_started stares') - return False - else: - self.logger.error("A tag in form of tag_index=tag_value and a list of tagged server id's is not provided.") - return False - return False - #end check_server_status_with_tag - - def reimage(self, no_pkg=False, skip_node=None, restart_only=False, tag=None, tag_server_ids=None): - """ using svrmgr, reimage all the nodes """ - - result = True - image_id = self.get_image_id() - pkg_id = self.get_pkg_id() - cluster_id = self.get_cluster_id() - svrmgr = self.get_svrmgr() - svrmgr_password = self.svrmgr_password - server_file = self.get_server_file() - in_file = open( server_file, 'r' ) - in_data = in_file.read() - server_dict = json.loads(in_data) - - #Reimage and check status with tag. 
- if ((tag is not None) and (tag_server_ids is not None)): - with settings(host_string=svrmgr, password=svrmgr_password, warn_only=True): - reimage_command_failed = 0 - server_ids=run('server-manager reimage -F --tag %s %s | grep id' % (tag, image_id)) - for each_node in tag_server_ids: - if each_node not in server_ids: - reimage_command_failed = 1 - if reimage_command_failed == 0: - self.logger.info("Reimage command was successfull") - else: - self.logger.error("Reimage command FAILED") - return False - sleep(30) - result=self.check_server_status_with_tag(tag, tag_server_ids) - return result - - with settings(host_string=svrmgr, password=svrmgr_password, warn_only=True): - run('server-manager show all') - if no_pkg: - if skip_node == None: - if restart_only: - output=run('server-manager restart --cluster_id %s -F' %(cluster_id)) - if RESTART_MESSAGE not in output: - self.logger.warn("Restart command was not successfull") - else: - output=run('server-manager reimage --cluster_id %s -F %s' %(cluster_id,image_id)) - if REIMAGE_OK not in output: - self.logger.warn("Reimage command was not successfull") - else: - for node in server_dict['server']: - server_ip = node['ip_address'] - if server_ip in skip_node: - continue - server_id = node['id'] - sleep (5) - output=run('server-manager reimage -F --server_id %s %s' %(server_id,image_id)) - else: - if skip_node == None: - output=run('server-manager reimage --package_image_id %s --cluster_id %s %s -F' \ - %(pkg_id,cluster_id,image_id)) - else: - for node in server_dict['server']: - server_ip = node['ip_address'] - if server_ip in skip_node: - continue - output=run('server-manager reimage --server_id %s -F %s' %(server_id,image_id)) - if not(restart_only): - if "reimage queued" not in output: - self.logger.error("Reimage command was not successfull") - - if restart_only: - expected_status = "restart_issued" - expected_wait = RESTART_WAIT - else: - expected_status = "restart_issued" - expected_wait = REIMAGE_WAIT - if not 
self.verify_server_status(expected_status, skip_node) : - self.logger.error("server status \"%s\" not correctly updated", expected_status) - result = result and False - - self.logger.info("Server Rebooted. Going to sleep for %d seconds...." %expected_wait) - sleep(expected_wait) - - user = "root" - server_state = {} - - for node in server_dict['server']: - server_ip = node['ip_address'] - if skip_node: - if server_ip in skip_node: - continue - server_state[server_ip] = False - - home_dir= expanduser("~") - local('rm -rf %s/.ssh/known_hosts' %home_dir) - for retry in range(SERVER_RETRY_TIME): - for node in server_dict['server']: - server_ip = node['ip_address'] - if skip_node: - if server_ip in skip_node: - continue - if not verify_sshd(server_ip, user, env.password): - sleep(1) - self.logger.info("Node %s not reachable....retrying" %(server_ip)) - server_state[server_ip] = False - else: - self.logger.info("Node %s is UP" %(server_ip)) - if server_state[server_ip] == False: - target_node = '%s@%s' %(user,server_ip) - with settings( host_string = target_node ): - connections.connect(env.host_string) - with settings( host_string = target_node ) : - output = run('uptime') - uptime_string = output.split()[2] - if ':' in uptime_string: - uptime = int(uptime_string.split(':')[0]) - else: - uptime = int(uptime_string) - if uptime > 9 : - raise RuntimeError('Restart failed for Host (%s)' %server_ip) - else : - self.logger.info("Node %s has rebooted and UP now" %(server_ip)) - if not no_pkg: - output = run('dpkg -l | grep contrail') - match = re.search('contrail-fabric-utils\s+\S+-(\S+)\s+', output, re.M) - if match.group(1) not in pkg_id : - raise RuntimeError('Reimage not able to download package %s on targetNode (%s)' \ - %(pkg_id, server_ip) ) - match = re.search('contrail-install-packages\s+\S+~(\S+)\s+', output, re.M) - if match.group(1) not in pkg_id : - raise RuntimeError('Reimage not able to download package %s on targetNode (%s)' \ - %(pkg_id, server_ip) ) - 
server_state[server_ip] = True - - #End for node in server_dict['server']: - - cluster_state = True - for key in server_state: - cluster_state = cluster_state and server_state[key] - - if cluster_state == True: - break - #End for key in server: - - #End for retry in range(SERVER_RETRY_TIME): - - if not cluster_state: - raise RuntimeError('Unable to SSH to one or more Host ' ) - - if restart_only: - expected_status = "restart_issued" - else: - expected_status = "reimage_completed" - - if not self.verify_server_status(expected_status, skip_node) : - result = result and False - - return result - #end reimage - - def provision(self): - """ using svrmgr, provision the cluster """ - result = True - image_id = self.get_image_id() - pkg_id = self.get_pkg_id() - cluster_id = self.get_cluster_id() - svrmgr = self.get_svrmgr() - svrmgr_password = self.svrmgr_password - with settings(host_string=svrmgr, password=svrmgr_password, warn_only=True): - output = run('server-manager provision -F --cluster_id %s %s' %(cluster_id,pkg_id) ) - if PROVISION_OK not in output: - self.logger.error("provision command was not successfull") - result = result and False - run('server-manager show all') - return result - #end provision(self): - - def setup_cluster(self, no_reimage_pkg=False, provision_only=False): - result = True - if not provision_only: - if no_reimage_pkg: - if not self.reimage(no_pkg=True) : - result = result and False - else: - if not self.reimage() : - result = result and False - - if not self.provision() : - result = result and False - self.logger.info("Cluster provisioning initiated... Going to sleep for %d seconds...." 
%PROVISION_TIME) - sleep(PROVISION_TIME) - if not self.verify_server_status("provision_completed"): - result = result and False - for node in env.roledefs['all']: - try: - with settings(host_string=node, warn_only=True): - output = run('contrail-version') - except: - continue - if not self.verify_contrail_status(): - result = result and False - - return result - #end setup_cluster - - def verify_node_add_delete(self, no_reimage_pkg=False): - result = True - test_node = self.delete_compute_node() - if test_node == None: - self.logger.info("Not enough nodes to perform this test") - return None - global nodethatisdeleted - nodethatisdeleted = test_node - if no_reimage_pkg: - if not self.reimage(no_pkg=True, skip_node=test_node) : - result = result and False - else: - if not self.reimage(skip_node=test_node) : - result = result and False - if not self.provision() : - result = result and False - self.logger.info("Cluster provisioning initiated... Going to sleep for %d seconds...." %PROVISION_TIME) - sleep(PROVISION_TIME) - if not self.verify_server_status("provision_completed", skip_node=test_node): - result = result and False - if not self.verify_contrail_status(skip_node=test_node): - result = result and False - - #restoring all will re-add compute node - self.add_server() - remaining_node = self.get_remaining_node_from_testbed_py(test_node) - if no_reimage_pkg: - if not self.reimage(no_pkg=True, skip_node=remaining_node) : - result = result and False - else: - if not self.reimage(skip_node=remaining_node) : - result = result and False - if not self.provision_server(test_node) : - result = result and False - self.logger.info("Cluster provisioning initiated... Going to sleep for %d seconds...." 
%PROVISION_TIME) - sleep(PROVISION_TIME) - if not self.verify_server_status("provision_completed", skip_node=remaining_node): - result = result and False - if not self.verify_contrail_status(): - result = result and False - - return result - #end setup_cluster - - def get_cluster_status_having_this_tag(self): - params=self.params - - server_dict={} - cluster_id = self.get_cluster_id() - - temp_dir= expanduser("~") - - file_name = '%s/status.json' %(temp_dir) - - if self.test_local: - local('server-manager status server --cluster_id %s \ - | tr -d "\n" \ - | sed "s/[^{]*//" \ - > %s' %(cluster_id, file_name)) - else: - svrmgr = self.svrmgr - svrmgr_password = self.svrmgr_password - with settings(host_string=svrmgr, password=svrmgr_password, warn_only=True): - temp_dir= tempfile.mkdtemp() - run('mkdir -p %s' % temp_dir) - file_name = '%s/status.json' %(temp_dir) - run('server-manager status server --cluster_id %s \ - | tr -d "\n" \ - | sed \'s/[^{]*//\' \ - > %s' %(cluster_id, file_name) ) - local('mkdir -p %s' % temp_dir) - - in_file = open( file_name, 'r' ) - in_data = in_file.read() - server_dict = json.loads(in_data) - return server_dict - #end get_cluster_status_having_this_tag - - def verify_server_status(self, status, skip_node=None): - """ verify status of server """ - result = True - cluster_id = self.get_cluster_id() - expected_state = {} - actual_state = {} - server_file = self.get_server_file() - in_file = open( server_file, 'r' ) - in_data = in_file.read() - in_file.close() - server_dict = json.loads(in_data) - for node in server_dict['server']: - server_ip = node['ip_address'] - if skip_node: - if server_ip in skip_node: - continue - expected_state[server_ip] = status - - status_dict = self.get_cluster_status_having_this_tag() - - for node in status_dict['server']: - server_ip = node['ip_address'] - if skip_node: - if server_ip in skip_node: - continue - actual_state[server_ip] = status - - if cmp(expected_state,actual_state) != 0: - self.logger.error( 
- 'Cluster status \"%s\" is incorrectly updated for %s ' % - (status, cluster_id)) - result = result and False - else: - self.logger.info( - 'Cluster status \"%s\" is correctly updated for %s ' % - (status, cluster_id)) - return result - #end verify_server_status - - # Install server manager and start the service provided the SM - # installer file path is specified. - def install_sm(self, SM_installer_file_path=None): - """Install Server Manager Server and verify it's running.""" - self.logger.info("Running install_sm...") - result=False - if SM_installer_file_path is None: - self.logger.error("No installer file specified for SM") - return False - - self.logger.info("Verify server manager install.") - self.logger.info("Installer :: %s" % SM_installer_file_path) - - with settings(host_string=self.svrmgr, password=self.svrmgr_password, warn_only=True): - run('dpkg -i %s' % SM_installer_file_path) - run('cd /opt/contrail/contrail_server_manager/; ./setup.sh --all') - run('rm -rf /etc/contrail_smgr/role_sequence.json') - run('cp /contrail-smgr-save/dhcp.template /etc/cobbler/dhcp.template; cp /contrail-smgr-save/named.template /etc/cobbler/named.template') - run('cp /contrail-smgr-save/settings /etc/cobbler/settings; cp /contrail-smgr-save/zone.template /etc/cobbler/zone.template') - run('cp -r /contrail-smgr-save/zone_templates /etc/cobbler/; cp /contrail-smgr-save/named.conf.options /etc/bind/') - run('service contrail-server-manager start') - time.sleep(30) - SM_port=run('netstat -nap | grep 9001') - if '9001' in SM_port: - result=True - return result - #end install_sm - - # Uninstall Server Manager and delete trailing directories. 
- def uninstall_sm(self): - """Uninstall Server Manager Server and cleanup.""" - self.logger.info("Running uninstall_sm...") - - with settings(host_string=self.svrmgr, password=self.svrmgr_password, warn_only=True): - run('mkdir -p /contrail-smgr-save/; cp /etc/cobbler/named.template /contrail-smgr-save') - run('cp /etc/cobbler/settings /contrail-smgr-save; cp /etc/cobbler/zone.template /contrail-smgr-save') - run('cp /etc/cobbler/dhcp.template /contrail-smgr-save') - run('cp -r /etc/cobbler/zone_templates /contrail-smgr-save; cp /etc/bind/named.conf.options /contrail-smgr-save') - run('service contrail-server-manager stop') - run('dpkg -r contrail-server-manager-installer') - run('dpkg -P contrail-server-manager') - run('dpkg -P contrail-server-manager-client') - run('dpkg -P contrail-server-manager-monitoring') - run('dpkg -P contrail-web-server-manager') - run('dpkg -P contrail-web-core') - run('dpkg -P python-contrail') - run('rm -rf /opt/contrail/contrail_server_manager/; rm -rf /opt/contrail/server-manager') - return True - #end uninstall_sm - - # Back-up or save a file with extn _back_up. - def backup_file(self, file_path=None): - result = False - self.logger.info("Running backup_file...") - if file_path is None: - self.logger.error("No file path passed to the function") - return result - bkup_file=file_path + "_back_up" - with settings(host_string=self.svrmgr, password=self.svrmgr_password, warn_only=True): - run('cp -rf %s %s' % (file_path, bkup_file)) - run('ls -lrt %s' % bkup_file) - result=True - return result - #end backup_file - - # Restore a file provided a file with extn _back_up exists in the same path. 
- def restore_file(self, file_path=None): - self.logger.info("Running restore_file...") - return True - #end restore_file - - def add_tag_to_server(self, server_ip, tag_index, tag_value): - server_dict = self.get_server_with_ip_from_db(server_ip) - server_id = server_dict['server'][0]['id'] - server_file = '/tmp/tempserver.json' - with settings(host_string=self.svrmgr, password=self.svrmgr_password, warn_only=True): - run('server-manager show server --server_id %s -d > %s' % (server_id, server_file)) - with open(server_file, 'r') as svf: - data=json.load(svf) - svf.close() - data['server'][0]['tag'][tag_index]=tag_value - with open(server_file, 'w') as svf: - json.dump(data, svf) - svf.close() - run('server-manager add server -f %s' % server_file) - return server_id - #end add_tag_to_server - - def delete_tag_from_server(self, server_ip, tag_index=None, all_tags=False): - server_dict = self.get_server_with_ip_from_db(server_ip) - server_id = server_dict['server'][0]['id'] - server_file = '/tmp/tempserver.json' - if (all_tags == True) or (tag_index == None): - with settings(host_string=self.svrmgr, password=self.svrmgr_password, warn_only=True): - run('server-manager show server --server_id %s -d > %s' % (server_id, server_file)) - with open(server_file, 'r') as svf: - data=json.load(svf) - svf.close() - data['server'][0]['tag']['datacenter']='' - data['server'][0]['tag']['floor']='' - data['server'][0]['tag']['rack']='' - data['server'][0]['tag']['user_tag']='' - with open(server_file, 'w') as svf: - json.dump(data, svf) - svf.close() - run('server-manager add server -f %s' % server_file) - else: - with settings(host_string=self.svrmgr, password=self.svrmgr_password, warn_only=True): - run('server-manager show server --server_id %s -d > %s' % (server_id, server_file)) - with open(server_file, 'r') as svf: - data=json.load(svf) - svf.close() - data['server'][0]['tag'][tag_index]='' - with open(server_file, 'w') as svf: - json.dump(data, svf) - svf.close() - 
run('server-manager add server -f %s' % server_file) - return True - #end delete_tag_from_server - - def add_tag_and_verify_server_listing(self, server_list=None, tag_ind=None, tag_val=None): - if (server_list is None) or (tag_ind is None) or (tag_val is None): - self.logger.error("No server_list or tag_ind or tag_val was provided to add_and_list_tag.") - - # Configure tag on servers. - server_id_list = [] - for node in server_list: - server_id_list.append(self.add_tag_to_server(server_ip=node.split('@')[1], - tag_index=tag_ind, tag_value=tag_val)) - # Check listing servers with tag. - with settings(host_string=self.svrmgr, password=self.svrmgr_password, warn_only=True): - no_of_servers=run("server-manager show server --tag %s='%s' | grep id | wc -l" % (tag_ind, tag_val)) - server_ids=run("server-manager show server --tag %s='%s' | grep id" % (tag_ind, tag_val)) - if (len(server_list) != int(no_of_servers)): - self.logger.error("All the nodes with tag %s='%s' were not listed" % (tag_ind, tag_val)) - return False - fail_flag=0 - for server_id in server_id_list: - if server_id in server_ids: - self.logger.info("Server %s listed with tag %s='%s'" % (server_id, tag_ind, tag_val)) - else: - self.logger.error("Server %s not listed with tag %s='%s'" % (server_id, tag_ind, tag_val)) - fail_flag=1 - if fail_flag == 1: - self.logger.error("Test test_list_servers_using_tag FAILED") - return False - return True - #end add_tag_and_verify_server_listing - -# end SmgrFixture - - -def getIp(string) : - regEx = re.compile( '\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' ) - result = regEx.search(string) - if result: - return result.group() - else: - return None -#end getIp(string) : - -def verify_sshd(host, user, password): - import paramiko - try: - client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - client.connect(host, username=user, password=password, timeout=5) - except Exception: - return False - client.close() - return True -#end verify_sshd - 
diff --git a/fixtures/svc_instance_fixture.py b/fixtures/svc_instance_fixture.py deleted file mode 100644 index 9c63af08b..000000000 --- a/fixtures/svc_instance_fixture.py +++ /dev/null @@ -1,492 +0,0 @@ -import fixtures -from vnc_api.vnc_api import * -from tcutils.util import retry -from time import sleep -from tcutils.services import get_status -try: - from webui_test import * -except ImportError: - pass - - -class SvcInstanceFixture(fixtures.Fixture): - - def __init__(self, connections, inputs, domain_name, project_name, si_name, - svc_template, if_list, left_vn_name=None, right_vn_name=None, do_verify=True, max_inst=1, static_route=['None', 'None', 'None']): - self.vnc_lib = connections.vnc_lib - self.api_s_inspect = connections.api_server_inspect - self.nova_h = connections.nova_h - self.inputs = connections.inputs - self.domain_name = domain_name - self.project_name = project_name - self.si_name = si_name - self.svc_template = svc_template - self.st_name = svc_template.name - self.si_obj = None - self.domain_fq_name = [self.domain_name] - self.project_fq_name = [self.domain_name, self.project_name] - self.si_fq_name = [self.domain_name, self.project_name, self.si_name] - self.logger = inputs.logger - self.left_vn_name = left_vn_name - self.right_vn_name = right_vn_name - self.already_present = False - self.do_verify = do_verify - self.if_list = if_list - self.max_inst = max_inst - self.static_route = static_route - self.si = None - self.svm_ids = [] - self.cs_svc_vns = [] - self.cs_svc_ris = [] - self.svn_list = ['svc-vn-mgmt', 'svc-vn-left', 'svc-vn-right'] - if self.inputs.verify_thru_gui(): - self.browser = connections.browser - self.browser_openstack = connections.browser_openstack - self.webui = WebuiTest(connections, inputs) - # end __init__ - - def setUp(self): - super(SvcInstanceFixture, self).setUp() - self.si_obj = self._create_si() - # end setUp - - def cleanUp(self): - super(SvcInstanceFixture, self).cleanUp() - do_cleanup = True - if 
self.inputs.fixture_cleanup == 'no': - do_cleanup = False - if self.already_present: - do_cleanup = False - if self.inputs.fixture_cleanup == 'force': - do_cleanup = True - if do_cleanup: - if self.inputs.is_gui_based_config(): - self.webui.delete_svc_instance(self) - else: - self._delete_si() - self.logger.info("Deleted SI %s" % (self.si_fq_name)) - assert self.verify_on_cleanup() - else: - self.logger.info('Skipping deletion of SI %s' % - (self.si_fq_name)) - # end cleanUp - - def _create_si(self): - self.logger.debug("Creating service instance: %s", self.si_fq_name) - try: - svc_instance = self.vnc_lib.service_instance_read( - fq_name=self.si_fq_name) - self.already_present = True - self.logger.debug( - "Service instance: %s already exists", self.si_fq_name) - except NoIdError: - project = self.vnc_lib.project_read(fq_name=self.project_fq_name) - svc_instance = ServiceInstance(self.si_name, parent_obj=project) - if self.left_vn_name and self.right_vn_name: - si_prop = ServiceInstanceType( - left_virtual_network=self.left_vn_name, - right_virtual_network=self.right_vn_name) - bridge = False - if 'bridge_svc_instance_1' in self.si_fq_name: - bridge = True - for itf in self.if_list: - if (itf[0] == 'left' and not bridge): - virtual_network = self.left_vn_name - elif (itf[0] == 'right' and not bridge): - virtual_network = self.right_vn_name - else: - virtual_network = "" - if_type = ServiceInstanceInterfaceType( - virtual_network=virtual_network, - static_routes=RouteTableType([RouteType(prefix=self.static_route[self.if_list.index(itf)])])) - if_type.set_static_routes( - RouteTableType([RouteType(prefix=self.static_route[self.if_list.index(itf)])])) - si_prop.add_interface_list(if_type) - - else: - if self.left_vn_name: - # In Network mode - si_prop = ServiceInstanceType( - left_virtual_network=self.left_vn_name) - intf_count = 1 - virtual_network = self.left_vn_name - else: - # Transparent mode - si_prop = ServiceInstanceType() - intf_count = 1 - virtual_network = 
"" - if self.svc_template.service_template_properties.service_type == 'firewall': - # Transparent mode firewall - intf_count = 3 - for i in range(intf_count): - if_type = ServiceInstanceInterfaceType( - virtual_network=virtual_network) - si_prop.add_interface_list(if_type) - si_prop.set_scale_out(ServiceScaleOutType(self.max_inst)) - svc_instance.set_service_instance_properties(si_prop) - svc_instance.set_service_template(self.svc_template) - if self.inputs.is_gui_based_config(): - self.webui.create_svc_instance(self) - else: - self.vnc_lib.service_instance_create(svc_instance) - svc_instance = self.vnc_lib.service_instance_read( - fq_name=self.si_fq_name) - return svc_instance - # end _create_si - - def _delete_si(self): - self.logger.debug("Deleting service instance: %s", self.si_fq_name) - self.vnc_lib.service_instance_delete(fq_name=self.si_fq_name) - # end _delete_si - - def verify_si(self): - """check service instance""" - self.project = self.vnc_lib.project_read(fq_name=self.project_fq_name) - try: - self.si = self.vnc_lib.service_instance_read( - fq_name=self.si_fq_name) - self.logger.debug( - "Service instance: %s created succesfully", self.si_fq_name) - except NoIdError: - errmsg = "Service instance: %s not found." 
% self.si_fq_name - self.logger.warn(errmsg) - return (False, errmsg) - return True, None - - def verify_st(self): - """check service template""" - self.cs_si = self.api_s_inspect.get_cs_si( - project=self.project.name, si=self.si_name, refresh=True) - try: - st_refs = self.cs_si['service-instance']['service_template_refs'] - except KeyError: - st_refs = None - if not st_refs: - errmsg = "No service template refs in SI '%s'" % self.si_name - self.logger.warn(errmsg) - return (False, errmsg) - - st_ref_name = [st_ref['to'][-1] - for st_ref in st_refs if st_ref['to'][-1] == self.st_name] - if not st_ref_name: - errmsg = "SI '%s' has no service template ref to %s" % ( - self.si_name, self.st_name) - self.logger.warn(errmsg) - return (False, errmsg) - self.logger.debug("SI '%s' has service template ref to %s", - self.si_name, self.st_name) - - return True, None - - @retry(delay=5, tries=5) - def verify_svm(self): - """check Service VM""" - # read again from api in case of retry - self.cs_si = self.api_s_inspect.get_cs_si( - project=self.project.name, si=self.si_name, refresh=True) - try: - self.vm_refs = self.cs_si[ - 'service-instance']['virtual_machine_back_refs'] - except KeyError: - self.vm_refs = None - if not self.vm_refs: - errmsg = "SI %s does not have back refs to Service VM" % self.si_name - self.logger.warn(errmsg) - return (False, errmsg) - - self.logger.debug("SI %s has back refs to Service VM", self.si_name) - self.svm_ids = [vm_ref['to'][0] for vm_ref in self.vm_refs] - for svm_id in self.svm_ids: - cs_svm = self.api_s_inspect.get_cs_vm(vm_id=svm_id, refresh=True) - if not cs_svm: - errmsg = "Service VM for SI '%s' not launched" % self.si_name - self.logger.warn(errmsg) - #self.logger.debug("Service monitor status: %s", get_status('contrail-svc-monitor')) - return (False, errmsg) - self.logger.debug("Serivce VM for SI '%s' is launched", self.si_name) - return True, None - - def svm_compute_node_ip(self): - admin_project_uuid = 
self.api_s_inspect.get_cs_project(project=self.project.name)['project'][ - 'uuid'] - #svm_name = self.si_name + str('_1') - #svm_name = self.si_obj.uuid + str('__1') - svm_name = self.si_obj.name + str('__1') - # handle change in to ____ - svm_name = self.inputs.domain_name + '__' + \ - self.inputs.project_name + '__' + svm_name - svm_obj = self.nova_h.get_vm_if_present( - svm_name, admin_project_uuid) - svm_compute_node_ip = self.inputs.host_data[ - self.nova_h.get_nova_host_of_vm(svm_obj)]['host_ip'] - return svm_compute_node_ip - - @retry(delay=1, tries=5) - def verify_interface_props(self): - """check if properties""" - try: - vm_if_props = self.svc_vm_if[ - 'virtual-machine-interface']['virtual_machine_interface_properties'] - except KeyError: - vm_if_props = None - if not vm_if_props: - errmsg = "No VM interface in Service VM of SI %s" % self.si_name - self.logger.warn(errmsg) - return (False, errmsg) - self.logger.debug( - "VM interface present in Service VM of SI %s", self.si_name) - - self.if_type = vm_if_props['service_interface_type'] - if (not self.if_type and self.if_type not in self.if_list): - errmsg = "Interface type '%s' is not present in Servcice VM of SI '%s'" % ( - self.if_type, self.si_name) - self.logger.warn(errmsg) - return (False, errmsg) - self.logger.debug( - "Interface type '%s' is present in Service VM of SI '%s'", self.if_type, self.si_name) - return True, None - - @retry(delay=1, tries=5) - def verify_vn_links(self): - """check vn links""" - try: - vn_refs = self.svc_vm_if[ - 'virtual-machine-interface']['virtual_network_refs'] - except KeyError: - vn_refs = None - if not vn_refs: - errmsg = "IF %s has no back refs to vn" % self.if_type - self.logger.warn(errmsg) - return (False, errmsg) - self.logger.debug("IF %s has back refs to vn", self.if_type) - for vn in vn_refs: - self.svc_vn = self.api_s_inspect.get_cs_vn( - # project=self.project.name, vn=vn['to'][-1], refresh=True) - project=vn['to'][1], vn=vn['to'][-1], refresh=True) - if 
not self.svc_vn: - errmsg = "IF %s has no vn" % self.if_type - self.logger.warn(errmsg) - return (False, errmsg) - if self.svc_vn['virtual-network']['name'] in self.svn_list: - self.cs_svc_vns.append(vn['to'][-1]) - self.logger.info('SVC_VNs = %s' % self.cs_svc_vns) - self.logger.debug("IF %s has vn '%s'", self.if_type, - self.svc_vn['virtual-network']['name']) - return True, None - - @retry(delay=1, tries=5) - def verify_ri(self): - """check routing instance""" - try: - ri_refs = self.svc_vm_if[ - 'virtual-machine-interface']['routing_instance_refs'] - except KeyError: - ri_refs = None - vn_name = self.svc_vn['virtual-network']['name'] - if not ri_refs: - errmsg = "IF %s, VN %s has no back refs to routing instance" % ( - self.if_type, vn_name) - self.logger.warn(errmsg) - return (False, errmsg) - self.logger.debug( - "IF %s, VN %s has back refs to routing instance", self.if_type, vn_name) - - for ri in ri_refs: - svc_ri = self.api_s_inspect.get_cs_ri_by_id(ri['uuid']) - if not svc_ri: - errmsg = "IF %s VN %s has no RI" % (self.if_type, vn_name) - self.logger.warn(errmsg) - return (False, errmsg) - if svc_ri['routing-instance']['name'] in self.svn_list: - self.cs_svc_ris.append(ri['uuid']) - ri_name = svc_ri['routing-instance']['name'] - self.logger.debug("IF %s VN %s has RI", self.if_type, vn_name) - if ri_name == vn_name: - continue - else: - if not ri['attr']: - errmsg = "IF %s VN %s RI %s no attributes" % ( - self.if_type, vn_name, ri_name) - self.logger.warn(errmsg) - return (False, errmsg) - self.logger.debug("IF %s VN %s RI %s has attributes", - self.if_type, vn_name, ri_name) - # check service chain - sc_info = svc_ri[ - 'routing-instance']['service_chain_information'] - if not sc_info: - errmsg = "IF %s VN %s RI %s has no SCINFO" % ( - self.if_type, vn_name, ri_name) - self.logger.warn(errmsg) - return (False, errmsg) - self.logger.debug("IF %s VN %s RI %s has SCINFO", - self.if_type, vn_name, ri_name) - return True, None - - @retry(delay=2, tries=10) - 
def verify_svm_interface(self): - # check VM interfaces - for svm_id in self.svm_ids: - cs_svm = self.api_s_inspect.get_cs_vm(vm_id=svm_id, refresh=True) - svm_ifs = (cs_svm['virtual-machine'].get('virtual_machine_interfaces') or - cs_svm['virtual-machine'].get('virtual_machine_interface_back_refs')) - - if svm_ifs is None: - errmsg = "Service VM hasn't come up." - self.logger.warn(errmsg) - return False, errmsg - - elif len(svm_ifs) != len(self.if_list): - errmsg = "Service VM dosen't have all the interfaces %s" % self.if_list - self.logger.warn(errmsg) - return False, errmsg - - svc_vm_if = self.api_s_inspect.get_cs_vmi_of_vm(svm_id, refresh=True) - for self.svc_vm_if in svc_vm_if: - result, msg = self.verify_interface_props() - if not result: - return result, msg - - result, msg = self.verify_vn_links() - if not result: - return result, msg - - result, msg = self.verify_ri() - if not result: - return result, msg - return True, None - - def verify_on_setup(self, report=True): - if report: - self.report(self.verify_si()) - self.report(self.verify_st()) - self.report(self.verify_svm()) - self.report(self.verify_svm_interface()) - else: - # Need verifications to be run without asserting so that they can - # retried to wait for instances to come up - result = True - msg = "" - result1, msg1 = self.verify_si() - if not result1: - result = False - msg = msg + msg1 - result1, msg1 = self.verify_st() - if not result1: - result = False - msg = msg + msg1 - result1, msg1 = self.verify_svm() - if not result1: - result = False - msg = msg + msg1 - else: - # verification has dependency on verify_svm - result1, msg1 = self.verify_svm_interface() - if not result1: - result = False - msg = msg + msg1 - return result, msg - - return True, None - # end verify_on_setup - - def report(self, result): - if type(result) is tuple: - result, errmsg = result - if not result: - assert False, errmsg - - @retry(delay=2, tries=15) - def verify_si_not_in_api_server(self): - if not self.si: - 
return (True, None) - si = self.api_s_inspect.get_cs_si( - project=self.project.name, si=self.si_name, refresh=True) - if si: - errmsg = "Service instance %s not removed from api server" % self.si_name - self.logger.warn(errmsg) - return (False, errmsg) - self.logger.debug("Service instance %s removed from api server" % - self.si_name) - return (True, None) - - @retry(delay=5, tries=20) - def verify_svm_not_in_api_server(self): - for svm_id in self.svm_ids: - cs_svm = self.api_s_inspect.get_cs_vm(vm_id=svm_id, refresh=True) - if cs_svm: - errmsg = "Service VM for SI '%s' not deleted" % self.si_name - self.logger.warn(errmsg) - return (False, errmsg) - self.logger.debug("Serivce VM for SI '%s' is deleted", self.si_name) - return (True, None) - - def si_exists(self): - svc_instances = self.vnc_lib.service_instances_list()[ - 'service-instances'] - self.logger.info("%s svc intances found in all projects. They are %s" % ( - len(svc_instances), svc_instances)) - # Filter SI's in current project as the above list call gives SIs in - # all projects - project_si_list = [] - for x in svc_instances: - proj_of_x = [x['fq_name'][0], x['fq_name'][1]] - if proj_of_x == self.project_fq_name: - project_si_list.append(x) - self.logger.info("%s svc intances found in current project. 
They are %s" % ( - len(project_si_list), project_si_list)) - if (len(project_si_list) == 0 and len(svc_instances) == 0): - return False - else: - return True - - @retry(delay=2, tries=30) - def verify_svn_not_in_api_server(self): - if self.si_exists(): - self.logger.info( - "Some Service Instance exists; skip SVN check in API server") - return (True, None) - for vn in self.cs_svc_vns: - svc_vn = self.api_s_inspect.get_cs_vn( - project=self.project.name, vn=vn, refresh=True) - self.logger.info('Service VN %s seen' % svc_vn) - # We will not worry about the Service-VNs not generated via - # fixtures - if (svc_vn and (svc_vn not in self.svn_list)): - errmsg = "Service VN %s is not removed from api server" % vn - self.logger.warn(errmsg) - return (False, errmsg) - self.logger.debug("Service VN %s is removed from api server", vn) - return (True, None) - - @retry(delay=2, tries=15) - def verify_ri_not_in_api_server(self): - if self.si_exists(): - self.logger.info( - "Some Service Instance exists; skip RI check in API server") - return (True, None) - for ri in self.cs_svc_ris: - svc_ri = self.api_s_inspect.get_cs_ri_by_id(ri) - if svc_ri: - errmsg = "RI %s is not removed from api server" % ri - self.logger.warn(errmsg) - return (False, errmsg) - self.logger.debug("RI %s is removed from api server", ri) - return (True, None) - - def verify_on_cleanup(self): - result = True - result, msg = self.verify_si_not_in_api_server() - assert result, msg - result, msg = self.verify_svm_not_in_api_server() - assert result, msg - if self.do_verify: - result, msg = self.verify_svn_not_in_api_server() - assert result, msg - result, msg = self.verify_ri_not_in_api_server() - assert result, msg - - return result - # end verify_on_cleanup - -# end SvcInstanceFixture diff --git a/fixtures/svc_template_fixture.py b/fixtures/svc_template_fixture.py deleted file mode 100644 index af1b023e2..000000000 --- a/fixtures/svc_template_fixture.py +++ /dev/null @@ -1,125 +0,0 @@ -import fixtures -from 
vnc_api.vnc_api import * -from tcutils.util import retry -try: - from webui_test import * -except ImportError: - pass - -class SvcTemplateFixture(fixtures.Fixture): - - def __init__(self, connections, inputs, domain_name, st_name, svc_img_name, - svc_type, if_list, svc_scaling, ordered_interfaces, svc_mode='transparent', flavor='contrail_flavor_2cpu'): - self.nova_h = connections.nova_h - self.vnc_lib_h = connections.vnc_lib - self.domain_name = domain_name - self.st_name = st_name - self.st_obj = None - self.domain_fq_name = [self.domain_name] - self.st_fq_name = [self.domain_name, self.st_name] - self.image_name = svc_img_name - self.nova_h.get_image(self.image_name) - self.svc_type = svc_type - self.if_list = if_list - self.svc_mode = svc_mode - self.svc_scaling = svc_scaling - self.ordered_interfaces = ordered_interfaces - self.flavor = flavor - self.logger = inputs.logger - self.inputs = inputs - self.connections = connections - self.nova_h = connections.nova_h - if self.inputs.verify_thru_gui(): - self.browser = connections.browser - self.browser_openstack = connections.browser_openstack - self.webui = WebuiTest(connections, inputs) - # end __init__ - - def setUp(self): - super(SvcTemplateFixture, self).setUp() - self.st_obj = self._create_st() - # end setUp - - def cleanUp(self): - super(SvcTemplateFixture, self).cleanUp() - if self.inputs.is_gui_based_config(): - self.webui.delete_svc_template(self) - else: - self._delete_st() - assert self.verify_on_cleanup() - # end cleanUp - - def _create_st(self): - self.logger.debug("Creating service template: %s", self.st_fq_name) - try: - svc_template = self.vnc_lib_h.service_template_read( - fq_name=self.st_fq_name) - self.logger.debug( - "Service template: %s already exists", self.st_fq_name) - except NoIdError: - domain = self.vnc_lib_h.domain_read(fq_name=self.domain_fq_name) - svc_template = ServiceTemplate( - name=self.st_name, parent_obj=domain) - svc_properties = ServiceTemplateType() - 
svc_properties.set_image_name(self.image_name) - svc_properties.set_service_type(self.svc_type) - svc_properties.set_service_mode(self.svc_mode) - svc_properties.set_service_scaling(self.svc_scaling) - # Add flavor if not already added - self.nova_h.get_flavor(self.flavor) - svc_properties.set_flavor(self.flavor) - svc_properties.set_ordered_interfaces(self.ordered_interfaces) - for itf in self.if_list: - if_type = ServiceTemplateInterfaceType( - service_interface_type=itf[0], shared_ip=itf[1], static_route_enable=itf[2]) - if_type.set_service_interface_type(itf[0]) - svc_properties.add_interface_type(if_type) - - svc_template.set_service_template_properties(svc_properties) - if self.inputs.is_gui_based_config(): - self.webui.create_svc_template(self) - else: - self.vnc_lib_h.service_template_create(svc_template) - svc_template = self.vnc_lib_h.service_template_read( - fq_name=self.st_fq_name) - - return svc_template - # end _create_st - - def _delete_st(self): - self.logger.debug("Deleting service template: %s", self.st_fq_name) - self.vnc_lib_h.service_template_delete(fq_name=self.st_fq_name) - # end _delete_st - - def verify_on_setup(self): - result = True - try: - svc_template = self.vnc_lib_h.service_template_read( - fq_name=self.st_fq_name) - self.logger.debug( - "Service template: %s created succesfully", self.st_fq_name) - except NoIdError: - self.logger.error("Service template: %s not created." % - self.st_fq_name) - result = result and False - return False - return result - # end verify_on_setup - - @retry(delay=5, tries=6) - def verify_on_cleanup(self): - result = True - try: - svc_template = self.vnc_lib_h.service_instance_read( - fq_name=self.st_fq_name) - self.logger.debug( - "Service template: %s still not removed", self.st_fq_name) - result = result and False - return False - except NoIdError: - self.logger.info("Service template: %s deleted successfully." 
% - self.st_fq_name) - return result - # end verify_on_cleanup - -# end SvcTemplateFixture diff --git a/fixtures/tor_fixture.py b/fixtures/tor_fixture.py deleted file mode 100644 index 527c629bc..000000000 --- a/fixtures/tor_fixture.py +++ /dev/null @@ -1,559 +0,0 @@ -import os -from netaddr import * -import abc -import time -import re - -from jnpr.junos.exception import * -from fabric.operations import get, put, run, local -from fabric.context_managers import settings - -import vnc_api_test -from pif_fixture import PhysicalInterfaceFixture -import physical_device_fixture -from tcutils.util import retry - - -class AbstractToR(object): - ''' Abstract ToR Switch - ''' - __metaclass__ = abc.ABCMeta - - @abc.abstractmethod - def __init__(self, *args, **kwargs): - pass - - @abc.abstractmethod - def restart_ovs(self, *args, **kwargs): - pass - -# end AbstractToR - - - -class ToRFixture(physical_device_fixture.PhysicalDeviceFixture): - - '''Fixture to manage Physical Switch objects - - Mandatory: - :param name : name of the device - :param mgmt_ip : Management IP - - Optional: - :param vendor : juniper/openvswitch - :param model : optional ,ex : qfx5100 - :param ssh_username : Login username to ssh, default is root - :param ssh_password : Login password, default is Embe1mpls - :tunnel_ip : Tunnel IP (for vtep) - :ports : List of Ports which are available to use - :param tor_ovs_port - :param controller_ip : vip to which tor connects to in case of HA mode - :param tor_ovs_protocol : pssl/tcp - :param priv_key_file : private key file (SSL). Default is - contrail-test/tools/tor/sc-privkey.pem - :param cert_privkey_file : Cert for private key file. Default is - contrail-test/tools/tor/sc-cert.pem - - Inherited optional parameters: - :param domain : default is default-domain - :param project_name : default is admin - :param cfgm_ip : default is 127.0.0.1 - :param api_port : default is 8082 - :param connections : ContrailConnections object. 
default is None - :param username : default is admin - :param password : default is contrail123 - :param auth_server_ip : default is 127.0.0.1 - :param logger : logger object - ''' - - def __init__(self, *args, **kwargs): - super(ToRFixture, self).__init__(*args, **kwargs) - self.vendor = kwargs.get('vendor', 'juniper') - self.model = kwargs.get('model', None) - self.tunnel_ip = kwargs.get('tunnel_ip', self.mgmt_ip) - self.controller_ip = kwargs.get('controller_ip', None) - self.tor_ovs_port = kwargs.get('tor_ovs_port', '6632') - self.tor_ovs_protocol = kwargs.get('tor_ovs_protocol', 'pssl') - self.ports = kwargs.get('ports', []) - - # Required for SSL connections - pwd = os.getcwd() - self.priv_key_file = kwargs.get('priv_key_file', - '%s/tools/tor/sc-privkey.pem' % (pwd)) - self.cert_privkey_file = kwargs.get('cert_privkey_file', - '%s/tools/tor/sc-cert.pem' % (pwd)) - - self.bgp_router = None - - # end __init__ - - def setUp(self): - super(ToRFixture, self).setUp() - self.tor_session = self.get_connection_obj(self.vendor, - host=self.mgmt_ip, - username=self.ssh_username, - password=self.ssh_password, - logger=self.logger) - - def cleanUp(self): - super(ToRFixture, self).cleanUp() - - def restart_ovs(self, *args, **kwargs): - pass - - def get_remote_flood_vtep(self, vn_uuid=None): - pass - - def get_tor_agents_details(self): - return self.device_details['tor_agent_dicts'] - - def get_active_tor_agent_ip(self, key='ip'): - ''' Currently TSN and Tor-agent are supposed to have a 1:1 - relationship - Get the details from any of the logical switches on the TOR - - Returns mgmt ip by default - key is host_control_ip for control ip - ''' - # TODO - active_tsn_ip = self.get_remote_flood_vtep() - return self.inputs.host_data[active_tsn_ip][key] - # end get_active_tor_agent_ip - - - def get_backup_tor_agent_ip(self, key='ip'): - ''' There are only two tsns/TAs possible to be mapped to a TOR - - Returns mgmt ip by default - key is host_control_ip for control ip - ''' - 
active_tsn_ip = self.get_remote_flood_vtep() - if active_tsn_ip == self.device_details['tor_tsn_ips'][0]: - index = 1 - if active_tsn_ip == self.device_details['tor_tsn_ips'][1]: - index = 0 - backup_tsn_ip = self.device_details['tor_tsn_ips'][index] - return self.inputs.host_data[backup_tsn_ip][key] - # end get_backup_tor_agent_ip - - def restart_backup_tor_agent(self): - ''' the tor_agent strings here are of the format - root@10.204.216.51:3 - ''' - active_ta = self.get_active_tor_agent_ip() - tor_agents = self.device_details['tor_agents'] - for tor_agent in tor_agents: - if active_ta not in tor_agent: - ta_id = tor_agent.split('@')[1].split(':')[1] - ta_ip = tor_agent.split('@')[1].split(':')[0] - self.inputs.restart_service('contrail-tor-agent-%s' % (ta_id), - [ta_ip]) - time.sleep(20) - # end restart_backup_tor_agent - - def clear_mac(self, vn_uuid, mac_address): - pass - - def get_other_tor_agent(self, tor_agent_ip): - pass - - def stop_active_tor_agent(self): - pass - - def start_active_tor_agent(self): - pass - -# end ToRFixture - -class QFXFixture(ToRFixture, AbstractToR): - def __init__(self, *args, **kwargs): - super(QFXFixture, self).__init__(*args, **kwargs) - self.bringup = kwargs.get('bringup', False) - self.model = kwargs.get('model', 'qfx5100') - - def setUp(self): - super(QFXFixture, self).setUp() - if self.bringup: - if self.tor_ovs_protocol == 'pssl': - self._copy_certs_to_switch() - self.config_ovsdb() - - def cleanUp(self): - super(QFXFixture, self).cleanUp() - if self.bringup: - self.remove_ovsdb() - - def _copy_certs_to_switch(self): - pwd = os.getcwd() - with settings(host_string='%s@%s' % (self.ssh_username, self.mgmt_ip), - password=self.ssh_password, shell='/bin/sh -c') : - put(self.priv_key_file, '/var/db/certs/vtep-privkey.pem') - put(self.cert_privkey_file, '/var/db/certs/vtep-cert.pem') - run('rm -f /var/db/certs/ca-cert.pem') - - def _delete_ovsdb_config(self): - stmts = [] - stmts.append('delete protocols ovsdb') - try: - 
self.tor_session.config(stmts, ignore_errors=True) - except ConfigLoadError,e: - self.logger.debug('No ovsdb config present to delete..ok') - - def config_ovsdb(self): - stmts = [] - - # Delete all ovsdb config first - self._delete_ovsdb_config() - if 'pssl' in self.tor_ovs_protocol: - stmts.append('set protocols ovsdb controller %s protocol ssl port '\ - '%s' % (self.controller_ip, self.tor_ovs_port)) - else: - stmts.append('set protocols ovsdb passive-connection protocol tcp' - ' port %s' % (self.tor_ovs_port)) - for port in self.ports: - stmts.append('set protocols ovsdb interfaces %s' % (port)) - stmts.append('set protocols ovsdb traceoptions file ovsdb.log') - self.logger.debug('Configuring QFX : ' % (stmts)) - self.tor_session.config(stmts) - #self.restart_ovs() - - def remove_ovsdb(self): - stmts = ['delete protocols ovsdb'] - self.logger.debug('Configuring QFX : ' % (stmts)) - self.tor_session.config(stmts) - # end remove_ovsdb - - def restart_ovs(self, *args, **kwargs): - ''' Ex : ovsdb-server, virtual-tunnel-end-point-management - - setting all_ovs to True will restart both the above procs - ''' - all_ovs_procs = ['ovsdb-server', - 'virtual-tunnel-end-point-management'] - if args: - procs_to_restart = args - else: - procs_to_restart = all_ovs_procs - for proc in procs_to_restart: - self.tor_session.restart(proc) - # end restart_ovs - - @retry(delay=3, tries=5) - def is_logical_switch_present(self, vn_id, expectation=True): - handle = self.tor_session.handle - xml_resp = handle.rpc.get_ovsdb_logical_switch_information() - ls_list = [x.text for x in xml_resp.findall( - 'logical-switch/logical-switch-name')] - if 'Contrail-%s' % (vn_id) in ls_list: - self.logger.debug('Logical switch for VN %s seen on ToR' % ( - vn_id)) - else: - self.logger.debug('Logical switch for VN %s not seen on ToR' % ( - vn_id)) - result = 'Contrail-%s' % (vn_id) in ls_list - return (result == expectation) - # end is_logical_switch_present - - def get_remote_flood_vtep(self, 
vn_uuid=None): - ''' - Returns the current flood vtep on the TOR - If VN UUID is passed, lookup is done for the corresponding logical - switch. - If VN UUID is not passed, the first logical switch seen is picked - ''' - if vn_uuid: - logical_switch = 'Contrail-%s' % (vn_uuid) - else: - logical_switch = None - handle = self.tor_session.handle - if logical_switch: - xml_resp = handle.rpc.get_ovsdb_mac_routes_information(remote=True, - logical_switch=logical_switch) - else: - xml_resp = handle.rpc.get_ovsdb_mac_routes_information(remote=True) - entries = xml_resp.findall('vtep-mac-routes') - for entry in entries: - mac = entry.find('mac').text - vtep_ip = entry.find('vtep-address').text - if mac == 'ff:ff:ff:ff:ff:ff': - return vtep_ip - # end get_remote_flood_vtep - - def clear_mac(self, vn_uuid, mac_address): - handle = self.tor_session.handle - vlan_name = 'Contrail-%s' % (vn_uuid) - handle.rpc.clear_ethernet_switching_table(address=mac_address, - vlan_name=vlan_name) - self.logger.info('Cleared MAC %s in MAC table of %s on %s' % ( - mac_address, vlan_name, self.name)) - # Workaround to avoid backup having the cleaned up mac for 5 min - # Restart the backup tor-agent - self.restart_backup_tor_agent() - # end clear_mac - - # TODO - # Enable this method once a clean solution is found for openvswitch also -# def get_vxlan_id_on_tor(self, vn_id): -# vn_ls_name = 'Contrail-%s' % (vn_id) -# handle = self.tor.session.handle -# xml_resp = handle.rpc.get_ovsdb_logical_switch_information() -# ls_xml = [x for xml_resp.findall('logical-switch')] -# for ls in ls_xml: -# ls_name = ls.findtext('logical-switch-name') -# if ls_name == vn_ls_name: -# tunnel_key = ls.findtext('tunnel-key') -# return tunnel_key -# return None -# # end get_vxlan_id_on_tor - - -class OpenVSwitchFixture(ToRFixture, AbstractToR): - ''' - The openvswitch node should be running Ubuntu 14.04 atleast - ssh_username should be a sudo user (no password prompt) - - Optional: - :param bringup : Bringup a new 
openvswitch. Default is False - Setup is assumed to be brought up earlier - - ''' - def __init__(self, *args, **kwargs): - super(OpenVSwitchFixture, self).__init__(*args, **kwargs) - self.bringup = kwargs.get('bringup', False) - self.timeout = kwargs.get('timeout', '200') - - self.common_cmd_str = 'timeout %s bash -x contrail-ovs-tool.sh --name %s '\ - '-t %s ' % (self.timeout, self.name, self.tunnel_ip) - if self.tor_ovs_protocol == 'pssl': - self.remote = ' -r ssl:%s:%s ' % (self.controller_ip, self.tor_ovs_port) - else: - self.remote = ' -r ptcp:%s ' % (self.tor_ovs_port) - self.common_cmd_str += '%s' % (self.remote) - pwd = os.getcwd() - self.cacert_file = '/tmp/%s-cacert.pem' % (self.name) - - def setUp(self): - super(OpenVSwitchFixture, self).setUp() - self._copy_tool_to_ovs_node() - if self.bringup: - self.config_ovsdb() - - def cleanUp(self): - super(OpenVSwitchFixture, self).cleanUp() - - # Workaround for issue in ovs-vtep where the local-macs - # are not deleted on deleting the bindings - # We manually go ahead and clear the local-macs - # so that tor-agent deletes the logical switch - self.delete_all_local_macs() - - if self.bringup: - self.delete_ports() - self.remove_ovsdb() - - def delete_all_local_macs(self): - ls_list = self.get_all_logical_switches() - for ls in ls_list: - self.vtep_ctl('clear-local-macs %s' % (ls)) - - def ovs_vsctl(self, args): - if self.tor_session.exists('/var/run/openvswitch/db-%s.sock ' % ( - self.name)): - prefix = '--db=unix:/var/run/openvswitch/db-%s.sock ' % (self.name) - else: - prefix = '' - args = prefix + args - output = self.tor_session.run_cmd(['ovs-vsctl %s' % (args)]) - return output[0] - # end ovs_vsctl - - def vtep_ctl(self, args): - if self.tor_session.exists('/var/run/openvswitch/db-%s.sock ' % ( - self.name)): - prefix = '--db=unix:/var/run/openvswitch/db-%s.sock ' % (self.name) - else: - prefix = '' - args = prefix + args - - output = self.tor_session.run_cmd(['vtep-ctl %s' % (args)]) - return output[0] - # 
end vtep_ctl - - def _copy_tool_to_ovs_node(self): - ''' - Copies the tool contrail-ovs-tool.sh to the openvswitch node - Std ovs-vtep does not have a way to work with non-default db - So copy the patched ovs-vtep to the node - ''' - pwd = os.getcwd() - with settings(host_string='%s@%s' % (self.ssh_username, self.mgmt_ip), - password=self.ssh_password): - put('%s/tools/tor/contrail-ovs-tool.sh' % (pwd)) - put('%s/tools/tor/ovs-vtep' % (pwd), - '/usr/share/openvswitch/scripts/ovs-vtep') - if self.tor_ovs_protocol == 'pssl': - self.remote_home = run('pwd') - put(self.priv_key_file) - put(self.cert_privkey_file) - run('rm -f %s' % (self.cacert_file)) - - self.ssl_args = ' -p %s/sc-privkey.pem -c %s/sc-cert.pem '\ - '-b %s ' % (self.remote_home, self.remote_home, - self.cacert_file) - self.common_cmd_str += '%s' % (self.ssl_args) - - def _run_ovs_tool_cmd(self, cmd): - output = self.tor_session.run_cmd([cmd], as_sudo=True) - if output[0].failed: - self.logger.error('Ovs tool cmd %s on node %s failed! 
' % ( - cmd, self.mgmt_ip)) - else: - self.logger.debug('Started ovs tool with cmd : %s' % ( - cmd)) - # end _run_ovs_tool_cmd - - def config_ovsdb(self): - start_cmd = self.common_cmd_str + ' -T init' - self._run_ovs_tool_cmd(start_cmd) - self.add_ports() - # end config_ovsdb - - def remove_ovsdb(self): - stop_cmd = self.common_cmd_str + ' -T stop' - self._run_ovs_tool_cmd(stop_cmd) - - def stop_ovsdb(self): - return self.remove_ovsdb() - - def restart_ovs(self): - restart_cmd = self.common_cmd_str + ' -T restart' - self._run_ovs_tool_cmd(restart_cmd) - - def start_ovs(self): - stop_cmd = self.common_cmd_str + ' -T start' - self._run_ovs_tool_cmd(start_cmd) - - def delete_ports(self, ports=[]): - if ports: - ports_to_delete = ports - else: - ports_to_delete = self.ports - socket = '--db unix:/var/run/openvswitch/db-%s.sock ' % (self.name) - for port in ports_to_delete: - cmds = [ - 'ovs-vsctl %s del-port %s %s' % (socket, self.name, port), - 'ip link delete %s ' % (port), - ] - self.run_cmd(cmds) - # end delete_ports - - def add_ports(self, ports=[]): - if ports: - ports_to_add = ports - else: - ports_to_add = self.ports - for port in ports_to_add: - hostport = 'host%s' % (port) - socket = '--db unix:/var/run/openvswitch/db-%s.sock ' % (self.name) - cmds = [ - 'ip link delete %s || echo ok ' % (port), - 'ip link add %s type veth peer name %s' % ( - hostport, port), - 'ovs-vsctl %s add-port %s %s' % (socket, self.name, port), - 'ifconfig %s up' % (port), - ] - self.tor_session.run_cmd(cmds, as_sudo=True) - # end add_ports - - @retry(delay=3, tries=5) - def is_logical_switch_present(self, vn_id, expectation=True): - output = self.vtep_ctl('list-ls') - result = 'Contrail-%s' % (vn_id) in output - return (result == expectation) - # end is_logical_switch_present - - def get_any_logical_switch_present(self): - output = self.vtep_ctl('list-ls| head -1') - return output - # end get_any_logical_switch_present - - def get_all_logical_switches(self): - output = 
self.vtep_ctl('list-ls') - if not output: - return [] - lines = output.split('\n') - return lines - - def get_remote_flood_vtep(self, vn_uuid=None): - ''' - Returns the current flood vtep on the TOR - If VN UUID is passed, lookup is done for the corresponding logical - switch. - If VN UUID is not passed, the first logical switch seen is picked - ''' - if vn_uuid: - logical_switch = 'Contrail-%s' % (vn_uuid) - else: - logical_switch = self.get_any_logical_switch_present() - output = self.vtep_ctl('list-remote-macs %s| grep unknown-dst' % (logical_switch)) - match = re.search('vxlan_over_ipv4/(.*)', output) - if match: - ip = match.group(1) - return ip - # end get_remote_flood_vtep - - def clear_mac(self, vn_uuid, mac_address): - logical_switch = 'Contrail-%s' % (vn_uuid) - self.vtep_ctl('del-ucast-local %s %s' % (logical_switch, mac_address)) - - self.logger.info('Cleared MAC %s in MAC table of %s on %s' % ( - mac_address, logical_switch, self.name)) - # Workaround to avoid backup having the cleaned up mac for 5 min - # Restart the backup tor-agent - self.restart_backup_tor_agent() - # Ovs-vtep doesnt learn MACs automatically after clearing MAC - # Restart ovs-vtep for now - self.restart_ovs() - # end clear_mac - - -class ToRFixtureFactory(object): - ''' Factory for ToR classes - ''' - __tor_classes = { - "juniper": QFXFixture, - "openvswitch": OpenVSwitchFixture, - } - - @staticmethod - def get_tor(*args, **kwargs): - vendor = kwargs.get('vendor', 'juniper') - tor_class = ToRFixtureFactory.__tor_classes.get( - vendor.lower(), None) - - if tor_class: - return tor_class(*args, **kwargs) - raise NotImplementedError("The requested ToR has not been implemented") - - -# end ToRFixtureFactory -if __name__ == "__main__": - ovs_fix = ToRFixtureFactory.get_tor( 'bng-contrail-qfx51-1', '10.204.218.10', vendor='juniper', ssh_username='root', ssh_password='c0ntrail123', - tunnel_ip='99.99.99.99', ports=['ge-0/0/0'], tor_ovs_port='9999', tor_ovs_protocol='pssl', 
controller_ip='10.204.216.184') - ovs_fix.setUp() - ovs_fix._copy_certs_to_switch() - ovs_fix.config_ovsdb() - #ovs_fix.restart_ovs() - #ovs_fix.remove_ovsdb() - - ovs_fix1 = ToRFixtureFactory.get_tor( 'br0', '10.204.216.195', vendor='openvswitch', ssh_username='root', ssh_password='c0ntrail123', - tunnel_ip='10.204.216.195', ports=['torport1'], tor_ovs_port='6632', tor_ovs_protocol='pssl', controller_ip='10.204.216.184') - ovs_fix1.setUp() - ovs_fix1.config_ovsdb() - - #ovs_fix2 = ToRFixtureFactory.get_tor( 'br1', '10.204.216.195', vendor='openvswitch', ssh_username='root', ssh_password='c0ntrail123', - # tunnel_ip='10.204.216.195', ports=['torport2'], tor_ovs_port='6633', tor_ovs_protocol='pssl', controller_ip='10.204.216.184') - #ovs_fix2.setUp() - #ovs_fix2.config_ovsdb() - - pass diff --git a/fixtures/traffic_tests.py b/fixtures/traffic_tests.py deleted file mode 100755 index 698d7b7e5..000000000 --- a/fixtures/traffic_tests.py +++ /dev/null @@ -1,348 +0,0 @@ -import sys -import os -import fixtures -#from common.contrail_test_init import ContrailTestInit -from nova_test import * -from common.connections import ContrailConnections -sys.path.append(os.path.realpath('tcutils/pkgs/Traffic')) -from traffic.core.stream import Stream -from traffic.core.profile import create, ContinuousProfile, ContinuousSportRange -from traffic.core.helpers import Host -from traffic.core.helpers import Sender, Receiver - -class trafficTestFixture(fixtures.Fixture): - - def __init__(self, connections): - self.connections = connections - self.inputs = self.connections.inputs - self.nova_h = self.connections.nova_h - self.logger = self.inputs.logger - # end __init__ - - def setUp(self): - super(trafficTestFixture, self).setUp() - # end setUp - - def startTraffic( - self, name='stream', num_streams=1, start_port=9100, tx_vm_fixture=None, - rx_vm_fixture=None, stream_proto='udp', vm_fip_info=None, - packet_size=100, cfg_profile='ContinuousProfile', start_sport=8000, - 
total_single_instance_streams=20, chksum=False, pps=100, fip=None, - tx_vn_fixture=None, rx_vn_fixture=None, af=None): - ''' Start traffic based on inputs given.. - Return {'status': True, 'msg': None} if traffic started successfully..else return {'status': False, 'msg': err_msg}.. - Details on inputs: - name : Stream identifier; num_streams : number of separate sendpkts instance streams [will take more memory] - start_port : Destination start port if num_streams is used - tx_vm_fixture & rx_vm_fixture : Needed for vm_ip and vm_mdata_ip [to access vm from compute] - stream_proto : TCP, UDP or ICMP; packet_size : if custom size if needed - start_sport : if ContinuousSportRange is used, only supports UDP, starting number for source port - total_single_instance_streams : if ContinuousSportRange is used, specify number of streams - pps :Number of packets to launch per sec - ContinuousSportRange launches n streams @defined pps, with one instance of sendpkts.. - ''' - self.logger.info("startTraffic data: name- %s, stream_proto-%s, packet_size-%s, total_single_instance_streams-%s, chksum-%s, pps-%s" - % (name, stream_proto, packet_size, total_single_instance_streams, chksum, pps)) - status = True - msg = None - self.packet_size = packet_size - self.chksum = chksum - self.start_port = start_port - self.start_sport = start_sport - self.endport = start_sport + total_single_instance_streams - self.total_single_instance_streams = total_single_instance_streams - self.tx_vm_fixture = tx_vm_fixture - self.rx_vm_fixture = rx_vm_fixture - tx_vn_fq_name = tx_vn_fixture.get_vn_fq_name() if tx_vn_fixture else None - rx_vn_fq_name = rx_vn_fixture.get_vn_fq_name() if rx_vn_fixture else None - af = af if af is not None else self.inputs.get_af() - self.stream_proto = stream_proto - self.vm_fip_info = vm_fip_info - self.traffic_fip = False - if self.vm_fip_info == None: - self.traffic_fip = False - else: - self.traffic_fip = True - if not self.traffic_fip: - self.tx_vm_node_ip = 
self.inputs.host_data[ - self.nova_h.get_nova_host_of_vm(self.tx_vm_fixture.vm_obj)]['host_ip'] - self.rx_vm_node_ip = self.inputs.host_data[ - self.nova_h.get_nova_host_of_vm(self.rx_vm_fixture.vm_obj)]['host_ip'] - self.tx_local_host = Host( - self.tx_vm_node_ip, - self.inputs.host_data[self.tx_vm_node_ip]['username'], - self.inputs.host_data[self.tx_vm_node_ip]['password']) - self.rx_local_host = Host( - self.rx_vm_node_ip, - self.inputs.host_data[self.rx_vm_node_ip]['username'], - self.inputs.host_data[self.rx_vm_node_ip]['password']) - self.send_host = Host(self.tx_vm_fixture.local_ip, - self.tx_vm_fixture.vm_username, self.tx_vm_fixture.vm_password) - self.recv_host = Host(self.rx_vm_fixture.local_ip, - self.rx_vm_fixture.vm_username, self.rx_vm_fixture.vm_password) - else: - self.tx_vm_node_ip = None - self.rx_vm_node_ip = None - self.tx_local_host = Host( - self.inputs.cfgm_ip, - self.inputs.host_data[self.tx_vm_node_ip]['username'], - self.inputs.host_data[self.tx_vm_node_ip]['password']) - self.rx_local_host = Host( - self.inputs.cfgm_ip, - self.inputs.host_data[self.rx_vm_node_ip]['username'], - self.inputs.host_data[self.rx_vm_node_ip]['password']) - self.send_host = Host(self.vm_fip_info[self.tx_vm_fixture.vm_name]) - self.recv_host = Host(self.vm_fip_info[self.rx_vm_fixture.vm_name]) - self.sender = list() - self.receiver = list() - self.num_streams = 0 - - if fip is None: - self.dst_ips = list(); self.src_ips = list() - if af == 'dual' or af == 'v4': - self.src_ips.extend(self.tx_vm_fixture.get_vm_ips( - vn_fq_name=tx_vn_fq_name, af='v4')) - self.dst_ips.extend(self.rx_vm_fixture.get_vm_ips( - vn_fq_name=rx_vn_fq_name, af='v4')) - if af == 'dual' or af == 'v6': - self.src_ips.extend(self.tx_vm_fixture.get_vm_ips( - vn_fq_name=tx_vn_fq_name, af='v6')) - self.dst_ips.extend(self.rx_vm_fixture.get_vm_ips( - vn_fq_name=rx_vn_fq_name, af='v6')) - else: - self.dst_ips = [fip] - self.src_ips = [self.tx_vm_fixture.vm_ip] - if len(self.dst_ips) > 
len(self.src_ips): - raise Exception('No of destination ips cant be greater than' - ' source ips, for multi stream case') - - for index in range(len(self.dst_ips)): - name = name + '_dst' + str(index) + '_' - for i in range(num_streams): - self.name = name + self.stream_proto + str(i) - self.dport = start_port + i - m = "Send protocol %s traffic to port %s" % ( - self.stream_proto, self.dport) - if self.stream_proto == 'icmp': - m = "Send protocol %s traffic" % self.stream_proto - self.logger.info(m) - stream = Stream(proto=self.stream_proto, - src=self.src_ips[index], - dst=self.dst_ips[index], - dport=self.dport) - if fip: - listener = self.rx_vm_fixture.vm_ip - else: - listener = self.dst_ips[index] - # stream profile... - if cfg_profile == 'ContinuousSportRange': - profile = ContinuousSportRange(stream=stream, - startport=self.start_sport, - endport=self.endport, - listener=listener, - size=self.packet_size, - chksum=self.chksum, pps=pps) - elif cfg_profile == 'ContinuousProfile': - profile = ContinuousProfile(stream=stream, - listener=listener, - size=self.packet_size, - chksum=self.chksum) - # sender profile... 
- sender = Sender(self.name, profile, self.tx_local_host, - self.send_host, self.inputs.logger) - receiver = Receiver(self.name, profile, self.rx_local_host, - self.recv_host, self.inputs.logger) - self.logger.info("tx vm - node %s, mdata_ip %s, vm_ip %s" %( - self.tx_local_host.ip, self.send_host.ip, - self.src_ips[index])) - self.logger.info("rx vm - node %s, mdata_ip %s, vm_ip %s" %( - self.rx_local_host.ip, self.recv_host.ip, - self.dst_ips[index])) - receiver.start() - self.logger.info("Starting %s traffic from %s to %s" %( - self.stream_proto, self.src_ips[index], - self.dst_ips[index])) - sender.start() - retries = 10 - j = 0 - sender.sent = None - while j < retries and sender.sent == None: - # wait before checking for stats as it takes time for file - # update with stats - time.sleep(5) - sender.poll() - # end while - if sender.sent == None: - msg = "send %s traffic failure from %s " % ( - self.stream_proto, self.src_ips[index]) - self.logger.info( - "traffic tx stats not available !!, details: %s" % msg) - else: - self.logger.info( - "traffic running good, sent %s pkts so far.." % - sender.sent) - self.sender.append(sender) - self.receiver.append(receiver) - self.num_streams += 1 - if msg != None: - status = False - return {'status': status, 'msg': msg} - # end of startTraffic - - def getLiveTrafficStats(self): - ''' get stats of traffic streams launched using startTraffic.. - Return True if sender & receiver stats are incrementing [confirms that sender is still sending].. - Return False if the stats is not incrementing.. which implies traffic is disrupted.. - Depending on the trigger applied, calling code can use the return value to make decision.. 
- ''' - ret = {} - ret['msg'] = [] - ret['status'] = None - stats = {} - poll_cnt = 5 - status = {} - for j in range(poll_cnt): - stats['poll' + str(j)] = {} - st = stats['poll' + str(j)] - for i in range(self.num_streams): - st[i] = {} - status[i] = {} - self.receiver[i].poll() - self.sender[i].poll() - if self.stream_proto == 'tcp' or self.stream_proto == 'udp': - st[i]['sent'] = self.sender[i].sent - st[i]['recv'] = self.receiver[i].recv - elif self.stream_proto == 'icmp' or self.stream_proto == 'icmpv6': - st[i]['sent'] = self.sender[i].sent - st[i]['recv'] = self.sender[i].recv - self.logger.info("stream %s: stats sent: %s, stats rev: %s" % - (i, st[i]['sent'], st[i]['recv'])) - # compare stats in this loop with previous to see if traffic - # flowing - if j > 0: - # checking sender.. - if stats['poll' + str(j)][i]['sent'] > stats['poll' + str(j - 1)][i]['sent']: - self.logger.info("stream %s of type %s, sender %s good, sent pkts in last 2 polls: %s, %s" % ( - i, self.stream_proto, self.tx_vm_fixture.vm_ip, stats['poll' + str(j - 1)][i]['sent'], stats['poll' + str(j)][i]['sent'])) - status[i]['sent'] = True - else: - msg = "stream %s of type %s, from sender %s seems to be down; sent pkts in last 2 polls: %s, %s" % ( - i, self.stream_proto, self.tx_vm_fixture.vm_ip, stats['poll' + str(j - 1)][i]['sent'], stats['poll' + str(j)][i]['sent']) - ret['msg'].append(msg) - self.logger.info(msg) - status[i]['sent'] = False - # checking receiver.. - if stats['poll' + str(j)][i]['recv'] > stats['poll' + str(j - 1)][i]['recv']: - self.logger.info("stream %s, of type %s, receiver %s good, recd. pkts in last 2 polls: %s, %s" % ( - i, self.stream_proto, self.rx_vm_fixture.vm_ip, stats['poll' + str(j - 1)][i]['recv'], stats['poll' + str(j)][i]['recv'])) - status[i]['recv'] = True - else: - msg = "stream %s of type %s @receiver %s seems to be down; recd. 
pkts in last 2 polls: %s, %s" % ( - i, self.stream_proto, self.rx_vm_fixture.vm_ip, stats['poll' + str(j - 1)][i]['recv'], stats['poll' + str(j)][i]['recv']) - ret['msg'].append(msg) - self.logger.info(msg) - status[i]['recv'] = False - if status[i]['sent'] == False or status[i]['recv'] == False: - ret['status'] = False - else: - ret['status'] = True - # end compare if loop - # end for loop for checking all streams - # if stats are incrementing [True case], come out of poll loop.. no need to recheck again. - # if stats dont increment, go into loop to poll multiple times to confirm that its due to traffic not flowing - # and not due to stats file update issue [where file update takes - # sometime to reflect changes] - if ret['status'] == True: - print "breaking loop in %s attempts" % j - break - else: - time.sleep(3) # sleep and poll again.. - return ret - # end of getLiveTrafficStats - - def stopTraffic(self, loose='no', loose_allow=100, wait_for_stop=True): - ''' Stop traffic launched using startTraffic. - Return [] if recv = sent, else, return error info - set loose if you are ok with allowing some loss, used for scale/stress tests. 
- ''' - msg = [] - setFail = -1 - for i in range(self.num_streams): - self.dport = self.start_port + i - self.sender[i].stop() - self.logger.info( - "Waiting for Receiver to receive all packets in transit after stopping sender..") - if wait_for_stop: - time.sleep( - 60) if self.total_single_instance_streams > 1 else time.sleep(2) - else: - time.sleep(1) - self.receiver[i].stop() - #import pdb; pdb.set_trace() - if self.sender[i].sent == None or self.receiver[i].recv == None: - msg = "Cannot proceed with stats check to compare" - self.logger.error(msg) - return msg - if self.stream_proto == 'tcp' or self.stream_proto == 'udp': - stats = " dest_port: %s, sender %s sent: %s, receiver %s recd: %s" % ( - self.dport, self.tx_vm_fixture.vm_ip, self.sender[i].sent, self.rx_vm_fixture.vm_ip, self.receiver[i].recv) - if self.receiver[i].recv == 0: - msg.append("receiver recv counter is 0 !!") - setFail = 1 - if self.receiver[i].recv != self.sender[i].sent: - if loose == 'yes': - self.logger.info( - "rx less than tx, check within expected number- %s" % stats) - setFail = 0 - if self.sender[i].sent - self.receiver[i].recv < loose_allow: - setFail = 0 - else: - setFail = 1 - elif self.receiver[i].recv > self.sender[i].sent: - self.logger.info( - "rx more than tx, traffic tool issue with filter, can be ignored- %s" % stats) - setFail = 0 - else: - setFail = 1 - if setFail == 1: - msg.append( - "data loss seen, receiver received less than sent !!") - elif self.stream_proto == 'icmp': - stats = " sender %s sent: %s, sender recd responses: %s" % ( - self.tx_vm_fixture.vm_ip, self.sender[i].sent, self.sender[i].recv) - if self.sender[i].sent != self.sender[i].recv: - setFail = 1 - - self.logger.info( - "stats after stopping stream %s for proto %s is %s" % - (i, self.stream_proto, stats)) - if setFail == 1: - msg.extend( - ["traffic failed for stream ", self.stream_proto, stats]) - else: - self.logger.info( - "traffic test for stream %s, proto %s passed" % - (i, self.stream_proto)) - 
return msg - # end of stopTraffic - - def returnStats(self): - ''' - Returns traffic stats for each stream which has sent - it returns list of all streams for which traffic has sent - ''' - trafficstats = [] - total_pkt_sent = 0 - total_pkt_recv = 0 - for i in range(self.num_streams): - traffic_flow_stat = { - 'src_ip': self.tx_vm_fixture.vm_ip, 'dst_ip': self.rx_vm_fixture.vm_ip, 'dst_port': self.dport, - 'protocol': self.stream_proto, 'sent_traffic': self.sender[i].sent, 'recv_traffic': self.receiver[i].recv} - trafficstats.append(traffic_flow_stat) - if self.sender[i].sent: - total_pkt_sent = total_pkt_sent + self.sender[i].sent - if self.receiver[i].recv: - total_pkt_recv = total_pkt_recv + self.receiver[i].recv - - return {'traffic_stats': trafficstats, 'total_pkt_sent': total_pkt_sent, 'total_pkt_recv': total_pkt_recv} - # end returnStats diff --git a/fixtures/user_test.py b/fixtures/user_test.py deleted file mode 100644 index cdbfdd693..000000000 --- a/fixtures/user_test.py +++ /dev/null @@ -1,255 +0,0 @@ -import os -import fixtures -import uuid -import fixtures - -from common.connections import ContrailConnections -from tcutils.util import retry -from time import sleep -from tcutils.util import get_dashed_uuid - -from common.openstack_libs import ks_client as ksclient -from common.openstack_libs import ks_exceptions -from common.openstack_libs import keystoneclient - -class UserFixture(fixtures.Fixture): - - def __init__(self, connections, username=None, password=None, tenant=None, role='admin', token=None, endpoint=None): - self.inputs= connections.inputs - self.connections= connections - self.logger = self.inputs.logger - if self.inputs.orchestrator == 'vcenter': - # No concept of user in vcenter, However we satisfy the test infra - # with dummy fixture objects - return - insecure = bool(os.getenv('OS_INSECURE', True)) - if not self.inputs.ha_setup: - self.auth_url = os.getenv('OS_AUTH_URL') or \ - 'http://%s:5000/v2.0' % (self.inputs.openstack_ip) - 
else: - self.auth_url = os.getenv('OS_AUTH_URL') or \ - 'http://%s:5000/v2.0' % (self.inputs.auth_ip) - self.already_present = False - self.username = username - self.password = password - self.tenant = tenant - self.role = role - self.email = str(username) + "@example.com" - self.token = token - self.endpoint = endpoint - self.verify_is_run = False - if self.token: - self.keystone = keystoneclient.Client( - token=self.token, endpoint=self.endpoint) - else: - self.keystone = ksclient.Client( - username=self.inputs.stack_user, password=self.inputs.stack_password, - tenant_name=self.inputs.project_name, auth_url=self.auth_url, - insecure=insecure) - # end __init__ - - def get_role_dct(self, role_name): - - all_roles = self.keystone.roles.list() - for x in all_roles: - if (x.name == role_name): - return x - return None - - def get_user_dct(self, user_name): - - all_users = self.keystone.users.list() - for x in all_users: - if (x.name == user_name): - return x - return None - - def get_tenant_dct(self, tenant_name): - - all_tenants = self.keystone.tenants.list() - for x in all_tenants: - if (x.name == tenant_name): - return x - return None - - def add_user_to_tenant(self, tenant, user, role): - if self.inputs.orchestrator == 'vcenter': - # No concept of user in vcenter - return - configure_role = True - kuser = self.get_user_dct(user) - krole = self.get_role_dct(role) - ktenant = self.get_tenant_dct(tenant) - roles = self.get_role_for_user(user, tenant) - if roles: - for r in roles: - if r.name == role: - configure_role = False - self.logger.info("Already user %s as %s role in tenant %s" - %(user, role, tenant)) - break - if configure_role: - self.keystone.tenants.add_user(ktenant, kuser, krole) - - def remove_user_from_tenant(self, tenant, user, role): - if self.inputs.orchestrator == 'vcenter': - # No concept of user in vcenter - return - user = self.get_user_dct(user) - role = self.get_role_dct(role) - tenant = self.get_tenant_dct(tenant) - 
self.keystone.tenants.remove_user(tenant, user, role) - - def tenant_list(self, limit=None, marker=None): - - return self.keystone.tenants.list() - - def create_roles(self, role): - - self.keystone.roles.create(role) - - def delete_roles(self, role): - - role = self.get_role_dct(role) - self.keystone.roles.delete(role) - - def add_user_role(self, user_name, role_name, tenant_name=None): - - user = self.get_user_dct(user_name) - role = self.get_role_dct(role_name) - if tenant_name: - tenant = self.get_tenant_dct(tenant_name) - - self.keystone.roles.add_user_role(user, role, tenant) - - def get_role_for_user(self, user, tenant_name=None): - - user = self.get_user_dct(user) - if tenant_name: - tenant = self.get_tenant_dct(tenant_name) - return self.keystone.roles.roles_for_user(user, tenant) - - def remove_user_role(self, user, role, tenant=None): - - user = self.get_user_dct(user) - role = self.get_role_dct(role) - if tenant: - tenant = self.get_tenant_dct(tenant) - - self.keystone.roles.remove_user_role(user, role, tenant) - - def roles_list(self): - - return self.keystone.roles.list() - - def create_user(self, user, password, email='', tenant_name=None, enabled=True): - - tenant_id = self.get_tenant_dct(tenant_name).id - self.keystone.users.create(user, password, email, tenant_id, enabled) - - def delete_user(self, user): - - user = self.get_user_dct(user) - self.keystone.users.delete(user) - - def update_user_tenant(self, user, tenant): - - user = self.get_user_dct(user) - tenant = self.get_tenant_dct(tenant) - self.keystone.users.update_tenant(user, tenant) - - def user_list(self, tenant_id=None, limit=None, marker=None): - - return self.keystone.users.list() - - def _reauthenticate_keystone(self): - if self.token: - self.keystone = keystoneclient.Client( - token=self.token, endpoint=self.endpoint) - else: - self.keystone = ksclient.Client( - username=self.inputs.stack_user, password=self.inputs.stack_password, tenant_name=self.inputs.project_name, 
auth_url=self.auth_url) - # end _reauthenticate_keystone - - def setUp(self): - super(UserFixture, self).setUp() - if self.inputs.orchestrator == 'vcenter': - # No concept of user in vcenter - return - try: - ks_project = self.keystone.tenants.find(name=self.inputs.project_name) - if ks_project: - self.project_id = get_dashed_uuid(ks_project.id) - self.logger.debug( - 'Project %s already present. Check user %s exist' % - (self.inputs.project_name, self.username)) - if self.get_user_dct(self.username): - self.logger.info('User %s already exist, skip creation' % - self.username) - self.already_present = True - else: - try: - self.create_user( - self.username, self.password, email=self.email, tenant_name=self.inputs.project_name, enabled=True) - assert self.verify_on_setup() - except Exception as e: - self.logger.warn('User creation failed for exception %s...' % (e)) - #if test tenant already created, associate user to tenant - if self.tenant: - if self.get_tenant_dct(self.tenant): - self.logger.info('Tenant %s exists, associate user %s..' 
% (self.tenant, self.username)) - self.add_user_to_tenant(self.tenant, self.username, self.role) - except ks_exceptions.NotFound, e: - self.logger.info('Project %s not found, skip creating user %s' % ( - self.project_name, self.username)) - # end setUp - - def cleanUp(self): - super(UserFixture, self).cleanUp() - if self.inputs.orchestrator == 'vcenter': - # No concept of user in vcenter - return - do_cleanup = True - if self.inputs.fixture_cleanup == 'no': - do_cleanup = False - if self.already_present: - do_cleanup = False - if self.inputs.fixture_cleanup == 'force': - do_cleanup = True - if do_cleanup: - self._reauthenticate_keystone() - self.logger.info('Deleting user %s' %self.username) - self.delete_user(self.username) - if self.verify_is_run: - assert self.verify_on_cleanup() - else: - self.logger.debug('Skipping the deletion of User %s' % - self.username) - - # end cleanUp - - def verify_on_setup(self): - if self.inputs.orchestrator == 'vcenter': - # No concept of user in vcenter - return True - result = True - if not self.get_user_dct(self.username): - result &= False - self.logger.error('Verification of user %s in keystone ' - 'failed!! 
' % (self.username)) - self.verify_is_run = True - return result - # end verify_on_setup - - def verify_on_cleanup(self): - if self.inputs.orchestrator == 'vcenter': - # No concept of user in vcenter - return True - result = True - if self.get_user_dct(self.username): - result &= False - self.logger.error('User %s is still present in Keystone' % ( - self.username)) - return result - # end verify_on_cleanup -# end UserFixture diff --git a/fixtures/vcenter.py b/fixtures/vcenter.py deleted file mode 100644 index ab3851ea5..000000000 --- a/fixtures/vcenter.py +++ /dev/null @@ -1,841 +0,0 @@ -import time -import random -import uuid -import re -import os -from netaddr import IPNetwork -from fabric.context_managers import settings, hide -from fabric.api import run, env -from fabric.operations import get, put -from orchestrator import Orchestrator, OrchestratorAuth -from contrailapi import ContrailApi -from tcutils.util import * -from tcutils.cfgparser import parse_cfg_file -from vnc_api.vnc_api import VncApi -from common.vcenter_libs import _vimtype_dict -from common.vcenter_libs import connect -from common.vcenter_libs import vim -from tcutils.config import vcenter_verification -from pyVmomi import vim, vmodl - -def _vim_obj(typestr, **kwargs): - return _vimtype_dict[typestr](**kwargs) - -def _wait_for_task (task): - while (task.info.state == vim.TaskInfo.State.running or - task.info.state == vim.TaskInfo.State.queued): - time.sleep(2) - if task.info.state != vim.TaskInfo.State.success: - if task.info.state == vim.TaskInfo.State.error: - raise ValueError(task.info.error.localizedMessage) - raise ValueError("Something went wrong in wait_for_task") - return - -def _match_obj(obj, param): - attr = param.keys()[0] - attrs = [attr] - if '.' 
in attr: - attrs = attr.split('.') - for i in range(len(attrs) - 1): - if not hasattr(obj, attrs[i]): - break - obj = getattr(obj, attrs[i]) - attr = attrs[-1] - return hasattr(obj, attr) and getattr(obj, attr) == param.values()[0] - - -class NFSDatastore: - - __metaclass__ = Singleton - - def __init__(self, inputs, vc): - self.name = 'nfs-ds' - self.path = '/nfs' - self.server = inputs.cfgm_ip - self.vcpath = '/vmfs/volumes/nfs-ds/' - - if vc._find_obj(vc._dc, 'ds', {'name':self.name}): - return - - username = inputs.host_data[self.server]['username'] - password = inputs.host_data[self.server]['password'] - with settings(host_string=username+'@'+self.server, password=password, - warn_only = True, shell = '/bin/sh -l -c'): - sudo('mkdir /nfs') - sudo('apt-get -y install nfs-kernel-server') - sudo("sed -i '/nfs /d' /etc/exports") - sudo('echo "/nfs *(rw,sync,no_root_squash)" >> /etc/exports') - sudo('service nfs-kernel-server restart') - - hosts = [host for cluster in vc._dc.hostFolder.childEntity for host in cluster.host] - spec = _vim_obj('host.NasSpec', remoteHost=self.server, remotePath=self.path, - localPath=self.name, accessMode='readWrite') - for host in hosts: - host.configManager.datastoreSystem.CreateNasDatastore(spec) - -class VcenterVlanMgr: - - __metaclass__ = Singleton - - def __init__(self, dvs): - self._vlans = [(vlan.primaryVlanId, vlan.secondaryVlanId) for vlan in dvs.config.pvlanConfig if vlan.pvlanType == 'isolated'] - - def allocate_vlan(self): - return self._vlans.pop(0) - - def free_vlan(self, vlan): - self._vlans.append(vlan) - -class VcenterOrchestrator(ContrailApi): - - def __init__(self, inputs, host, port, user, pwd, dc_name, vnc, logger): - super(VcenterOrchestrator, self).__init__(inputs, vnc, logger) - self._inputs = inputs - self._host = host - self._port = port - self._user = user - self._passwd = pwd - self._dc_name = dc_name - self._vnc = vnc - self._log = logger - self._images_info = parse_cfg_file('configs/images.cfg') - 
self._connect_to_vcenter() - self._vlanmgmt = VcenterVlanMgr(self._vs) - self._create_keypair() - self._nfs_ds = NFSDatastore(self._inputs, self) - self.enable_vmotion(self.get_hosts()) - - def is_feature_supported(self, feature): - unsupported_features = ['multi-subnet', 'multi-tenant', 'multi-ipam', 'service-instance'] - return feature not in unsupported_features - - def _connect_to_vcenter(self): - self._si = connect.SmartConnect(host=self._host, port=self._port, user=self._user, pwd=self._passwd) - if not self._si: - raise Exception("Unable to connect to vcenter: %s:%d %s/%s" % (self._host, - self._port, self._user, self._passwd)) - self._content = self._si.RetrieveContent() - if not self._content: - raise Exception("Unable to retrieve content from vcenter") - self._dc = self._find_obj(self._content.rootFolder, 'dc' , {'name' : self._dc_name}) - if not self._dc: - raise Exception("Datacenter %s not found" % self._dc_name) - dvs = self._get_obj_list(self._dc, 'dvs.VSwitch') - if not dvs: - raise Exception("Datacenter %s does not have a distributed virtual switch" % self._dc_name) - if len(dvs) > 1: - for dv in dvs: - if dv.name in self._inputs.dv_switch: - self._vs = dvs[0] - break - else: - self._vs = dvs[0] - - self._clusters_hosts = self._get_clusters_hosts() - if len(self.get_zones()) == 0: - raise Exception("Datacenter %s has no clusters" % self._dc_name) - if len(self.get_hosts()) == 0: - raise Exception("Datacenter %s has no hosts" % self._dc_name) - self._computes = self._get_computes() - - def _find_obj (self, root, vimtype, param): - if vimtype == 'ip.Pool': - items = self._content.ipPoolManager.QueryIpPools(self._dc) - else: - items = self._content.viewManager.CreateContainerView(root, [_vimtype_dict[vimtype]], True).view - for obj in items: - if _match_obj(obj, param): - return obj - return None - - def _get_obj_list (self, root, vimtype): - view = self._content.viewManager.CreateContainerView(root, [_vimtype_dict[vimtype]], True) - return [obj for 
obj in view.view] - - def _get_clusters_hosts(self): - dd = {} - for cluster in self._get_obj_list(self._dc, 'cluster'): - hosts = [host.name for host in self._get_obj_list(cluster, 'host')] - dd[cluster.name] = hosts - self._log.debug('Vcenter clusters & hosts\n%s' % str(dd)) - return dd - - def get_hosts(self, zone=None): - if zone: - return self._clusters_hosts[zone][:] - return [host for hosts in self._clusters_hosts.values() for host in hosts] - - def get_zones(self): - return self._clusters_hosts.keys() - - def get_image_account(self, image_name): - return (self._images_info[image_name]['username'], - self._images_info[image_name]['password']) - - def get_image_name_for_zone(self, image_name='ubuntu', zone=None): - return image_name - - def enable_vmotion(self, hosts): - for host in hosts: - username = self._inputs.host_data[host]['username'] - password = self._inputs.host_data[host]['password'] - with settings(host_string=username+'@'+host, password=password, - warn_only = True, shell = '/bin/sh -l -c'): - run('vim-cmd hostsvc/vmotion/vnic_set vmk0') - - @threadsafe_generator - def _get_computes(self): - while True: - hosts = [(server, cluster) for cluster, servers in self._clusters_hosts.items() for server in servers] - for host in hosts: - yield host - - def _upload_to_host(self, host, image): - vmx = self._images_info[image].get('vctmpl', None) - loc = self._images_info[image].get('vcpath', None) - vmdk = self._images_info[image].get('vcname', None) - webserver = self._images_info[image]['webserver'] or \ - os.getenv('IMAGE_WEB_SERVER', '10.204.217.158') - if not vmdk: - vmdk = self._images_info[image]['name'] - if not vmx or not loc or not vmdk or ('vmdk' not in vmdk): - raise Exception("no suitable vmdk or template for %s" % image) - - user = self._inputs.host_data[host.name]['username'] - pwd = self._inputs.host_data[host.name]['password'] - url = 'http://%s/%s/' % (webserver, loc) - url_vmx = url + vmx - url_vmdk = url + vmdk - dst = 
self._nfs_ds.vcpath + image + '/' - dst_vmdk = dst + image + '.vmdk' - tmp_vmdk = dst + vmdk - with settings(host_string='%s@%s' % (user, host.name), password=pwd, - warn_only = True, shell = '/bin/sh -l -c'): - run('mkdir -p %s' % dst) - run('wget %s -P %s' % (url_vmx, dst)) - run('wget %s -P %s' % (url_vmdk, dst)) - run('vmkfstools -i %s -d zeroedthick %s' % (tmp_vmdk, dst_vmdk)) - run('rm %s' % tmp_vmdk) - - return self._nfs_ds.name, image + '/' + vmx - - def _load_and_register_template(self, image): - host_name, cluster_name = next(self._computes) - host = self._find_obj(self._find_obj(self._dc, 'cluster', {'name' : cluster_name}), - 'host', {'name' : host_name}) - ds, vmtx = self._upload_to_host(host, image) - folder = self._dc.vmFolder - _wait_for_task(folder.RegisterVM_Task(path='[%s] %s' % (ds, vmtx), name=image, - asTemplate=True, host=host, pool=None)) - - def create_vm(self, vm_name, image_name, vn_objs, count=1, zone=None, node_name=None, **kwargs): - if self._find_obj(self._dc, 'vm', {'name' : vm_name}): - raise Exception("VM exists with the name %s" % vm_name) - - if zone and ((zone not in self._clusters_hosts) or (not len(self._clusters_hosts[zone]))): - raise Exception("No cluster named %s or no hosts in it" % zone) - - host = None - if node_name: - host = self._find_obj(self._dc, 'host', {'name' : node_name}) - if not host: - raise Exception("host %s not found" % node_name) - - tmpl = self._find_obj(self._dc, "vm", {'name' : image_name}) - if not tmpl: - self._load_and_register_template(image_name) - tmpl = self._find_obj(self._dc, "vm", {'name' : image_name}) - if not tmpl: - raise Exception("template not found") - - nets = [self._find_obj(self._dc, 'dvs.PortGroup', {'name' : vn.name}) for vn in vn_objs] - objs = [] - for _ in range(count): - if host: - tgthost = host - elif zone: - while True: - host_name, cluster_name = next(self._computes) - if cluster_name == zone: - break - tgthost = self._find_obj(self._find_obj(self._dc, 'cluster', {'name' 
: cluster_name}), - 'host', {'name' : host_name}) - else: - host_name, cluster_name = next(self._computes) - tgthost = self._find_obj(self._find_obj(self._dc, 'cluster', {'name' : cluster_name}), - 'host', {'name' : host_name}) - - vm = VcenterVM.create_in_vcenter(self, vm_name, tmpl, nets, tgthost) - objs.append(vm) - sg_ids = kwargs.get('sg_ids', []) - for sg_id in sg_ids: - self.add_security_group(vm_id=vm.id, sg_id=sg_id) - return objs - - def delete_vm(self, vm, **kwargs): - vm_obj = self._find_obj(self._dc, 'vm', {'name' : vm.name}) - if vm_obj: - if vm_obj.runtime.powerState != 'poweredOff': - _wait_for_task(vm_obj.PowerOff()) - _wait_for_task(vm_obj.Destroy()) - - @retry(tries=30, delay=5) - def wait_till_vm_is_active(self, vm_obj, **kwargs): - vm = self._find_obj(self._dc, 'vm', {'name' : vm_obj.name}) - return vm.runtime.powerState == 'poweredOn' - - def wait_till_vm_status(self, vm_obj, status): - raise Exception('Unimplemented interface') - - def enter_maintenance_mode(self, name): - host = self._find_obj(self._dc, 'host', {'name' : name}) - assert host, "Unable to find host %s" % name - if host.runtime.inMaintenanceMode: - self._log.debug("Host %s already in maintenance mode" % name) - for vm in host.vm: - if vm.summary.config.template: - continue - self._log.debug("Powering off %s" % vm.name) - _wait_for_task(vm.PowerOff()) - self._log.debug("EnterMaintenence mode on host %s" % name) - _wait_for_task(host.EnterMaintenanceMode(timeout=10)) - - def exit_maintenance_mode(self, name): - host = self._find_obj(self._dc, 'host', {'name' : name}) - assert host, "Unable to find host %s" % name - if not host.runtime.inMaintenanceMode: - self._log.debug("Host %s not in maintenance mode" % name) - self._log.debug("ExitMaintenence mode on host %s" % name) - _wait_for_task(host.ExitMaintenanceMode(timeout=10)) - for vm in host.vm: - if vm.summary.config.template: - continue - self._log.debug("Powering on %s" % vm.name) - _wait_for_task(vm.PowerOn()) - - def 
add_networks_to_vm(self, vm_obj, vns): - nets = [self._find_obj(self._dc, 'dvs.PortGroup', {'name':vn_obj.name}) for vn_obj in vns] - vm_obj.add_networks(nets) - - def delete_networks_from_vm(self, vm_obj, vns): - nets = [self._find_obj(self._dc, 'dvs.PortGroup', {'name':vn_obj.name}) for vn_obj in vns] - vm_obj.delete_networks(nets) - - def change_network_to_vm(self,vm_obj,vn): - net = self._find_obj(self._dc, 'dvs.PortGroup', {'name':vn}) - vm_obj.change_networks(net) - - def get_host_of_vm(self, vm_obj): - host = self._find_obj(self._dc, 'host', {'name' : vm_obj.host}) - contrail_vm = None - for vm in host.vm: - if 'ContrailVM' in vm.name: - contrail_vm = vm - break - return self._inputs.host_data[contrail_vm.summary.guest.ipAddress]['name'] - - def get_networks_of_vm(self, vm_obj, **kwargs): - return vm_obj.nets[:] - - @retry(tries=10, delay=5) - def is_vm_deleted(self, vm_obj, **kwargs): - return self._find_obj(self._dc, 'vm', {'name' : vm_obj.name}) == None - - def get_vm_if_present(self, vm_name, **kwargs): - vmobj = self._find_obj(self._dc, 'vm', {'name' : vm_name}) - if vmobj: - return VcenterVM.create_from_vmobj(self, vmobj) - return None - - def get_vm_by_id(self, vm_id, **kwargs): - vmobj = self._find_obj(self._dc, 'vm', {'summary.config.instanceUuid':vm_id}) - if vmobj: - return VcenterVM.create_from_vmobj(self, vmobj) - return None - - def get_vm_list(self, name_pattern='', **kwargs): - vm_list = [] - vms = self._get_obj_list(self._dc, 'vm') - for vmobj in vms: - if 'ContrailVM' in vmobj.name: - continue - if re.match(r'%s' % name_pattern, vmobj.name, re.M | re.I): - vm_list.append(vmobj) - vm_list = [VcenterVM.create_from_vmobj(self, vmobj) for vmobj in vm_list] - return vm_list - - @retry(delay=5, tries=35) - def get_vm_detail(self, vm_obj, **kwargs): - return vm_obj.get() - - def get_console_output(self, vm_obj, **kwargs): - return None - - def get_vm_ip(self, vm_obj, vn_name=None, **kwargs): - self.get_vm_detail(vm_obj) - if vn_name: - ret = 
vm_obj.ips.get(vn_name, None) - else: - ret = vm_obj.ips.values() - return [ret] - - def migrate_vm(self, vm_obj, host): - if host == vm_obj.host: - self._log.debug("Target Host %s is same as current host %s" % (host, vm_obj.host)) - return - tgt = self._find_obj(self._dc, 'host', {'name':host}) - assert tgt, 'Migration failed, no such host:%s' % host - vm = self._find_obj(self._dc, 'vm', {'name' : vm_obj.name}) - _wait_for_task(vm.RelocateVM_Task(_vim_obj('vm.Reloc',host=tgt,datastore=tgt.datastore[0]))) - - def _create_keypair(self): - username = self._inputs.host_data[self._inputs.cfgm_ip]['username'] - password = self._inputs.host_data[self._inputs.cfgm_ip]['password'] - with settings( - host_string='%s@%s' % (username, self._inputs.cfgm_ip), - password=password, warn_only=True, abort_on_prompts=True): - rsa_pub_arg = '.ssh/id_rsa' - if exists('.ssh/id_rsa.pub'): # If file exists on remote m/c - get('.ssh/id_rsa.pub', '/tmp/') - else: - run('mkdir -p .ssh') - run('rm -f .ssh/id_rsa*') - run('ssh-keygen -f %s -t rsa -N \'\'' % (rsa_pub_arg)) - get('.ssh/id_rsa.pub', '/tmp/') - - def get_key_file(self): - return self.tmp_key_file - - def put_key_file_to_host(self, host_ip): - username = self._inputs.host_data[self._inputs.cfgm_ip]['username'] - password = self._inputs.host_data[self._inputs.cfgm_ip]['password'] - with hide('everything'): - with settings(host_string='%s@%s' % ( - username, self._inputs.cfgm_ip), - password=password, - warn_only=True, abort_on_prompts=False): - get('.ssh/id_rsa', '/tmp/') - get('.ssh/id_rsa.pub', '/tmp/') - with hide('everything'): - with settings( - host_string='%s@%s' % (self._inputs.host_data[host_ip]['username'], - host_ip), password=self._inputs.host_data[ - host_ip]['password'], - warn_only=True, abort_on_prompts=False): - if self._inputs.cfgm_ips[0] != host_ip: - put('/tmp/id_rsa', '/tmp/id_rsa') - put('/tmp/id_rsa.pub', '/tmp/id_rsa.pub') - run('chmod 600 /tmp/id_rsa') - self.tmp_key_file = '/tmp/id_rsa' - - def 
create_vn(self, name, subnets, **kwargs): - if self._find_obj(self._dc, 'dvs.PortGroup', {'name' : name}) or self._find_obj(self._dc, - 'ip.Pool', {'name' : 'pool-'+name}): - raise Exception('A VN %s or ip pool %s, exists with the name' % (name, 'pool-'+name)) - if len(subnets) != 1: - raise Exception('Cannot create VN with %d subnets' % len(subnets)) - vlan = self._vlanmgmt.allocate_vlan() - if not vlan: - raise Exception("Vlans exhausted") - try: - dhcp = kwargs.get('enable_dhcp', True) - return VcenterVN.create_in_vcenter(self, name, vlan, subnets, dhcp) - except: - self._vlanmgmt.free_vlan(vlan) - raise - - def delete_vn(self, vn_obj, **kwargs): - self._vlanmgmt.free_vlan(vn_obj.vlan) - self._content.ipPoolManager.DestroyIpPool(self._dc, vn_obj.ip_pool_id, True) - pg = self._find_obj(self._dc, 'dvs.PortGroup', {'name' : vn_obj.name}) - pg.Destroy() - return True - - def get_vn_obj_if_present(self, vn_name, **kwargs): - pg = self._find_obj(self._dc, 'dvs.PortGroup', {'name' : vn_name}) - if pg: - return VcenterVN.create_from_vnobj(self, pg) - return None - - def get_vn_obj_from_id(self, vn_id): - obj = self._vnc.virtual_network_read(id=vn_id) - return self.get_vn_obj_if_present(obj.name) - - def get_vn_name(self, vn_obj, **kwargs): - return vn_obj.name - - def get_vn_id(self, vnobj, **kwargs): - if not vnobj.uuid: - vnobj.get() - return vnobj.uuid - - def get_image_name_for_zone(self, image_name='ubuntu', zone=None): - return image_name - - def run_a_command(self, vm_id , vm_user, vm_password, path_to_cmd, cmd_args = None): - vm = self._find_obj(self._dc, 'vm', {'summary.config.instanceUuid':vm_id}) - creds = _vim_obj('vm.PassAuth', username = vm_user, password = vm_password) - ps = _vim_obj('vm.Prog', programPath=path_to_cmd, arguments=cmd_args) - pm = self._content.guestOperationsManager.processManager - res = pm.StartProgramInGuest(vm, creds, ps) - return res - - def get_vm_tap_interface(self,obj): - return obj['parent_interface'] - - def 
get_security_group(self, sg, **kwargs): - ret = super(VcenterOrchestrator, self).get_security_group(sg) - if ret: - return ret - return super(VcenterOrchestrator, self).get_security_group(['default-domain', 'vCenter', sg]) - - def get_vcenter_introspect(self): - return vcenter_verification.VMWareVerificationLib(self._inputs) - - def verify_vm_in_vcenter(self, vm_obj): - vm_name = vm_obj.name - vrouter = self._inputs.host_data[self.get_host_of_vm(vm_obj)]['host_ip'] - inspect = self.get_vcenter_introspect() - return inspect.verify_vm_in_vcenter(vrouter,vm_name) - - def verify_vm_not_in_vcenter(self,vm_obj): - vm_name = vm_obj.name - vrouter = self._inputs.host_data[self.get_host_of_vm(vm_obj)]['host_ip'] - inspect = self.get_vcenter_introspect() - return inspect.verify_vm_not_in_vcenter(vrouter,vm_name) - -class Subnets(object): - - def __init__(self,subnet): - self.subnet = subnet - self.pefix = IPNetwork(self.subnet) - - @property - def prefix(self): - return self.pefix - - @property - def hosts(self): - return self.pefix.iter_hosts() - - @property - def netmask(self): - return self.pefix.netmask - - @property - def sub_network(self): - return self.pefix.network - - @property - def range(self): - ip_list = list(self.hosts) - range = str(ip_list[0]) + '#' + str(len(ip_list)) - return range - -class IPv4Subnet(Subnets): - - def __init__(self,subnet): - super(IPv4Subnet,self).__init__(subnet) - -class IPv6Subnet(Subnets): - - def __init__(self,subnet): - super(IPv6Subnet,self).__init__(subnet) - - @property - def range(self): - ip_list = self.pefix.iter_hosts() - ip = next(ip_list) - ip = next(ip_list) - range = str(ip) + '#' + '255' - return range - -class VcenterVN: - - @staticmethod - def create_in_vcenter(vcenter, name, vlan, prefix, dhcp=True): - vn = VcenterVN() - vn.vcenter = vcenter - vn.name = name - vn.vlan = vlan - vn.uuid = None - - v6_network = None - for p in prefix: - if (IPNetwork(p['cidr']).version == 4): - v4_network = IPv4Subnet(p['cidr']) - if 
(IPNetwork(p['cidr']).version == 6): - v6_network = IPv6Subnet(p['cidr']) - ip_list = list(v4_network.hosts) - - ipam_setting = [_vim_obj('dvs.Blob', key='external_ipam', opaqueData='true')] if not dhcp else None - spec = _vim_obj('dvs.ConfigSpec', name=name, type='earlyBinding', numPorts = len(ip_list), - defaultPortConfig=_vim_obj('dvs.PortConfig', - vlan=_vim_obj('dvs.PVLan', pvlanId=vlan[1])), - vendorSpecificConfig=ipam_setting) - _wait_for_task(vcenter._vs.AddDVPortgroup_Task([spec])) - pg = vcenter._find_obj(vcenter._dc, 'dvs.PortGroup', {'name' : name}) - - if v6_network: - ip_pool = _vim_obj('ip.Pool', name='ip-pool-for-'+name, - ipv4Config=_vim_obj('ip.Config', - subnetAddress = str(v4_network.sub_network), - netmask = str(v4_network.netmask), - range = v4_network.range, - ipPoolEnabled = dhcp), - ipv6Config=_vim_obj('ip.Config', - subnetAddress = str(v6_network.sub_network), - netmask = str(v6_network.netmask), - range = v6_network.range, - ipPoolEnabled = dhcp), - networkAssociation = [_vim_obj('ip.Association', - network=pg, - networkName=name)]) - else: - ip_pool = _vim_obj('ip.Pool', name='ip-pool-for-'+name, - ipv4Config=_vim_obj('ip.Config', - subnetAddress = str(v4_network.sub_network), - netmask = str(v4_network.netmask), - range = v4_network.range, - ipPoolEnabled = dhcp), - networkAssociation = [_vim_obj('ip.Association', - network=pg, - networkName=name)]) - vn.ip_pool_id = vcenter._content.ipPoolManager.CreateIpPool(vcenter._dc, ip_pool) - time.sleep(2) - return vn - - @staticmethod - def create_from_vnobj(vcenter, vn_obj): - vn = VcenterVN() - vn.vcenter = vcenter - vn.name = vn_obj.name - vn.uuid = None - vlan = vn_obj.config.defaultPortConfig.vlan.pvlanId - vn.vlan = (vlan - 1, vlan) - vn.ip_pool_id = vn_obj.summary.ipPoolId - pool = vcenter._find_obj(vcenter._dc, 'ip.Pool', {'id':vn.ip_pool_id}) - vn.prefix = IPNetwork(pool.ipv4Config.subnetAddress+'/'+pool.ipv4Config.netmask) - ip_list = list(vn.prefix.iter_hosts()) - return vn - - 
@retry(tries=30, delay=5) - def _get_vnc_vn_id(self, fq_name): - try: - obj = self.vcenter._vnc.virtual_network_read(fq_name) - self.uuid = obj.uuid - return True - except: - return False - - def get(self): - fq_name = [u'default-domain',u'vCenter',unicode(self.name)] - if not self._get_vnc_vn_id(fq_name): - raise Exception("Unable to query VN %s from vnc" % self.name) - -class VcenterVM: - - @staticmethod - def create_from_vmobj(vcenter, vmobj): - vm = VcenterVM() - vm.vcenter = vcenter - vm.name = vmobj.name - vm.host = vmobj.runtime.host.name - vm.nets = [net.name for net in vmobj.network] - vm.get(vmobj) - return vm - - @staticmethod - def create_in_vcenter(vcenter, name, template, networks, host): - vm = VcenterVM() - vm.vcenter = vcenter - vm.name = name - vm.host = host.name - vm.nets = [net.name for net in networks] - - intfs = [] - switch_id = vcenter._vs.uuid - for net in networks: - spec = _vim_obj('dev.VDSpec', operation=_vimtype_dict['dev.Ops.add'], - device=_vim_obj('dev.E1000', - addressType='Generated', - connectable=_vim_obj('dev.ConnectInfo', - startConnected=True, - allowGuestControl=True), - backing=_vim_obj('dev.DVPBackingInfo', - port = _vim_obj('dvs.PortConn', - switchUuid=switch_id, - portgroupKey=net.key)))) - intfs.append(spec) - - spec = _vim_obj('vm.Clone', - location=_vim_obj('vm.Reloc', - datastore=host.datastore[0], - pool=host.parent.resourcePool), - config=_vim_obj('vm.Config', deviceChange=intfs), - powerOn=True) - _wait_for_task(template.Clone(folder=vcenter._dc.vmFolder, name=vm.name, - spec=spec)) - vmobj = vcenter._find_obj(vcenter._dc, 'vm', {'name' : vm.name}) - vm.get(vmobj) - return vm - - def get(self, vm=None): - if not vm: - vm = self.vcenter._find_obj(self.vcenter._dc, 'vm', {'name' : self.name}) - self.host = vm.runtime.host.name - self.id = vm.summary.config.instanceUuid - self.macs = {} - self.ips = {} - for intf in vm.guest.net: - self.macs[intf.network] = intf.macAddress - self.ips[intf.network] = intf.ipAddress[0] 
- return len(self.ips) == len(self.nets) - - def reboot(self, r): - vm = self.vcenter._find_obj(self.vcenter._dc, 'vm', {'name' : self.name}) - assert r != 'SOFT', 'Soft reboot is not supported, use VMFixture.run_cmd_on_vm' - _wait_for_task(vm.ResetVM_Task()) - - def add_networks(self, nets): - vm = self.vcenter._find_obj(self.vcenter._dc, 'vm', {'name' : self.name}) - intfs = [] - for net in nets: - spec = _vim_obj('dev.VDSpec', operation=_vimtype_dict['dev.Ops.add'], - device=_vim_obj('dev.E1000', - addressType='Generated', - connectable=_vim_obj('dev.ConnectInfo', - startConnected=True, - allowGuestControl=True), - backing=_vim_obj('dev.DVPBackingInfo', - port = _vim_obj('dvs.PortConn', - switchUuid=self.vcenter._vs.uuid, - portgroupKey=net.key)))) - intfs.append(spec) - - cfg = _vim_obj('vm.Config', deviceChange=intfs) - _wait_for_task(vm.ReconfigVM_Task(cfg)) - - def delete_networks(self, nets): - vm = self.vcenter._find_obj(self.vcenter._dc, 'vm', {'name' : self.name}) - intfs = [] - for net in nets: - for dev in vm.config.hardware.device: - if isinstance(dev, _vimtype_dict['dev.E1000']) and dev.backing.port.portgroupKey == net.key: - spec = _vim_obj('dev.VDSpec', operation=_vimtype_dict['dev.Ops.remove'], - device=dev) - intfs.append(spec) - - cfg = _vim_obj('vm.Config', deviceChange=intfs) - _wait_for_task(vm.ReconfigVM_Task(cfg)) - - def change_networks(self,net): - vm = self.vcenter._find_obj(self.vcenter._dc, 'vm', {'name' : self.name}) - device_change = [] - try: - for dev in vm.config.hardware.device: - if isinstance(dev, _vimtype_dict['dev.E1000']): - nicspec = _vimtype_dict['dev.VDSpec']() - nicspec.operation = _vimtype_dict['dev.Ops.edit'] - nicspec.device = dev - nicspec.device.wakeOnLanEnabled = True - dvs_port_connection = _vimtype_dict['dvs.PortConn']() - dvs_port_connection.portgroupKey = net.key - dvs_port_connection.switchUuid= self.vcenter._vs.uuid - nicspec.device.backing = _vimtype_dict['dev.DVPBackingInfo']() - nicspec.device.backing.port 
= dvs_port_connection - - nicspec.device.connectable = _vimtype_dict['dev.ConnectInfo']() - nicspec.device.connectable.startConnected = True - nicspec.device.connectable.allowGuestControl = True - device_change.append(nicspec) - - break - cfg = _vim_obj('vm.Config', deviceChange=device_change) - _wait_for_task(vm.ReconfigVM_Task(cfg)) - vmobj = self.vcenter._find_obj(self.vcenter._dc, 'vm', {'name' : vm.name}) - self.get(vmobj) - #return vm - except vmodl.MethodFault as error: - self._log.debug("Caught vmodl fault : %s" %error.msg) - - @retry(tries=30, delay=5) - def assign_ip(self, intf, ip, gw, mask='255.255.255.0'): - cmd_path = '/usr/bin/sudo' - user = 'ubuntu' - password = 'ubuntu' - try: - args = 'killall -9 dhclient3' - self.vcenter.run_a_command(self.id,user,password,cmd_path,args) - args = 'ifconfig %s %s netmask %s' % (intf, ip, mask) - self.vcenter.run_a_command(self.id,user,password,cmd_path,args) - args = 'route add default gw %s' % (gw) - self.vcenter.run_a_command(self.id,user,password,cmd_path,args) - args = 'ifconfig %s up' % (intf) - self.vcenter.run_a_command(self.id,user,password,cmd_path,args) - except Exception: - return False - time.sleep(60) - return True - - def bring_up_interfaces(self, vcenter ,vm , intfs): - time.sleep(20) - cmd_path = '/usr/bin/sudo' - user = 'ubuntu' - password = 'ubuntu' - vm_id = vm.summary.config.instanceUuid - i = 1 - while i < len(intfs): - intf = 'eth' + str(i) - args = 'ifconfig %s up'%(intf) - try: - vcenter.run_a_command(vm_id,user,password,cmd_path,args) - except Exception as e: - print e - args = 'dhclient %s'%(intf) - try: - vcenter.run_a_command(vm_id,user,password,cmd_path,args) - except Exception as e: - print e - i += 1 - time.sleep(20) - -class VcenterAuth(OrchestratorAuth): - - def __init__(self, user, passwd, project_name, inputs, domain='default-domain'): - self.inputs = inputs - self.user = user - self.passwd = passwd - self.domain = domain - self.project_name = project_name - self.vnc = 
VncApi(username=user, password=passwd, - tenant_name=project_name, - api_server_host=self.inputs.cfgm_ip, - api_server_port=self.inputs.api_server_port) - - def get_project_id(self, project_name=None): - if not project_name: - project_name = self.project_name - fq_name = [unicode(self.domain), unicode(project_name)] - obj = self.vnc.project_read(fq_name=fq_name) - if obj: - return obj.get_uuid() - return None - - def reauth(self): - raise Exception('Unimplemented interface') - - def create_project(self, name): - raise Exception('Unimplemented interface') - - def delete_project(self, name): - raise Exception('Unimplemented interface') - - def create_user(self, user, passwd): - raise Exception('Unimplemented interface') - - def delete_user(self, user): - raise Exception('Unimplemented interface') - - def add_user_to_project(self, user, project): - raise Exception('Unimplemented interface') diff --git a/fixtures/vdns_fixture.py b/fixtures/vdns_fixture.py deleted file mode 100755 index 3df8d31da..000000000 --- a/fixtures/vdns_fixture.py +++ /dev/null @@ -1,489 +0,0 @@ -import fixtures -from vnc_api.vnc_api import * -from vnc_api.gen.cfixture import ContrailFixture -from vnc_api.gen.resource_test import VirtualDnsRecordTestFixtureGen - -from tcutils.util import retry - - -class VdnsFixture(fixtures.Fixture): - - def __init__(self, inputs=None, connections=None, vdns_name=None, - dns_data=None, dns_domain_name='juniper.net', - ttl=100, record_order='random', uuid=None): - self.vnc_lib = connections.get_vnc_lib_h() - self.api_s_inspect = connections.api_server_inspect - self.cn_inspect = connections.cn_inspect - self.inputs = connections.inputs - self.logger = connections.logger - self.connections = connections - self.dns_domain_name = dns_domain_name - self.ttl = ttl - self.record_order = record_order - self.vdns_name = vdns_name - self.dns_data = dns_data - self.uuid = uuid - self.parent_type = 'domain' - self.fq_name = [self.inputs.domain_name, self.vdns_name] - 
self.verify_is_run = False - self.obj = None - self.already_present = True - - def read(self): - if self.uuid: - self.obj = self.vnc_lib.virtual_DNS_read(id = self.uuid) - self.vdns_name = self.obj.name - self.fq_name = self.obj.get_fq_name() - - def setUp(self): - super(VdnsFixture, self).setUp() - self.create() - - def create(self): - try: - self.uuid = self.uuid or self.vnc_lib.virtual_DNS_read( - fq_name=self.fq_name).uuid - self.read() - self.logger.debug("vDNS: %s(%s) already present not creating" % - (self.vdns_name, self.uuid)) - except NoIdError: - self.already_present = False - self.logger.debug("Creating VDNS : %s", self.vdns_name) - vdns_data = self.dns_data or self.get_vdns_data() - vdns_obj = VirtualDns(self.vdns_name, virtual_DNS_data=vdns_data, - parent_type=self.parent_type, - fq_name=self.fq_name) - self.uuid = self.vnc_lib.virtual_DNS_create(vdns_obj) - self.obj = self.vnc_lib.virtual_DNS_read(id = self.uuid) - - def get_vdns_data(self): - return VirtualDnsType(domain_name=self.dns_domain_name, - dynamic_records_from_client=True, - default_ttl_seconds=self.ttl, - record_order=self.record_order) - - def get_uuid(self): - return self.uuid - - def get_obj(self): - return self.obj - - def get_fq_name(self): - return self.fq_name - - @property - def vdns_fq_name(self): - return ':'.join(self.get_fq_name()) - - def cleanUp(self): - super(VdnsFixture, self).cleanUp() - self.delete() - - def delete(self, verify=False): - if not self.already_present: - self.logger.debug("Deleting VDNS Entry: %s", self.vdns_name) - self.vnc_lib.virtual_DNS_delete(id = self.uuid) - if self.verify_is_run or verify: - result, msg = self.verify_on_cleanup() - assert result, msg - - def verify_on_setup(self): - retval = True - errmsg = '' - self.logger.info("in verify_on_setup") - try: - vdns = self.vnc_lib.virtual_DNS_read(fq_name=self.fq_name) - self.logger.debug("VDNS: %s created succesfully", self.fq_name) - except NoIdError: - errmsg = errmsg + \ - "\n VDNS: %s not 
created." % self.vdns_fq_name - self.logger.warn(errmsg) - return False, errmsg - self.logger.info("Verify VDNS entry is shown in control node") - retval1 = self.verify_vdns_in_control_node() - if not retval1: - retval = True and False - errmsg = errmsg + "\n VDNS server " + \ - self.vdns_fq_name + \ - " info not found not found in control node" - self.logger.error("VDNS info not found not found in control node") - self.logger.info("Verify VDNS entry is shown in the API server") - retval2 = self.verify_vdns_in_api_server() - if not retval2: - retval = True and False - errmsg = errmsg + "\n VDNS server " + \ - self.vdns_fq_name + \ - " info not found not found in API server" - self.logger.error("VDNS info not found not found in API server") - self.verify_is_run = True - if not retval: - return False, errmsg - self.logger.info("out of verify_on_setup") - return True, None - - @retry(delay=3, tries=15) - def verify_vdns_in_control_node(self): - ''' verify VDNS data in control node''' - result = True - msg = '' - for cn in self.inputs.bgp_ips: - try: - cn_s_dns = self.cn_inspect[cn].get_cn_vdns( - vdns=str(self.obj.name)) - if self.vdns_fq_name not in cn_s_dns['node_name']: - result = result and False - msg = msg + \ - '\nvdns name info not matching with control name data' - act_cn_vdns_data = cn_s_dns['obj_info'][ - 'data']['virtual-DNS-data'] - print act_cn_vdns_data - exp_vdns_data = self.obj.get_virtual_DNS_data() - if act_cn_vdns_data: - if exp_vdns_data.__dict__['domain_name'] != act_cn_vdns_data['domain-name']: - result = result and False - msg = msg + \ - '\nvdns domain name is not matching with control node data' - if str(exp_vdns_data.__dict__['default_ttl_seconds']) != act_cn_vdns_data['default-ttl-seconds']: - result = result and False - msg = msg + \ - '\nvdns ttl value is not matching with control node data' - if exp_vdns_data.__dict__['record_order'] != act_cn_vdns_data['record-order']: - result = result and False - msg = msg + \ - '\nvdns record order 
value is not matching with control node data' - if exp_vdns_data.__dict__['next_virtual_DNS'] != act_cn_vdns_data['next-virtual-DNS']: - result = result and False - msg = msg + \ - '\nvdns next virtual DNS data is not matching with control node data' - except Exception as e: - # Return false if we get an key error and for retry - return False - if msg != '': - self.logger.info( - "VDNS info is not matching with control node data\n %s:", msg) - return result - # end verify_dns_in_control_node - - @retry(delay=3, tries=5) - def verify_vdns_in_api_server(self): - ''' verify VDNS data in API server ''' - result = True - api_s_dns = self.api_s_inspect.get_cs_dns( - vdns_name=str(self.obj.name), refresh=True) - print api_s_dns - msg = '' - try: - if self.fq_name != api_s_dns['virtual-DNS']['fq_name']: - result = result and False - msg = msg + \ - '\n fq name data is not matching with api server data' - if self.obj.uuid != api_s_dns['virtual-DNS']['uuid']: - result = result and False - msg = msg + '\n UUID is is not matching with api server data' - - api_vdns_data = api_s_dns['virtual-DNS']['virtual_DNS_data'] - exp_vdns_data = self.obj.get_virtual_DNS_data() - for data in api_vdns_data: - if str(exp_vdns_data.__dict__[data]) != str(api_vdns_data.get(data)): - result = result and False - msg = msg + '\nvdns ' + data + \ - ' is not matching with api server data' - except Exception as e: - # Return false if we get an key error and for retry - return False - - if msg != '': - self.logger.info( - "VDNS info is not matching with API server\n %s:", msg) - return result - # end verify_vdns_in_api_server - - @retry(delay=2, tries=5) - def verify_vdns_not_in_api_server(self): - """Validate VDNS information in API-Server.""" - if self.api_s_inspect.get_cs_dns(vdns_name=str(self.obj.name), refresh=True) is not None: - errmsg = "VDNS information %s still found in the API Server" % self.obj.name - self.logger.warn(errmsg) - return False - else: - self.logger.info( - "VDNS information 
%s removed from the API Server", self.obj.name) - return True - - @retry(delay=2, tries=25) - def verify_vdns_not_in_control_node(self): - for cn in self.inputs.bgp_ips: - cn_s_dns = self.cn_inspect[cn].get_cn_vdns( - vdns=str(self.obj.name)) - - if cn_s_dns: - errmsg = "VDNS information %s still found in the Control node" % self.obj.name - self.logger.warn(errmsg) - return False - else: - self.logger.info( - "VDNS information %s removed in the Control node", self.obj.name) - return True - - def verify_on_cleanup(self): - retval = True - errmsg = '' - try: - vdns = self.vnc_lib.virtual_DNS_read(fq_name=self.fq_name) - errmsg = errmsg + "VDNS info " + \ - self.fq_name + 'still not removed' - self.logger.warn(errmsg) - return False, errmsg - except NoIdError: - self.logger.info("VDNS info: %s deleted successfully." % - self.fq_name) - status = self.verify_vdns_not_in_api_server() - if not status: - retval = retval and False - errmsg = errmsg + "\nVdns info is not deleted from API Server" - status = self.verify_vdns_not_in_control_node() - if not status: - retval = retval and False - errmsg = errmsg + "\n VDNS info is not deleted from control node" - if not retval: - return False, errmsg - return True, errmsg - - -class VdnsRecordFixture(fixtures.Fixture): - - def __init__(self, inputs=None, connections=None, - virtual_DNS_record_name=None, vdns_fqname=None, - virtual_DNS_record_data=None, uuid=None): - self.vnc_lib = connections.get_vnc_lib_h() - self.api_s_inspect = connections.api_server_inspect - self.cn_inspect = connections.cn_inspect - self.inputs = connections.inputs - self.logger = connections.logger - self.vdns_record_name = virtual_DNS_record_name - self.vdns_record_data = virtual_DNS_record_data - self.parent_fqname = vdns_fqname - self.fq_name = self.parent_fqname + [self.vdns_record_name] - self.uuid = uuid - self.obj = None - self.verify_is_run = False - self.already_present = True - - def read(self): - if self.uuid: - self.obj = 
self.vnc_lib.virtual_DNS_record_read(id = self.uuid) - self.vdns_record_name = self.obj.name - self.fq_name = self.obj.get_fq_name() - self.logger.debug("Fetched vdns Record Data: %s(%s)"%( - self.fq_name, self.uuid)) - - def setUp(self): - super(VdnsRecordFixture, self).setUp() - self.create() - - def create(self): - try: - self.uuid = self.uuid or self.vnc_lib.virtual_DNS_record_read( - fq_name=self.fq_name).uuid - self.read() - except NoIdError: - self.already_present = False - self.logger.debug("Creating VDNS record data : %s", - self.vdns_record_name) - obj = VirtualDnsRecord(self.vdns_record_name, - parent_type='virtual-DNS', - fq_name=self.fq_name, - virtual_DNS_record_data=self.vdns_record_data) - self.uuid = self.vnc_lib.virtual_DNS_record_create(obj) - self.obj = self.vnc_lib.virtual_DNS_record_read(id = self.uuid) - - @property - def vdns_rec_fq_name(self): - return ':'.join(self.fq_name) - - @property - def vdns_name(self): - return self.fq_name[:-1][-1] - - def cleanUp(self): - super(VdnsRecordFixture, self).cleanUp() - self.delete() - - def delete(self, verify=False): - if not self.already_present: - self.logger.debug("Deleting VDNS record data: %s", - self.vdns_record_name) - self.vnc_lib.virtual_DNS_record_delete(id=self.uuid) - if self.verify_is_run or verify: - result, msg = self.verify_on_cleanup() - assert result, msg - - def verify_on_setup(self): - retval = True - errmsg = '' - self.logger.info("In verify_on_setup") - self.verify_is_run = True - try: - vdns_rec = self.vnc_lib.virtual_DNS_record_read( - fq_name=self.fq_name) - self.logger.debug( - "VDNS record: %s created succesfully", self.fq_name) - except NoIdError: - errmsg = errmsg + \ - "\n VDNS record: %s not created." 
% self.vdns_rec_fq_name - self.logger.warn(errmsg) - return False, errmsg - - self.logger.info("Verify VDNS record is shown in the API server") - ret_val1 = self.verify_vdns_rec_in_api_server() - if not ret_val1: - retval = True and False - errmsg = errmsg + "\n VDNS record " + \ - self.vdns_rec_fq_name + \ - " is info not found in the control node\n" - self.logger.error( - "VDNS record info not found not found in control node") - self.logger.info("Verify VDNS record is shown in the control node") - ret_val2 = self.verify_vdns_rec_in_cn_node() - if not ret_val2: - retval = True and False - errmsg = errmsg + "\n VDNS record " + \ - self.vdns_rec_fq_name + \ - " is info not found in the control node\n" - self.logger.error( - "VDNS record info not found not found in the control node") - - return retval, errmsg - # end of verify_on_setup - - def verify_on_cleanup(self): - retval = True - errmsg = '' - try: - vdns = self.vnc_lib.virtual_DNS_record_read( - fq_name=self.vdns_rec_fq_name) - errmsg = errmsg + 'VDNS record ' + \ - self.vdns_rec_fq_name + ' info still not removed' - self.logger.warn(errmsg) - return False, errmsg - except NoIdError: - self.logger.info( - "VDNS record info: %s deleted successfully.", self.vdns_rec_fq_name) - return retval, errmsg - - status = self.verify_vdns_rec_not_in_api_server() - if not status: - retval = retval and False - errmsg = errmsg + \ - "\nVDNS record info is not deleted from API server" - status = self.verify_vdns_rec_not_in_control_node() - if not status: - retval = retval and False - errmsg = errmsg + \ - "\nVDNS record info is not deleted from control node" - if not retval: - return False, errmsg - return True, errmsg - # end of verify_on_cleanup - - @retry(delay=5, tries=5) - def verify_vdns_rec_in_cn_node(self): - ''' verify VDNS record data in API in Control node''' - result = True - msg = '' - for cn in self.inputs.bgp_ips: - try: - cn_s_dns = self.cn_inspect[cn].get_cn_vdns_rec( - vdns=self.vdns_name, 
rec_name=self.vdns_record_name) - if self.vdns_rec_fq_name not in cn_s_dns['node_name']: - result = result and False - msg = msg + \ - '\nvdns name info not matching with control name data' - act_cn_vdns_rec_data = cn_s_dns['obj_info'][ - 'data']['virtual-DNS-record-data'] - exp_vdns_rec_data = self.obj.get_virtual_DNS_record_data() - if act_cn_vdns_rec_data: - if exp_vdns_rec_data.__dict__['record_name'] != act_cn_vdns_rec_data['record-name']: - result = result and False - msg = msg + \ - '\nvdns record name is not matching with control node data' - if str(exp_vdns_rec_data.__dict__['record_ttl_seconds']) != act_cn_vdns_rec_data['record-ttl-seconds']: - result = result and False - msg = msg + \ - '\nvdns record ttl value is not matching with control node data' - if exp_vdns_rec_data.__dict__['record_type'] != act_cn_vdns_rec_data['record-type']: - result = result and False - msg = msg + \ - '\nvdns record type value is not matching with control node data' - if exp_vdns_rec_data.__dict__['record_data'] != act_cn_vdns_rec_data['record-data']: - result = result and False - msg = msg + \ - '\nvdns record data is not matching with control node data' - except Exception as e: - # Return false if we get an key error and for retry - return False - if msg != '': - self.logger.info( - "VDNS record info is not matching with control node data\n %s:", msg) - return result - # end of verify_vdns_rec_in_cn_node - - @retry(delay=5, tries=5) - def verify_vdns_rec_in_api_server(self): - ''' verify VDNS record data in API server ''' - result = True - api_s_dns_rec = self.api_s_inspect.get_cs_dns_rec( - rec_name=self.vdns_record_name, vdns_name=self.vdns_name, refresh=True) - msg = '' - try: - if self.fq_name != api_s_dns_rec['virtual-DNS-record']['fq_name']: - result = result and False - msg = msg + \ - '\n fq name data is not matching with DNS record data' - if self.uuid != api_s_dns_rec['virtual-DNS-record']['uuid']: - result = result and False - msg = msg + '\n UUID is is not 
matching with DNS record data' - - api_vdns_rec_data = api_s_dns_rec[ - 'virtual-DNS-record']['virtual_DNS_record_data'] - exp_vdns_rec_data = self.obj.get_virtual_DNS_record_data() - for data in api_vdns_rec_data: - if str(exp_vdns_rec_data.__dict__[data]) != str(api_vdns_rec_data.get(data)): - result = result and False - msg = msg + '\nvdns ' + data + \ - ' is not matching with api server DNS record data' - except Exception as e: - # Return false if we get an key error and for retry - return False - if msg != '': - self.logger.info( - "VDNS record info is not matching with API Server data\n %s:", msg) - return result - # end of verify_vdns_rec_in_api_server - - @retry(delay=2, tries=5) - def verify_vdns_rec_not_in_api_server(self): - '''Validate VDNS record data not in API-Server.''' - if self.api_s_inspect.get_cs_dns_rec(rec_name=self.vdns_record_name, vdns_name=self.vdns_name, refresh=True) is not None: - errmsg = "VDNS record information %s still found in the API Server" % self.vdns_record_name - self.logger.warn(errmsg) - return False - else: - self.logger.info( - "VDNS record information %s removed from the API Server", self.vdns_record_name) - return True - - @retry(delay=2, tries=5) - def verify_vdns_rec_not_in_control_node(self): - for cn in self.inputs.bgp_ips: - cn_s_dns = self.cn_inspect[cn].get_cn_vdns_rec(vdns=self.vdns_name, - rec_name=self.vdns_record_name) - if cn_s_dns: - errmsg = "VDNS record information %s still found in the"\ - " Control node" % self.vdns_record_name - self.logger.warn(errmsg) - return False - else: - self.logger.info("VDNS record information %s removed in " - " the Control node"%self.vdns_record_name) - return True diff --git a/fixtures/vm_test.py b/fixtures/vm_test.py deleted file mode 100644 index dafec4859..000000000 --- a/fixtures/vm_test.py +++ /dev/null @@ -1,2562 +0,0 @@ -import shutil -import tempfile -import fixtures -import re -from ipam_test import * -from vn_test import * -from tcutils.util import * -import time 
-import traceback -from fabric.api import env -from fabric.api import run -from fabric.state import output -from fabric.state import connections as fab_connections -from fabric.operations import get, put -from fabric.context_managers import settings, hide -import socket -import paramiko -from contrail_fixtures import * -import threading -import shlex -from subprocess import Popen, PIPE - -from collections import defaultdict -from tcutils.pkgs.install import PkgHost, build_and_install -from security_group import get_secgrp_id_from_name, list_sg_rules -from tcutils.tcpdump_utils import start_tcpdump_for_intf,\ - stop_tcpdump_for_intf - -env.disable_known_hosts = True -try: - from webui_test import * -except ImportError: - pass -#output.debug= True - -#@contrail_fix_ext () - - -class VMFixture(fixtures.Fixture): - - ''' - Fixture to handle creation, verification and deletion of VM. - image_name : One of cirros-0.3.0-x86_64-uec, redmine-fe, redmine-be, ubuntu - - Deletion of the VM upon exit can be disabled by setting fixtureCleanup= 'no' in params file. - If a VM with the vm_name is already present, it is not deleted upon exit. 
To forcefully clean them up, set fixtureCleanup= 'force' - Vn object can be a single VN object(vn_obj) or a list of VN objects(vn_objs) but not both - ''' - - def __init__(self, connections, vm_name=None, vn_obj=None, - vn_objs=[], project_name=None, - image_name='ubuntu', subnets=[], - flavor=None, - node_name=None, sg_ids=[], count=1, userdata=None, - port_ids=[], fixed_ips=[], zone=None, vn_ids=[], uuid=None): - self.connections = connections - self.inputs = self.connections.inputs - self.logger = self.connections.logger - self.api_s_inspects = self.connections.api_server_inspects - self.api_s_inspect = self.connections.api_server_inspect - self.agent_inspect = self.connections.agent_inspect - self.cn_inspect = self.connections.cn_inspect - self.ops_inspect = self.connections.ops_inspects - self.orch = self.connections.orch - self.quantum_h = self.connections.quantum_h - self.vnc_lib_h = self.connections.get_vnc_lib_h() - self.nova_h = self.connections.nova_h - self.node_name = node_name - self.zone = zone - self.sg_ids = sg_ids - self.count = count - self.port_ids = port_ids - self.fixed_ips = fixed_ips - self.subnets = subnets - if os.environ.has_key('ci_image'): - image_name = os.environ.get('ci_image') - self.image_name = image_name - self.flavor = flavor - self.project_name = project_name or self.inputs.stack_tenant - self.vm_name = vm_name or get_random_name(self.project_name) - self.vm_id = uuid - self.vm_obj = None - self.vm_ips = list() - self.vn_objs = list((vn_obj and [vn_obj]) or vn_objs or - [self.orch.get_vn_obj_from_id(x) for x in vn_ids]) - if os.environ.has_key('ci_image'): - cidrs = [] - for vn_obj in self.vn_objs: - if vn_obj['network'].has_key('contrail:subnet_ipam'): - cidrs.extend(list(map(lambda obj: obj['subnet_cidr'], - vn_obj['network']['contrail:subnet_ipam']))) - if get_af_from_cidrs(cidrs) != 'v4': - raise v4OnlyTestException('Disabling v6 tests for CI') - self.vn_names = [self.orch.get_vn_name(x) for x in self.vn_objs] - 
self.vn_fq_names = [':'.join(self.vnc_lib_h.id_to_fq_name(self.orch.get_vn_id(x))) - for x in self.vn_objs] - self.vn_ids = vn_ids - if len(self.vn_objs) == 1: - self.vn_name = self.vn_names[0] - self.vn_fq_name = self.vn_fq_names[0] - self.already_present = False - self.verify_is_run = False - self.analytics_obj = self.connections.analytics_obj - self.agent_vrf_name = {} - self.agent_vrf_id = {} - self.agent_path = {} - self.agent_l2_path = {} - self.tap_intf = {} - self.mac_addr = {} - self.agent_label = {} - self.agent_l2_label = {} - self.agent_vxlan_id = {} - self.local_ips = {} - self.cs_vmi_obj = {} - self.vm_launch_flag = True - self.vm_in_api_flag = True - self.vm_in_agent_flag = True - self.vm_in_cn_flag = True - self.vm_in_op_flag = True - self.verify_vm_not_in_setup = True - self.verify_vm_not_in_api_server_flag = True - self.verify_vm_not_in_agent_flag = True - self.verify_vm_not_in_control_nodes_flag = True - self.verify_vm_not_in_nova_flag = True - self.vm_flows_removed_flag = True - self.printlock = threading.Lock() - self.verify_vm_flag = True - self.userdata = userdata - self.vm_username = None - self.vm_password = None - if self.inputs.verify_thru_gui(): - self.browser = self.connections.browser - self.browser_openstack = self.connections.browser_openstack - self.webui = WebuiTest(self.connections, self.inputs) - self._vm_interface = {} - - # end __init__ - - def read(self): - if self.vm_id: - self.vm_obj = self.orch.get_vm_by_id(vm_id=self.vm_id) - if not self.vm_obj: - raise Exception('VM with id %s not found'%self.vm_id) - self.vm_objs = [self.vm_obj] - self.vm_name = self.vm_obj.name - self.vn_names = self.orch.get_networks_of_vm(self.vm_obj) - self.vn_objs = [self.orch.get_vn_obj_if_present(x) - for x in self.vn_names] - self.vn_ids = [self.orch.get_vn_id(x) for x in self.vn_objs] - self.vn_fq_names = [':'.join(self.vnc_lib_h.id_to_fq_name(x)) - for x in self.vn_ids] - self.vn_name = self.vn_names[0] - self.vn_fq_name = self.vn_fq_names[0] - 
self.vm_ip_dict = self.get_vm_ip_dict() - self.vm_ips = self.get_vm_ips() - - def setUp(self): - super(VMFixture, self).setUp() - self.create() - - def create(self): - if self.vm_id: - return self.read() - self.project_fixture = self.useFixture( - ProjectFixture(vnc_lib_h=self.vnc_lib_h, - project_name=self.project_name, - connections=self.connections)) - self.vn_ids = [self.orch.get_vn_id(x) for x in self.vn_objs] - self.vm_obj = self.orch.get_vm_if_present(self.vm_name, - project_id=self.project_fixture.uuid) - self.vm_objs = self.orch.get_vm_list(name_pattern=self.vm_name, - project_id=self.project_fixture.uuid) - if self.vm_obj: - self.vm_id = self.vm_obj.id - self.already_present = True - with self.printlock: - self.logger.debug('VM %s already present, not creating it' - % (self.vm_name)) - else: - if self.inputs.is_gui_based_config(): - self.webui.create_vm(self) - else: - objs = self.orch.create_vm( - project_uuid=self.project_fixture.uuid, - image_name=self.image_name, - flavor=self.flavor, - vm_name=self.vm_name, - vn_objs=self.vn_objs, - node_name=self.node_name, - zone=self.zone, - sg_ids=self.sg_ids, - count=self.count, - userdata=self.userdata, - port_ids=self.port_ids, - fixed_ips=self.fixed_ips) -# time.sleep(5) - self.vm_obj = objs[0] - self.vm_objs = objs - self.vm_id = self.vm_objs[0].id - self.vm_obj.get() - self.zone = getattr(self.vm_obj,'OS-EXT-AZ:availability_zone', None) - self.image_name = self.orch.get_image_name_for_zone( - image_name=self.image_name, - zone=self.zone) - (self.vm_username, self.vm_password) = self.orch.get_image_account( - self.image_name) - - # end setUp - - def get_uuid(self): - return self.vm_id - - def get_fq_name(self): - return self.vm_name - - def get_name(self): - return self.vm_name - - def get_vm_ips(self, vn_fq_name=None, af=None): - if not af: - af = self.inputs.get_af() - af = ['v4', 'v6'] if 'dual' in af else af - if vn_fq_name: - vm_ips = self.get_vm_ip_dict()[vn_fq_name] - else: - if not getattr(self, 
'vm_ips', None): - for vm_obj in self.vm_objs: - for vn_name in self.vn_names: - for ip in self.orch.get_vm_ip(vm_obj, vn_name): - if self.hack_for_v6(ip): - continue - self.vm_ips.append(ip) - vm_ips = self.vm_ips - return [ip for ip in vm_ips if get_af_type(ip) in af] - - def hack_for_v6(self, ip): - if 'v6' in self.inputs.get_af() and not is_v6(ip): - return True - return False - - @property - def vm_ip(self): - return self.vm_ips[0] if self.vm_ips else None - - def verify_vm_launched(self): - self.vm_launch_flag = True - for vm_obj in self.vm_objs: - if not self.orch.get_vm_detail(vm_obj): - self.logger.error('VM %s is not launched yet' %vm_obj.id) - self.vm_launch_flag = False - return False - self.logger.info("VM %s ID is %s" % (vm_obj.name, vm_obj.id)) - self.logger.info('VM %s launched on Node %s' - % (vm_obj.name, self.get_host_of_vm(vm_obj))) - self.vm_ips = self.get_vm_ips() - if not self.vm_ips: - self.logger.error('VM didnt seem to have got any IP') - self.vm_launch_flag = False - return False - self.vm_launch_flag = True - return True - # end verify_vm_launched - - @property - def vm_node_ip(self): - if not getattr(self, '_vm_node_ip', None): - self._vm_node_ip = self.inputs.get_host_ip(self.get_host_of_vm()) - return self._vm_node_ip - - def get_host_of_vm(self, vm_obj=None): - vm_obj = vm_obj or self.vm_obj - attr = '_host_'+vm_obj.name - if not getattr(self, attr, None): - setattr(self, attr, self.orch.get_host_of_vm(vm_obj)) - return getattr(self, attr, None) - - @property - def vm_node_data_ip(self): - if not getattr(self, '_vm_data_node_ip', None): - self._vm_node_data_ip = self.inputs.get_host_data_ip(self.get_host_of_vm()) - return self._vm_node_data_ip - - def get_compute_host(self): - return self.vm_node_data_ip - - def set_vm_creds(self, username, password): - self.vm_username = username - self.vm_password = password - - def get_vm_username(self): - return self.vm_username - - def get_vm_password(self): - return self.vm_password - - 
@retry(delay=1, tries=5) - def get_vm_obj_from_api_server(self, cfgm_ip=None, refresh=False): - cfgm_ip = cfgm_ip or self.inputs.cfgm_ip - if not getattr(self, 'cs_vm_obj', None): - self.cs_vm_obj = dict() - if not self.cs_vm_obj.get(cfgm_ip) or refresh: - vm_obj = self.api_s_inspects[cfgm_ip].get_cs_vm(self.vm_id, refresh) - self.cs_vm_obj[cfgm_ip] = vm_obj - ret = True if self.cs_vm_obj[cfgm_ip] else False - return (ret, self.cs_vm_obj[cfgm_ip]) - - def get_vm_objs(self): - for cfgm_ip in self.inputs.cfgm_ips: - vm_obj = self.get_vm_obj_from_api_server(cfgm_ip)[1] - if not vm_obj: - return None - return self.cs_vm_obj - - @retry(delay=1, tries=5) - def get_vmi_obj_from_api_server(self, cfgm_ip=None, refresh=False): - cfgm_ip = cfgm_ip or self.inputs.cfgm_ip - if not getattr(self, 'cs_vmi_objs', None): - self.cs_vmi_objs = dict() - if not self.cs_vmi_objs.get(cfgm_ip) or refresh: - vmi_obj = self.api_s_inspects[cfgm_ip].get_cs_vmi_of_vm( - self.vm_id, refresh) - self.cs_vmi_objs[cfgm_ip] = vmi_obj - ret = True if self.cs_vmi_objs[cfgm_ip] else False - return (ret, self.cs_vmi_objs[cfgm_ip]) - - def get_vmi_objs(self, refresh=False): - for cfgm_ip in self.inputs.cfgm_ips: - vmi_obj = self.get_vmi_obj_from_api_server(cfgm_ip, refresh)[1] - if not vmi_obj: - return None - return self.cs_vmi_objs - - @retry(delay=1, tries=5) - def get_iip_obj_from_api_server(self, cfgm_ip=None, refresh=False): - cfgm_ip = cfgm_ip or self.inputs.cfgm_ip - if not getattr(self, 'cs_instance_ip_objs', None): - self.cs_instance_ip_objs = dict() - if not self.cs_instance_ip_objs.get(cfgm_ip) or refresh: - iip_obj = self.api_s_inspects[cfgm_ip].get_cs_instance_ips_of_vm( - self.vm_id, refresh) - self.cs_instance_ip_objs[cfgm_ip] = iip_obj - ret = True if self.cs_instance_ip_objs[cfgm_ip] else False - return (ret, self.cs_instance_ip_objs[cfgm_ip]) - - def get_iip_objs(self, refresh=False): - for cfgm_ip in self.inputs.cfgm_ips: - iip_obj = self.get_iip_obj_from_api_server(cfgm_ip, 
refresh)[1] - if not iip_obj: - return None - return self.cs_instance_ip_objs - - def get_vm_ip_dict(self): - if not getattr(self, 'vm_ip_dict', None): - self.vm_ip_dict = defaultdict(list) - iip_objs = self.get_iip_obj_from_api_server()[1] - for iip_obj in iip_objs: - ip = iip_obj.ip - if self.hack_for_v6(ip): - continue - self.vm_ip_dict[iip_obj.vn_fq_name].append(ip) - return self.vm_ip_dict - - def add_security_group(self, secgrp): - self.orch.add_security_group(vm_id=self.vm_obj.id, sg_id=secgrp) - - def remove_security_group(self, secgrp): - self.orch.remove_security_group(vm_id=self.vm_obj.id, sg_id=secgrp) - - def verify_security_group(self, secgrp): - - result = False - errmsg = "Security group %s is not attached to the VM %s" % (secgrp, - self.vm_name) - cs_vmi_objs = self.get_vmi_obj_from_api_server(refresh=True)[1] - for cs_vmi_obj in cs_vmi_objs: - vmi = cs_vmi_obj['virtual-machine-interface'] - if vmi.has_key('security_group_refs'): - sec_grps = vmi['security_group_refs'] - for sec_grp in sec_grps: - if secgrp == sec_grp['to'][-1]: - self.logger.info( - "Security group %s is attached \ - to the VM %s", secgrp, self.vm_name) - result = True - - if not result: - self.logger.warn(errmsg) - return result, errmsg - - result, msg = self.verify_sec_grp_in_agent(secgrp) - if not result: - self.logger.warn(msg) - return result, msg - - result, msg = self.verify_sg_acls_in_agent(secgrp) - if not result: - self.logger.warn(msg) - return result, msg - - return result, None - - @retry(delay=2, tries=4) - def verify_sec_grp_in_agent(self, secgrp, domain='default-domain'): - # this method verifies sg secgrp attached to vm info in agent - secgrp_fq_name = ':'.join([domain, - self.project_name, - secgrp]) - - sg_id = get_secgrp_id_from_name( - self.connections, - secgrp_fq_name) - - inspect_h = self.agent_inspect[self.vm_node_ip] - sg_info = inspect_h.get_sg(sg_id) - if sg_info: - self.logger.info("Agent: Security group %s is attached to the VM %s", - secgrp, 
self.vm_name) - return True, None - - errmsg = "Agent: Security group %s is NOT attached to the VM %s" % (secgrp, - self.vm_name) - return False, errmsg - - @retry(delay=2, tries=4) - def verify_sg_acls_in_agent(self, secgrp, domain='default-domain'): - secgrp_fq_name = ':'.join([domain, - self.project_name, - secgrp]) - - sg_id = get_secgrp_id_from_name( - self.connections, - secgrp_fq_name) - - rules = self.orch.get_security_group_rules(sg_id) - inspect_h = self.agent_inspect[self.vm_node_ip] - acls_list = inspect_h.get_sg_acls_list(sg_id) - - errmsg = "sg acl rule not found in agent" - result = False - for rule in rules: - result = False - uuid = rule.get('id', None) - if not uuid: - uuid = rule['rule_uuid'] - for acl in acls_list: - for r in acl['entries']: - if r.has_key('uuid'): - if r['uuid'] == uuid: - result = True - break - if result: - break - if not result: - return result, errmsg - - return True, None - - def verify_on_setup(self, force=False): - if not (self.inputs.verify_on_setup or force): - self.logger.info('Skipping VM %s verification' % (self.vm_name)) - return True - result = True - vm_status = self.orch.wait_till_vm_is_active(self.vm_obj) - if type(vm_status) is tuple: - if vm_status[1] in 'ERROR': - self.logger.warn("VM in error state. 
Asserting...") - return False - if vm_status[1] != 'ACTIVE': - return False - elif not vm_status: - return False - - self.verify_vm_launched() - if len(self.vm_ips) < 1: - return False - - self.verify_vm_flag = True - if self.inputs.verify_thru_gui(): - self.webui.verify_vm(self) - result = self.verify_vm_in_api_server() - if not result: - self.logger.error('VM %s verification in API Server failed' - % (self.vm_name)) - return result - result = self.verify_vm_in_agent() - if not result: - self.logger.error('VM %s verification in Agent failed' - % (self.vm_name)) - return result - result = self.verify_vm_in_control_nodes() - if not result: - self.logger.error('Route verification for VM %s in Controlnodes' - ' failed ' % (self.vm_name)) - return result - result = self.verify_vm_in_opserver() - if not result: - self.logger.error('VM %s verification in Opserver failed' - % (self.vm_name)) - return result - - self.verify_is_run = True - return result - # end verify_on_setup - - def mini_verify_on_setup(self): - result = True - if not self.verify_vm_launched(): - return False - if not self.verify_vm_in_api_server(): - self.logger.error('VM %s verification in API Server failed' - % (self.vm_name)) - result = result and False - if not self.verify_vm_in_agent(): - self.logger.error('VM %s verification in Agent failed' - % (self.vm_name)) - result = result and False - self.verify_is_run = True - return result - # end mini_verify_on_setup - - def get_vrf_id(self, vn_fq_name, vn_vrf_name): - inspect_h = self.agent_inspect[self.vm_node_ip] - (domain, project, vn) = vn_fq_name.split(':') - agent_vrf_objs_vn = inspect_h.get_vna_vrf_objs(domain, project, vn) - agent_vrf_obj_vn = self.get_matching_vrf( - agent_vrf_objs_vn['vrf_list'], - vn_vrf_name) - vn_vrf_id = agent_vrf_obj_vn['ucindex'] - return vn_vrf_id - - # end get_vrf_id - - def chk_vmi_for_vrf_entry(self, vn_fq_name): - try: - cs_vmi_objs_vm = self.get_vmi_obj_from_api_server()[1] - inspect_h = 
self.agent_inspect[self.vm_node_ip] - for vmi_obj in cs_vmi_objs_vm: - tap_intf = {} - tmp_vmi_id = vmi_obj.uuid - tap_intf[vn_fq_name] = inspect_h.get_vna_tap_interface_by_vmi( - vmi_id=tmp_vmi_id)[0] - vrf_entry = tap_intf[vn_fq_name]['fip_list'][0]['vrf_name'] - return vrf_entry - except IndexError, e: - self.logger.error('No VRF Entry listed') - return None - - # end chk_vmi_for_vrf_entry - - def chk_vmi_for_fip(self, vn_fq_name): - try: - cs_vmi_objs_vm = self.get_vmi_obj_from_api_server()[1] - inspect_h = self.agent_inspect[self.vm_node_ip] - for vmi_obj in cs_vmi_objs_vm: - tap_intf = {} - tmp_vmi_id = vmi_obj.uuid - tap_intf = inspect_h.get_vna_tap_interface_by_vmi( - vmi_id=tmp_vmi_id)[0] - fip_list = tap_intf['fip_list'] - for fip in fip_list: - if vn_fq_name in fip['vrf_name']: - fip_addr_vm = fip['ip_addr'] - return fip_addr_vm - except IndexError, e: - self.logger.error('No FIP Address listed') - return None - # end chk_vmi_for_fip - - @retry(delay=2, tries=15) - def verify_vm_in_api_server(self): - '''Validate API-Server objects for a VM. - - Checks if Instance IP in API Server is same as what - Orchestration system gave it. - Checks if the virtual-machine-interface's VN in API Server is correct. 
- ''' - self.vm_in_api_flag = True - - self.get_vm_objs() - self.get_vmi_objs() - self.get_iip_objs() - - for cfgm_ip in self.inputs.cfgm_ips: - self.logger.info("Verifying in api server %s" % (cfgm_ip)) - if not self.cs_instance_ip_objs[cfgm_ip]: - with self.printlock: - self.logger.error('Instance IP of VM ID %s not seen in ' - 'API Server ' % (self.vm_id)) - self.vm_in_api_flag = self.vm_in_api_flag and False - return False - - for ips in self.get_vm_ip_dict().values(): - if set(ips) | set(self.vm_ips) != set(self.vm_ips): - with self.printlock: - self.logger.warn('Instance IP %s from API Server is ' - ' not found in VM IP list %s' % (ips, str(self.vm_ips))) - self.vm_in_api_flag = self.vm_in_api_flag and False - return False - for vmi_obj in self.cs_vmi_objs[self.inputs.cfgm_ip]: - vmi_vn_id = vmi_obj.vn_uuid - vmi_vn_fq_name = vmi_obj.vn_fq_name - # ToDo: msenthil the checks have to be other way around - if vmi_vn_id not in self.vn_ids: - with self.printlock: - self.logger.warn('VMI %s of VM %s is not mapped to the ' - 'right VN ID in API Server' % (vmi_vn_id, self.vm_name)) - self.vm_in_api_flag = self.vm_in_api_flag and False - return False - self.cs_vmi_obj[vmi_vn_fq_name] = vmi_obj - with self.printlock: - self.logger.info("API Server validations for VM %s passed in api server %s" - % (self.vm_name, self.inputs.cfgm_ip)) - self.vm_in_api_flag = self.vm_in_api_flag and True - return True - # end verify_vm_in_api_server - - @retry(delay=2, tries=25) - def verify_vm_not_in_api_server(self): - - self.verify_vm_not_in_api_server_flag = True - for ip in self.inputs.cfgm_ips: - self.logger.info("Verifying in api server %s" % (ip)) - api_inspect = self.api_s_inspects[ip] - if api_inspect.get_cs_vm(self.vm_id, refresh=True) is not None: - with self.printlock: - self.logger.warn("VM ID %s of VM %s is still found in API Server" - % (self.vm_id, self.vm_name)) - self.verify_vm_not_in_api_server_flag = self.verify_vm_not_in_api_server_flag and False - return False - if 
api_inspect.get_cs_vr_of_vm(self.vm_id, refresh=True) is not None: - with self.printlock: - self.logger.warn('API-Server still seems to have VM reference ' - 'for VM %s' % (self.vm_name)) - self.verify_vm_not_in_api_server_flag = self.verify_vm_not_in_api_server_flag and False - return False - if api_inspect.get_cs_vmi_of_vm(self.vm_id, - refresh=True) is not None: - with self.printlock: - self.logger.warn("API-Server still has VMI info of VM %s" - % (self.vm_name)) - self.verify_vm_not_in_api_server_flag = self.verify_vm_not_in_api_server_flag and False - return False - with self.printlock: - self.logger.info( - "VM %s information is fully removed in API-Server " % (self.vm_name)) - self.verify_vm_not_in_api_server_flag = self.verify_vm_not_in_api_server_flag and True - return True - # end verify_vm_not_in_api_server - - def get_tap_intf_of_vmi(self, vmi_uuid): - inspect_h = self.agent_inspect[self.vm_node_ip] - vna_tap_id = inspect_h.get_vna_tap_interface_by_vmi(vmi_id=vmi_uuid) - return vna_tap_id[0] - - def get_tap_intf_of_vm(self): - inspect_h = self.agent_inspect[self.vm_node_ip] - tap_intfs = inspect_h.get_vna_tap_interface_by_vm(vm_id=self.vm_id) - return tap_intfs - - def get_vmi_ids(self): - if not getattr(self, 'vmi_ids', None): - self.vmi_ids = dict() - vmi_objs = self.get_vmi_obj_from_api_server()[1] - for vmi_obj in vmi_objs: - self.vmi_ids[vmi_obj.vn_fq_name] = vmi_obj.uuid - return self.vmi_ids - - def get_mac_addr_from_config(self): - if not getattr(self, 'mac_addr', None): - vmi_objs = self.get_vmi_obj_from_api_server()[1] - for vmi_obj in vmi_objs: - self.mac_addr[vmi_obj.vn_fq_name] = vmi_obj.mac_addr - return self.mac_addr - - def get_agent_label(self): - if not getattr(self, 'agent_label', None): - for (vn_fq_name, vmi) in self.get_vmi_ids().iteritems(): - self.agent_label[vn_fq_name] = self.get_tap_intf_of_vmi(vmi)['label'] - return self.agent_label - - def get_local_ips(self): - if not getattr(self, 'local_ips', None): - for (vn_fq_name, 
vmi) in self.get_vmi_ids().iteritems(): - self.local_ips[vn_fq_name] = self.get_tap_intf_of_vmi(vmi)['mdata_ip_addr'] - return self.local_ips - - def get_local_ip(self): - if not getattr(self, '_local_ip', None): - local_ips = self.get_local_ips() - if len(self.vn_fq_names) > 1: - for vn_fq_name in self.vn_fq_names: - if vn_fq_name in local_ips and local_ips[vn_fq_name] != '0.0.0.0': - if self.ping_vm_from_host(vn_fq_name): - self._local_ip = self.local_ips[vn_fq_name] - break - else: - self._local_ip = local_ips and local_ips.values()[0] - return getattr(self, '_local_ip', '') - - @property - def local_ip(self): - return self.get_local_ip() - - @retry(delay=2, tries=20) - def verify_vm_in_agent(self): - ''' Verifies whether VM has got created properly in agent. - - ''' - self.vm_in_agent_flag = True - - #Verification in vcenter plugin introspect - #vcenter introspect not working.disabling vcenter verification till. - #if getattr(self.orch,'verify_vm_in_vcenter',None): - # assert self.orch.verify_vm_in_vcenter(self.vm_obj) - - inspect_h = self.agent_inspect[self.vm_node_ip] - for vn_fq_name in self.vn_fq_names: - (domain, project, vn) = vn_fq_name.split(':') - agent_vn_obj = inspect_h.get_vna_vn(domain, project, vn) - if not agent_vn_obj: - self.logger.warn('VN %s is not seen in agent %s' - % (vn_fq_name, self.vm_node_ip)) - self.vm_in_agent_flag = self.vm_in_agent_flag and False - return False - - # Check if the VN ID matches between the Orchestration S and Agent - # ToDo: msenthil should be == check of vn_id[vn_fq_name] rather - # than list match - if agent_vn_obj['uuid'] not in self.vn_ids: - self.logger.warn('Unexpected VN UUID %s found in agent %s ' - 'Expected: One of %s' % (agent_vn_obj['uuid'], - self.vm_node_ip, self.vn_ids)) - self.vm_in_agent_flag = self.vm_in_agent_flag and False - return False - try: - vna_tap_id = self.get_tap_intf_of_vmi(self.get_vmi_ids()[vn_fq_name]) - except Exception as e: - vna_tap_id = None - - self.tap_intf[vn_fq_name] = 
vna_tap_id - if not self.tap_intf[vn_fq_name]: - self.logger.error('Tap interface in VN %s for VM %s not' - 'seen in agent %s ' - % (vn_fq_name, self.vm_name, self.vm_node_ip)) - self.vm_in_agent_flag = self.vm_in_agent_flag and False - return False - mac_addr = self.tap_intf[vn_fq_name]['mac_addr'] - if mac_addr != self.get_mac_addr_from_config()[vn_fq_name]: - with self.printlock: - self.logger.error('VM Mac address for VM %s not seen in' - 'agent %s or VMI mac is not matching with API' - 'Server information' % (self.vm_name, self.vm_node_ip)) - self.vm_in_agent_flag = self.vm_in_agent_flag and False - return False - try: - self.tap_intf[vn_fq_name] = inspect_h.get_vna_intf_details( - self.tap_intf[vn_fq_name]['name'])[0] - except Exception as e: - return False - - self.logger.info("tap intf: %s" % (str(self.tap_intf[vn_fq_name]))) - - self.agent_vrf_name[vn_fq_name] = self.tap_intf[ - vn_fq_name]['vrf_name'] - - self.logger.info("agent vrf name: %s" % - (str(self.agent_vrf_name[vn_fq_name]))) - - try: - agent_vrf_objs = inspect_h.get_vna_vrf_objs( - domain, project, vn) - except Exception as e: - agent_vrf_objs = None - - self.logger.info("vrf obj : %s" % (str(agent_vrf_objs))) - if not agent_vrf_objs: - return False - # Bug 1372858 - try: - agent_vrf_obj = self.get_matching_vrf( - agent_vrf_objs['vrf_list'], - self.agent_vrf_name[vn_fq_name]) - except Exception as e: - self.logger.warn("Exception: %s" % (e)) - return False - - self.agent_vrf_id[vn_fq_name] = agent_vrf_obj['ucindex'] - self.agent_path[vn_fq_name] = list() - self.agent_label[vn_fq_name] = list() - try: - for vm_ip in self.vm_ip_dict[vn_fq_name]: - self.agent_path[vn_fq_name].append( - inspect_h.get_vna_active_route( - vrf_id=self.agent_vrf_id[vn_fq_name], - ip=vm_ip)) - except Exception as e: - return False - if not self.agent_path[vn_fq_name]: - with self.printlock: - self.logger.warn('No path seen for VM IP %s in agent %s' - % (self.vm_ip_dict[vn_fq_name], self.vm_node_ip)) - 
self.vm_in_agent_flag = self.vm_in_agent_flag and False - return False - for agent_path in self.agent_path[vn_fq_name]: - agent_label = agent_path['path_list'][0]['label'] - self.agent_label[vn_fq_name].append(agent_label) - - if agent_path['path_list'][0]['nh']['itf'] != \ - self.tap_intf[vn_fq_name]['name']: - self.logger.warning("Active route in agent for %s is " - "not pointing to right tap interface. It is %s " - % (self.vm_ip_dict[vn_fq_name], - agent_path['path_list'][0]['nh']['itf'])) - self.vm_in_agent_flag = self.vm_in_agent_flag and False - return False - else: - self.logger.debug('Active route in agent is present for' - ' VMI %s ' % (self.tap_intf[vn_fq_name]['name'])) - - if self.tap_intf[vn_fq_name]['label'] != agent_label: - self.logger.warning('VM %s label mismatch! ,' - ' Expected : %s , Got : %s' % (self.vm_name, - self.tap_intf[vn_fq_name]['label'], agent_label)) - self.vm_in_agent_flag = self.vm_in_agent_flag and False - return False - else: - self.logger.debug('VM %s labels in tap-interface and ' - 'the route do match' % (self.vm_name)) - - # Check if tap interface is set to Active - if self.tap_intf[vn_fq_name]['active'] != 'Active': - self.logger.warn('VM %s : Tap interface %s is not set to ' - 'Active, it is : %s ' % (self.vm_name, - self.tap_intf[ - vn_fq_name]['name'], - self.tap_intf[vn_fq_name]['active'])) - else: - with self.printlock: - self.logger.debug('VM %s : Tap interface %s is set to ' - ' Active' % (self.vm_name, - self.tap_intf[vn_fq_name]['name'])) - self.local_ips[vn_fq_name] = self.tap_intf[ - vn_fq_name]['mdata_ip_addr'] - with self.printlock: - self.logger.debug('Tap interface %s detail : %s' % ( - self.tap_intf[vn_fq_name]['name'], self.tap_intf[vn_fq_name])) - - with self.printlock: - self.logger.info('Starting Layer 2 verification in Agent') - # L2 verification - try: - self.agent_l2_path[vn_fq_name] = inspect_h.get_vna_layer2_route( - vrf_id=self.agent_vrf_id[vn_fq_name], - mac=mac_addr) - except Exception as e: - 
self.agent_l2_path[vn_fq_name] = None - if not self.agent_l2_path[vn_fq_name]: - with self.printlock: - self.logger.warning('No Layer 2 path is seen for VM MAC ' - '%s in agent %s' % (mac_addr, - self.vm_node_ip)) - self.vm_in_agent_flag = self.vm_in_agent_flag and False - return False - else: - with self.printlock: - self.logger.info('Layer 2 path is seen for VM MAC %s ' - 'in agent %s' % (mac_addr, - self.vm_node_ip)) - self.agent_l2_label[vn_fq_name] = self.agent_l2_path[ - vn_fq_name]['routes'][0]['path_list'][0]['label'] - self.agent_vxlan_id[vn_fq_name] = self.agent_l2_path[ - vn_fq_name]['routes'][0]['path_list'][0]['vxlan_id'] - - # Check if Tap interface of VM is present in the Agent layer - # route table - if self.agent_l2_path[vn_fq_name]['routes'][0]['path_list'][0][ - 'nh']['itf'] != self.tap_intf[vn_fq_name]['name']: - with self.printlock: - self.logger.warn("Active layer 2 route in agent for %s " - "is not pointing to right tap interface." - " It is %s " - % (self.vm_ip_dict[vn_fq_name], - self.agent_l2_path[vn_fq_name][ - 'routes'][0]['path_list'][0]['nh']['itf'])) - self.vm_in_agent_flag = self.vm_in_agent_flag and False - return False - else: - with self.printlock: - self.logger.info( - 'Active layer 2 route in agent is present for VMI %s ' % - (self.tap_intf[vn_fq_name]['name'])) - if self.agent_l2_path[vn_fq_name]['routes'][0]['path_list'][0]['active_tunnel_type'] == 'VXLAN': - if self.agent_vxlan_id[vn_fq_name] != \ - self.tap_intf[vn_fq_name]['vxlan_id']: - with self.printlock: - self.logger.warn("vxlan_id mismatch between interface " - "introspect %s and l2 route table %s" - % (self.tap_intf[vn_fq_name]['vxlan_id'], - self.agent_vxlan_id[vn_fq_name])) - self.vm_in_agent_flag = self.vm_in_agent_flag and False - return False - - else: - with self.printlock: - self.logger.info('vxlan_id (%s) matches bw route table' - ' and interface table' - % self.agent_vxlan_id[vn_fq_name]) - - else: - - if self.agent_l2_label[vn_fq_name] !=\ - 
self.tap_intf[vn_fq_name]['l2_label']: - with self.printlock: - self.logger.warn("L2 label mismatch between interface " - "introspect %s and l2 route table %s" - % (self.tap_intf[vn_fq_name]['l2_label'], - self.agent_l2_label[vn_fq_name])) - self.vm_in_agent_flag = self.vm_in_agent_flag and False - return False - else: - with self.printlock: - self.logger.info('L2 label(%s) matches bw route table' - - ' and interface table' - % self.agent_l2_label[vn_fq_name]) - - # api_s_vn_obj = self.api_s_inspect.get_cs_vn( - # project=vn_fq_name.split(':')[1], vn=vn_fq_name.split(':')[2], refresh=True) - # if api_s_vn_obj['virtual-network']['network_ipam_refs'][0]['attr']['ipam_subnets'][0]['enable_dhcp']: - # if (self.agent_l2_path[vn_fq_name]['routes'][0]['path_list'][0]['flood_dhcp']) != 'false': - # with self.printlock: - # self.logger.warn("flood_dhcp flag is set to True \ - # for mac %s " - # %(self.agent_l2_path[vn_fq_name]['mac']) ) - # self.vm_in_agent_flag = self.vm_in_agent_flag and False - # return False - # else: - # if (self.agent_l2_path[vn_fq_name]['routes'][0]['path_list'][0]['flood_dhcp']) != 'true': - # with self.printlock: - # self.logger.warn("flood_dhcp flag is set to False \ - # for mac %s " - # %(self.agent_l2_path[vn_fq_name]['mac']) ) - # self.vm_in_agent_flag = self.vm_in_agent_flag and False - # return False - - # L2 verification end here - # Check if VN for the VM and route for the VM is present on all - # compute nodes - if not self.verify_in_all_agents(vn_fq_name): - self.vm_in_agent_flag = self.vm_in_agent_flag and False - return False - - # end for vn_fq_name in self.vn_fq_names - - # Ping to VM IP from host - if '169.254' not in self.local_ip: - with self.printlock: - self.logger.error('Ping to one of the 169.254.x.x IPs of the VM' - ' should have passed. It failed! 
') - self.vm_in_agent_flag = self.vm_in_agent_flag and False - return False - with self.printlock: - self.logger.info("VM %s Verifications in Agent is fine" % - (self.vm_name)) - self.vm_in_agent_flag = self.vm_in_agent_flag and True - return True - # end verify_vm_in_agent - - def get_matching_vrf(self, vrf_objs, vrf_name): - self.logger.info("vrf_objs: %s" % (str(vrf_objs))) - self.logger.info("vrf_name: %s" % (str(vrf_name))) - return [x for x in vrf_objs if x['name'] == vrf_name][0] - - def reset_state(self, state): - self.vm_obj.reset_state(state) - - def ping_vm_from_host(self, vn_fq_name): - ''' Ping the VM metadata IP from the host - ''' - host = self.inputs.host_data[self.vm_node_ip] - output = '' - with hide('everything'): - with settings( - host_string='%s@%s' % (host['username'], self.vm_node_ip), - password=host['password'], - warn_only=True, abort_on_prompts=False): - output = run('ping %s -c 1' % (self.local_ips[vn_fq_name])) - expected_result = ' 0% packet loss' - self.logger.debug(output) - if expected_result not in output: - self.logger.warn( - "Ping to Metadata IP %s of VM %s failed!" % - (self.local_ips[vn_fq_name], self.vm_name)) - return False - else: - self.logger.info( - 'Ping to Metadata IP %s of VM %s passed' % - (self.local_ips[vn_fq_name], self.vm_name)) - return True - # end ping_vm_from_host - - def verify_in_all_agents(self, vn_fq_name): - ''' Verify if the corresponding VN for a VM is present in all compute nodes. 
- Also verifies that a route is present in all compute nodes for the VM IP - ''' - if len(self.inputs.compute_ips) > 10: - self.logger.warn('Skipping verification on all agents since ' - 'there are more than 10 computes in the box, ' - 'until the subroutine supports gevent/mp') - return True - (domain, project, vn_name) = vn_fq_name.split(':') - for compute_ip in self.inputs.compute_ips: - inspect_h = self.agent_inspect[compute_ip] - vn = inspect_h.get_vna_vn(domain, project, vn_name) - # The VN for the VM under test may or may not be present on other agent - # nodes. Proceed to check only if VN is present - if vn is None: - continue - - if vn['name'] != vn_fq_name: - self.logger.warn( - 'VN %s in agent is not the same as expected : %s ' % - (vn['name'], vn_fq_name)) - return False - else: - self.logger.debug('VN %s is found in Agent of node %s' % - (vn['name'], compute_ip)) - if not vn['uuid'] in self.vn_ids: - self.logger.warn( - 'VN ID %s from agent is in VN IDs list %s of the VM in ' - 'Agent node %s' % (vn['uuid'], self.vn_ids, compute_ip)) - return False -# TODO : To be uncommented once the sandesh query with service-chaining works -# if vn['vrf_name'] != self.agent_vrf_name : -# self.logger.warn('VN VRF of %s in agent is not the same as expected VRF of %s' %( vn['vrf_name'], self.agent_vrf_name )) -# return False - agent_vrf_objs = inspect_h.get_vna_vrf_objs( - domain, project, vn_name) - agent_vrf_obj = self.get_matching_vrf( - agent_vrf_objs['vrf_list'], - self.agent_vrf_name[vn_fq_name]) - agent_vrf_id = agent_vrf_obj['ucindex'] - for vm_ip in self.vm_ip_dict[vn_fq_name]: - agent_path = inspect_h.get_vna_active_route( - vrf_id=agent_vrf_id, ip=vm_ip) - agent_label = agent_path['path_list'][0]['label'] - if agent_label not in self.agent_label[vn_fq_name]: - self.logger.warn( - 'The route for VM IP %s in Node %s is having ' - 'incorrect label. 
Expected: %s, Seen : %s' % ( - vm_ip, compute_ip, - self.agent_label[vn_fq_name], agent_label)) - return False - - self.logger.debug( - 'VRF IDs of VN %s is consistent in agent %s' % - (vn_fq_name, compute_ip)) - self.logger.debug( - 'Route for VM IP %s is consistent in agent %s ' % - (self.vm_ip_dict[vn_fq_name], compute_ip)) - self.logger.debug( - 'VN %s verification for VM %s in Agent %s passed ' % - (vn_fq_name, self.vm_name, compute_ip)) - - self.logger.info( - 'Starting all layer 2 verification in agent %s' % (compute_ip)) - agent_l2_path = inspect_h.get_vna_layer2_route( - vrf_id=agent_vrf_id, - mac=self.get_mac_addr_from_config()[vn_fq_name]) - agent_l2_label = agent_l2_path[ - 'routes'][0]['path_list'][0]['label'] - if agent_l2_label != self.agent_l2_label[vn_fq_name]: - self.logger.warn('The route for VM MAC %s in Node %s ' - 'is having incorrect label. Expected: %s, Seen: %s' - % (self.mac_addr[vn_fq_name], compute_ip, - self.agent_l2_label[vn_fq_name], agent_l2_label)) - return False - self.logger.info( - 'Route for VM MAC %s is consistent in agent %s ' % - (self.mac_addr[vn_fq_name], compute_ip)) - # end for - return True - # end verify_in_all_agents - - def ping_to_vn(self, dst_vm_fixture, vn_fq_name=None, af=None, *args, **kwargs): - ''' - Ping all the ips belonging to a specific VN of a VM from another - Optionally can specify the address family too (v4, v6 or dual) - return False if any of the ping fails - ''' - vm_ips = dst_vm_fixture.get_vm_ips(vn_fq_name=vn_fq_name, af=af) - for ip in vm_ips: - if not self.ping_to_ip(ip=ip, *args, **kwargs): - return False - return True - - def ping_to_ip(self, ip, return_output=False, other_opt='', size='56', count='5'): - '''Ping from a VM to an IP specified. - - This method logs into the VM from the host machine using ssh and runs ping test to an IP. 
- ''' - host = self.inputs.host_data[self.vm_node_ip] - output = '' - fab_connections.clear() - af = get_af_type(ip) - try: -# self.orch.put_key_file_to_host(self.vm_node_ip) - with hide('everything'): - with settings(host_string='%s@%s' % (host['username'], - self.vm_node_ip), password=host['password'], - warn_only=True, abort_on_prompts=False): - vm_host_string = '%s@%s' % ( - self.vm_username, self.local_ip) - if af is None: - cmd = "python -c 'import socket; socket.getaddrinfo" +\ - "(\"%s\"\, None\, socket.AF_INET6)'" % ip - output = run_fab_cmd_on_node(host_string=vm_host_string, - password=self.vm_password, - cmd=cmd) - util = 'ping' if output else 'ping6' - else: - util = 'ping6' if af == 'v6' else 'ping' - cmd = '%s -s %s -c %s %s %s' % (util, - str(size), str(count), other_opt, ip) - output = run_fab_cmd_on_node(host_string=vm_host_string, - password=self.vm_password, - cmd=cmd) - self.logger.debug(output) - if return_output == True: - return output - except Exception, e: - self.logger.exception( - 'Exception occured while trying ping from VM') - return False - expected_result = ' 0% packet loss' - try: - if expected_result not in output: - self.logger.warn("Ping to IP %s from VM %s failed" % - (ip, self.vm_name)) - return False - else: - self.logger.info('Ping to IP %s from VM %s passed' % - (ip, self.vm_name)) - return True - except Exception as e: - self.logger.warn("Got exception in ping_to_ip:%s" % (e)) - return False - # end ping_to_ip - - def ping_to_ipv6(self, *args, **kwargs): - '''Ping from a VM to an IPV6 specified. - - This method logs into the VM from the host machine using ssh and runs ping6 test to an IPV6. - ''' - return self.ping_to_ip(*args, **kwargs) - # end ping_to_ipv6 - - @retry(delay=1, tries=10) - def ping_with_certainty(self, ip=None, return_output=False, other_opt='', - size='56', count='5', expectation=True, - dst_vm_fixture=None, vn_fq_name=None, af=None): - ''' - Better to call this instead of ping_to_ip. 
- Set expectation to False if you want ping to fail - Can be used for both ping pass and fail scenarios with retry - ''' - if dst_vm_fixture: - output = self.ping_to_vn(dst_vm_fixture=dst_vm_fixture, - vn_fq_name=vn_fq_name, af=af, - return_output=False, size=size, - other_opt=other_opt, count=count) - else: - output = self.ping_to_ip(ip=ip, return_output=False, - other_opt=other_opt, size=size, - count=count) - return (output == expectation) - - def verify_vm_not_in_orchestrator(self): - if not self.orch.is_vm_deleted(self.vm_obj): - with self.printlock: - self.logger.warn("VM %s is still found in Compute(nova) " - "server-list" % (self.vm_name)) - return False - return True - - @retry(delay=2, tries=20) - def verify_vm_not_in_agent(self): - '''Verify that the VM is fully removed in all Agents. - - ''' - #Verification in vcenter plugin introspect - #if getattr(self.orch,'verify_vm_not_in_vcenter',None): - # assert self.orch.verify_vm_not_in_vcenter(self.vm_obj) - - result = True - self.verify_vm_not_in_agent_flag = True - self.vrfs = dict() - self.vrfs = self.get_vrf_ids_accross_agents() - inspect_h = self.agent_inspect[self.vm_node_ip] - # Check if VM is in agent's active VMList: - if self.vm_id in inspect_h.get_vna_vm_list(): - with self.printlock: - self.logger.warn("VM %s is still present in agent's active " - "VMList" % (self.vm_name)) - self.verify_vm_not_in_agent_flag = self.verify_vm_not_in_agent_flag and False - result = result and False - if len(inspect_h.get_vna_tap_interface_by_vm(vm_id=self.vm_id)) != 0: - with self.printlock: - self.logger.warn("VMI/TAP interface(s) is still seen for VM " - "%s in agent" % (self.vm_name)) - self.verify_vm_not_in_agent_flag = \ - self.verify_vm_not_in_agent_flag and False - result = result and False - for k, v in self.vrfs.items(): - inspect_h = self.agent_inspect[k] - for vn_fq_name in self.vn_fq_names: - if vn_fq_name in v: - for vm_ip in self.vm_ip_dict[vn_fq_name]: - if inspect_h.get_vna_active_route( - 
vrf_id=v[vn_fq_name], - ip=vm_ip) is not None: - self.logger.warn( - "Route for VM %s, IP %s is still seen in agent %s" % - (self.vm_name, vm_ip, self.vm_node_ip)) - self.verify_vm_not_in_agent_flag = \ - self.verify_vm_not_in_agent_flag and False - result = result and False - else: - continue - if result: - self.logger.info( - "VM %s is removed in Compute, and routes are removed " - "in all agent nodes" % (self.vm_name)) - return result - # end verify_vm_not_in_agent - - @retry(delay=2, tries=20) - def verify_vm_routes_not_in_agent(self): - '''Verify that the VM routes is fully removed in all Agents. This will specfically address the scenario where VM interface is down ir shutoff - ''' - result = True - inspect_h = self.agent_inspect[self.vm_node_ip] - for vn_fq_name in self.vn_fq_names: - for compute_ip in self.inputs.compute_ips: - inspect_h = self.agent_inspect[compute_ip] - for vm_ip in self.vm_ip_dict[vn_fq_name]: - if inspect_h.get_vna_active_route( - vrf_id=self.agent_vrf_id[vn_fq_name], - ip=vm_ip) is not None: - self.logger.warn( - "Route for VM %s, IP %s is still seen in agent %s " % - (self.vm_name, vm_ip, compute_ip)) - self.verify_vm_not_in_agent_flag = self.verify_vm_not_in_agent_flag and False - result = result and False - if result: - self.logger.info( - "VM %s routes are removed " - "in all agent nodes" % (self.vm_name)) - return result - - def get_control_nodes(self): - bgp_ips = {} - vm_host = self.vm_node_ip - try: - bgp_ips = self.inputs.build_compute_to_control_xmpp_connection_dict( - self.connections) - bgp_ips = bgp_ips[vm_host] - except Exception as e: - self.logger.exception("Exception in get_control_nodes") - finally: - return bgp_ips - - def get_ctrl_nodes_in_rt_group(self): - if getattr(self, 'bgp_ips', None): - return self.bgp_ips - rt_list = [] - peer_list = [] - for vn_fq_name in self.vn_fq_names: - vn_name = vn_fq_name.split(':')[-1] - ri_name = vn_fq_name + ':' + vn_name - ri = 
self.vnc_lib_h.routing_instance_read(fq_name=[ri_name]) - rt_refs = ri.get_route_target_refs() - for rt_ref in rt_refs: - rt_obj = self.vnc_lib_h.route_target_read(id=rt_ref['uuid']) - rt_list.append(rt_obj.name) - for rt in rt_list: - ctrl_node = self.get_active_controller() - ctrl_node = self.inputs.get_host_ip(ctrl_node) - peer_list.append(ctrl_node) - rt_group_entry = self.cn_inspect[ - ctrl_node].get_cn_rtarget_group(rt) - if rt_group_entry['peers_interested'] is not None: - for peer in rt_group_entry['peers_interested']: - if peer in self.inputs.host_names: - peer = self.inputs.host_data[peer]['host_ip'] - peer_list.append(peer) - else: - self.logger.info( - '%s is not defined as a control node in the topology' % peer) - self.bgp_ips = list(set(peer_list)) - return self.bgp_ips - # end get_ctrl_nodes_in_rt_group - - @retry(delay=5, tries=20) - def verify_vm_in_control_nodes(self): - ''' Validate routes are created in Control-nodes for this VM - - ''' - self.vm_in_cn_flag = True - for vn_fq_name in self.vn_fq_names: - for cn in self.get_ctrl_nodes_in_rt_group(): - vn_name = vn_fq_name.split(':')[-1] - ri_name = vn_fq_name + ':' + vn_name - # Check for VM route in each control-node - for vm_ip in self.vm_ip_dict[vn_fq_name]: - cn_routes = self.cn_inspect[cn].get_cn_route_table_entry( - ri_name=ri_name, prefix=vm_ip) - if not cn_routes: - with self.printlock: - self.logger.warn( - 'No route found for VM IP %s in Control-node %s' % - (vm_ip, cn)) - self.vm_in_cn_flag = self.vm_in_cn_flag and False - return False - if cn_routes[0]['next_hop'] != self.vm_node_data_ip: - with self.printlock: - self.logger.warn( - 'Next hop for VM %s is not set to %s in Control-node' - ' Route table' % (self.vm_name, self.vm_node_data_ip)) - self.vm_in_cn_flag = self.vm_in_cn_flag and False - return False - # Label in agent and control-node should match - if cn_routes[0]['label'] not in self.agent_label[vn_fq_name]: - with self.printlock: - self.logger.warn( - "Label for VM %s 
differs between Control-node " - "%s and Agent, Expected: %s, Seen: %s" % - (self.vm_name, cn, self.agent_label[vn_fq_name], - cn_routes[0]['label'])) - self.logger.debug( - 'Route in CN %s : %s' % (cn, str(cn_routes))) - self.vm_in_cn_flag = self.vm_in_cn_flag and False - return False - if self.verify_l2_routes_in_control_nodes() != True: - with self.printlock: - self.logger.warn("L2 verification for VM failed") - return False - self.vm_in_cn_flag = self.vm_in_cn_flag and True - with self.printlock: - self.logger.info("Verification in Control-nodes" - " for VM %s passed" % (self.vm_name)) - return True - # end verify_vm_in_control_nodes - - def verify_l2_routes_in_control_nodes(self): - for vn_fq_name in self.vn_fq_names: - for cn in self.get_ctrl_nodes_in_rt_group(): - ri_name = vn_fq_name + ':' + vn_fq_name.split(':')[-1] - self.logger.info('Starting all layer2 verification' - ' in %s Control Node' % (cn)) - for vm_ip in self.vm_ip_dict[vn_fq_name]: - if is_v6(vm_ip): - self.logger.info('Skipping L2 verification of v6 ' - ' route on cn %s, not supported' % (cn)) - continue - prefix = self.get_mac_addr_from_config()[vn_fq_name] + ',' + vm_ip - # Computing the ethernet tag for prefix here, - # format is EncapTyepe-IP(0Always):0-VXLAN-MAC,IP - if vn_fq_name in self.agent_vxlan_id.keys(): - ethernet_tag = "2-0:0" + '-' +\ - self.agent_vxlan_id[vn_fq_name] - else: - ethernet_tag = "2-0:0-0" - prefix = ethernet_tag + '-' + prefix - cn_l2_routes = self.cn_inspect[cn].get_cn_route_table_entry( - ri_name=ri_name, - prefix=prefix, - table='evpn.0') - if not cn_l2_routes: - self.logger.warn('No layer2 route found for VM MAC %s ' - 'in CN %s: ri_name %s, prefix: %s' % ( - self.mac_addr[vn_fq_name], cn, - ri_name, prefix)) - self.vm_in_cn_flag = self.vm_in_cn_flag and False - return False - else: - self.logger.info('Layer2 route found for VM MAC %s in \ - Control-node %s' % (self.mac_addr[vn_fq_name], cn)) - if cn_l2_routes[0]['next_hop'] != self.vm_node_data_ip: - 
self.logger.warn( - "Next hop for VM %s is not set to %s in " - "Control-node Route table" % (self.vm_name, - self.vm_node_data_ip)) - self.vm_in_cn_flag = self.vm_in_cn_flag and False - return False - if cn_l2_routes[0]['tunnel_encap'][0] == 'vxlan': - # Label in agent and control-node should match - if cn_l2_routes[0]['label'] != \ - self.agent_vxlan_id[vn_fq_name]: - with self.printlock: - self.logger.warn("L2 Label for VM %s differs " - " between Control-node %s and Agent, " - "Expected: %s, Seen: %s" % (self.vm_name, - cn, self.agent_vxlan_id[ - vn_fq_name], - cn_l2_routes[0]['label'])) - self.logger.debug('Route in CN %s : %s' % (cn, - str(cn_l2_routes))) - self.vm_in_cn_flag = self.vm_in_cn_flag and False - return False - else: - with self.printlock: - self.logger.info("L2 Label for VM %s same " - "between Control-node %s and Agent, " - "Expected: %s, Seen: %s" % - (self.vm_name, cn, - self.agent_vxlan_id[ - vn_fq_name], - cn_l2_routes[0]['label'])) - else: - # Label in agent and control-node should match - if cn_l2_routes[0]['label'] != \ - self.agent_l2_label[vn_fq_name]: - with self.printlock: - self.logger.warn("L2 Label for VM %s differs " - "between Control-node %s and Agent, " - "Expected: %s, Seen: %s" % (self.vm_name, - cn, self.agent_l2_label[ - vn_fq_name], - cn_l2_routes[0]['label'])) - self.logger.debug( - 'Route in CN %s: %s' % (cn, str(cn_l2_routes))) - self.vm_in_cn_flag = self.vm_in_cn_flag and False - return False - else: - with self.printlock: - self.logger.info("L2 Label for VM %s same " - "between Control-node %s and Agent, " - "Expected: %s, Seen: %s" % - (self.vm_name, cn, - self.agent_l2_label[ - vn_fq_name], - cn_l2_routes[0]['label'])) - # end for - return True - # end verify_l2_routes_in_control_nodes - - @retry(delay=2, tries=25) - def verify_vm_not_in_control_nodes(self): - ''' Validate that routes for VM is removed in control-nodes. 
- - ''' - result = True - self.verify_vm_not_in_control_nodes_flag = True - - for vn_fq_name in self.vn_fq_names: - ri_name = vn_fq_name + ':' + vn_fq_name.split(':')[-1] - for cn in self.get_ctrl_nodes_in_rt_group(): - # Check for VM route in each control-node - for vm_ip in self.vm_ip_dict[vn_fq_name]: - cn_routes = self.cn_inspect[cn].get_cn_route_table_entry( - ri_name=ri_name, prefix=vm_ip) - if cn_routes is not None: - with self.printlock: - self.logger.warn("Control-node %s still seems to " - "have route for VMIP %s" % (cn, vm_ip)) - self.verify_vm_not_in_control_nodes_flag =\ - self.verify_vm_not_in_control_nodes_flag and False - result = result and False - # end for - if result: - with self.printlock: - self.logger.info( - "Routes for VM %s is removed in all control-nodes" - % (self.vm_name)) - return result - # end verify_vm_not_in_control_nodes - - def _get_ops_intf_index(self, ops_intf_list, vn_fq_name): - for intf in ops_intf_list: - _intf = self.analytics_obj.get_intf_uve(intf) - if not _intf: - return None - vn_name = _intf['virtual_network'] - if vn_name == vn_fq_name: - return ops_intf_list.index(intf) - return None - - @retry(delay=2, tries=45) - def verify_vm_in_opserver(self): - ''' Verify VM objects in Opserver. - ''' - self.logger.info("Verifying the vm in opserver") - result = True - self.vm_in_op_flag = True - for ip in self.inputs.collector_ips: - self.logger.info("Verifying in collector %s ..." 
% (ip)) - self.ops_vm_obj = self.ops_inspect[ip].get_ops_vm(self.vm_id) - ops_intf_list = self.ops_vm_obj.get_attr('Agent', 'interface_list') - if not ops_intf_list: - self.logger.warn( - 'Failed to get VM %s, ID %s info from Opserver' % - (self.vm_name, self.vm_id)) - self.vm_in_op_flag = self.vm_in_op_flag and False - return False - for vn_fq_name in self.vn_fq_names: - vm_in_pkts = None - vm_out_pkts = None - ops_index = self._get_ops_intf_index(ops_intf_list, vn_fq_name) - if ops_index is None: - self.logger.error( - 'VN %s is not seen in opserver for VM %s' % - (vn_fq_name, self.vm_id)) - self.vm_in_op_flag = self.vm_in_op_flag and False - return False - ops_intf = ops_intf_list[ops_index] - #ops_data = self.analytics_obj.get_intf_uve(ops_intf) - #ops_data = ops_intf_list[ops_index] - for vm_ip in self.vm_ip_dict[vn_fq_name]: - try: - if is_v6(vm_ip): - op_data = self.analytics_obj.get_vm_attr(ops_intf,'ip6_address') - else: - op_data = self.analytics_obj.get_vm_attr(ops_intf,'ip_address') - except Exception as e: - return False - - if vm_ip != op_data: - self.logger.warn( - "Opserver doesnt list IP Address %s of vm %s" % ( - vm_ip, self.vm_name)) - self.vm_in_op_flag = self.vm_in_op_flag and False - result = result and False - # end if - self.ops_vm_obj = self.ops_inspect[ip].get_ops_vm(self.vm_id) - # end if - self.logger.info("Verifying vm in vn uve") - for intf in ops_intf_list: - intf = self.analytics_obj.get_intf_uve(intf) - virtual_network = intf['virtual_network'] - ip_address = [intf['ip_address'], intf['ip6_address']] - #intf_name = intf['name'] - intf_name = intf - self.logger.info("vm uve shows interface as %s" % (intf_name)) - self.logger.info("vm uve shows ip address as %s" % - (ip_address)) - self.logger.info("vm uve shows virtual netowrk as %s" % - (virtual_network)) - vm_in_vn_uve = self.analytics_obj.verify_vn_uve_for_vm( - vn_fq_name=virtual_network, vm=self.vm_id) - if not vm_in_vn_uve: - self.vm_in_op_flag = self.vm_in_op_flag and False - 
result = result and False - - # Verifying vm in vrouter-uve - self.logger.info("Verifying vm in vrouter uve") - computes = [] - for ip in self.inputs.collector_ips: - self.logger.info("Getting info from collector %s.." % (ip)) - agent_host = self.analytics_obj.get_ops_vm_uve_vm_host( - ip, self.vm_id) - if agent_host not in computes: - computes.append(agent_host) - if (len(computes) > 1): - self.logger.warn( - "Collectors doesnt have consistent info for vm uve") - self.vm_in_op_flag = self.vm_in_op_flag and False - result = result and False - self.logger.info("vm uve shows vrouter as %s" % (computes)) - - for compute in computes: - vm_in_vrouter = self.analytics_obj.verify_vm_list_in_vrouter_uve( - vm_uuid=self.vm_id, vrouter=compute) - if vm_in_vrouter: - self.vm_in_op_flag = self.vm_in_op_flag and True - result = result and True - else: - self.vm_in_op_flag = self.vm_in_op_flag and False - result = result and False - # Verify tap interface/conected networks in vrouter uve - self.logger.info("Verifying vm tap interface/vn in vrouter uve") - self.vm_host = self.inputs.host_data[self.vm_node_ip]['name'] - self.tap_interfaces = self.agent_inspect[ - self.vm_node_ip].get_vna_tap_interface_by_vm(vm_id=self.vm_id) - for intf in self.tap_interfaces: - self.tap_interface = intf['config_name'] - self.logger.info("expected tap interface of vm uuid %s is %s" % - (self.vm_id, self.tap_interface)) - self.logger.info("expected virtual network of vm uuid %s is %s" % - (self.vm_id, intf['vn_name'])) - is_tap_thr = self.analytics_obj.verify_vm_list_in_vrouter_uve( - vm_uuid=self.vm_id, - vn_fq_name=intf['vn_name'], - vrouter=self.vm_host, - tap=self.tap_interface) - - if is_tap_thr: - self.vm_in_op_flag = self.vm_in_op_flag and True - result = result and True - else: - self.vm_in_op_flag = self.vm_in_op_flag and False - result = result and False - - if self.analytics_obj.verify_vm_link(self.vm_id): - self.vm_in_op_flag = self.vm_in_op_flag and True - result = result and True - 
else: - self.vm_in_op_flag = self.vm_in_op_flag and False - result = result and False - - if result: - self.logger.info("VM %s validation in Opserver passed" % - (self.vm_name)) - else: - self.logger.warn('VM %s validation in Opserver failed' % - (self.vm_name)) - return result - - # end verify_vm_in_opserver - - @retry(delay=3, tries=15) - def tcp_data_transfer(self, localip, fip, datasize=1024): - '''Send data file from a VM to an IP specified. - - This method logs into the VM from the host machine using ssh and sends a - data file to an IP. - ''' - output = '' - url = 'http://%s/' % fip - cmd = 'curl -I -m 25 --connect-timeout 25 %s' % url - self.run_cmd_on_vm(cmds=[cmd]) - output = self.return_output_values_list[0] - if '200 OK' not in output: - self.logger.warn("Tcp data transfer to IP %s from VM %s" - " failed" % (fip, self.vm_name)) - return False - else: - self.logger.info("Tcp data transfer to IP %s from VM %s" - " Passed" % (fip, self.vm_name)) - return True - # end tcp_data_transfer - - def get_vrf_ids_accross_agents(self): - vrfs = dict() - try: - for ip in self.inputs.compute_ips: - inspect_h = self.agent_inspect[ip] - dct = dict() - for vn_fq_name in self.vn_fq_names: - vrf_id = inspect_h.get_vna_vrf_id(vn_fq_name) - if vrf_id: - dct.update({vn_fq_name: vrf_id[0]}) - if dct: - vrfs[ip] = dct - except Exception as e: - self.logger.exception('Exception while getting VRF id') - finally: - return vrfs - - def cleanUp(self): - super(VMFixture, self).cleanUp() - self.delete() - - def delete(self, verify=False): - do_cleanup = True - if self.inputs.fixture_cleanup == 'no': - do_cleanup = False - if self.already_present: - do_cleanup = False - if self.inputs.fixture_cleanup == 'force': - do_cleanup = True - if do_cleanup: - if len(self.port_ids)!=0: - for each_port_id in self.port_ids: - self.interface_detach(each_port_id) - for vm_obj in list(self.vm_objs): - for sec_grp in self.sg_ids: - self.logger.info("Removing the security group" - " from VM %s" % 
(vm_obj.name)) - self.remove_security_group(sec_grp) - self.logger.info("Deleting the VM %s" % (vm_obj.name)) - if self.inputs.is_gui_based_config(): - self.webui.delete_vm(self) - else: - self.orch.delete_vm(vm_obj) - self.vm_objs.remove(vm_obj) - time.sleep(5) - self.verify_cleared_from_setup(verify=verify) - else: - self.logger.info('Skipping the deletion of VM %s' % - (self.vm_name)) - # end cleanUp - - def verify_cleared_from_setup(self, check_orch=True, verify=False): - # Not expected to do verification when self.count is > 1, right now - if self.verify_is_run or verify: - assert self.verify_vm_not_in_api_server() - if check_orch: - assert self.verify_vm_not_in_orchestrator() - assert self.verify_vm_not_in_agent() - assert self.verify_vm_not_in_control_nodes() - assert self.verify_vm_not_in_nova() - - assert self.verify_vm_flows_removed() - for vn_fq_name in self.vn_fq_names: - self.analytics_obj.verify_vm_not_in_opserver( - self.vm_id, - self.inputs.host_data[self.vm_node_ip]['name'], - vn_fq_name) - - # Trying a workaround for Bug 452 - # end if - return True - - @retry(delay=2, tries=25) - def verify_vm_not_in_nova(self): - result = True - self.verify_vm_not_in_nova_flag = True - # In environments which does not have mysql token file, skip the check - if not self.inputs.get_mysql_token(): - return result - for vm_obj in self.vm_objs: - result = result and self.orch.is_vm_deleted(vm_obj) - self.verify_vm_not_in_nova_flag =\ - self.verify_vm_not_in_nova_flag and result - return result - # end verify_vm_not_in_nova - - def tftp_file_to_vm(self, file, vm_ip): - '''Do a tftp of the specified file to the specified VM - - ''' - host = self.inputs.host_data[self.vm_node_ip] - output = '' - if "TEST_DELAY_FACTOR" in os.environ: - delay_factor = os.environ.get("TEST_DELAY_FACTOR") - else: - delay_factor = "1.0" - timeout = math.floor(40 * float(delay_factor)) - try: - with hide('everything'): - with settings(host_string='%s@%s' % ( - host['username'], 
self.vm_node_ip), - password=host['password'], - warn_only=True, abort_on_prompts=False): - if os.environ.has_key('ci_image'): - i = 'tftp -p -r %s -l %s %s' % (file, file, vm_ip) - else: - i = 'timeout %d atftp -p -r %s -l %s %s' % (timeout, - file, file, vm_ip) - self.run_cmd_on_vm(cmds=[i], timeout=timeout + 10) - except Exception, e: - self.logger.exception( - 'Exception occured while trying to tftp the file') - # end tftp_file_to_vm - - def scp_file_to_vm(self, file, vm_ip, dest_vm_username='ubuntu'): - '''Do a scp of the specified file to the specified VM - - ''' - host = self.inputs.host_data[self.vm_node_ip] - output = '' - - # We need to retry following section and scale it up if required (for slower VMs - # TODO: Use @retry annotation instead - if "TEST_DELAY_FACTOR" in os.environ: - delay_factor = os.environ.get("TEST_DELAY_FACTOR") - else: - delay_factor = "1.0" - timeout = math.floor(40 * float(delay_factor)) - - try: - self.orch.put_key_file_to_host(self.vm_node_ip) - with hide('everything'): - with settings(host_string='%s@%s' % ( - host['username'], self.vm_node_ip), - password=host['password'], - warn_only=True, abort_on_prompts=False): - self.get_rsa_to_vm() - i = 'timeout %d scp -o StrictHostKeyChecking=no -i id_rsa %s %s@[%s]:' % ( - timeout, file, dest_vm_username, vm_ip) - cmd_outputs = self.run_cmd_on_vm( - cmds=[i], timeout=timeout + 10) - self.logger.debug(cmd_outputs) - except Exception, e: - self.logger.exception( - 'Exception occured while trying to scp the file ') - # end scp_file_to_vm - - def put_pub_key_to_vm(self): - self.logger.debug('Copying public key to VM %s' % (self.vm_name)) - self.orch.put_key_file_to_host(self.vm_node_ip) - auth_file = '.ssh/authorized_keys' - self.run_cmd_on_vm(['mkdir -p ~/.ssh']) - host = self.inputs.host_data[self.vm_node_ip] - with hide('everything'): - with settings( - host_string='%s@%s' % (host['username'], self.vm_node_ip), - password=host['password'], - warn_only=True, abort_on_prompts=False): - 
fab_put_file_to_vm(host_string='%s@%s' % ( - self.vm_username, self.local_ip), - password=self.vm_password, - src='/tmp/id_rsa.pub', dest='/tmp/') - cmds = [ - 'cat /tmp/id_rsa.pub >> ~/%s' % (auth_file), - 'chmod 600 ~/%s' % (auth_file), - 'cat /tmp/id_rsa.pub >> /root/%s' % (auth_file), - 'chmod 600 /root/%s' % (auth_file), - 'chown %s ~/%s' % (self.vm_username, auth_file), - 'chgrp %s ~/%s' % (self.vm_username, auth_file), - '''sed -i -e 's/no-port-forwarding.*sleep 10\" //g' ~root/.ssh/authorized_keys'''] - self.run_cmd_on_vm(cmds, as_sudo=True) - - @retry(delay=10, tries=5) - def check_file_transfer(self, dest_vm_fixture, dest_vn_fq_name=None, mode='scp', - size='100', fip=None, expectation=True, af=None): - ''' - Creates a file of "size" bytes and transfers to the VM in dest_vm_fixture using mode scp/tftp - ''' - filename = 'testfile' - # Create file - cmd = 'dd bs=%s count=1 if=/dev/zero of=%s' % (size, filename) - self.run_cmd_on_vm(cmds=[cmd], as_sudo=True) - - if fip: - dest_vm_ips = [fip] - else: - dest_vm_ips = dest_vm_fixture.get_vm_ips( - vn_fq_name=dest_vn_fq_name, af=af) - if mode == 'scp': - dest_vm_fixture.run_cmd_on_vm( - cmds=['cp -f ~root/.ssh/authorized_keys ~/.ssh/'], as_sudo=True) - absolute_filename = filename - elif mode == 'tftp': - # Create the file on the remote machine so that put can be done - absolute_filename = '/var/lib/tftpboot/' + filename - dest_vm_fixture.run_cmd_on_vm( - cmds=['sudo touch %s' % (absolute_filename), - 'sudo chmod 777 %s' % (absolute_filename)]) - else: - self.logger.error('No transfer mode specified!!') - return False - - for dest_vm_ip in dest_vm_ips: - if mode == 'scp': - self.scp_file_to_vm(filename, vm_ip=dest_vm_ip, - dest_vm_username=dest_vm_fixture.vm_username) - else: - self.tftp_file_to_vm(filename, vm_ip=dest_vm_ip) - self.run_cmd_on_vm(cmds=['sync']) - # Verify if file size is same - out_dict = dest_vm_fixture.run_cmd_on_vm( - cmds=['wc -c %s' % (absolute_filename)]) - if size in 
out_dict.values()[0]: - self.logger.info('File of size %s is trasferred successfully to \ - %s by %s ' % (size, dest_vm_ip, mode)) - if not expectation: - return False - else: - self.logger.warn('File of size %s is not trasferred fine to %s \ - by %s' % (size, dest_vm_ip, mode)) - dest_vm_fixture.run_cmd_on_vm( - cmds=['rm -f %s' % (absolute_filename)]) - if mode == 'tftp': - dest_vm_fixture.run_cmd_on_vm( - cmds=['sudo touch %s' % (absolute_filename), - 'sudo chmod 777 %s' % (absolute_filename)]) - if expectation: - return False - return True - # end check_file_transfer - - def get_rsa_to_vm(self): - '''Get the rsa file to the VM from the agent - - ''' - host = self.inputs.host_data[self.vm_node_ip] - output = '' - try: - self.orch.put_key_file_to_host(self.vm_node_ip) - with hide('everything'): - with settings( - host_string='%s@%s' % ( - host['username'], self.vm_node_ip), - password=host['password'], - warn_only=True, abort_on_prompts=False): - key_file = self.orch.get_key_file() - fab_put_file_to_vm(host_string='%s@%s' % ( - self.vm_username, self.local_ip), - password=self.vm_password, - src=key_file, dest='~/') - self.run_cmd_on_vm(cmds=['chmod 600 id_rsa']) - - except Exception, e: - self.logger.exception( - 'Exception occured while trying to get the rsa file to the \ - VM from the agent') - # end get_rsa_to_vm - - def config_via_netconf(self, cmds=None): - '''run cmds on VM - ''' - host = self.inputs.host_data[self.vm_node_ip] - output = '' - try: - self.orch.put_key_file_to_host(self.vm_node_ip) - fab_connections.clear() - with hide('everything'): - with settings( - host_string='%s@%s' % (host['username'], self.vm_node_ip), - password=host['password'], - warn_only=True, abort_on_prompts=False): - self.logger.debug('Running Cmd on %s' % - self.vm_node_ip) - output = run_netconf_on_node( - host_string='%s@%s' % ( - self.vm_username, self.local_ip), - password=self.vm_password, - cmds=cmds) - return output - except Exception, e: - self.logger.exception( - 
'Exception occured while trying ping from VM') - return False - # end get_config_via_netconf - - def run_cmd_on_vm(self, cmds=[], as_sudo=False, timeout=30, as_daemon=False): - '''run cmds on VM - - ''' - self.return_output_cmd_dict = {} - self.return_output_values_list = [] - cmdList = cmds - host = self.inputs.host_data[self.vm_node_ip] - output = '' - try: - self.orch.put_key_file_to_host(self.vm_node_ip) - fab_connections.clear() - with hide('everything'): - with settings( - host_string='%s@%s' % (host['username'], self.vm_node_ip), - password=host['password'], - warn_only=True, abort_on_prompts=False): - for cmd in cmdList: - self.logger.debug('Running Cmd on %s: %s' % ( - self.vm_node_ip, cmd)) - output = run_fab_cmd_on_node( - host_string='%s@%s' % ( - self.vm_username, self.local_ip), - password=self.vm_password, - cmd=cmd, - as_sudo=as_sudo, - timeout=timeout, - as_daemon=as_daemon) - self.logger.debug(output) - self.return_output_values_list.append(output) - self.return_output_cmd_dict = dict( - zip(cmdList, self.return_output_values_list)) - return self.return_output_cmd_dict - except Exception, e: - self.logger.exception( - 'Exception occured while trying ping from VM') - return self.return_output_cmd_dict - - def get_vm_ip_from_vm(self, vn_fq_name=None): - ''' Get VM IP from Ifconfig output executed on VM - ''' - vm_ip = None - if not vn_fq_name: - vn_fq_name = self.vn_fq_names[0] - cmd = "ifconfig | grep %s" % (self.tap_intf[vn_fq_name]['ip_addr']) - self.run_cmd_on_vm(cmds=[cmd]) - output = self.return_output_cmd_dict[cmd] - match = re.search('inet addr:(.+?) 
Bcast:', output) - if match: - vm_ip = match.group(1) - return vm_ip - # end def - - def wait_till_vm_is_up(self): - status = self.wait_till_vm_up() - return_status = None - if type(status) == tuple: - return_status = status[0] - elif type(status) == bool: - return_status = status - - # Get the console output in case of failures - if not return_status: - self.logger.debug(self.get_console_output()) - return return_status - - def wait_till_vm_is_active(self): - status = self.orch.wait_till_vm_is_active(self.vm_obj) - if type(status) == tuple: - if status[1] in 'ERROR': - return False - elif status[1] in 'ACTIVE': - return True - elif type(status) == bool: - return status - - @retry(delay=5, tries=10) - def wait_till_vm_up(self): - vm_status = self.orch.wait_till_vm_is_active(self.vm_obj) - if type(vm_status) == tuple: - if vm_status[1] in 'ERROR': - self.logger.warn("VM in error state. Asserting...") - return (False, 'final') -# assert False - - if vm_status[1] != 'ACTIVE': - result = result and False - return result - elif type(vm_status) == bool and not vm_status: - return (vm_status, 'final') - - result = self.verify_vm_launched() - #console_check = self.nova_h.wait_till_vm_is_up(self.vm_obj) - #result = result and self.nova_h.wait_till_vm_is_up(self.vm_obj) - # if not console_check : - # import pdb; pdb.set_trace() - # self.logger.warn('Console logs didnt give enough info on bootup') - self.vm_obj.get() - result = result and self._gather_details() - result = result and self.wait_for_ssh_on_vm() - if not result: - self.logger.error('VM %s does not seem to be fully up' % ( - self.vm_name)) - self.logger.error('Console output: %s' % self.get_console_output()) - return result - return True - # end wait_till_vm_is_up - - def scp_file_transfer_cirros(self, dest_vm_fixture, fip=None, size='100'): - ''' - Creates a file of "size" bytes and transfers to the VM in dest_vm_fixture using mode scp/tftp - ''' - filename = 'testfile' - dest_vm_ip = dest_vm_fixture.vm_ip - 
import pexpect - # Create file - cmd = 'dd bs=%s count=1 if=/dev/zero of=%s' % (size, filename) - self.run_cmd_on_vm(cmds=[cmd]) - host = self.inputs.host_data[self.vm_node_ip] - - if "TEST_DELAY_FACTOR" in os.environ: - delay_factor = os.environ.get("TEST_DELAY_FACTOR") - else: - delay_factor = "1.0" - timeout = math.floor(40 * float(delay_factor)) - - with settings(host_string='%s@%s' % (host['username'], self.vm_node_ip), - password=host['password'], - warn_only=True, abort_on_prompts=False): - handle = pexpect.spawn( - 'ssh -F /dev/null -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s' % (self.vm_username, self.local_ip)) - handle.timeout = int(timeout) - i = handle.expect(['\$ ', 'password:']) - if i == 0: - pass - if i == 1: - handle.sendline('cubswin:)') - handle.expect('\$ ') - if fip: - handle.sendline('scp %s %s@%s:~/.' % - (filename, dest_vm_fixture.vm_username, fip)) - else: - handle.sendline( - 'scp %s %s@%s:~/.' % (filename, dest_vm_fixture.vm_username, dest_vm_fixture.vm_ip)) - i = handle.expect( - ['Do you want to continue connecting', '[P,p]assword']) - if i == 0: - handle.sendline('y') - handle.expect('[P,p]assword') - handle.sendline('cubswin:)') - elif i == 1: - handle.sendline('cubswin:)') - else: - self.logger.warn('scp file to VM failed') - out_dict = dest_vm_fixture.run_cmd_on_vm( - cmds=['ls -l %s' % (filename)]) - if size in out_dict.values()[0]: - self.logger.info('File of size %s is trasferred successfully to \ - %s ' % (size, dest_vm_fixture.vm_name)) - return True - else: - self.logger.warn('File of size %s is not trasferred fine to %s \ - !! 
Pls check logs' % (size, dest_vm_fixture.vm_name)) - return False - - # end scp_file_transfer_cirros - - def get_console_output(self): - return self.orch.get_console_output(self.vm_obj) - - @retry(delay=5, tries=20) - def wait_for_ssh_on_vm(self): - self.logger.info('Waiting to SSH to VM %s, IP %s' % (self.vm_name, - self.vm_ip)) - host = self.inputs.host_data[self.vm_node_ip] - with settings(host_string='%s@%s' % (host['username'], - self.vm_node_ip), password=host['password'], - warn_only=True, abort_on_prompts=False): - # Check if ssh from compute node to VM works(with retries) - if fab_check_ssh('@'.join([self.vm_username, self.local_ip]), - self.vm_password): - self.logger.info('VM %s is ready for SSH connections'%( - self.vm_name)) - return True - self.logger.error('VM %s is NOT ready for SSH connections'%( - self.vm_name)) - return False - # end wait_for_ssh_on_vm - - def copy_file_to_vm(self, localfile, dstdir=None, force=False): - host = self.inputs.get_host_ip(self.vm_node_ip) - filename = localfile.split('/')[-1] - if dstdir: - remotefile = dstdir+'/'+filename - else: - remotefile = filename - self.inputs.copy_file_to_server(host, localfile, '/tmp/', filename, force) - cmd = 'fab -u %s -p "%s" -H %s ' % ( - self.vm_username, self.vm_password, self.local_ip) - cmd = cmd + 'fput:%s,%s'%('/tmp/'+filename, remotefile) - self.inputs.run_cmd_on_server(host, cmd) - - def get_vm_ipv6_addr_from_vm(self, intf='eth0', addr_type='link'): - ''' Get VM IPV6 from Ifconfig output executed on VM - ''' - vm_ipv6 = None - cmd = "ifconfig %s| awk '/inet6/ {print $3}'" % (intf) - self.run_cmd_on_vm(cmds=[cmd]) - if cmd in self.return_output_cmd_dict.keys(): - output = self.return_output_cmd_dict[cmd] - if (addr_type == 'link'): - match = re.search('inet6 addr:(.+?) Scope:Link', output) - elif (addr_type == 'global'): - match = re.search('inet6 addr:(.+?) 
Scope:Global', output) - else: - match = None - - if match: - vm_ipv6 = match.group(1) - return vm_ipv6 - - def get_active_controller(self): - ''' Get the active contol node. - ''' - active_controller = None - inspect_h = self.agent_inspect[self.vm_node_ip] - agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status() - for entry in agent_xmpp_status: - if entry['cfg_controller'] == 'Yes' \ - and entry['state'] == 'Established': - active_controller = entry['controller_ip'] - if not active_controller: - self.logger.error('Active controlloer is not found') - return active_controller - - def install_pkg(self, pkgname="Traffic"): - if pkgname == "Traffic": - self.logger.info("Skipping installation of traffic package on VM") - return True - username = self.inputs.host_data[self.inputs.cfgm_ip]['username'] - password = self.inputs.host_data[self.inputs.cfgm_ip]['password'] - pkgsrc = PkgHost(self.inputs.cfgm_ips[0], self.vm_node_ip, - username, password) - self.orch.put_key_file_to_host(self.vm_node_ip) - key = self.orch.get_key_file() - pkgdst = PkgHost(self.local_ip, key=key, user=self.vm_username, - password=self.vm_password) - fab_connections.clear() - - assert build_and_install(pkgname, pkgsrc, pkgdst, self.logger) - - @retry(delay=2, tries=15) - def verify_vm_flows_removed(self): - cmd = 'flow -l ' - result = True - # TODO Change the logic so that check is not global(causes problems - # when run in parallel if same IP is across Vns or projects) - # May be we could match on NH index along with IP - return result - self.vm_flows_removed_flag = True - output = self.inputs.run_cmd_on_server(self.vm_node_ip, cmd, - self.inputs.host_data[ - self.vm_node_ip]['username'], - self.inputs.host_data[self.vm_node_ip]['password']) - matches = [x for x in self.vm_ips if '%s:' % x in output] - if matches: - self.logger.warn( - "One or more flows still present on Compute node after VM delete : %s" % (output)) - result = False - else: - self.logger.info("All flows for the VM 
deleted on Compute node") - self.vm_flows_removed_flag = self.vm_flows_removed_flag and result - return result - # end verify_vm_flows_removed - - def start_webserver(self, listen_port=8000, content=None): - '''Start Web server on the specified port. - ''' - host = self.inputs.host_data[self.vm_node_ip] - fab_connections.clear() - try: - with settings(host_string='%s@%s'%(host['username'], - self.vm_node_ip), password=host['password'], - warn_only=True, abort_on_prompts=False): - vm_host_string = '%s@%s'%(self.vm_username, self.local_ip) - cmd = 'echo %s >& index.html'%(content or self.vm_name) - output = run_fab_cmd_on_node(host_string=vm_host_string, - password=self.vm_password, - cmd=cmd) - cmd = 'python -m SimpleHTTPServer %d &> /dev/null'%listen_port - output = run_fab_cmd_on_node(host_string=vm_host_string, - password=self.vm_password, - cmd=cmd, as_daemon=True) - self.logger.debug(output) - except Exception, e: - self.logger.exception( - 'Exception occured while starting webservice on VM') - return False - # end webserver - - def provision_static_route( - self, - prefix='111.1.0.0/16', - tenant_name=None, - api_server_ip='127.0.0.1', - api_server_port='8082', - oper='add', - virtual_machine_interface_id='', - route_table_name='my_route_table', - user='admin', - password='contrail123'): - - if not tenant_name: - tenant_name = self.inputs.stack_tenant - cmd = "python /opt/contrail/utils/provision_static_route.py --prefix %s \ - --tenant_name %s \ - --api_server_ip %s \ - --api_server_port %s\ - --oper %s \ - --virtual_machine_interface_id %s \ - --user %s\ - --password %s\ - --route_table_name %s" % (prefix, - tenant_name, - api_server_ip, - api_server_port, - oper, - virtual_machine_interface_id, - user, - password, - route_table_name) - args = shlex.split(cmd) - process = Popen(args, stdout=PIPE) - stdout, stderr = process.communicate() - if stderr: - self.logger.warn("Route could not be created , err : \n %s" % - (stderr)) - else: - self.logger.info("%s" % 
(stdout)) - - def _gather_details(self): - self.cs_vmi_obj = {} - self.get_vmi_objs() - self.vm_id = self.vm_objs[0].id - # Figure out the local metadata IP of the VM reachable from host - inspect_h = self.agent_inspect[self.vm_node_ip] - - cfgm_ip = self.inputs.cfgm_ips[0] - api_inspect = self.api_s_inspects[cfgm_ip] - vmi_objs = self.get_vmi_obj_from_api_server(cfgm_ip, refresh=True)[1] - for vmi_obj in vmi_objs: - vmi_vn_fq_name = ':'.join( - vmi_obj['virtual-machine-interface']['virtual_network_refs'][0]['to']) - self.cs_vmi_obj[vmi_vn_fq_name] = vmi_obj - - for vn_fq_name in self.vn_fq_names: - (domain, project, vn) = vn_fq_name.split(':') - vnic_type=self.get_vmi_type(self.cs_vmi_obj[vn_fq_name]) - if vnic_type != unicode('direct'): - vna_tap_id = inspect_h.get_vna_tap_interface_by_vmi( - vmi_id=self.cs_vmi_obj[vn_fq_name][ - 'virtual-machine-interface']['uuid']) - self.tap_intf[vn_fq_name] = vna_tap_id[0] - self.tap_intf[vn_fq_name] = inspect_h.get_vna_intf_details( - self.tap_intf[vn_fq_name]['name'])[0] - if 'Active' not in self.tap_intf[vn_fq_name]['active']: - self.logger.warn('VMI %s status is not active, it is %s' % ( - self.tap_intf[vn_fq_name]['name'], - self.tap_intf[vn_fq_name]['active'])) - return False - self.local_ips[vn_fq_name] = self.tap_intf[ - vn_fq_name]['mdata_ip_addr'] - self.mac_addr[vn_fq_name] = self.tap_intf[vn_fq_name]['mac_addr'] - if '169.254' not in self.local_ip: - self.logger.warn('VM metadata IP is not 169.254.x.x') - return False - return True - # end _gather_details - - def interface_attach(self, port_id=None, net_id=None, fixed_ip=None): - self.logger.info('Attaching port %s to VM %s' % - (port_id, self.vm_obj.name)) - return self.vm_obj.interface_attach(port_id, net_id, fixed_ip) - - def interface_detach(self, port_id): - self.logger.info('Detaching port %s from VM %s' % - (port_id, self.vm_obj.name)) - return self.vm_obj.interface_detach(port_id) - - def reboot(self, type='SOFT'): - self.vm_obj.reboot(type) - - def 
wait_till_vm_status(self, status='ACTIVE'): - return self.orch.wait_till_vm_status(self.vm_obj, status) - - def wait_till_vm_boots(self): - return self.nova_h.wait_till_vm_is_up(self.vm_obj) - - def get_arp_entry(self, ip_address=None, mac_address=None): - out_dict = self.run_cmd_on_vm(["arp -an"]) - return search_arp_entry(out_dict.values()[0], ip_address, mac_address) - # end get_arp_entry - - def get_gateway_ip(self): - cmd = '''netstat -anr |grep ^0.0.0.0 | awk '{ print \\\\$2 }' ''' - out_dict = self.run_cmd_on_vm([cmd]) - return out_dict.values()[0].rstrip('\r') - # end get_gateway_ip - - def get_gateway_mac(self): - return self.get_arp_entry(ip_address=self.get_gateway_ip())[1] - - def migrate(self, compute): - self.orch.migrate_vm(self.vm_obj, compute) - - def start_tcpdump(self, interface=None, filters=''): - ''' This is similar to start_tcpdump_for_vm_intf() in tcpdump_utils.py - But used here too, for ease of use - ''' - if not interface: - interface = self.tap_intf.values()[0]['name'] - compute_ip = self.vm_node_ip - compute_user = self.inputs.host_data[compute_ip]['username'] - compute_password = self.inputs.host_data[compute_ip]['password'] - - (session, pcap) = start_tcpdump_for_intf(compute_ip, compute_user, - compute_password, interface, filters, self.logger) - return (session, pcap) - # end start_tcpdump - - def stop_tcpdump(self, session, pcap): - stop_tcpdump_for_intf(session, pcap, self.logger) - - def get_vm_interface_name(self, mac_address=None): - ''' - Given a MAC address, returns the corresponding interface name - in the VM - - Note that ifconfig output for some distros like fedora is diff - from that in Ubuntu/Cirros - Ubuntu has ifconfig with format - p1p2 Link encap:Ethernet HWaddr 00:25:90:c3:0a:f3 - - and redhat-based distros has - virbr0: flags=4099 mtu 1500 - inet 192.168.122.1 netmask 255.255.255.0 broadcast 192.168.122.255 - ether 42:7b:49:5e:cf:12 txqueuelen 0 (Ethernet) - ''' - if not mac_address: - mac_address = 
self.mac_addr.values()[0] - if mac_address in self._vm_interface.keys(): - return self._vm_interface[mac_address] - ubuntu_cmd = 'ifconfig | grep "%s" | awk \'{print \\\\$1}\' | head -1' %( - mac_address) - redhat_cmd = 'ifconfig | grep -i -B 2 "%s" | grep flags | '\ - 'awk \'{print \\\\$1}\'' % (mac_address) - cmd = 'test -f /etc/redhat-release && %s || %s' % (redhat_cmd, - ubuntu_cmd) - name = self.run_cmd_on_vm([cmd]).strip(':') - self._vm_interface[mac_address] = name - return name - # end get_vm_interface_name - - def arping(self, ip, interface=None): - if not interface: - interface_mac = self.mac_addr.values()[0] - interface = self.get_vm_interface_name(interface_mac) - - cmd = 'arping -i %s -c 1 -r %s' % (interface, ip) - outputs = self.run_cmd_on_vm([cmd]) - my_output = outputs.values()[0] - self.logger.debug('On VM %s, arping to %s on %s returned :%s' % ( - self.vm_name, ip, interface, my_output)) - formatted_output = remove_unwanted_output(my_output) - return (my_output.succeeded, formatted_output) - # end arping - - def run_dhclient(self, interface=None): - if not interface: - interface_mac = self.mac_addr.values()[0] - interface = self.get_vm_interface_name(interface_mac) - cmds = ['dhclient -r %s ; dhclient %s' % (interface, interface)] - outputs = self.run_cmd_on_vm(cmds, as_sudo=True, timeout=10) - my_output = outputs.values()[0] - self.logger.debug('On VM %s, dhcp on %s returned :%s' % ( - self.vm_name, interface, my_output)) - formatted_output = remove_unwanted_output(my_output) - return (my_output.succeeded, formatted_output) - # end run_dhclient - - def add_static_arp(self, ip, mac): - self.run_cmd_on_vm(['arp -s %s %s' % (ip, mac)], as_sudo=True) - self.logger.info('Added static arp %s:%s on VM %s' % (ip, mac, - self.vm_name)) - # end add_static_arp - - def run_python_code(self, code, as_sudo=True): - folder = tempfile.mkdtemp() - filename_short = 'program.py' - filename = '%s/%s' % (folder, filename_short) - fh = open(filename, 'w') - 
fh.write(code) - fh.close() - - host = self.inputs.host_data[self.vm_node_ip] - with settings( - host_string='%s@%s' % (host['username'], self.vm_node_ip), - password=host['password'], - warn_only=True, abort_on_prompts=False, - hide='everything'): - self.copy_file_to_vm(filename, '/tmp', force=True) - outputs = self.run_cmd_on_vm(['python /tmp/%s' % (filename_short)], - as_sudo=as_sudo) - shutil.rmtree(folder) - return outputs.values()[0] - # end run_python_code - def get_vmi_type(self, vm_obj): - for element in vm_obj['virtual-machine-interface']['virtual_machine_interface_bindings']['key_value_pair']: - if element['key'] == 'vnic_type': - return element['value'] - - -# end VMFixture - -class VMData(object): - - """ Class to store VM related data. - """ - - def __init__(self, name, vn_obj, image='ubuntu', project='admin', flavor='m1.tiny'): - self.name = name - self.vn_obj = vn_obj - self.image = image - self.project = project - self.flavor = flavor - - -class MultipleVMFixture(fixtures.Fixture): - - """ - Fixture to handle creation, verification and deletion of multiple VMs. - - Deletion of the VM upon exit can be disabled by setting fixtureCleanup= 'no' - in params file. If a VM with the vm_name is already present, it is not - deleted upon exit. To forcefully clean them up, set fixtureCleanup= 'force' - """ - - def __init__(self, connections, vms=[], vn_objs=[], image_name='ubuntu', - vm_count_per_vn=2, flavor=None, project_name=None): - """ - vms : List of dictionaries of VMData objects. - or - vn_objs : List of tuples of VN name and VNfixture.obj returned by the - get_all_fixture method of MultipleVNFixture. 
- - """ - - self.connections = connections - self.nova_h = self.connections.nova_h - if not project_name: - project_name = connections.inputs.project_name - self.project_name = project_name - self.vms = vms - self.vm_count = vm_count_per_vn - self.vn_objs = vn_objs - self.flavor = flavor - self.image_name = image_name - self.inputs = self.connections.inputs - self.logger = self.inputs.logger - # end __init__ - - def create_vms_in_vn(self, name, image, flavor, project, vn_obj): - for c in range(self.vm_count): - vm_name = '%s_vm_%s' % (name, c) - try: - vm_fixture = self.useFixture(VMFixture(image_name=image, - project_name=project, flavor=flavor, connections=self.connections, - vn_obj=vn_obj, vm_name=vm_name)) - except Exception, err: - self.logger.error(err) - self.logger.debug(traceback.format_exc()) - break - else: - self._vm_fixtures.append((vm_name, vm_fixture)) - - def setUp(self): - super(MultipleVMFixture, self).setUp() - self._vm_fixtures = [] - if self.vms: - for vm in vms: - self.create_vms_in_vn(vm.name, vm.image, vm.flavor, vm.project, - vm.vn_obj) - elif self.vn_objs: - for vn_name, vn_obj in self.vn_objs: - self.create_vms_in_vn(vn_name, self.image_name, self.flavor, - self.project_name, vn_obj) - else: - self.logger.error("One of vms, vn_objs is required.") - - def verify_on_setup(self): - # TODO - # Not expected to do verification when self.count > 1 - - created_vms = len(self._vm_fixtures) - expected_vms = len(self.vms) - if self.vn_objs: - expected_vms = self.vm_count * len(self.vn_objs) - - if created_vms != expected_vms: - return False - - result = True - for vm_name, vm_fixture in self._vm_fixtures: - result &= vm_fixture.verify_on_setup() - - return result - - def get_all_fixture(self): - return self._vm_fixtures - - def wait_for_ssh_on_vm(self): - - result = True - for vm_name, vm_fixture in self._vm_fixtures: - result &= vm_fixture.wait_for_ssh_on_vm() - - return result - - def wait_till_vm_is_up(self): - - result = True - for vm_name, 
vm_fixture in self._vm_fixtures: - result &= vm_fixture.wait_till_vm_is_up() - - return result diff --git a/fixtures/vn_policy_test.py b/fixtures/vn_policy_test.py deleted file mode 100644 index fb049da84..000000000 --- a/fixtures/vn_policy_test.py +++ /dev/null @@ -1,121 +0,0 @@ -import fixtures -from vnc_api import vnc_api -import inspect -from quantum_test import * -try: - from webui_test import * -except ImportError: - pass - -class VN_Policy_Fixture(fixtures.Fixture): - - """ Fixture to take care of linking VN & Policies. Has methods to attach & detach policies to/from VN. - Useful when VNs are created without policy and attached later.. - Fixture will take care of cleanup in reverse order. - Ex. createVN, createPolicy, attachVNPolicy, test, cleanup [detachVNPolicy, delete Policy, deleteVN] - """ - - def __init__(self, connections, vn_name, vn_obj, vn_policys, project_name, options='openstack', policy_obj=[]): - - self.connections = connections - self.inputs = self.connections.inputs - self.quantum_h = self.connections.quantum_h - self.project_name = project_name - self.vnc_lib = self.connections.vnc_lib - self.api_s_inspect = self.connections.api_server_inspect - self.logger = self.inputs.logger - self.vn_policys = vn_policys - self.policy_obj = policy_obj - self.vn_obj = vn_obj - self.skip_verify = 'no' - self.vn = vn_name - self.already_present = False - self.option = options if self.inputs.orchestrator == 'openstack' else 'contrail' - if self.inputs.verify_thru_gui(): - self.browser = self.connections.browser - self.browser_openstack = self.connections.browser_openstack - self.webui = WebuiTest(self.connections, self.inputs) - # end __init__ - - def setUp(self): - super(VN_Policy_Fixture, self).setUp() - policy_of_vn = self.api_s_inspect.get_cs_vn_policys( - project=self.project_name, vn=self.vn, refresh=True) - if policy_of_vn: - for policy in policy_of_vn: - if policy in self.vn_policys: - self.logger.info( - "Policy:%s already Associated to VN:%s'" % - 
(policy, self.vn)) - self.already_present = True - else: - if self.policy_obj[self.vn]: - self.logger.info("Setup step: Associating the policy to VN'") - if self.option == 'openstack': - policy_fq_names = [ - self.quantum_h.get_policy_fq_name(x) for x in self.policy_obj[self.vn]] - if self.inputs.is_gui_based_config(): - self.webui.bind_policies(self) - else: - self.vn_obj[self.vn].bind_policies( - policy_fq_names, self.vn_obj[self.vn].vn_id) - self.logger.info('Associated Policy:%s to %s' % - (policy_fq_names, self.vn)) - elif self.option == 'contrail': - ref_tuple = [] - vn_update_rsp = None - vnc_obj = self.vn_obj[self.vn].getObj() - policys = self.policy_obj[self.vn] - for seq, conf_policy in enumerate(policys): - vnc_obj.add_network_policy(conf_policy, - vnc_api.VirtualNetworkPolicyType( - sequence=vnc_api.SequenceType(major=seq, minor=0))) - vn_update_rsp = self.vnc_lib.virtual_network_update(vnc_obj) - self.logger.info('Associated Policy to %s' % (self.vn)) - return self - # end attachPolicytoVN - - # end setUp - - def cleanUp(self): - super(VN_Policy_Fixture, self).cleanUp() - do_cleanup = True - if self.inputs.fixture_cleanup == 'no': - do_cleanup = False - if self.already_present: - do_cleanup = False - if self.inputs.fixture_cleanup == 'force': - do_cleanup = True - if do_cleanup: - self.detach_Policy_VN() - else: - self.logger.info('Skipping policy detach from VN %s' % (self.vn)) - # end cleanUp - - def detach_Policy_VN(self): - self.logger.info('Detaching the Policy for VN :%s ' % (self.vn)) - policy_fq_names = [] - if self.policy_obj[self.vn]: - policy_of_vn = self.api_s_inspect.get_cs_vn_policys( - project=self.project_name, vn=self.vn, refresh=True) - if policy_of_vn: - if self.option == 'openstack': - for policy in policy_of_vn: - policy_fq_names.append(self.api_s_inspect.get_cs_policy( - project=self.project_name, policy=policy)['network-policy']['fq_name']) - if self.inputs.is_gui_based_config(): - self.webui.detach_policies(self) - else: - 
self.vn_obj[self.vn].unbind_policies( - self.vn_obj[self.vn].vn_id, policy_fq_names) - self.logger.info('Detached Policy:%s from %s' % - (policy_fq_names, self.vn)) - elif self.option == 'contrail': - vn_update_rsp = None - vnc_obj = self.vn_obj[self.vn].getObj() - for conf_policy in self.policy_obj[self.vn]: - vnc_obj.del_network_policy(conf_policy) - vn_update_rsp = self.vnc_lib.virtual_network_update(vnc_obj) - self.logger.info('Detached Policy from %s' % (self.vn)) - - # end of detach_policy_VN diff --git a/fixtures/vn_test.py b/fixtures/vn_test.py deleted file mode 100644 index dc5844091..000000000 --- a/fixtures/vn_test.py +++ /dev/null @@ -1,1355 +0,0 @@ -import fixtures -from ipam_test import * -from project_test import * -from tcutils.util import * -from vnc_api.vnc_api import * -from netaddr import * -from time import sleep -from contrail_fixtures import * -import inspect -from common.policy import policy_test_utils -import threading -import sys -from quantum_test import NetworkClientException -try: - from webui_test import * -except ImportError: - pass - -class NotPossibleToSubnet(Exception): - - """Raised when a given network/prefix is not possible to be subnetted to - required numer of subnets. - """ - pass - - -#@contrail_fix_ext () -class VNFixture(fixtures.Fixture): - - ''' Fixture to create and verify and delete VNs. - - Deletion of the VN upon exit can be disabled by setting fixtureCleanup=no - If a VN with the vn_name is already present, it is not deleted upon exit. Use fixtureCleanup=force to force a delete. - - vn_fixture= VNFixture(...) 
- vn_fixture.obj : VN object dict from the stack - vn_fixture.vn_id : UUID of the VN - vn_fixture.vn_name : Name of the VN - vn_fixture.vn_fq_name : FQ name of the VN - ''' - def __init__(self, connections, inputs=None, vn_name=None, policy_objs=[], - subnets=[], project_name=None, router_asn='64512', - rt_number=None, ipam_fq_name=None, option='quantum', - forwarding_mode=None, vxlan_id=None, shared=False, - router_external=False, clean_up=True, project_obj= None, - af=None, empty_vn=False, enable_dhcp=True, - dhcp_option_list=None, disable_gateway=False, - uuid=None, sriov_enable=False, sriov_vlan=None, - sriov_provider_network=None): - self.connections = connections - self.inputs = inputs or connections.inputs - self.logger = self.connections.logger - self.orch = self.connections.orch - self.quantum_h = self.connections.quantum_h - self.vnc_lib_h = self.connections.get_vnc_lib_h() - self.api_s_inspect = self.connections.api_server_inspect - self.agent_inspect = self.connections.agent_inspect - self.cn_inspect = self.connections.cn_inspect - self.analytics_obj = self.connections.analytics_obj - self.domain_name = self.connections.domain_name - self.project_name = project_name or self.connections.project_name - self.vn_name = vn_name or get_random_name(self.project_name) - self.project_id = self.connections.get_project_id() - self.uuid = uuid - self.obj = None - self.ipam_fq_name = ipam_fq_name or NetworkIpam().get_fq_name() - self.policy_objs = policy_objs - self.af = self.get_af_from_subnet(subnets=subnets) or af or self.inputs.get_af() - if self.inputs.get_af() == 'v6' and self.af == 'v4': - raise v4OnlyTestException("Skipping Test. v4 specific testcase") - #Forcing v4 subnet creation incase of v6. 
Reqd for ssh to host - self.af = 'dual' if 'v6' in self.af else self.af - if self.inputs.orchestrator == 'vcenter' and subnets and (len(subnets) != 1): - raise Exception('vcenter: Multiple subnets not supported') - if not subnets and not empty_vn: - subnets = get_random_cidrs(stack=self.af) - if subnets and self.get_af_from_subnet(subnets=subnets) == 'v6': - subnets.extend(get_random_cidrs(stack='v4')) - self.vn_subnets = subnets - self._parse_subnets() - if self.inputs.verify_thru_gui(): - self.browser = self.connections.browser - self.browser_openstack = self.connections.browser_openstack - self.webui = WebuiTest(self.connections, self.inputs) - self.router_asn = router_asn - self.rt_number = rt_number - self.option = option - self.forwarding_mode = forwarding_mode - self.vxlan_id = vxlan_id - self.shared = shared - self.router_external = router_external - self.clean_up = clean_up - self.lock = threading.Lock() - self.already_present = False - self.verify_is_run = False - self.verify_result = True - self.verify_not_in_result = True - self.api_verification_flag = True - self.cn_verification_flag = True - self.policy_verification_flag = None - self.pol_verification_flag = None - self.op_verification_flag = True - self.not_in_agent_verification_flag = True - self.not_in_api_verification_flag = True - self.not_in_cn_verification_flag = True - self.project_obj = project_obj - self.vn_fq_name = None - self.enable_dhcp = enable_dhcp - self.sriov_enable = sriov_enable - self.sriov_vlan = sriov_vlan - self.sriov_provider_network = sriov_provider_network - self.dhcp_option_list = dhcp_option_list - self.disable_gateway = disable_gateway - self.vn_port_list=[] - self.vn_with_route_target = [] - # end __init__ - - def read(self): - if self.uuid: - self.obj = self.orch.get_vn_obj_from_id(self.uuid) - self.api_vn_obj = self.vnc_lib_h.virtual_network_read(id=self.uuid) - self.vn_name = self.api_vn_obj.name - self.vn_fq_name = self.api_vn_obj.get_fq_name_str() - self.fq_name = 
self.api_vn_obj.get_fq_name() - ipam = get_network_ipam_refs() - if ipam: - subnets = [x['subnet']['ip_prefix']+'/'+\ - x['subnet']['ip_prefix_len'] - for x in ipam[0]['attr']['ipam_subnets']] - self.vn_subnets = subnets - self._parse_subnets() - else: - subnets = None - self.vn_subnets = [] - self.logger.info('Fetched VN: %s(%s) with subnets %s' - %(self.vn_fq_name, self.uuid, subnets)) - - def get_uuid(self): - return self.uuid - - @property - def vn_id(self): - return self.get_uuid() - - def get_vrf_name(self): - return self.vn_fq_name + ':' + self.vn_name - - @property - def ri_name(self): - return self.get_vrf_name() - - @property - def vrf_name(self): - return self.get_vrf_name() - - def _parse_subnets(self): - # If the list is just having cidrs - if self.vn_subnets and (type(self.vn_subnets[0]) is str or - type(self.vn_subnets[0]) is unicode): - self.vn_subnets = [{'cidr': x} for x in self.vn_subnets] - # end _parse_subnets - - def get_cidrs(self, af=None): - subnets = [x['cidr'] for x in self.vn_subnets] - if af == 'dual': - return subnets - if self.af == 'dual' and self.inputs.get_af() == 'v6': - af = 'v6' - if not af: - return subnets - return [x for x in subnets if af == get_af_type(x)] - - def get_name(self): - return self.vn_name - - def get_vn_fq_name(self): - return self.vn_fq_name - - def get_af_from_subnet(self, subnets): - af = None - if subnets: - if type(subnets[0]) is dict: - subnets = [subnet['cidr'] for subnet in subnets] - af = get_af_from_cidrs(cidrs= subnets) - return af - - @retry(delay=10, tries=10) - def _create_vn_orch(self): - try: - self.obj = self.orch.get_vn_obj_if_present(self.vn_name, - project_id=self.project_id) - if not self.obj: - self.obj = self.orch.create_vn( - self.vn_name, - self.vn_subnets, - ipam_fq_name=self.ipam_fq_name, - shared=self.shared, - router_external=self.router_external, - enable_dhcp=self.enable_dhcp, - sriov_enable=self.sriov_enable, - sriov_vlan=self.sriov_vlan, - 
sriov_provider_network=self.sriov_provider_network, - disable_gateway=self.disable_gateway) - self.logger.debug('Created VN %s' %(self.vn_name)) - else: - self.already_present = True - self.logger.debug('VN %s already present, not creating it' % - (self.vn_name)) - self.uuid = self.orch.get_vn_id(self.obj) - self.vn_fq_name = ':'.join( - self.vnc_lib_h.id_to_fq_name(self.uuid)) - self.api_vn_obj = self.vnc_lib_h.virtual_network_read(id=self.uuid) - return True - except NetworkClientException as e: - with self.lock: - self.logger.exception( - "Got exception as %s while creating %s" % (e, self.vn_name)) - # We shall retry if it is Service Unavailable - if '503' in str(e) or '504' in str(e): - return False - raise NetworkClientException(message=str(e)) - - def get_vn_list_in_project(self, project_uuid): - - return self.vnc_lib_h.virtual_networks_list(parent_id=project_uuid) - - def verify_if_vn_already_present(self, vn_obj, project): - - to_be_created_vn_fq_name = vn_obj.get_fq_name() - vn_list = self.get_vn_list_in_project(project.uuid) - if not vn_list: - return False - else: - for elem in vn_list['virtual-networks']: - if(elem['fq_name'] == to_be_created_vn_fq_name): - return True - else: - continue - return False - - def get_vn_uid(self, vn_obj, project_uuid): - - uid = None - try: - to_be_created_vn_fq_name = vn_obj.get_fq_name() - vn_list = self.get_vn_list_in_project(project_uuid) - for elem in vn_list['virtual-networks']: - if(elem['fq_name'] == to_be_created_vn_fq_name): - uid = elem['uuid'] - except Exception as e: - self.logger.exception("API exception %s" % (e)) - finally: - return uid - - def _create_vn_api(self, vn_name, project): - if self.inputs.orchestrator == 'vcenter': - raise Exception('vcenter: no support for VN creation through VNC-api') - try: - self.api_vn_obj = VirtualNetwork( - name=vn_name, parent_obj=project.project_obj) - if not self.verify_if_vn_already_present(self.api_vn_obj, project.project_obj): - self.uuid = 
self.vnc_lib_h.virtual_network_create( - self.api_vn_obj) - with self.lock: - self.logger.info("Created VN %s using api-server" % ( - self.vn_name)) - else: - with self.lock: - self.logger.info("VN %s already present" % (self.vn_name)) - self.uuid = self.get_vn_uid( - self.api_vn_obj, project.project_obj.uuid) - ipam = self.vnc_lib_h.network_ipam_read( - fq_name=self.ipam_fq_name) - ipam_sn_lst = [] - # The dhcp_option_list and enable_dhcp flags will be modified for all subnets in an ipam - for net in self.vn_subnets: - network, prefix = net['cidr'].split('/') - ipam_sn = IpamSubnetType( - subnet=SubnetType(network, int(prefix))) - if self.dhcp_option_list: - ipam_sn.set_dhcp_option_list(self.dhcp_option_list) - if not self.enable_dhcp: - ipam_sn.set_enable_dhcp(self.enable_dhcp) - ipam_sn_lst.append(ipam_sn) - self.api_vn_obj.add_network_ipam(ipam, VnSubnetsType(ipam_sn_lst)) - self.vnc_lib_h.virtual_network_update(self.api_vn_obj) - self.vn_fq_name = self.api_vn_obj.get_fq_name_str() - self.obj = self.quantum_h.get_vn_obj_if_present(self.vn_name, - self.project_id) - if self.obj is None: - raise ValueError('could not find %s in neutron/quantum' % (self.vn_name)) - - except Exception as e: - with self.lock: - self.logger.exception( - 'Api exception while creating network %s' % (self.vn_name)) - - def get_api_obj(self): - return self.api_vn_obj - - def getObj(self): - return self.api_vn_obj - - def setUp(self): - super(VNFixture, self).setUp() - self.create() - - def create(self): - if self.uuid: - return self.read() - with self.lock: - self.logger.info("Creating vn %s.." 
% (self.vn_name)) - if not self.project_obj: - self.project_obj = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib_h, - project_name=self.project_name, - connections=self.connections)) - self.project_id = self.project_obj.uuid - if self.inputs.is_gui_based_config(): - self.webui.create_vn(self) - elif (self.option == 'api'): - self._create_vn_api(self.vn_name, self.project_obj) - else: - self._create_vn_orch() - - # Bind policies if any - if self.policy_objs: - if isinstance(self.policy_objs[0], NetworkPolicy): - policy_fq_names = [obj.fq_name for obj in self.policy_objs] - else: - policy_fq_names = [ - self.quantum_h.get_policy_fq_name(x) for x in self.policy_objs] - self.bind_policies(policy_fq_names, self.uuid) - else: - # Update self.policy_objs to pick acls which are already - # bound to the VN - self.update_vn_object() - # end if - - # Configure route target - if self.rt_number is not None: - self.add_route_target() - self.vn_with_route_target.append(self.uuid) - - # Configure forwarding mode - if self.forwarding_mode is not None: - self.add_forwarding_mode( - self.project_obj.project_fq_name, self.vn_name, self.forwarding_mode) - - # Configure vxlan_id - if self.vxlan_id is not None: - self.set_vxlan_id() - - # Populate the VN Subnet details - if self.inputs.orchestrator == 'openstack': - self.vn_subnet_objs = self.quantum_h.get_subnets_of_vn(self.uuid) - # end setUp - - def create_subnet(self, vn_subnet, ipam_fq_name): - if self.inputs.orchestrator == 'vcenter': - raise Exception('vcenter: subnets not supported') - self.quantum_h.create_subnet(vn_subnet, self.uuid, ipam_fq_name) - self.vn_subnets.append([{'cidr': vn_subnet}]) - - def create_subnet_af(self, af, ipam_fq_name): - if 'v4' in af or 'dual' in af: - self.create_subnet(vn_subnet= get_random_cidr(af='v4'), - ipam_fq_name= ipam_fq_name) - if 'v6' in af or 'dual' in af: - self.create_subnet(vn_subnet= get_random_cidr(af='v6'), - ipam_fq_name= ipam_fq_name) - - def create_port(self, net_id, 
subnet_id=None, ip_address=None, - mac_address=None, no_security_group=False, - security_groups=[], extra_dhcp_opts=None, sriov=False): - if self.inputs.orchestrator == 'vcenter': - raise Exception('vcenter: ports not supported') - fixed_ips = [{'subnet_id': subnet_id, 'ip_address': ip_address}] - port_rsp = self.quantum_h.create_port( - net_id, - fixed_ips, - mac_address, - no_security_group, - security_groups, - extra_dhcp_opts, - sriov) - self.vn_port_list.append(port_rsp['id']) - return port_rsp - - def delete_port(self, port_id, quiet=False): - if self.inputs.orchestrator == 'vcenter': - raise Exception('vcenter: ports not supported') - is_port_present=self.quantum_h.get_port(port_id) - if is_port_present is not None: - self.quantum_h.delete_port(port_id) - - - def verify_on_setup_without_collector(self): - # once api server gets restarted policy list for vn in not reflected in - # vn uve so removing that check here - result = True - if not self.verify_vn_in_api_server(): - result = result and False - self.logger.error( - "One or more verifications in API Server for VN %s failed" % (self.vn_name)) - if not self.verify_vn_in_control_nodes(): - result = result and False - self.logger.error( - "One or more verifications in Control-nodes for VN %s failed" % (self.vn_name)) - if not self.verify_vn_policy_in_api_server(): - result = result and False - self.logger.error(ret['msg']) - if not self.verify_vn_in_opserver(): - result = result and False - self.logger.error( - "One or more verifications in OpServer for VN %s failed" % (self.vn_name)) - - self.verify_is_run = True - self.verify_result = result - return result - - def verify_on_setup(self): - result = True - if not self.verify_vn_in_api_server(): - result = result and False - self.logger.error( - "One or more verifications in API Server for VN %s failed" % (self.vn_name)) - return result - if not self.verify_vn_in_control_nodes(): - result = result and False - self.logger.error( - "One or more verifications 
in Control-nodes for VN %s failed" % (self.vn_name)) - return result - if not self.verify_vn_policy_in_api_server(): - result = result and False - self.logger.error(ret['msg']) - if not self.verify_vn_in_opserver(): - result = result and False - self.logger.error( - "One or more verifications in OpServer for VN %s failed" % (self.vn_name)) - return result - if self.inputs.verify_thru_gui(): - self.webui.verify_vn(self) - if self.policy_objs: - self.verify_vn_policy_in_vn_uve() - if not self.policy_verification_flag['result']: - result = result and False - self.logger.error( - "One or more verifications of policy for VN %s failed" % (self.vn_name)) - if self.policy_objs: - if not self.pol_verification_flag: - result = result and False - self.logger.warn("Attached policy not shown in vn uve %s" % - (self.vn_name)) - - self.verify_is_run = True - self.verify_result = result - return result - # end verify - - @retry(delay=5, tries=10) - def verify_vn_in_api_server(self): - """ Checks for VN in API Server. 
- - False If VN Name is not found - False If all Subnet prefixes are not found - """ - self.api_verification_flag = True - self.api_s_vn_obj = self.api_s_inspect.get_cs_vn( - project=self.project_name, vn=self.vn_name, refresh=True) - if not self.api_s_vn_obj: - self.logger.warn("VN %s is not found in API-Server" % - (self.vn_name)) - self.api_verification_flag = self.api_verification_flag and False - return False - if self.api_s_vn_obj['virtual-network']['uuid'] != self.uuid: - self.logger.warn( - "VN Object ID %s in API-Server is not what was created" % (self.uuid)) - self.api_verification_flag = self.api_verification_flag and False - return False - - subnets = self.api_s_vn_obj[ - 'virtual-network']['network_ipam_refs'][0]['attr']['ipam_subnets'] - for vn_subnet in self.vn_subnets: - subnet_found = False - vn_subnet_cidr = str(IPNetwork(vn_subnet['cidr']).ip) - for subnet in subnets: - if subnet['subnet']['ip_prefix'] == vn_subnet_cidr: - subnet_found = True - if not subnet_found: - self.logger.warn( - "VN Subnet IP %s not found in API-Server for VN %s" % - (vn_subnet_cidr, self.vn_name)) - self.api_verification_flag = self.api_verification_flag and False - return False - # end for - self.api_s_route_targets = self.api_s_inspect.get_cs_route_targets( - vn_id=self.uuid) - if not self.api_s_route_targets: - errmsg = "Route targets not found in API-Server for VN %s" % self.vn_name - self.logger.error(errmsg) - self.api_verification_flag = self.api_verification_flag and False - return False - self.rt_names = self.api_s_inspect.get_cs_rt_names( - self.api_s_route_targets) - - if not self.rt_names: - self.logger.warn( - 'RT names not yet present for VN %s', self.vn_name) - return False - - if self.rt_number: - if not any(item.endswith(self.rt_number) for item in self.rt_names): - self.logger.warn('RT %s is not found in API Server RT list %s ' %( - self.rt_number, self.rt_names)) - self.api_verification_flag = self.api_verification_flag and False - return False - 
self.api_verification_flag = self.api_verification_flag and True - self.logger.info("Verifications in API Server for VN %s passed" % - (self.vn_name)) - self.api_s_routing_instance = self.api_s_inspect.get_cs_routing_instances( - vn_id=self.uuid) - return True - # end verify_vn_in_api_server - - @retry(delay=5, tries=10) - def verify_vn_policy_in_vn_uve(self): - ''' verify VN's policy name in vn uve''' - result = True - # Expectation for this verification is not valid anymore with - # multi-cfgm, skipping this verification - self.pol_verification_flag = result - return result - try: - for ip in self.inputs.collector_ips: - self.policy_in_vn_uve = self.analytics_obj.get_vn_uve_attched_policy( - ip, vn_fq_name=self.vn_fq_name) - self.logger.info("Attached policy in vn %s uve %s" % - (self.vn_name, self.policy_in_vn_uve)) - policy_list = [] - for elem in self.policy_objs: - policy = elem['policy']['fq_name'] - policy_name = str(policy[0]) + ':' + \ - (str(policy[1])) + ':' + (str(policy[2])) - policy_list.append(policy_name) - for pol in policy_list: - if pol in self.policy_in_vn_uve: - result = result and True - else: - result = result and False - except Exception as e: - self.logger.exception('Got exception as %s' % (e)) - result = result and False - finally: - self.pol_verification_flag = result - return result - - def verify_vn_policy_not_in_vn_uve(self): - ''' verify VN's policy name not in vn uve''' - result = True - # Expectation for this verification is not valid anymore with - # multi-cfgm, skipping this verification - self.pol_verification_flag = result - return result - for ip in self.inputs.collector_ips: - self.policy_in_vn_uve = self.analytics_obj.get_vn_uve_attched_policy( - ip, vn_fq_name=self.vn_fq_name) - if self.policy_in_vn_uve: - self.logger.warn("Attached policy not deleted in vn %s uve" % - (self.vn_name)) - result = result and False - else: - result = result and True - return result - - def get_policy_attached_to_vn(self): - vn_policys = [] - 
for p in self.policy_objs: - vn_policys.append(p['policy']['name']) - return vn_policys - - def get_allowed_peer_vns_by_policy(self): - ''' This is allowed list and not actual peer list, which is based on action by both peers''' - pol_name_list = [] - allowed_peer_vns = [] - vn = self.vnc_lib_h.virtual_network_read(id=self.uuid) - if vn: - pol_list_ref = vn.get_network_policy_refs() - if pol_list_ref: - for pol in pol_list_ref: - pol_name_list.append(str(pol['to'][2])) - if pol_name_list: - for pol in pol_name_list: - pol_object = self.api_s_inspect.get_cs_policy( - domain=self.domain_name, project=self.project_name, policy=pol, refresh=True) - pol_rules = pol_object[ - 'network-policy']['network_policy_entries']['policy_rule'] - self.logger.debug( - "vn: %s, inspecting following rules for route verification: %s" % - (self.vn_fq_name, pol_rules)) - for rule in pol_rules: - # Only for those rules, where local vn is listed, pick the peer... - # Also, local vn can appear as source or dest vn - rule_vns = [] - src_vn = rule['src_addresses'][0][ - 'virtual_network'] - rule_vns.append(src_vn) - dst_vn = rule['dst_addresses'][0][ - 'virtual_network'] - rule_vns.append(dst_vn) - if self.vn_fq_name in rule_vns: - rule_vns.remove(self.vn_fq_name) - # Consider peer VN route only if the action is set to - # pass - if rule['action_list']['simple_action'] == 'pass': - self.logger.debug( - "Local VN: %s, Peer VN %s is a valid peer" % (self.vn_fq_name, rule_vns[0])) - allowed_peer_vns.append(rule_vns[0]) - else: - self.logger.debug("Local VN: %s, skip route to VN %s as the action is not set to allow" % ( - self.vn_fq_name, rule_vns[0])) - elif 'any' in rule_vns: - if rule['action_list']['simple_action'] == 'pass': - self.logger.debug( - "any VN is a valid pair for this vn %s" % (self.vn_fq_name)) - allowed_peer_vns.append('any') - else: - self.logger.info( - "Local VN: %s, skip the VNs in this rule as the local VN is not listed & the rule is a no-op: %s" % - (self.vn_fq_name, 
rule_vns)) - return allowed_peer_vns - - def verify_vn_policy_in_api_server(self): - ''' verify VN's policy data in api-server with data in quantum database''' - if self.inputs.orchestrator == 'vcenter': - self.policy_verification_flag = {'result': True, 'msg': None} - return self.policy_verification_flag - - me = inspect.getframeinfo(inspect.currentframe())[2] - result = True - err_msg = [] - out = None - self.logger.info( - "====Verifying policy data for %s in API_Server ======" % - (self.vn_name)) - self.api_s_vn_obj = self.api_s_inspect.get_cs_vn( - project=self.project_name, vn=self.vn_name, refresh=True) - try: - vn_pol = self.api_s_vn_obj[ - 'virtual-network']['network_policy_refs'] - except: - self.logger.debug("=>VN %s has no policy to be verified" % - (self.vn_name)) - self.policy_verification_flag = {'result': result, 'msg': err_msg} - return {'result': result, 'msg': err_msg} - - # vn_pol is a list of dicts with policy info - # check no. of policies in api-s and quantum db for vn - if len(vn_pol) != len(self.policy_objs): - msg = "VN: " + self.vn_name + \ - ", No. 
of policies not same between api-s and quantum db" - self.logger.error(msg) - err_msg.append(msg) - self.logger.debug("Data in API-S: \n") - for policy in vn_pol: - self.logger.debug('%s, %s' % (policy['to'], policy['uuid'])) - self.logger.debug("Data in Quantum: \n") - for policy in self.policy_objs: - self.logger.debug('%s, %s' % - (policy['policy']['id'], policy['policy']['fq_name'])) - - # Compare attached policy_fq_names & uuid's - for policy in vn_pol: - fqn = policy['to'] - id = policy['uuid'] - self.logger.info( - "==>Verifying data for policy with id: %s, fqn: %s" % (id, fqn)) - # check if policy with this id exists in quantum - d = policy_test_utils.get_dict_with_matching_key_val( - 'id', id, self.policy_objs, 'policy') - if d['state'] == None: - err_msg.append(d['ret']) - else: - out = policy_test_utils.compare_args( - 'policy_fqn', fqn, d['ret']['policy']['fq_name']) - if out: - err_msg.append(out) - - if err_msg: - result = False - err_msg.insert(0, me + ":" + self.vn_name) - self.logger.info("verification: %s, status: %s" % (me, result)) - self.policy_verification_flag = {'result': result, 'msg': err_msg} - return {'result': result, 'msg': err_msg} - - # end verify_vn_policy_in_api_server - - @retry(delay=5, tries=3) - def verify_vn_not_in_api_server(self): - '''Verify that VN is removed in API Server. - - ''' - if self.api_s_inspect.get_cs_vn(project=self.project_name, vn=self.vn_name, refresh=True): - self.logger.warn("VN %s is still found in API-Server" % - (self.vn_name)) - self.not_in_api_verification_flag = False - return False - self.logger.info("VN %s is not found in API Server" % (self.vn_name)) - self.not_in_api_verification_flag = True - return True - # end verify_vn_not_in_api_server - - @retry(delay=5, tries=25) - def verify_vn_in_control_nodes(self): - """ Checks for VN details in Control-nodes. 
- - False if RT does not match the RT from API-Server for each of control-nodes - """ - self.api_s_route_targets = self.api_s_inspect.get_cs_route_targets( - vn_id=self.uuid) - - self.cn_verification_flag = True - for cn in self.inputs.bgp_ips: - cn_config_vn_obj = self.cn_inspect[cn].get_cn_config_vn( - vn_name=self.vn_name, project=self.project_name) - if not cn_config_vn_obj: - self.logger.warn('Control-node %s does not have VN %s info ' % - (cn, self.vn_name)) - self.cn_verification_flag = self.cn_verification_flag and False - return False - self.logger.debug("Control-node %s : VN object is : %s" % - (cn, cn_config_vn_obj)) - if self.vn_fq_name not in cn_config_vn_obj['node_name']: - self.logger.warn( - 'IFMAP View of Control-node is not having the VN detail of %s' % (self.vn_fq_name)) - self.cn_verification_flag = self.cn_verification_flag and False - return False - # TODO UUID verification to be done once the API is available - cn_object = self.cn_inspect[ - cn].get_cn_routing_instance(ri_name=self.ri_name) - if not cn_object: - self.logger.warn( - 'No Routing Instance found in CN %s with name %s' % - (cn, self.ri_name)) - self.cn_verification_flag = self.cn_verification_flag and False - return False - try: - rt_names = self.api_s_inspect.get_cs_rt_names( - self.api_s_route_targets) - if cn_object['export_target'][0] not in rt_names: - self.logger.warn( - "Route target %s for VN %s is not found in Control-node %s" % - (rt_names, self.vn_name, cn)) - self.cn_verification_flag = self.cn_verification_flag and False - return False - except Exception as e: - self.logger.exception( - "Got exception from control node verification as %s" % (e)) - self.cn_verification_flag = self.cn_verification_flag and False - return False - # end for - self.logger.info( - 'Control-node Config, RI and RT verification for VN %s passed' % - (self.vn_name)) - self.cn_verification_flag = self.cn_verification_flag and True - return True - # end verify_vn_in_control_node - - def 
verify_vn_policy_not_in_api_server(self, policy_name): - ''' verify VN's policy data in removed api-server''' - self.logger.info( - "====Verifying policy %s data removed from %s in API_Server ======" % - (policy_name, self.vn_name)) - found = False - - # Get VN object from API Server - vn = self.vnc_lib_h.virtual_network_read(id=self.uuid) - # Get the policy list from VN - pol_ref = vn.get_network_policy_refs() - - if not pol_ref: - self.logger.info("=> VN %s has no reference policys" % - (self.vn_name)) - return found - # If we have more policies with VN and iterate it. - for pol in pol_ref: - policy = self.vnc_lib_h.network_policy_read(id=pol['uuid']) - if (str(policy.name) == policy_name): - found = True - self.logger.info("Policy info is found in API-Server") - break - if not found: - self.logger.info("Policy info is not found in API-Server") - return found - # end verify_vn_policy_not_in_api_server - - @retry(delay=5, tries=20) - def verify_vn_not_in_control_nodes(self): - '''Verify that VN details are not in any Control-node - - ''' - result = True - self.not_in_cn_verification_flag = True - for cn in self.inputs.bgp_ips: - cn_object = self.cn_inspect[ - cn].get_cn_routing_instance(ri_name=self.ri_name) - if cn_object: - self.logger.warn( - "Routing instance for VN %s is still found in Control-node %s" % (self.vn_name, cn)) - result = result and False - self.not_in_cn_verification_flag = result - # end for - if self.cn_inspect[cn].get_cn_config_vn(vn_name=self.vn_name, project=self.project_name): - self.logger.warn("Control-node config DB still has VN %s" % - (self.vn_name)) - result = result and False - self.not_in_cn_verification_flag = result - - if result: - self.logger.info( - "Routing instances and Config db in Control-nodes does not have VN %s info" % (self.vn_name)) - return result - # end verify_vn_not_in_control_nodes - - @retry(delay=5, tries=30) - def verify_vn_not_in_agent(self): - ''' Verify that VN is removed in all agent nodes. 
- ''' - for compute_ip in self.inputs.compute_ips: - inspect_h = self.agent_inspect[compute_ip] - vn = inspect_h.get_vna_vn( - project=self.project_name, vn_name=self.vn_name) - print vn - if vn: - self.logger.warn('VN %s is still found in %s ' % - (self.vn_name, compute_ip)) - return False - self.not_in_agent_verification_flag = False - vrf_objs = inspect_h.get_vna_vrf_objs( - project=self.project_name, vn_name=self.vn_name) - if len(vrf_objs['vrf_list']) != 0: - self.logger.warn( - 'VRF %s for VN %s is still found in agent %s' % - (str(self.ri_name), self.vn_name, compute_ip)) - self.not_in_agent_verification_flag = False - return False - self.logger.info('VN %s is not present in Agent %s ' % - (self.vn_name, compute_ip)) - # end for - self.not_in_agent_verification_flag = True - return True - # end verify_vn_not_in_agent - - def verify_vn_in_opserver(self): - '''Verify vn in the opserver''' - - self.logger.info("Verifying the vn in opserver") - res = self.analytics_obj.verify_vn_link(self.vn_fq_name) - self.op_verification_flag = res - return res - - def del_host_route(self, prefix): - prefix = [prefix] if type(prefix) is str else prefix - self.del_host_routes(prefixes=[prefix]) - # end del_host_route - - def del_host_routes(self, prefixes): - vnc_lib = self.vnc_lib_h - self.logger.info('Deleting %s from host_routes via %s in %s' % - (prefixes, self.ipam_fq_name[-1], self.vn_name)) - vn_obj = vnc_lib.virtual_network_read( - fq_name=self.vn_fq_name.split(':')) - for subnet in vn_obj.get_network_ipam_refs()[0]['attr'].get_ipam_subnets(): - for prefix in prefixes: - if IPNetwork(subnet.subnet.ip_prefix).version == IPNetwork(prefix).version: - subnet.get_host_routes().delete_route(RouteTableType(RouteType(prefix))) - vn_obj._pending_field_updates.add('network_ipam_refs') - vnc_lib.virtual_network_update(vn_obj) - # end delete_host_routes - - def add_host_route(self, prefix): - prefix = [prefix] if type(prefix) is str else prefix - 
self.add_host_routes(prefixes=[prefix]) - # end add_host_route - - def add_host_routes(self, prefixes): - vnc_lib = self.vnc_lib_h - self.logger.info('Adding %s as host_route via %s in %s' % - (prefixes, self.ipam_fq_name[-1], self.vn_name)) - vn_obj = vnc_lib.virtual_network_read( - fq_name=self.vn_fq_name.split(':')) - for subnet in vn_obj.get_network_ipam_refs()[0]['attr'].get_ipam_subnets(): - list_of_prefix = [] - for prefix in prefixes: - if IPNetwork(subnet.subnet.ip_prefix).version == IPNetwork(prefix).version: - list_of_prefix.append(RouteType(prefix=prefix, next_hop=subnet.default_gateway)) - subnet.set_host_routes(RouteTableType(list_of_prefix)) - vn_obj._pending_field_updates.add('network_ipam_refs') - vnc_lib.virtual_network_update(vn_obj) - # end add_host_routes - - def add_route_target(self, routing_instance_name=None, router_asn=None, - route_target_number=None): - routing_instance_name = routing_instance_name or self.ri_name - router_asn = router_asn or self.router_asn - route_target_number = route_target_number or self.rt_number - vnc_lib = self.vnc_lib_h - - rt_inst_fq_name = routing_instance_name.split(':') - rtgt_val = "target:%s:%s" % (router_asn, route_target_number) - net_obj = vnc_lib.virtual_network_read(fq_name=rt_inst_fq_name[:-1]) - route_targets = net_obj.get_route_target_list() - if route_targets and (rtgt_val not in route_targets.get_route_target()): - route_targets.add_route_target(rtgt_val) - else: - route_targets = RouteTargetList([rtgt_val]) - net_obj.set_route_target_list(route_targets) - - vnc_lib.virtual_network_update(net_obj) - # end add_route_target - - def del_route_target(self, routing_instance_name=None, router_asn=None, - route_target_number=None): - - result = True - routing_instance_name = routing_instance_name or self.ri_name - router_asn = router_asn or self.router_asn - route_target_number = route_target_number or self.rt_number - vnc_lib = self.vnc_lib_h - - rt_inst_fq_name = routing_instance_name.split(':') - 
rtgt_val = "target:%s:%s" % (router_asn, route_target_number) - net_obj = vnc_lib.virtual_network_read(fq_name=rt_inst_fq_name[:-1]) - - if rtgt_val not in net_obj.get_route_target_list().get_route_target(): - self.logger.error("%s not configured for VN %s" % - (rtgt_val, rt_inst_fq_name[:-1])) - result = False -# net_obj.get_route_target_list().get_route_target().remove(rtgt_val) - route_targets = net_obj.get_route_target_list() - route_targets.delete_route_target(rtgt_val) - if route_targets.get_route_target(): - net_obj.set_route_target_list(route_targets) - else: - net_obj.set_route_target_list(None) - vnc_lib.virtual_network_update(net_obj) - return result - # end del_route_target - - def verify_vn_route_target(self, policy_peer_vns): - ''' For expected rt_import data, we need to inspect policy attached to both the VNs under test.. - Both VNs need to have rule in policy with action as pass to other VN.. - This data needs to come from calling test code as policy_peer_vns''' - self.logger.info("Verifying RT for vn %s, RI name is %s" % - (self.vn_fq_name, self.ri_name)) - self.policy_peer_vns = policy_peer_vns - compare = False - for i in range(len(self.inputs.bgp_ips)): - cn = self.inputs.bgp_ips[i] - self.logger.info("Checking VN RT in control node %s" % cn) - cn_ref = self.cn_inspect[cn] - vn_ri = cn_ref.get_cn_routing_instance(ri_name=self.ri_name) - act_rt_import = vn_ri['import_target'] - act_rt_export = vn_ri['export_target'] - self.logger.info("act_rt_import is %s, act_rt_export is %s" % - (act_rt_import, act_rt_export)) - exp_rt = self.get_rt_info() - self.logger.info("exp_rt_import is %s, exp_rt_export is %s" % - (exp_rt['rt_import'], exp_rt['rt_export'])) - compare_rt_export = policy_test_utils.compare_list( - self, exp_rt['rt_export'], act_rt_export) - compare_rt_import = policy_test_utils.compare_list( - self, exp_rt['rt_import'], act_rt_import) - self.logger.info( - "compare_rt_export is %s, compare_rt_import is %s" % (compare_rt_export, 
compare_rt_import)) - if (compare_rt_export and compare_rt_import): - compare = True - else: - self.logger.info( - "verify_vn_route_target failed in control node %s" % cn) - return False - return compare - # end verify_route_target - - def get_matching_vrf(self, vrf_objs, vrf_name): - return [x for x in vrf_objs if x['name'] == vrf_name][0] - # end get_matching_vrf - - def get_rt_info(self): - vn = self.vnc_lib_h.virtual_network_read(fq_name_str=self.vn_fq_name) - pol_name_list = [] - rt_import_list = [] - rt_export_list = [] - - rt_list1 = self.api_s_inspect.get_cs_route_targets(vn_id=vn.uuid) - rt_name1 = self.api_s_inspect.get_cs_rt_names(rt_obj=rt_list1) - rt_export_list = rt_name1 - rt_import_list.append(rt_name1[0]) - - # Get the valid peer VN list for route exchange from calling code as it needs - # to be looked from outside of VN fixture... - dst_vn_name_list = self.policy_peer_vns - print "VN list for RT import is %s" % dst_vn_name_list - - # Get the RT for each VN found in policy list - if dst_vn_name_list: - for vn_name in dst_vn_name_list: - vn_obj = self.vnc_lib_h.virtual_network_read( - fq_name_str=vn_name) - rt_list = self.api_s_inspect.get_cs_route_targets( - vn_id=vn_obj.uuid) - rt_names = self.api_s_inspect.get_cs_rt_names(rt_obj=rt_list) - for rt_name in rt_names: - rt_import_list.append(rt_name) - - return {'rt_export': rt_export_list, 'rt_import': rt_import_list} - # end get_rt_info - - def add_subnet(self, subnet): - if self.inputs.orchestrator == 'vcenter': - raise Exception('vcenter: subnets not supported') - # Get the Quantum details - quantum_obj = self.quantum_h.get_vn_obj_if_present(self.vn_name, - self.project_id) - #cidr = unicode(subnet) - if type(subnet) is str: - cidr = {'cidr': subnet} - - #ipam_fq_name = quantum_obj['network']['contrail:subnet_ipam'][0]['ipam_fq_name'] - ipam_fq_name = None - net_id = quantum_obj['network']['id'] - - # Create subnet - self.quantum_h.create_subnet(cidr, net_id, ipam_fq_name) - # end add_subnet - - 
def set_vxlan_id(self, vxlan_id=None): - if not vxlan_id: - vxlan_id = self.vxlan_id - - self.logger.debug('Updating VxLAN id of VN %s to %s' % ( - self.vn_fq_name, vxlan_id)) - vnc_lib = self.vnc_lib_h - vn_obj = vnc_lib.virtual_network_read(id=self.uuid) - vn_properties_obj = vn_obj.get_virtual_network_properties() \ - or VirtualNetworkType() - vn_properties_obj.set_vxlan_network_identifier(int(vxlan_id)) - vn_obj.set_virtual_network_properties(vn_properties_obj) - vnc_lib.virtual_network_update(vn_obj) - - # end set_vxlan_id - - def get_vxlan_id(self): - vnc_lib_fixture = self.connections.vnc_lib_fixture - vxlan_mode = vnc_lib_fixture.get_vxlan_mode() - vn_obj = self.vnc_lib_h.virtual_network_read(id=self.uuid) - if vxlan_mode == 'automatic': - return vn_obj.get_virtual_network_network_id() - else: - vn_prop_obj = vn_obj.get_virtual_network_properties() - return vn_prop_obj['vxlan_network_identifier'] - return None - # end get_vxlan_id - - def add_forwarding_mode(self, project_fq_name, vn_name, forwarding_mode): - vnc_lib = self.vnc_lib_h - # Figure out VN - vni_list = vnc_lib.virtual_networks_list( - parent_fq_name=project_fq_name)['virtual-networks'] - for vni_record in vni_list: - if (vni_record['fq_name'][0] == project_fq_name[0] and - vni_record['fq_name'][1] == project_fq_name[1] and - vni_record['fq_name'][2] == vn_name): - vni_obj = vnc_lib.virtual_network_read(id=vni_record['uuid']) - # if (vxlan_id is not None): - # vni_obj_properties.set_vxlan_network_identifier(int(vxlan_id)) - if (forwarding_mode is not None): - vni_obj_properties = vni_obj.get_virtual_network_properties( - ) or VirtualNetworkType() - vni_obj_properties.set_forwarding_mode(forwarding_mode) - vni_obj.set_virtual_network_properties(vni_obj_properties) - vnc_lib.virtual_network_update(vni_obj) - - def cleanUp(self): - super(VNFixture, self).cleanUp() - self.delete() - - def delete(self, verify=False): - do_cleanup = True - if self.inputs.fixture_cleanup == 'no': - do_cleanup = False - 
if self.already_present: - do_cleanup = False - if self.inputs.fixture_cleanup == 'force': - do_cleanup = True - if self.clean_up == False: - do_cleanup = False - - if do_cleanup: - # Cleanup the route target if created - if self.uuid in self.vn_with_route_target: - self.logger.info('Deleting RT for VN %s ' % (self.vn_name)) - self.del_route_target() - self.logger.info("Deleting the VN %s " % self.vn_name) - if len(self.vn_port_list)!=0: - for each_port_id in self.vn_port_list: - self.delete_port(port_id=each_port_id) - if self.inputs.is_gui_based_config(): - self.webui.delete_vn(self) - elif (self.option == 'api'): - self.logger.info("Deleting the VN %s using Api server" % - self.vn_name) - self.vnc_lib_h.virtual_network_delete(id=self.uuid) - else: - for i in range(12): - if not self.orch.delete_vn(self.obj): - # This might be due to caching issues. - self.logger.warn("%s. Deleting the VN %s failed" % - (i, self.vn_name)) - self.logger.info("%s. Retry deleting the VN %s " % - (i, self.vn_name)) - sleep(5) - else: - break - if self.verify_is_run or verify: - assert self.verify_vn_not_in_api_server() - assert self.verify_vn_not_in_agent() - assert self.verify_vn_not_in_control_nodes() - else: - self.logger.info('Skipping the deletion of the VN %s ' % - (self.vn_name)) - # end cleanUp - - def get_obj(self): - return self.vn_obj - # end get_obj - - def bind_policies(self, policy_fq_names, vn_id): - if self.inputs.orchestrator == 'vcenter': - self.api_vn_obj = self.vnc_lib_h.virtual_network_read(id=self.uuid) - self.api_vn_obj.set_network_policy_list([],True) - self.vnc_lib_h.virtual_network_update(self.api_vn_obj) - for seq, policy in enumerate(policy_fq_names): - policy_obj = self.vnc_lib_h.network_policy_read(fq_name=policy) - self.api_vn_obj.add_network_policy(policy_obj, - VirtualNetworkPolicyType(sequence=SequenceType(major=seq, minor=0))) - net_rsp = self.vnc_lib_h.virtual_network_update(self.api_vn_obj) - else: - net_rsp = {} - project_name = self.project_name 
- if len(policy_fq_names) != 0: - project_name = policy_fq_names[0][1] - net_req = {'contrail:policys': policy_fq_names} - net_rsp = self.quantum_h.update_network( - vn_id, {'network': net_req}) - self.logger.debug( - 'Response for mapping policy(s) with vn ' + str(net_rsp)) - # Update VN obj - self.update_vn_object() - return net_rsp - # end bind_policy - - def get_current_policies_bound(self): - self.api_vn_obj = self.vnc_lib_h.virtual_network_read(id=self.uuid) - api_policy_refs = self.api_vn_obj.get_network_policy_refs() - if not api_policy_refs: - return [] - api_policy_fq_names = [item['to'] for item in api_policy_refs] - return api_policy_fq_names - # end get_current_policies_bound - - def update_vn_object(self): - if self.inputs.orchestrator == 'openstack': - self.obj = self.quantum_h.get_vn_obj_from_id(self.uuid) - self.policy_objs = [] - if not self.policy_objs: - for policy_fq_name in self.get_current_policies_bound(): - policy_obj = self.orch.get_policy(policy_fq_name) - self.policy_objs.append(policy_obj) - # end update_vn_object - - def unbind_policies(self, vn_id, policy_fq_names=[]): - if self.inputs.orchestrator == 'vcenter': - if policy_fq_names == []: - self.api_vn_obj.set_network_policy_list([],True) - net_rsp = self.vnc_lib_h.virtual_network_update(self.api_vn_obj) - else: - for policy in policy_fq_names: - policy_obj = self.vnc_lib_h.network_policy_read(fq_name=policy) - self.api_vn_obj.del_network_policy(policy_obj) - net_rsp = self.vnc_lib_h.virtual_network_update(self.api_vn_obj) - else: - policys = self.get_current_policies_bound() - policys_to_remain = policys - for policy_name in policy_fq_names: - if not policy_name in policys: - self.logger.error('Policy %s is not bound to VN ID %s ' % - (policy_name, vn_id)) - return None - else: - policys_to_remain.remove(policy_name) - # If no policy is passed, unbind all policys - if len(policy_fq_names) == 0: - policys_to_remain = [] - net_req = {'contrail:policys': policys_to_remain} - net_rsp = 
self.quantum_h.update_network( - vn_id, {'network': net_req}) - - self.policy_objs= [] - self.update_vn_object() - return net_rsp - # end unbind_policy - - def update_subnet(self, subnet_id, subnet_dict): - if self.inputs.orchestrator == 'vcenter': - raise Exception('vcenter: subnets not supported') - self.quantum_h.update_subnet(subnet_id, subnet_dict) - self.vn_subnet_objs = self.quantum_h.get_subnets_of_vn(self.uuid) - - def get_subnets(self): - if self.inputs.orchestrator == 'vcenter': - raise Exception('vcenter: subnets not supported') - return self.quantum_h.get_subnets_of_vn(self.uuid) - - def add_to_router(self, physical_router_id): - pr = self.vnc_lib_h.physical_router_read(id=physical_router_id) - vn_obj = self.vnc_lib_h.virtual_network_read(id = self.uuid) - pr.add_virtual_network(vn_obj) - # end add_to_router - - def delete_from_router(self, physical_router_id): - pr = self.vnc_lib_h.physical_router_read(id=physical_router_id) - vn_obj = self.vnc_lib_h.virtual_network_read(id = self.uuid) - pr.delete_virtual_network(vn_obj) - # end delete_from_router - - def set_unknown_unicast_forwarding(self, enable=True): - vn_obj = self.vnc_lib_h.virtual_network_read(id = self.uuid) - vn_obj.set_flood_unknown_unicast(enable) - self.vnc_lib_h.virtual_network_update(vn_obj) - self.logger.info('Setting flood_unknown_unicast flag of VN %s to %s' - '' % (self.vn_name, enable)) - # end set_unknown_unicast_forwarding - -# end VNFixture - - -class MultipleVNFixture(fixtures.Fixture): - - """ Fixture to create, verify and delete multiple VNs and multiple subnets - each. - - Deletion of the VN upon exit can be disabled by setting - fixtureCleanup=no. If a VN with the vn_name is already present, it is - not deleted upon exit. Use fixtureCleanup=force to force a delete. - """ - - def __init__(self, connections, inputs, vn_count=1, subnet_count=1, - vn_name_net={}, project_name=None, af=None): - """ - vn_count : Number of VN's to be created. 
- subnet_count : Subnet per each VN's - vn_name_net : Dictionary of VN name as key and a network with prefix to - be subnetted(subnet_count)as value or list of subnets to - be created in that VN as value. - - Example Usage: - 1. vn_fixture = MultipleVnFixture(conn, inputs, vn_count=10, - subnet_count=20) - Creates 10 VN's with name vn1, vn2...vn10 with 20 subnets each. - Dynamicaly identifies the subnet's and stores them as class attributes - for future use. - - 2. vn_fixture = MultipleVnFixture(conn, inputs, subnet_count=20, - vn_name_net={'vn1' : '10.1.1.0/24', - 'vn2' : ['30.1.1.0/24', '30.1.2.0/24']}) - Creates VN's vn1 and vn2, with 20 subnets in vn1 and 2 subnets in vn2. - """ - self.inputs = inputs - self.connections = connections - if not project_name: - project_name = self.inputs.project_name - self.stack = af or self.inputs.get_af() - self.project_name = project_name - self.vn_count = vn_count - self.subnet_count = subnet_count - self.vn_name_net = vn_name_net - self.logger = inputs.logger - self._vn_subnets = {} - self._find_subnets() - - def _subnet(self, af='v4', network=None, roll_over=False): - if not network: - while True: - network=get_random_cidr(af=af, mask=SUBNET_MASK[af]['min']) - for rand_net in self.random_networks: - if not cidr_exclude(network, rand_net): - break - else: - break - net, plen = network.split('/') - plen = int(plen) - max_plen = SUBNET_MASK[af]['max'] - reqd_plen = max_plen - (int(self.subnet_count) - 1).bit_length() - if plen > reqd_plen: - if not roll_over: - max_subnets = 2 ** (max_plen - plen) - raise NotPossibleToSubnet("Network prefix %s can be subnetted " - "only to maximum of %s subnets" % (network, max_subnets)) - network = '%s/%s'%(net, reqd_plen) - - subnets = list(IPNetwork(network).subnet(plen)) - return map(lambda subnet: subnet.__str__(), subnets[:]) - - def _find_subnets(self): - if not self.vn_name_net: - self.random_networks = [] - for i in range(self.vn_count): - subnets = [] - if 'v4' in self.stack or 
'dual' in self.stack: - subnets.extend(self._subnet(af='v4')) - if 'v6' in self.stack or 'dual' in self.stack: - subnets.extend(self._subnet(af='v6')) - self._vn_subnets.update({'vn%s' % (i + 1): subnets[:]}) - self.random_networks.extend(subnets) - return - for vn_name, net in self.vn_name_net.items(): - if type(net) is list: - self._vn_subnets.update({vn_name: net}) - else: - self._vn_subnets.update({vn_name: self._subnet(network=net)}) - - def setUp(self): - super(MultipleVNFixture, self).setUp() - self._vn_fixtures = [] - for vn_name, subnets in self._vn_subnets.items(): - vn_fixture = self.useFixture(VNFixture(inputs=self.inputs, - connections=self.connections, - project_name=self.project_name, - vn_name=vn_name, subnets=subnets)) - self._vn_fixtures.append((vn_name, vn_fixture)) - - def verify_on_setup(self): - result = True - for vn_name, vn_fixture in self._vn_fixtures: - result &= vn_fixture.verify_on_setup() - - return result - - def get_all_subnets(self): - return self._vn_subnets - - def get_all_fixture_obj(self): - return map(lambda (name, fixture): (name, fixture.obj), self._vn_fixtures) - diff --git a/fixtures/vnc_api_test.py b/fixtures/vnc_api_test.py deleted file mode 100644 index 870557df2..000000000 --- a/fixtures/vnc_api_test.py +++ /dev/null @@ -1,198 +0,0 @@ -import fixtures -import os -import uuid - -from vnc_api.vnc_api import * -from cfgm_common.exceptions import NoIdError - -from tcutils.util import get_dashed_uuid -from quantum_test import QuantumHelper -from openstack import OpenstackAuth -from openstack import OpenstackAuth, OpenstackOrchestrator -from vcenter import VcenterAuth - -class VncLibFixture(fixtures.Fixture): - ''' Wrapper for VncApi - - :param domain : default is default-domain - :param project_name : default is admin - :param cfgm_ip : default is 127.0.0.1 - :param api_port : default is 8082 - :param connections : ContrailConnections object. 
default is None - :param username : default is admin - :param password : default is contrail123 - :param auth_server_ip : default is 127.0.0.1 - :param logger : logger object - ''' - def __init__(self, *args, **kwargs): - - self.username = os.getenv('OS_USERNAME') or \ - kwargs.get('username', 'admin') - self.password = os.getenv('OS_PASSWORD') or \ - kwargs.get('password', 'contrail123') - self.project_name = kwargs.get('project_name', 'admin') - self.domain = kwargs.get('domain', 'default-domain') - self.api_server_port = kwargs.get('api_server_port', '8082') - self.cfgm_ip = kwargs.get('cfgm_ip', '127.0.0.1') - self.auth_server_ip = kwargs.get('auth_server_ip', '127.0.0.1') - self.logger = kwargs.get('logger', logging.getLogger(__name__)) - self.connections = kwargs.get('connections', None) - self.orchestrator = kwargs.get('orchestrator', 'openstack') - self.vnc_api_h = None - self.auth_client_h = None - self.inputs = kwargs.get('inputs', None) - self.neutron_handle = None - self.auth_url = os.getenv('OS_AUTH_URL') - if self.auth_server_ip: - self.auth_url = 'http://' + self.auth_server_ip + ':5000/v2.0' - - - # end __init__ - - def setUp(self): - super(VncLibFixture, self).setUp() - if self.connections: - self.logger = self.connections.logger - self.project_name = self.connections.project_name - self.inputs = self.connections.inputs - self.neutron_handle = self.connections.quantum_h - self.vnc_api_h = self.connections.vnc_lib - self.username = self.connections.username - self.password = self.connections.password - self.cfgm_ip = self.inputs.cfgm_ip - self.auth_server_ip = self.inputs.auth_ip - self.project_id = self.connections.project_id - self.auth_url = 'http://' + self.inputs.auth_ip + ':5000/v2.0' - else: - self.vnc_api_h = VncApi( - username=self.username, - password=self.password, - tenant_name=self.project_name, - api_server_host=self.cfgm_ip, - api_server_port=self.api_server_port, - auth_host=self.auth_server_ip) - if self.orchestrator == 'openstack': 
- self.auth_client = OpenstackAuth( - self.username, - self.password, - self.project_name, - auth_url=self.auth_url, - logger=self.logger) - self.project_id = self.auth_client.get_project_id() - elif self.orchestrator == 'vcenter': - self.auth_client = VcenterAuth(self.username, - self.password, - self.project_name, - self.inputs - ) - self.project_id = self.auth_client.get_project_id() - - # end setUp - - def cleanUp(self): - super(VncLibFixture, self).cleanUp() - - def get_handle(self): - return self.vnc_api_h - # end get_handle - - def get_neutron_handle(self): - if self.neutron_handle: - return self.neutron_handle - else: - self.orch = OpenstackOrchestrator(username=self.username, - password=self.password, - project_id=self.project_id, - project_name=self.project_name, - auth_server_ip=self.auth_server_ip, - vnclib=self.vnc_api_h, - logger=self.logger, inputs=None) - self.neutron_handle = self.orch.get_network_handler() - return self.neutron_handle - # end get_neutron_handle - - def get_forwarding_mode(self, vn_fq_name): - vnc_lib = self.vnc_api_h - # Figure out VN - vni_list = vnc_lib.virtual_networks_list( - parent_id=self.project_id)['virtual-networks'] - for vni_record in vni_list: - if (vni_record['fq_name'][0] == vn_fq_name.split(":")[0] and - vni_record['fq_name'][1] == vn_fq_name.split(":")[1] and - vni_record['fq_name'][2] == vn_fq_name.split(":")[2]): - vni_obj = vnc_lib.virtual_network_read(id=vni_record['uuid']) - vni_obj_properties = vni_obj.get_virtual_network_properties() - if vni_obj_properties: - fw_mode = vni_obj_properties.get_forwarding_mode() - else: - fw_mode = None - return fw_mode - # end get_forwarding_mode - - def get_vn_subnet_dhcp_flag(self, vn_fq_name): - vnc_lib = self.vnc_api_h - # Figure out VN - vni_list = vnc_lib.virtual_networks_list( - parent_id=self.project_id)['virtual-networks'] - for vni_record in vni_list: - if (vni_record['fq_name'][0] == vn_fq_name.split(":")[0] and - vni_record['fq_name'][1] == 
vn_fq_name.split(":")[1] and - vni_record['fq_name'][2] == vn_fq_name.split(":")[2]): - vni_obj = vnc_lib.virtual_network_read(id=vni_record['uuid']) - subnets = vni_obj.network_ipam_refs[0]['attr'] - ipam = subnets.get_ipam_subnets() - enable_dhcp = ipam[0].get_enable_dhcp() - return enable_dhcp - - # get_vn_subnet_dhcp_flag - - def set_rpf_mode(self, vn_fq_name, mode): - vnc_lib = self.vnc_api_h - # Figure out VN - vni_list = self.vnc_api_h.virtual_networks_list( - parent_id=self.project_id)['virtual-networks'] - for vni_record in vni_list: - if (vni_record['fq_name'][0] == vn_fq_name.split(":")[0] and - vni_record['fq_name'][1] == vn_fq_name.split(":")[1] and - vni_record['fq_name'][2] == vn_fq_name.split(":")[2]): - vni_obj = vnc_lib.virtual_network_read(id=vni_record['uuid']) - vni_obj_properties = vni_obj.get_virtual_network_properties() or VirtualNetworkType() - vni_obj_properties.set_rpf(mode) - vni_obj.set_virtual_network_properties(vni_obj_properties) - vnc_lib.virtual_network_update(vni_obj) - - # end set_rpf_mode - - def id_to_fq_name(self, id): - return self.vnc_api_h.id_to_fq_name(id) - - def set_vxlan_mode(self, vxlan_mode='automatic'): - ''' one of automatic or configured - ''' - fq_name = [ 'default-global-system-config', - 'default-global-vrouter-config'] - vrouter_config = self.vnc_api_h.global_vrouter_config_read(fq_name=fq_name) - vrouter_config.set_vxlan_network_identifier_mode(vxlan_mode) - self.vnc_api_h.global_vrouter_config_update(vrouter_config) - - def get_vxlan_mode(self): - fq_name = [ 'default-global-system-config', - 'default-global-vrouter-config'] - vrouter_config = self.vnc_api_h.global_vrouter_config_read(fq_name=fq_name) - return vrouter_config.get_vxlan_network_identifier_mode() - # end - - def get_global_asn(self, gsc_id=None): - gsc_id = gsc_id or self.vnc_api_h.get_default_global_system_config_id() - gsc_obj = self.vnc_api_h.global_system_config_read(id=gsc_id) - return gsc_obj.get_autonomous_system() - # end get_global_asn 
- - def set_global_asn(self, asn, gsc_id=None): - gsc_id = gsc_id or self.vnc_api_h.get_default_global_system_config_id() - gsc_obj = self.vnc_api_h.global_system_config_read(id=gsc_id) - gsc_obj.set_autonomous_system(int(asn)) - self.vnc_api_h.global_system_config_update(gsc_obj) - # end set_global_asn - -# end VncLibFixture diff --git a/fixtures/vpc_fip_fixture.py b/fixtures/vpc_fip_fixture.py deleted file mode 100644 index ef85da754..000000000 --- a/fixtures/vpc_fip_fixture.py +++ /dev/null @@ -1,162 +0,0 @@ -import time -import re -import fixtures -from fabric.api import local -from fabric.context_managers import shell_env, settings, hide -from fabric.contrib.files import exists -from fabric.operations import get, put - -from common.connections import ContrailConnections -from vpc_fixture_new import VPCFixture -from ec2_base import EC2Base -from floating_ip import FloatingIPFixture -from tcutils.util import * - - -class VPCFIPFixture(fixtures.Fixture): - - '''Fixture to create, verify and delete FIP - Flow: Euca2ools -> Boto -> Nova - ''' - - def __init__(self, public_vn_obj, ec2_base=None, connections=None): - self.connections = connections - self.inputs = connections.inputs - self.logger = self.inputs.logger - - self.ec2_base = ec2_base - self.already_present = False - self.fip_vn_fixture = public_vn_obj.public_vn_fixture - self.pool_name = public_vn_obj.fip_fixture.pool_name - self.public_vn_obj = public_vn_obj - self.vn_obj = self.fip_vn_fixture.obj - # end __init__ - - def setUp(self): - super(VPCFIPFixture, self).setUp() - self.c_fip_fixture = self.public_vn_obj.fip_fixture - # end setUp - - def allocate_floating_ip(self): - out = self.ec2_base._shell_with_ec2_env( - 'euca-allocate-address -d vpc', True).split('\t') - if out: - floating_ip = out[1] - fip_allocation_id = out[2] - self.logger.info('Allocated a Floating IP %s from Floating Ip pool fpool. 
ID : %s' - % (floating_ip, fip_allocation_id)) - return (floating_ip, fip_allocation_id) - else: - return (None, None) - # end allocate_floating_ip - - def associate_floating_ip(self, fip_allocation_id, instance_id): - out = self.ec2_base._shell_with_ec2_env( - 'euca-associate-address -a %s %s' % (fip_allocation_id, - instance_id), True).split('\t') - if out: - fip_allocation_id = out[1] - self.logger.info( - 'Associated Floating IP (Assoc ID : %s) to Instance %s' % (fip_allocation_id, - instance_id)) - return True - else: - return False - # end associate_floating_ip - - def create_and_assoc_fip(self, instance_id): - (fip, fip_alloc_id) = self.allocate_floating_ip() - if not self.associate_floating_ip(fip_alloc_id, instance_id): - self.logger.error('Error while applying FIP to instance %s' % - (instance_id)) - return (None, None) - return (fip, fip_alloc_id) - # end create_and_assoc_fip - - def disassociate_floating_ip(self, fip_allocation_id, fip): - out = self.ec2_base._shell_with_ec2_env('euca-disassociate-address %s' % ( - fip_allocation_id), True) - if out == 'True': - fip_allocation_id.replace('eipassoc', 'eipalloc') - self.logger.info('Floating IP %s disassociated ' % (fip)) - return True - else: - return False - # end disassociate_floating_ip - - def release_floating_ip(self, fip_allocation_id, fip): - out = self.ec2_base._shell_with_ec2_env( - 'euca-release-address %s' % fip_allocation_id, True) - if out: - self.logger.info('Floating IP (Alloc ID %s) %s released' % - (fip, fip_allocation_id)) - return True - else: - return False - # end release_floating_ip - - def disassoc_and_delete_fip(self, fip_allocation_id, fip): - if not self.disassociate_floating_ip(fip_allocation_id, fip): - self.logger.error('Disassociation of FIP %s failed' % fip) - if not self.release_floating_ip(fip_allocation_id, fip): - self.logger.error('Unable to deallocate FIP %s ' % (fip)) - # end disassoc_and_delete_fip - - def verify_fip(self, floating_ip): - out = 
self.ec2_base._shell_with_ec2_env( - 'euca-describe-addresses --filter domain=vpc| grep %s' % (floating_ip), True).split('\n') - self.logger.debug(out) - foundIp = False - - for ip in out: - ip = filter(None, ip.split(' ')) - if len(ip) == 0: - continue - if ip[0] == floating_ip: - fip_allocation_id = ip[2] - foundIp = True - - # looger info for allocation or association verification - if ip[2].split('-')[0] == 'eipalloc': - self.logger.info('Floating IP %s verified. No instance associated' - % floating_ip) - elif ip[2].split('-')[0] == 'eipassoc': - self.logger.info( - 'Floating IP %s associated with instance %s verified' % - (floating_ip, ip[3])) - else: - self.logger.warn( - 'Floating IP allocation or association id problem') - break - - if not foundIp: - self.logger.warn( - 'Floating IP allocation or association id verification failed') - return foundIp - # end verify_fip - - def verify_on_setup(self): - if not self.c_fip_fixture.verify_on_setup(): - self.logger.error('Contrail Fixture verification of FIP Pool %s failed' - % (self.pool_name)) - return False - return True - # end verify_on_setup - - @retry(delay=5, tries=3) - def verify_on_cleanup(self): - return True - # end verify_on_cleanup - - def cleanUp(self): - if self.already_present: - self.logger.debug( - 'VM was not created by this fixture..Skipping deletion') - super(VPCFIPFixture, self).cleanUp() - else: - super(VPCFIPFixture, self).cleanUp() - assert self.verify_on_cleanup(), "Euca Verification failed for FIP Pool %s cleanup" \ - % (self.pool_name) - # end cleanUp - -# end VPCFIPFixture diff --git a/fixtures/vpc_fixture.py b/fixtures/vpc_fixture.py deleted file mode 100644 index 647c6a4ef..000000000 --- a/fixtures/vpc_fixture.py +++ /dev/null @@ -1,705 +0,0 @@ -import time -import re -import fixtures -from fabric.api import local -from fabric.context_managers import shell_env, settings - -from common.connections import ContrailConnections - -from floating_ip import * - - -class 
VPCFixture(fixtures.Fixture): - - '''Fixture to create, verify and delete VPC, Subnet, Instance, - Floating IP allocation and association - Flow: Euca2ools -> Boto -> Nova - ''' - - def __init__( - self, inputs, cidr, subnet_cidr=None, floating_net_id=None, connections=None, - doSubnet=False, doInstance=False, doFloating=True, - doAcl=False, doSg=False, sgName=None): - self.inputs = inputs - self.logger = inputs.logger - self.cidr = cidr - self.vpc_id = None - self.subnet_id = None - self.do_subnet_test = doSubnet - self.instance_id = None - self.do_instance_test = doInstance - self.do_floating_ip = doFloating - self.floating_ip = None - self.floating_ip_association = False - self.floating_ip_allocation = False - self.floating_net_id = floating_net_id - self.def_acl_id = None - self.do_acl = doAcl - self.acl_association = False - self.acl_association_id = None - self.sg_id = None - self.do_security_group = doSg - self.sg_name = sgName - - if subnet_cidr and doSubnet: - self.subnet_cidr = subnet_cidr - else: - self.subnet_cidr = cidr.split('/')[0] + '/' + '30' - - if doFloating and floating_net_id: - self.fpool = 'fpool' - self.connections = connections - # end __init__ - - def setUp(self): - super(VPCFixture, self).setUp() - # end setUp - - def cleanUp(self): - # delete security group - if self.do_security_group and self.sg_id: - self.delete_security_group() - - # delete ACL - if self.do_acl and not self.acl_association: - self.delete_acl() - - # release floating IP and delete floating IP pool - if self.do_floating_ip and self.floating_ip: - self.release_floating_ip() - self.fip_fixture.cleanUp() - - # terminate instance - if self.instance_id and self.do_instance_test: - self.terminate_instance() - print('Waiting for VM to terminate') - time.sleep(7) - - # delete subnet - if self.do_subnet_test and self.subnet_id: - self.delete_subnet() - - # delete VPC and its ec2 aceess key, ec2 secret key - if self.vpc_id: - self.delete_vpc() - 
self.delete_ec2_keys(self.access_key) - - super(VPCFixture, self).cleanUp() - # end cleanUp - - # EC2 Secret key and Access key setup functions - - def _set_ec2_keys(self, tenant): - # export ec2 secret key and access key for admin or VPC - keys = local( - '(source /etc/contrail/openstackrc; keystone ec2-credentials-list)', - capture=True).split('\n')[3:] - found = False - - for key in keys: - key = [k for k in filter(None, key.split(' ')) if k != '|'] - if key[0] == tenant: - found = True - self.logger.info('Exported ec2 keys for %s' % tenant) - self.access_key = key[1] - self.secret_key = key[2] - break - return found - # end set_ec2_keys - - def _create_ec2_keys(self, tenant_name): - # create ec2 credentials for VPC - tenantId = self._get_tenant_id(tenant_name) - local('(source /etc/contrail/openstackrc; keystone ec2-credentials-create \ - --tenant-id %s)' % tenantId) - self.logger.info('EC2 keys created for %s' % tenant_name) - return True - # end create_ec2_keys - - def delete_ec2_keys(self, accessKey): - local('(source /etc/contrail/openstackrc; keystone ec2-credentials-delete \ - --access %s)' % accessKey) - self.logger.info('EC2 keys deleted for VPC') - # end delete_ec2_keys - - def _get_admin_user_id(self): - users = local( - '(source /etc/contrail/keystonerc; keystone user-get admin)', - capture=True).split('\n') - - for user in users: - user = [k for k in filter(None, user.split(' ')) if k != '|'] - if user[0] == 'id': - self.user_id = user[1] - break - - return self.user_id - # end _get_admin_user_id - - def _get_admin_role_id(self): - roles = local( - '(source /etc/contrail/keystonerc; keystone role-get admin)', - capture=True).split('\n') - - for role in roles: - role = [k for k in filter(None, role.split(' ')) if k != '|'] - if role[0] == 'id': - self.role_id = role[1] - break - - return self.role_id - # end _get_admin_role_id - - def _get_tenant_id(self, tenantName): - tenants = local('(source /etc/contrail/openstackrc; keystone tenant-get %s)' - % 
tenantName, capture=True).split('\n') - - for tenant in tenants: - tenant = [k for k in filter(None, tenant.split(' ')) if k != '|'] - if tenant[0] == 'id': - self.tenant_id = tenant[1] - break - - return self.tenant_id - # end _get_tenant_id - - def _add_admin_role_to_tenant(self): - # add 'admin' user to VPC with 'admin' role - userId = self._get_admin_user_id() - roleId = self._get_admin_role_id() - tenantId = self._get_tenant_id(self.vpc_id) - local('(source /etc/contrail/keystonerc ; keystone user-role-add --user %s\ - --role %s --tenant %s)' % (userId, roleId, tenantId)) - self.logger.info('Admin user with admin role added to VPC %s' % - self.vpc_id) - # end _add_admin_role_to_tenant - - def _shell_with_ec2_env(self, command, ret): - # shell to run Euca commands on machine with ec2 credentials - first_cfgm = self.inputs.cfgm_ips[0] - with settings(warn_only=True): - with shell_env(EC2_ACCESS_KEY=self.access_key, - EC2_SECRET_KEY=self.secret_key, - EC2_URL='http://%s:8773/services/Cloud' % first_cfgm): - out = local(command, capture=True) - if ret: - return out - # end _shell_with_ec2_env - - # VPC Functions - - def create_vpc(self): - if not self._set_ec2_keys(self.inputs.stack_tenant): - self.logger.error('set ec2-key failed for admin') - return False - - create_vpc_output = self._shell_with_ec2_env( - 'euca-create-vpc %s' % (self.cidr), True) - self.logger.info('Create vpc with CIDR %s' % self.cidr) - - # get vpcid and setup ec2 environment - if create_vpc_output: - self.vpc_id = create_vpc_output.split(' ')[1][4:] - self._add_admin_role_to_tenant() - - if not self._create_ec2_keys(self.vpc_id): - self.logger.error('ec2-key create failed for vpc tenant') - - if not self._set_ec2_keys(self.vpc_id): - self.logger.error('set ec2-key failed for vpc tenant') - - local('service openstack-nova-api restart') - return True - - else: - return False - # end create_vpc - - def verify_vpc(self): - verify_vpc_output = self._shell_with_ec2_env( - 'euca-describe-vpcs %s' % 
(self.vpc_id), True).split('\n')[2].split(' ') - verify_vpc_output = filter(None, verify_vpc_output) - - if verify_vpc_output[1] == self.cidr and verify_vpc_output[0] == self.vpc_id: - self.logger.info('VPC %s verified' % self.vpc_id) - return True - - else: - return False - # end verify_vpc - - def delete_vpc(self): - out = self._shell_with_ec2_env( - 'euca-delete-vpc %s' % (self.vpc_id), True) - if len(out) > 0 and out.split(' ')[1] == self.vpc_id: - self.logger.info('VPC %s deleted' % self.vpc_id) - return True - else: - return False - # end delete_vpc - - # Subnet Functions - - def create_subnet(self): - create_subnet_output = self._shell_with_ec2_env( - 'euca-create-subnet -c %s %s' % - (self.subnet_cidr, self.vpc_id), True) - if create_subnet_output: - self.subnet_id = create_subnet_output.split(' ')[0].split(':')[1] - self.logger.info('Create subnet with CIDR %s' % self.subnet_cidr) - return True - else: - return False - # end create_subnet - - def verify_subnet(self): - verify_subnet_output = self._shell_with_ec2_env( - 'euca-describe-subnets', True).split('\n')[2:] - foundSubnet = False - - for subnet in verify_subnet_output: - if subnet.startswith(self.subnet_id): - foundSubnet = True - break - - if not foundSubnet: - return False - - subnet = subnet.split('\t') - if subnet[1] == self.vpc_id and subnet[2] == self.subnet_cidr: - self.logger.info('Subnet %s verified' % self.subnet_id) - return True - - else: - return False - # end verify_subnet - - def delete_subnet(self): - out = self._shell_with_ec2_env( - 'euca-delete-subnet %s' % (self.subnet_id), True) - if len(out) > 0 and out.split(' ')[1] == self.subnet_id: - self.logger.info('Subnet %s deleted' % self.subnet_id) - return True - else: - return False - # end delete_subnet - - # Instance Functions - - def _get_image_id(self): - images = self._shell_with_ec2_env( - 'euca-describe-images', True).split('\n') - - for image in images: - image = [k for k in image.split('\t')] - if image[4] == 'available': - 
self.image_id = image[1] - self.logger.info('Using image %s(%s) to launch VM' % - (self.image_id, image[2])) - break - - return self.image_id - # end _get_image_id - - def _get_instance_id(self, instances): - instance = [k for k in instances[1].split('\t')] - - if instance[1].startswith('i-'): - self.instance_id = instance[1] - self.instance_name = instance[3] - - else: - self.logger.error('create instance failed') - - return self.instance_id - # end _get_instance_id - - def run_instance(self): - imageId = self._get_image_id() - - run_instance_output = self._shell_with_ec2_env( - 'euca-run-instances %s -s %s' % - (imageId, self.subnet_id), True).split('\n') - instanceId = self._get_instance_id(run_instance_output) - - if not instanceId: - return False - self.logger.info( - 'Run Instance in subnet %s with %s image' % (self.subnet_id, - self.image_id)) - return True - # end run_instance - - @retry(delay=1, tries=3) - def verify_instance(self): - print('Waiting for VM to be in running state ...') - time.sleep(7) - instances = self._shell_with_ec2_env( - 'euca-describe-instances', True).split('\n') - - foundInstance = False - for instance in instances: - instance = [k for k in instance.split('\t')] - if instance[1] == self.instance_id and instance[5] == 'running': - foundInstance = True - self.logger.info('Instance %s verified' % self.instance_id) - break - - return foundInstance - # end verify_instance - - def terminate_instance(self): - out = self._shell_with_ec2_env( - 'euca-terminate-instances %s' % (self.instance_id), True).split('\t') - if out[1] == self.instance_id: - self.logger.info('Instance %s terminated' % self.instance_id) - return True - return False - # end terminate_instance - - # Floating IP - - def _create_floating_ip_pool(self): - # create flaoting ip pool - self.fip_fixture = FloatingIPFixture( - project_name='admin', inputs=self.inputs, - connections=self.connections, pool_name=self.fpool, - vn_id=self.floating_net_id) - self.fip_fixture.setUp() - 
- if self.fip_fixture.verify_on_setup(): - return True - else: - self.logger.error('FIP pool create error') - - return False - # end _create_floating_ip_pool - - def allocate_floating_ip(self): - if not self._create_floating_ip_pool(): - return False - - out = self._shell_with_ec2_env( - 'euca-allocate-address -d vpc', True).split('\t') - if out: - self.floating_ip = out[1] - self.fip_allcation_id = out[2] - self.floating_ip_allocation = True - self.logger.info( - 'Allocate a Floating IP from Floating Ip pool fpool') - return True - - else: - return False - # end allocate_floating_ip - - def release_floating_ip(self): - out = self._shell_with_ec2_env( - 'euca-release-address %s' % self.fip_allcation_id, True) - if out: - self.logger.info('Floating IP %s released' % self.floating_ip) - # TODO enable verify_floating ip after describe_address - # has been fixed in cloud.py - # return not self.verify_floating_ip() - return True - else: - return False - # end release_floating_ip - - def verify_floating_ip(self): - out = self._shell_with_ec2_env( - 'euca-describe-addresses', True).split('\n') - foundIp = False - - for ip in out: - ip = filter(None, ip.split(' ')) - if ip[0] == self.floating_ip: - self.fip_allcation_id = ip[2] - foundIp = True - - # looger info for allocation or association verification - if ip[2].split('-')[0] == 'eipalloc' and self.floating_ip_allocation: - self.logger.info('Floating IP %s verified. 
No instance associated' - % self.floating_ip) - elif ip[2].split('-')[0] == 'eipassoc' and self.floating_ip_association: - self.logger.info( - 'Floating IP %s associated with instance %s verified' % - (self.floating_ip, ip[3])) - else: - self.logger.debug( - 'Floating IP allocation or association id problem') - break - - return foundIp - # end verify_floating_ip - - def associate_floating_ip(self): - out = self._shell_with_ec2_env( - 'euca-associate-address -a %s %s' % (self.fip_allcation_id, - self.instance_id), True).split('\t') - if out: - self.floating_ip_association = True - self.fip_allcation_id = out[1] - self.logger.info( - 'Associate Floating IP %s to Instance %s' % (self.floating_ip, - self.instance_id)) - return True - - else: - return False - # end associate_floating_ip - - def disassociate_floating_ip(self): - out = self._shell_with_ec2_env('euca-disassociate-address %s' % ( - self.fip_allcation_id), True) - if out == 'True': - self.floating_ip_association = False - self.fip_allcation_id.replace('eipassoc', 'eipalloc') - self.logger.info( - 'Floating IP %s disassociated from instance %s' % (self.floating_ip, - self.instance_id)) - return True - else: - return False - # end disassociate_floating_ip - - # ACL - - def create_acl(self): - out = self._shell_with_ec2_env( - 'euca-create-network-acl %s' % self.vpc_id, True) - if len(out) > 0 and out.startswith('acl-'): - self.acl_id = out - self.logger.info('Create ACL in vpc %s' % self.vpc_id) - return True - - else: - return False - # end create_acl - - def verify_acl(self): - out = self._shell_with_ec2_env( - 'euca-describe-network-acls %s' % self.acl_id, True).split('\n') - foundAcl = False - - if len(out) <= 0: - return foundAcl - - acl_id = out[2].replace(' ', '') - vpc_id = out[3].replace(' ', '') - if acl_id == self.acl_id and vpc_id == self.vpc_id: - self.logger.info('ACL %s verified' % self.acl_id) - - # check if acl associated or not - if not self.acl_association: - self.logger.info('ACL %s not 
associated with any subnet' % - self.acl_id) - foundAcl = True - - # check if acl associated with subnet or not - else: - for entry in out: - assoc_str = re.sub(' +', ' ', entry).replace(' ', '', 1) - assoc = assoc_str.split(' ') - if not assoc[0].startswith('aclassoc-'): - continue - if assoc[0] == self.acl_association_id and assoc[1] == self.subnet_id: - self.logger.info( - 'ACL %s associated with subnet %s verified' % - (self.acl_id, self.subnet_id)) - if self.acl_association: - foundAcl = True - break - return foundAcl - - else: - return False - - def delete_acl(self): - out = self._shell_with_ec2_env( - 'euca-delete-network-acl %s' % self.acl_id, True) - if out == 'True': - self.logger.info('ACL %s deleted' % self.acl_id) - return True - else: - return False - # end delete_acl - - def associate_acl(self, acl=None): - # if acl = default then associate subnet to default ACL for VPC - # else associate subnet with ACL created using euca2ools - if not acl: - aclId = self.acl_id - self.acl_association_id = self._get_acl_association_id() - if not self.acl_association_id: - self.logger.debug('Cannot get ACL association id') - else: - aclId = self.def_acl_id - - out = self._shell_with_ec2_env( - 'euca-replace-network-acl-association %s -a %s' % (aclId, - self.acl_association_id), True) - if out: - self.logger.info('Associate ACL %s to subnet %s' % - (aclId, self.subnet_id)) - if acl == 'default': - self.acl_association = False - else: - self.acl_association = True - return True - - return False - # end associate_acl - - def _get_acl_association_id(self): - out = self._shell_with_ec2_env( - 'euca-describe-network-acls', True).split('\n') - assoc_id = None - - for entry in out: - idx = out.index(entry) - entry.replace(' ', '') - if not entry.startswith('acl-'): - continue - - vpc_id = out[idx + 1].replace(' ', '') - if entry.startswith('acl-default'): - self.def_acl_id = 'acl-default' - - for entry in out: - assoc_str = re.sub(' +', ' ', entry).replace(' ', '', 1) - 
assoc = assoc_str.split(' ') - if not assoc[0].startswith('aclassoc-'): - continue - if assoc[1] == self.subnet_id: - assoc_id = assoc[0] - - return assoc_id - # end _get_acl_association_id - - def create_acl_rule(self, rule): - out = self._shell_with_ec2_env('euca-create-network-acl-entry %s -r %s -p %s -a %s -n %s -f %s -t %s -d %s' - % (self.acl_id, rule['number'], - rule['protocol'], rule['action'], - rule['cidr'], rule['fromPort'], - rule['toPort'], rule['direction']), True) - if out == 'True': - self.logger.info('Rule %s added in ACL %s' % - (rule['number'], self.acl_id)) - return True - else: - return False - # end create_acl_rule - - def replace_acl_rule(self, rule): - out = self._shell_with_ec2_env('euca-replace-network-acl-entry %s -r %s -p %s -a %s -n %s -f %s -t %s -d %s' - % (self.acl_id, rule['number'], - rule['protocol'], rule['action'], - rule['cidr'], rule['fromPort'], - rule['toPort'], rule['direction']), True) - if out == 'True': - self.logger.info('Rule %s replaced in ACL %s' % - (rule['number'], self.acl_id)) - return True - else: - return False - # end replace_acl_rule - - def delete_acl_rule(self, rule): - out = self._shell_with_ec2_env('euca-delete-network-acl-entry %s -r %s -d %s' - % (self.acl_id, rule['number'], - rule['direction']), True) - if out == 'True': - self.logger.info('Rule %s deleted in ACL %s' % - (rule['number'], self.acl_id)) - return True - else: - return False - # end delete_acl_rule - - # Security Group - - def create_security_group(self): - out = self._shell_with_ec2_env( - 'euca-create-security-group -d sanity_test_group -v %s %s' % - (self.vpc_id, self.sg_name), True).split('\t') - if len(out) > 3 and out[2] == self.sg_name and out[3] == 'sanity_test_group': - self.logger.info('Create security group %s' % self.sg_name) - self.sg_id = out[1] - return True - else: - return False - # end create_security_group - - def verify_security_group(self): - out = self._shell_with_ec2_env('euca-describe-group', True).split('\n') - 
foundGroup = False - - for group in out: - group = group.split('\t') - if len(group) > 3 and group[2] == self.sg_name and group[3] == 'sanity_test_group': - foundGroup = True - self.logger.info('Security Group %s (%s) verified' % - (self.sg_name, self.sg_id)) - break - return foundGroup - # end verify_security_group - - def delete_security_group(self): - out = self._shell_with_ec2_env( - 'euca-delete-security-group %s' % self.sg_id, True) - if out == 'Group %s deleted' % self.sg_id: - self.logger.info('Security Group %s (%s) deleted' % - (self.sg_name, self.sg_id)) - return True - else: - return False - # end delete_security_group - - def create_security_group_rule(self, rule): - if rule.has_key('group_id'): - cidr_group = rule['group_id'] - ruletail = '-o %s' % rule['group_id'] - else: - cidr_group = rule['cidr'] - ruletail = '-s %s' % rule['cidr'] - - out = self._shell_with_ec2_env('euca-authorize-security-group-%s -P %s -p %s %s %s' - % (rule[ - 'direction'], rule['protocol'], - rule['port'], ruletail, - self.sg_id), True).split('\n') - if len(out) > 1: - ruleList = out[1].split('\t') - if self.sg_id in ruleList and rule['protocol'] in ruleList and cidr_group in ruleList: - self.logger.info('Rule added successfuly') - return True - - else: - return False - # end add_security_group_rule - - def delete_security_group_rule(self, rule): - if rule.has_key('group_id'): - cidr_group = rule['group_id'] - ruletail = '-o %s' % rule['group_id'] - else: - cidr_group = rule['cidr'] - ruletail = '-s %s' % rule['cidr'] - - out = self._shell_with_ec2_env('euca-revoke-security-group-%s -P %s -p %s %s %s' - % (rule[ - 'direction'], rule['protocol'], - rule['port'], ruletail, - self.sg_id), True).split('\n') - if len(out) > 1: - ruleList = out[1].split('\t') - if self.sg_id in ruleList and rule['protocol'] in ruleList and cidr_group in ruleList: - self.logger.info('Rule deleted successfuly') - return True - - else: - return False - # end delete_security_group_rule - -# end 
VPCFixture diff --git a/fixtures/vpc_fixture_new.py b/fixtures/vpc_fixture_new.py deleted file mode 100644 index 2663aeac8..000000000 --- a/fixtures/vpc_fixture_new.py +++ /dev/null @@ -1,740 +0,0 @@ -import time -import re -import fixtures -from fabric.api import local, run -from fabric.context_managers import shell_env, settings - -from common.connections import ContrailConnections -from ec2_base import EC2Base - -from floating_ip import * - - -class VPCFixture(fixtures.Fixture): - - '''Fixture to create, verify and delete VPC - Flow: Euca2ools -> Boto -> Nova - ''' - - def __init__(self, cidr, connections=None): - self.connections = connections - self.inputs = connections.inputs - self.logger = self.inputs.logger - self.cidr = cidr - self.vpc_id = None - self.ec2_base = EC2Base( - logger=self.logger, - inputs=self.inputs) - self.openstack_ip = self.inputs.openstack_ip - self.os_username = self.inputs.host_data[self.openstack_ip]['username'] - self.os_password = self.inputs.host_data[self.openstack_ip]['password'] - self.acl_association = False - self.acl_association_id = None - self.tenant_id = None - self.project_connections = None - # end __init__ - - def setUp(self): - super(VPCFixture, self).setUp() - self.create_vpc() - # end setUp - - def create_vpc(self): - create_vpc_output = self.ec2_base._shell_with_ec2_env( - 'euca-create-vpc %s' % (self.cidr), True) - self.logger.info('Create vpc with CIDR %s' % self.cidr) - - # get vpcid and setup ec2 environment - if 'EC2APIError' in create_vpc_output or create_vpc_output.failed: - self.logger.warn('Unable to create VPC : %s' % (create_vpc_output)) - return False - if create_vpc_output: - self.vpc_id = create_vpc_output.split(' ')[1][4:] - self._add_admin_role_to_tenant() - - key_data = self.ec2_base.create_ec2_keys(self.vpc_id) - if key_data: - self.addCleanup(self.ec2_base.delete_ec2_keys, - key_data['access']) - else: - self.logger.error('ec2-key create failed for vpc tenant') - - if not 
self.ec2_base._set_ec2_keys(self.vpc_id): - self.logger.error('set ec2-key failed for vpc tenant') - - return True - else: - return False - # end create_vpc - - def verify_on_setup(self): - if not self.vpc_id: - self.logger.warn('VPC ID is not set, VPC Verification failed') - return False - if not self.verify_vpc(): - self.logger.error('VPC %s verification failed' % (self.vpc_id)) - return False - return True - # end verify_on_setup - - def cleanUp(self): - # delete VPC and its ec2 aceess key, ec2 secret key - if self.vpc_id: - self.delete_vpc() - - super(VPCFixture, self).cleanUp() - # end cleanUp - - def run_cmd_on_os_node(self, cmd): - ''' - Run cmd on openstack node - ''' - with settings(host_string='%s@%s' % (self.os_username, - self.openstack_ip), password=self.os_password, - warn_only=True, abort_on_prompts=False): - output = run(cmd) - return output - # end run_cmd_on_os_node - - def _get_admin_user_id(self): - users = self.run_cmd_on_os_node( - '(source /etc/contrail/keystonerc; keystone user-get admin)', - ).split('\n') - - for user in users: - user = [k for k in filter(None, user.split(' ')) if k != '|'] - if user[0] == 'id': - user_id = user[1] - break - - return user_id - # end _get_admin_user_id - - def _get_admin_role_id(self): - roles = self.run_cmd_on_os_node( - '(source /etc/contrail/keystonerc; keystone role-get admin)', - ).split('\n') - - for role in roles: - role = [k for k in filter(None, role.split(' ')) if k != '|'] - if role[0] == 'id': - role_id = role[1] - break - - return role_id - # end _get_admin_role_id - - def _get_tenant_id(self, tenantName): - tenants = self.run_cmd_on_os_node( - '(source /etc/contrail/openstackrc; keystone tenant-get %s)' % tenantName, ).split('\n') - - for tenant in tenants: - tenant = [k for k in filter(None, tenant.split(' ')) if k != '|'] - if tenant[0] == 'id': - self.tenant_id = tenant[1] - break - - return self.tenant_id - # end _get_tenant_id - - def _add_admin_role_to_tenant(self): - # add 'admin' user 
to VPC with 'admin' role - userId = self._get_admin_user_id() - roleId = self._get_admin_role_id() - tenantId = self._get_tenant_id(self.vpc_id) - if not tenantId: - self.logger.warn('Tenant id not found for VPC %s' % (self.vpc_id)) - return False - self.run_cmd_on_os_node( - '(source /etc/contrail/keystonerc ; keystone user-role-add --user %s --role %s --tenant %s)' % - (userId, roleId, tenantId)) - self.logger.info('Admin user with admin role added to VPC %s' % - self.vpc_id) - # end _add_admin_role_to_tenant - - @retry(delay=5, tries=3) - def verify_vpc(self): - if not self.vpc_id: - self.logger.warn( - 'VPC ID is not set, VPC may not be created at all') - return False - verify_vpc_output = self.ec2_base._shell_with_ec2_env( - 'euca-describe-vpcs |grep %s' % (self.vpc_id), True).split('\n')[0].split(' ') - verify_vpc_output = filter(None, verify_vpc_output) - - if verify_vpc_output[1] == self.cidr and \ - verify_vpc_output[0] == self.vpc_id: - self.logger.info('VPC %s verified' % self.vpc_id) - return True - - else: - return False - # end verify_vpc - # vaildation of a partiular vpcs entry - - def verify_vpc_entry(self, vpc_id): - - if not vpc_id: - self.logger.warn( - 'VPC ID is not set, VPC may not be created at all') - return False - - verify_vpc_output = self.ec2_base._shell_with_ec2_env( - 'euca-describe-vpcs %s | grep vpc- | wc -l' % (vpc_id), True) - - if verify_vpc_output != '1': - - found_vpc = False - self.logger.debug( - 'euca-describe-vpcs returns Multiple Entries') - - else: - self.logger.info('Single vpcs entry %s verified' % (vpc_id)) - found_vpc = True - return found_vpc - - # end verify_vpc - - def delete_vpc(self): - out = self.ec2_base._shell_with_ec2_env( - 'euca-delete-vpc %s' % (self.vpc_id), True) - if len(out) > 0 and out.split(' ')[1] == self.vpc_id: - self.logger.info('VPC %s deleted' % self.vpc_id) - return True - else: - return False - # end delete_vpc - - # Instance Functions - - def _get_image_id(self): - images = 
self.ec2_base._shell_with_ec2_env( - 'euca-describe-images', True).split('\n') - - for image in images: - image = [k for k in image.split('\t')] - if image[4] == 'available': - self.image_id = image[1] - self.logger.info('Using image %s(%s) to launch VM' % - (self.image_id, image[2])) - break - - return self.image_id - # end _get_image_id - - def _get_nat_image_id(self): - images = self.ec2_base._shell_with_ec2_env( - 'euca-describe-images', True).split('\n') - - for image in images: - image = [k for k in image.split('\t')] - if image[4] == 'available' and image[2] == 'None (nat-service)': - image_id = image[1] - self.logger.info('Using image %s(%s) to launch VM' % - (image_id, image[2])) - break - - return image_id - # end _get_nat_image_id - - def _get_instance_id(self, instances): - instance = [k for k in instances[1].split('\t')] - - if instance[1].startswith('i-'): - self.instance_id = instance[1] - self.instance_name = instance[3] - - else: - self.logger.error('create instance failed') - - return self.instance_id - # end _get_instance_id - - @retry(delay=1, tries=3) - def verify_instance(self): - self.logger.debug('Waiting for VM to be in running state ...') - time.sleep(7) - instances = self.ec2_base._shell_with_ec2_env( - 'euca-describe-instances', True).split('\n') - - foundInstance = False - for instance in instances: - instance = [k for k in instance.split('\t')] - if instance[1] == self.instance_id and instance[5] == 'running': - foundInstance = True - self.logger.info('Instance %s verified' % self.instance_id) - break - - return foundInstance - # end verify_instance - - def terminate_instance(self): - out = self.ec2_base._shell_with_ec2_env( - 'euca-terminate-instances %s' % (self.instance_id), True).split('\t') - if out[1] == self.instance_id: - self.logger.info('Instance %s terminated' % self.instance_id) - return True - return False - # end terminate_instance - - # Floating IP - - def _create_floating_ip_pool(self): - # create flaoting ip pool - 
self.fip_fixture = FloatingIPFixture( - inputs=self.inputs, - connections=self.connections, pool_name=self.fpool, - vn_id=self.floating_net_id) - self.fip_fixture.setUp() - - if self.fip_fixture.verify_on_setup(): - return True - else: - self.logger.error('FIP pool create error') - - return False - # end _create_floating_ip_pool - - def allocate_floating_ip(self): - if not self._create_floating_ip_pool(): - return False - - out = self.ec2_base._shell_with_ec2_env( - 'euca-allocate-address -d vpc', True).split('\t') - if out: - self.floating_ip = out[1] - self.fip_allcation_id = out[2] - self.floating_ip_allocation = True - self.logger.info( - 'Allocate a Floating IP from Floating Ip pool fpool') - return True - - else: - return False - # end allocate_floating_ip - - def release_floating_ip(self): - out = self.ec2_base._shell_with_ec2_env( - 'euca-release-address %s' % self.fip_allcation_id, True) - if out: - self.logger.info('Floating IP %s released' % self.floating_ip) - # TODO enable verify_floating ip after describe_address - # has been fixed in cloud.py - # return not self.verify_floating_ip() - return True - else: - return False - # end release_floating_ip - - def verify_floating_ip(self): - out = self.ec2_base._shell_with_ec2_env( - 'euca-describe-addresses', True).split('\n') - foundIp = False - - for ip in out: - ip = filter(None, ip.split(' ')) - if ip[0] == self.floating_ip: - self.fip_allcation_id = ip[2] - foundIp = True - - # looger info for allocation or association verification - if ip[2].split('-')[0] == 'eipalloc' \ - and self.floating_ip_allocation: - self.logger.info( - 'Floating IP %s verified. 
No instance associated' - % self.floating_ip) - elif ip[2].split('-')[0] == 'eipassoc' and \ - self.floating_ip_association: - self.logger.info( - 'Floating IP %s associated with instance %s verified' % - (self.floating_ip, ip[3])) - else: - self.logger.debug( - 'Floating IP allocation or association id problem') - break - - return foundIp - # end verify_floating_ip - - def associate_floating_ip(self): - out = self.ec2_base._shell_with_ec2_env( - 'euca-associate-address -a %s %s' % (self.fip_allcation_id, self.instance_id), True).split('\t') - if out: - self.floating_ip_association = True - self.fip_allcation_id = out[1] - self.logger.info('Associate Floating IP %s to Instance %s' % - (self.floating_ip, self.instance_id)) - return True - - else: - return False - # end associate_floating_ip - - def disassociate_floating_ip(self): - out = self.ec2_base._shell_with_ec2_env( - 'euca-disassociate-address %s' % (self.fip_allcation_id), True) - if out == 'True': - self.floating_ip_association = False - self.fip_allcation_id.replace('eipassoc', 'eipalloc') - self.logger.info('Floating IP %s disassociated from instance %s' % - (self.floating_ip, self.instance_id)) - return True - else: - return False - # end disassociate_floating_ip - - # ACL - - def create_acl(self): - out = self.ec2_base._shell_with_ec2_env( - 'euca-create-network-acl %s' % self.vpc_id, True) - if len(out) > 0 and out.startswith('acl-'): - acl_id = out - self.logger.info('Create ACL in vpc %s' % self.vpc_id) - return acl_id - - else: - return None - # end create_acl - - def delete_acl(self, acl_id): - out = self.ec2_base._shell_with_ec2_env( - 'euca-delete-network-acl %s' % acl_id, True) - if out == 'True': - self.logger.info('ACL %s deleted' % acl_id) - return True - else: - self.logger.error('Unable to delete ACL %s' % (acl_id)) - return False - # end delete_acl - - def create_acl_rule(self, acl_id, rule): - cmd = 'euca-create-network-acl-entry %s ' % (acl_id) - acl_keys = ['protocol', 'action', 'cidr', 
- 'fromPort', 'toPort', 'direction'] - acl_key_prefix = ['-p', '-a', '-n', '-f', '-t', '-d'] - acl_strings = { - 'number': '-r', - 'protocol': '-p', - 'action': '-a', - 'cidr': '-n', - 'fromPort': '-f', - 'toPort': '-t', - 'direction': '-d', - } - for key in rule.keys(): - cmd += ' %s %s ' % (acl_strings[key], rule[key]) - - out = self.ec2_base._shell_with_ec2_env(cmd, True) - - if out == 'True': - self.logger.info( - 'Rule %s added in ACL %s' % (rule['number'], acl_id)) - return True - else: - return False - # end create_acl_rule - - def replace_acl_rule(self, acl_id, rule): - out = self.ec2_base._shell_with_ec2_env( - 'euca-replace-network-acl-entry %s -r %s -p %s -a %s -n %s -f %s -t %s -d %s' - % (acl_id, rule['number'], - rule['protocol'], rule['action'], - rule['cidr'], rule['fromPort'], - rule['toPort'], rule['direction']), True) - if out == 'True': - self.logger.info( - 'Rule %s replaced in ACL %s' % (rule['number'], acl_id)) - return True - else: - return False - # end replace_acl_rule - - def delete_acl_rule(self, acl_id, rule): - out = self.ec2_base._shell_with_ec2_env('euca-delete-network-acl-entry %s -r %s -d %s' - % (acl_id, rule['number'], - rule['direction']), True) - if out == 'True': - self.logger.info( - 'Rule %s deleted in ACL %s' % (rule['number'], acl_id)) - return True - else: - return False - # end delete_acl_rule - - # Route Table - def create_route_table(self, vpc_id=None): - ''' - Create route table in VPC - [root@nodec22 0000_]# euca-create-route-table vpc-96d08029 - rtb-2e799f33 vpc-96d08029 10.1.1.0/24 local active - [root@nodec22 0000_]# - ''' - if not vpc_id: - vpc_id = self.vpc_id - out = self.ec2_base._shell_with_ec2_env('euca-create-route-table %s' % - (vpc_id), True).split('\t') - if out[1] == vpc_id: - rtb_id = out[0] - self.logger.info( - 'Created Route table %s in VPC %s' % (rtb_id, vpc_id)) - return rtb_id - else: - return None - # end create_route_table - - def verify_route_table(self, rtb_id): - out = 
self.ec2_base._shell_with_ec2_env( - 'euca-describe-route-tables %s' % (rtb_id), True).split('\n') - found_rtb = False - - for line in out: - if rtb_id in line: - found_rtb = True - self.logger.info('Route table %s verified' % (rtb_id)) - break - - # validation for Bug [1904] - out = self.ec2_base._shell_with_ec2_env( - 'euca-describe-route-tables %s | grep rtb- | wc -l' % (rtb_id), True) - if out != '1': - found_rtb = False - self.logger.debug( - 'euca-describe-route-tables returns Multiple Entries') - else: - self.logger.info('Single Route table %s verified' % (rtb_id)) - - return found_rtb - # end verify_route_table - - def delete_route_table(self, rtb_id): - out = self.ec2_base._shell_with_ec2_env( - 'euca-delete-route-table %s' % rtb_id, True) - if 'True' in out: - self.logger.info('Route Table %s deleted' % (rtb_id)) - return True - else: - return False - # end delete_route_table - - def associate_route_table(self, rtb_id, subnet_id): - ''' - Associate subnet with route_table - [root@nodec22 ~]# euca-associate-route-table -s subnet-4dad7f88 rtb-9bb0e59f -ASSOCIATION rtbassoc-2f162321 rtb-9bb0e59f subnet-4dad7f88 -[root@nodec22 ~]# - ''' - out = self.ec2_base._shell_with_ec2_env( - 'euca-associate-route-table -s %s %s' % (subnet_id, rtb_id), True).split('\n') - line = filter(None, out[0].split('\t')) - if line[2] == rtb_id: - assoc_id = line[1] - self.logger.info('Route table %s is associated with Subnet %s \ - with association id %s' % (rtb_id, subnet_id, assoc_id)) - return assoc_id - else: - return None - # end associate_route_table - - def disassociate_route_table(self, rtb_assoc_id): - ''' - Disassociate a subnet from this route table - ''' - out = self.ec2_base._shell_with_ec2_env( - 'euca-disassociate-route-table %s' % (rtb_assoc_id), True) - if out == 'True': - self.logger.info('Association id %s removed' % (rtb_assoc_id)) - return True - else: - return False - # end disassociate_route_table - - def create_route(self, prefix, rtb_id, instance_id=None, 
gw_id=None): - ''' - Create a route entry in a route table - ''' - cmd = 'euca-create-route ' - if instance_id: - cmd += '-i %s ' % instance_id - if gw_id: - cmd += '-g %s ' % gw_id - out = self.ec2_base._shell_with_ec2_env(cmd + '-r %s %s' % (prefix, - rtb_id), True).split('\n') - line = filter(None, out[0].split('\t')) - if line[2] == prefix: - self.logger.info('Created Route with prefix %s in %s' % (prefix, - rtb_id)) - return True - # endif - return False - # end create_route - - def delete_route(self, rtb_id, prefix): - ''' - Delete route from route table - Ex: - [root@nodec22 ~]# euca-delete-route -r 0.0.0.0/0 rtb-9bb0e59f -True - ''' - out = self.ec2_base._shell_with_ec2_env('euca-delete-route -r %s %s' - % (prefix, rtb_id), True) - if out == 'True': - self.logger.info('Route with prefix %s removed from Route table %s' - % (prefix, rtb_id)) - return True - else: - return False - # end delete_route - - # Security Group - - def create_security_group(self, sg_name): - out = self.ec2_base._shell_with_ec2_env( - 'euca-create-security-group -d sanity_test_group -v %s %s' % (self.vpc_id, sg_name), True).split('\t') - if len(out) > 3 and out[2] == sg_name and out[3] == 'sanity_test_group': - self.logger.info('Create security group %s' % sg_name) - sg_id = out[1] - return sg_id - else: - return None - # end create_security_group - - def verify_security_group(self, sg_name): - out = self.ec2_base._shell_with_ec2_env( - 'euca-describe-group', True).split('\n') - foundGroup = False - - for group in out: - group = group.split('\t') - if len(group) > 3 and group[2] == sg_name and \ - group[3] == 'sanity_test_group': - foundGroup = True - self.logger.info('Security Group %s verified' % (sg_name)) - break - return foundGroup - # end verify_security_group - - def get_security_group_id(self, sg_name): - out = self.ec2_base._shell_with_ec2_env( - 'euca-describe-security-groups --filter vpc-id=%s' % (self.vpc_id), True).split('\n') - sg_id = None - for group in out: - group = 
group.split() - if len(group) >= 3 and group[2] == sg_name: - sg_id = group[0] - break - return sg_id - # end get_security_group_id - - def delete_security_group(self, sg_id): - out = self.ec2_base._shell_with_ec2_env( - 'euca-delete-security-group %s' % sg_id, True) - if out == 'Group %s deleted' % sg_id: - self.logger.info('Security Group %s deleted' % (sg_id)) - return True - else: - return False - # end delete_security_group - - def create_security_group_rule(self, sg_id, rule): - if rule.has_key('source-group'): - cidr_group = rule['source-group'] - else: - cidr_group = rule['cidr'] - cmd = 'euca-authorize-security-group-%s ' % (rule['direction']) - sg_keys = ['protocol', 'port', 'cidr', 'source-group'] - acl_strings = { - 'protocol': '-P', - 'cidr': '-s', - 'port': '-p', - 'source-group': '-o', - } - for key in rule.keys(): - if not key == 'direction': - cmd += ' %s %s ' % (acl_strings[key], rule[key]) - cmd += sg_id - - out = self.ec2_base._shell_with_ec2_env(cmd, True).split('\n') - if len(out) > 1: - ruleList = out[1].split('\t') - if sg_id in ruleList and \ - rule['protocol'] in ruleList and cidr_group in ruleList: - self.logger.info('Rule added successfuly') - return True - - else: - return False - # end add_security_group_rule - - def delete_security_group_rule(self, sg_id, rule): - if rule.has_key('source-group'): - cidr_group = rule['source-group'] - else: - cidr_group = rule['cidr'] - cmd = 'euca-revoke-security-group-%s ' % (rule['direction']) - sg_keys = ['protocol', 'port', 'cidr', 'source-group'] - acl_strings = { - 'protocol': '-P', - 'cidr': '-s', - 'port': '-p', - 'source-group': '-o', - } - for key in rule.keys(): - if not key == 'direction': - cmd += ' %s %s ' % (acl_strings[key], rule[key]) - cmd += sg_id - - out = self.ec2_base._shell_with_ec2_env(cmd, True).split('\n') - if len(out) > 1: - ruleList = out[1].split('\t') - if sg_id in ruleList and rule['protocol'] in ruleList and \ - cidr_group in ruleList: - self.logger.info('Rule deleted 
successfuly') - return True - - else: - return False - # end delete_security_group_rule - - # Internet Gateway - def create_gateway(self): - out = self.ec2_base._shell_with_ec2_env( - 'euca-create-internet-gateway', True) - if len(out) > 0 and 'igw-default' in out: - gw_id = 'igw-default' - self.logger.info('Created Gateway %s in vpc %s' % - (gw_id, self.vpc_id)) - return gw_id - else: - return None - # end create_gateway - - def delete_gateway(self, gw_id): - out = self.ec2_base._shell_with_ec2_env( - 'euca-delete-internet-gateway %s' % (gw_id), True) - if 'True' in out: - self.logger.info('Deleted Gateway %s in vpc %s' % - (gw_id, self.vpc_id)) - return gw_id - else: - return None - # end delete_gateway - - def get_project_connections(self, username=None, password=None): - if not username: - username = 'admin' - if not password: - password = 'contrail123' - if not self.project_connections: - self.project_connections = ContrailConnections( - inputs=self.inputs, - logger=self.logger, - project_name=self.vpc_id, - username=username, - password=password) - return self.project_connections - # end get_project_connections - - - -# end VPCFixture diff --git a/fixtures/vpc_vm_fixture.py b/fixtures/vpc_vm_fixture.py deleted file mode 100644 index d3ab012d2..000000000 --- a/fixtures/vpc_vm_fixture.py +++ /dev/null @@ -1,311 +0,0 @@ -import time -import re -import fixtures -from fabric.api import local -from fabric.context_managers import shell_env, settings, hide -from fabric.contrib.files import exists -from fabric.operations import get, put - -from common.connections import ContrailConnections -from vpc_fixture_new import VPCFixture -from ec2_base import EC2Base -from vm_test import VMFixture -from tcutils.util import * - - -class VPCVMFixture(fixtures.Fixture): - - '''Fixture to create, verify and delete VM - Flow: Euca2ools -> Boto -> Nova - Instance_type is either 'nat' or 'regular' - If Instance type is nat, also pass public vn fixture object - ''' - - def 
__init__(self, vpc_vn_fixture, - image_name='ubuntu', connections=None, key='key1', sg_ids=[], - instance_type='regular', - public_vn_fixture=None): - self.connections = connections - self.inputs = connections.inputs - self.logger = self.inputs.logger - self.vpc_fixture = vpc_vn_fixture.vpc_fixture - self.vpc_id = self.vpc_fixture.vpc_id - self.vpc_vn_fixture = vpc_vn_fixture - self.vpc_id = self.vpc_fixture.vpc_id - self.project_id = self.vpc_id - self.image_name = image_name - self.instance_type = instance_type - self.vn_obj = vpc_vn_fixture.contrail_vn_fixture.obj - self.vm_name = None - self.image_id = None - self.subnet_id = vpc_vn_fixture.subnet_id - - self.ec2_base = self.vpc_fixture.ec2_base - self.already_present = False - self.nova_h = self.connections.nova_h - self.key = self.inputs.stack_user+key - self.sg_ids = sg_ids - self.cfgm_ip = self.inputs.cfgm_ip - self.instance_id = None - self.public_vn_fixture = public_vn_fixture - if public_vn_fixture: - self.vn_obj = public_vn_fixture.obj - - # end __init__ - - def setUp(self): - super(VPCVMFixture, self).setUp() - try: - f = '/tmp/key%s'%self.inputs.stack_user - lock = Lock(f) - lock.acquire() - self._create_keypair(self.key) - finally: - lock.release() - self.create_vm() - # Build up data structure for std VM verification to happen - # Note that this Fixture does not create a VM if it is already present - if self.vm_name: - self.c_vm_fixture = self.useFixture(VMFixture( - project_name=self.vpc_id, - connections=self.connections, - image_name=self.image_name, - vn_obj=self.vn_obj, - vm_name=self.vm_name, - sg_ids=self.sg_ids)) - # end setUp - - def create_vm(self): - zone, node_name = self.nova_h.lb_node_zone() - self.image_name = self.nova_h.get_image_name_for_zone( - image_name=self.image_name, - zone=zone) - self.nova_h.get_image(self.image_name) - self.image_id = self._get_image_id() - cmd_str = 'euca-run-instances %s -s %s -k %s -z %s' % \ - (self.image_id, self.subnet_id, self.key, zone) - if 
self.instance_type == 'nat': - cmd_str = 'euca-run-instances %s' % (self.image_id) - if self.sg_ids: - cmd_str += ' -g %s' % (self.sg_ids[0]) - self.logger.debug(cmd_str) - run_instance_output = self.ec2_base._shell_with_ec2_env( - cmd_str, True).split('\n') - self.logger.debug(run_instance_output) - self.logger.debug('Image name is .%s.' % (self.image_name)) - # TODO WA for Bug 2010 - if self.image_name == 'nat-service': - time.sleep(10) - run_instance_output = self.ec2_base._shell_with_ec2_env( - 'euca-describe-instances | grep %s' % (self.image_id), - True).split('\n') - self._gather_instance_details(run_instance_output) - - if not self.instance_id: - self.logger.error( - 'VM Instance ID not found upon doing euca-run-instances') - return False - self.logger.info('Instance %s(ID %s) is started with %s image' - % (self.instance_name, self.instance_id, self.image_id)) - self.logger.info('VPC VM ID of Instance %s is %s' % - (self.instance_name, self.vm_id)) - # end create_vm - - def verify_on_setup(self): - if not self.verify_instance(): - self.logger.error('Verification of VM %s from euca cmds failed' % ( - self.instance_name)) - return False - if not self.c_vm_fixture.verify_on_setup(): - self.logger.error('Contrail Fixture verification of VM %s(ID: %s)\ - failed' % (self.instance_name, self.vm_id)) - return False - self.logger.info('Euca cmd verification and Contrail fixture \ - verification passed' + ' for VM %s(ID: %s)' % - (self.instance_name, self.vm_id)) - return True - # end verify_on_setup - - def wait_till_vm_is_up(self): - return self.c_vm_fixture.wait_till_vm_is_up() - - @retry(delay=10, tries=30) - def verify_instance(self): - self.logger.debug('Waiting for VM %s to be in running state' % - (self.instance_id)) - time.sleep(5) - instances = self.ec2_base._shell_with_ec2_env( - 'euca-describe-instances | grep %s' % (self.instance_id), True).split('\n') - self.logger.debug(instances) - - foundInstance = False - for instance in instances: - instance = [k 
for k in instance.split('\t')] - if instance[1] == self.instance_id and instance[5] == 'running': - foundInstance = True - self.logger.info('Instance %s verified' % self.instance_id) - break - # end if - return foundInstance - # end verify_instance - - @retry(delay=5, tries=3) - def verify_vm_deleted(self): - instances = self.ec2_base._shell_with_ec2_env( - 'euca-describe-instances | grep %s' % (self.instance_id), True).split('\n') - result = True - for instance in instances: - instance = [k for k in instance.split('\t')] - try: - if instance[1] == self.instance_id: - result = False - self.logger.debug( - 'Instance %s is still found in Euca cmds' % - (self.instance_id)) - break - # end if - except IndexError: - self.logger.debug( - 'No instances in euca-describe-instances output') - self.logger.info('Instance %s is not seen in Euca cmds' % - (self.instance_id)) - return result - # end verify_vm_deleted - - def _get_image_id(self): - images = self.ec2_base._shell_with_ec2_env( - 'euca-describe-images', True).split('\n') - - for image in images: - image = [k for k in image.split('\t')] - if '(%s)' % (self.image_name) in image[2] and image[4] == 'available': - self.image_id = image[1] - self.logger.info('Using image %s(%s) to launch VM' % - (self.image_id, image[2])) - break - return self.image_id - # end _get_image_id - - def stop_instance(self): - out = self.ec2_base._shell_with_ec2_env( - 'euca-stop-instances %s' % (self.instance_id), True) - self.logger.debug(out) - time.sleep(5) - if 'UnknownError' in out: - self.logger.error( - 'Some unknown error has happened..pls check system logs') - return False - return True - # end stop_instance - - def start_instance(self): - out = self.ec2_base._shell_with_ec2_env( - 'euca-start-instances %s' % (self.instance_id), True) - self.logger.debug(out) - time.sleep(2) - if 'UnknownError' in out: - self.logger.error( - 'Some unknown error has happened..pls check system logs') - return False - return True - # end start_instance - 
- def _gather_instance_details(self, instance_output): - my_instance = None - for line in instance_output: - if 'INSTANCE' in line: - my_instance = line - if not my_instance: - self.logger.error('No Instance detail was found!') - return False - # instance = [k for k in my_instance.split('\t')] - # change made for UBUNTU set up as multiple tabs were not getting - # handled. - instance = [k for k in re.split("\s+", my_instance)] - - if instance[1].startswith('i-'): - self.instance_id = instance[1] - self.instance_name = instance[3] - self.vm_id = instance[3].replace('server-', '') - if 'nat' in self.instance_name: -# self.vm_name = instance[3].replace('server-','') - self.vm_name = '%s-nat_1' % (self.vpc_id) - elif 'server-' + self.vm_id != self.instance_name: - self.logger.error('Unexpected instance name : %s' % - (self.instance_name)) - # self.vm_name would have VM name as required by Nova - else: - self.vm_name = 'Server ' + self.vm_id - else: - self.logger.error( - 'Unable to gather Instance details of the launched VM') - return True - # end _gather_instance_details - - @retry(delay=5, tries=3) - def verify_on_cleanup(self): - if not self.verify_vm_deleted(): - self.logger.error('VM %s still present ' % (self.vm_name)) - return False - else: - self.logger.info('VM %s is cleaned up as seen by euca cmds ' % - (self.instance_id)) - return True - # end verify_on_cleanup - - def cleanUp(self): - if self.already_present: - self.logger.debug( - 'VM was not created by this fixture..Skipping deletion') - super(VPCVMFixture, self).cleanUp() - else: - self.terminate_instance() - super(VPCVMFixture, self).cleanUp() - assert self.verify_on_cleanup(), "Euca Verification failed for VM %s cleanup" % ( - self.instance_id) - # end cleanUp - - def terminate_instance(self): - self.logger.debug('Terminating instance %s' % (self.instance_id)) - out = self.ec2_base._shell_with_ec2_env( - 'euca-terminate-instances %s' % (self.instance_id), True).split('\t') - if out[1] == 
self.instance_id: - self.logger.info('Instance %s terminated' % self.instance_id) - return True - return False - # end terminate_instance - - def _create_keypair(self, key_name): - output_lines = self.ec2_base._shell_with_ec2_env( - 'euca-describe-keypairs', True).split('\n') - for line in output_lines: - entries = [k for k in line.split('\t')] - if entries: - if key_name in entries[0]: - return - username = self.inputs.host_data[self.cfgm_ip]['username'] - password = self.inputs.host_data[self.cfgm_ip]['password'] - with hide('everything'): - with settings( - host_string='%s@%s' % (username, self.cfgm_ip), - password=password, warn_only=True, abort_on_prompts=True): - rsa_pub_arg = '.ssh/id_rsa' - self.logger.debug('Creating keypair') - if exists('.ssh/id_rsa.pub'): # If file exists on remote m/c - self.logger.debug('Public key exists. Getting public key') - else: - self.logger.debug('Making .ssh dir') - run('mkdir -p .ssh') - self.logger.debug('Removing id_rsa*') - run('rm -f .ssh/id_rsa*') - self.logger.debug('Creating key using : ssh-keygen -f -t rsa -N') - run('ssh-keygen -f %s -t rsa -N \'\'' % (rsa_pub_arg)) - self.logger.debug('Getting the created keypair') - get('.ssh/id_rsa.pub', '/tmp/') - openstack_host = self.inputs.host_data[self.inputs.openstack_ip] - copy_file_to_server(openstack_host, '/tmp/id_rsa.pub', '/tmp', - 'id_rsa.pub') - self.ec2_base._shell_with_ec2_env( - 'euca-import-keypair -f /tmp/id_rsa.pub %s' % (self.key), True) - -# end VPCVMFixture diff --git a/fixtures/vpc_vn_fixture.py b/fixtures/vpc_vn_fixture.py deleted file mode 100644 index 3aff29d10..000000000 --- a/fixtures/vpc_vn_fixture.py +++ /dev/null @@ -1,265 +0,0 @@ -import time -import re -import fixtures -from fabric.api import local -from fabric.context_managers import shell_env, settings - -from common.connections import ContrailConnections -from vpc_fixture_new import VPCFixture -from ec2_base import EC2Base -from vn_test import VNFixture -from tcutils.util import * - - -class 
VPCVNFixture(fixtures.Fixture): - - '''Fixture to create, verify and delete Subnet - Flow: Euca2ools -> Boto -> Nova - ''' - - def __init__(self, vpc_fixture, subnet_cidr=None, connections=None): - self.connections = vpc_fixture.get_project_connections() - #self.connections = connections - self.inputs = connections.inputs - self.logger = self.inputs.logger - self.vpc_id = vpc_fixture.vpc_id - self.subnet_id = None - self.vpc_fixture = vpc_fixture - - self.subnet_cidr = subnet_cidr - self.ec2_base = vpc_fixture.ec2_base - self.already_present = False - self.def_acl_id = 'acl-default' - - # end __init__ - - def setUp(self): - super(VPCVNFixture, self).setUp() - self.create_subnet() - # Build up data structure for std VN verification to happen - # Note that this VNFixture does not create a VN if it is already - # present - if self.subnet_id: - self.contrail_vn_fixture = self.useFixture( - VNFixture(project_name=self.vpc_id, - connections=self.connections, inputs=self.inputs, - vn_name=self.subnet_id, subnets=[self.subnet_cidr])) - self.vn_id = self.contrail_vn_fixture.vn_id - # end setUp - - def verify_on_setup(self): - if not self.subnet_id: - self.logger.error( - 'Subnet ID not found...verification failed for %s' % - self.subnet_cidr) - return False - if not self.verify_subnet(): - self.logger.error('Verification failed for Subnet id %s ' % - self.subnet_id) - return False - else: - self.logger.info('EC2 Verification for Subnet id %s passed' % - self.subnet_id) - if not self.contrail_vn_fixture.verify_on_setup(): - self.logger.error( - 'Contrail VN verification failed for Subnet %s ' % - self.subnet_id) - return False - return True - # end verify_on_setup - - def verify_on_cleanup(self): - if self.verify_subnet_deleted(): - self.logger.info('Subnet %s is removed as per euca cmds' % - (self.subnet_id)) - return True - self.logger.error('Subnet %s still persists as per euca cmds' % - (self.subnet_id)) - return False - # end verify_on_cleanup - - @retry(delay=5, 
tries=3) - def verify_subnet(self): - verify_subnet_output = self.ec2_base._shell_with_ec2_env( - 'euca-describe-subnets', True).split('\n')[2:] - self.logger.debug(verify_subnet_output) - foundSubnet = False - - for subnet in verify_subnet_output: - if subnet.startswith(self.subnet_id): - subnet_list = subnet.replace('\r', '').split('\t') - if subnet_list[1] == self.vpc_id and subnet_list[2] == self.subnet_cidr: - foundSubnet = True - self.logger.info('Subnet %s verified' % self.subnet_id) - break - - if not foundSubnet: - self.logger.warn('Subnet %s not found in euca-describe-subnets' % - self.subnet_id) - return foundSubnet - return foundSubnet - - # end verify_subnet - - @retry(delay=5, tries=3) - def verify_subnet_deleted(self): - verify_subnet_output = self.ec2_base._shell_with_ec2_env( - 'euca-describe-subnets', True).split('\n')[2:] - foundSubnet = False - - if not self.subnet_id: - self.logger.warn( - 'Subnet does not seem to be present, nothing to verify in cleanup') - return True - for subnet in verify_subnet_output: - if subnet.startswith(self.subnet_id): - foundSubnet = True - break - - if foundSubnet: - self.logger.warn('Subnet %s still found in euca-describe-subnets' % - self.subnet_id) - return False - else: - self.logger.debug( - 'Verified that subnet %s is deleted in euca-describe-subnets' % self.subnet_id) - return True - # end verify_subnet_deleted - - def cleanUp(self): - if self.already_present: - self.logger.debug( - 'Subnet was not created by this fixture..Skipping deletion') - else: - self.delete_subnet() - self.verify_on_cleanup() - super(VPCVNFixture, self).cleanUp() - # end cleanUp - - def delete_vpc(self): - out = self.ec2_base._shell_with_ec2_env( - 'euca-delete-vpc %s' % (self.vpc_id), True) - if len(out) > 0 and out.split(' ')[1] == self.vpc_id: - self.logger.info('VPC %s deleted' % self.vpc_id) - return True - else: - return False - # end delete_vpc - - def create_subnet(self): - create_subnet_output = 
self.ec2_base._shell_with_ec2_env( - 'euca-create-subnet -c %s %s' % - (self.subnet_cidr, self.vpc_id), True) - if create_subnet_output: - self.subnet_id = create_subnet_output.split(' ')[0].split(':')[1] - self.logger.info('Create subnet with CIDR %s' % self.subnet_cidr) - return True - else: - return False - # end create_subnet - - def delete_subnet(self): - out = self.ec2_base._shell_with_ec2_env( - 'euca-delete-subnet %s' % (self.subnet_id), True) - if len(out) > 0 and out.split(' ')[1] == self.subnet_id: - self.logger.info('Subnet %s deleted' % self.subnet_id) - return True - else: - return False - # end delete_subnet - - def _get_acl_association_id(self): - subnet_id = self.subnet_id - out = self.ec2_base._shell_with_ec2_env( - 'euca-describe-network-acls', True).split('\n') - assoc_id = None - - for entry in out: - idx = out.index(entry) - entry.replace(' ', '') - if not entry.startswith('acl-'): - continue - - vpc_id = out[idx + 1].replace(' ', '') - - for entry in out: - assoc_str = re.sub(' +', ' ', entry).replace(' ', '', 1) - assoc = assoc_str.split(' ') - if not assoc[0].startswith('aclassoc-'): - continue - if assoc[1] == subnet_id: - assoc_id = assoc[0] - - return assoc_id - # end _get_acl_association_id - - def associate_acl(self, acl_id=None): - subnet_id = self.subnet_id - acl_assoc_id = self._get_acl_association_id() - # if acl = default then associate subnet to default ACL for VPC - # else associate subnet with ACL created using euca2ools - if not acl_assoc_id: - self.logger.error('Cannot get ACL association id') - if not acl_id: - aclId = self.def_acl_id - - out = self.ec2_base._shell_with_ec2_env( - 'euca-replace-network-acl-association %s -a %s' % (acl_id, acl_assoc_id), True) - self.logger.debug(out) - self.contrail_vn_fixture.update_vn_object() - if out: - self.logger.info('Associate ACL %s to subnet %s' % - (acl_id, subnet_id)) - if acl_id == 'default': - self.acl_association = False - else: - self.acl_association = True - return True - - 
return False - # end associate_acl - - def verify_acl_binding(self, acl_id): - subnet_id = self.subnet_id - acl_assoc_id = self._get_acl_association_id() - out = self.ec2_base._shell_with_ec2_env( - 'euca-describe-network-acls %s' % acl_id, True).split('\n') - self.logger.debug(out) - foundAcl = False - - if len(out) <= 0: - return foundAcl - - got_acl_id = out[2].replace(' ', '') - vpc_id = out[3].replace(' ', '').replace('\r', '') - if acl_id in got_acl_id and vpc_id == self.vpc_id: - self.logger.info('ACL %s verified' % acl_id) - - # check if acl associated or not - if not self.acl_association: - self.logger.info('ACL %s not associated with any subnet' % - acl_id) - foundAcl = True - - # check if acl associated with subnet or not - else: - for entry in out: - assoc_str = re.sub(' +', ' ', entry).replace(' ', '', 1) - assoc = assoc_str.split(' ') - if not assoc[0].startswith('aclassoc-'): - continue - if assoc[0] == acl_assoc_id and assoc[1] == self.subnet_id: - self.logger.info( - 'ACL %s associated with subnet %s verified' % - (acl_id, self.subnet_id)) - if self.acl_association: - foundAcl = True - break - return foundAcl - - else: - return False - # end verify_acl_binding - - -# end VPCVNFixture diff --git a/fixtures/webui_test.py b/fixtures/webui_test.py deleted file mode 100644 index 29b225023..000000000 --- a/fixtures/webui_test.py +++ /dev/null @@ -1,5462 +0,0 @@ -from selenium import webdriver -from selenium.webdriver.common.keys import Keys -from selenium.webdriver.support.ui import WebDriverWait -from selenium.common.exceptions import WebDriverException -import time -import random -import fixtures -from ipam_test import * -from project_test import * -from tcutils.util import * -from vnc_api.vnc_api import * -from contrail_fixtures import * -from webui.webui_common import WebuiCommon -import re - - -class WebuiTest: - - os_release = None - - def __init__(self, connections, inputs): - self.inputs = inputs - self.connections = connections - self.logger = 
self.inputs.logger - self.browser = self.connections.browser - self.browser_openstack = self.connections.browser_openstack - self.delay = 20 - self.frequency = 3 - self.logger = inputs.logger - self.ui = WebuiCommon(self) - self.dash = "-" * 60 - self.vnc_lib = connections.vnc_lib_fixture - self.log_path = None - if not WebuiTest.os_release: - WebuiTest.os_release = self.inputs.get_openstack_release() - # end __init__ - - def _click_if_element_found(self, element_name, elements_list): - for element in elements_list: - if element.text == element_name: - element.click() - break - # end _click_if_element_found - - def create_vn(self, fixture): - result = True - try: - fixture.obj = fixture.quantum_h.get_vn_obj_if_present( - fixture.vn_name, fixture.project_id) - if not fixture.obj: - if not self.ui.click_on_create( - 'VN', - 'networks', - fixture.vn_name, - prj_name=fixture.project_name): - result = result and False - txtVNName = self.ui.find_element('txtDisName') - txtVNName.send_keys(fixture.vn_name) - if isinstance(fixture.vn_subnets, list): - for subnet in fixture.vn_subnets: - self.ui.click_element('btnCommonAddIpam') - self.ui.wait_till_ajax_done(self.browser) - self.ui.click_element( - ['ipamTuples', 'select2-choice'], ['id', 'class'], jquery=False, wait=3) - ipam_list = self.ui.find_element( - ['select2-drop', 'li'], ['id', 'tag'], if_elements=[1]) - for ipam in ipam_list: - ipam_text = self.ui.find_element( - 'div', - 'tag', - ipam).text - if ipam_text.find(fixture.ipam_fq_name[2]) != -1: - ipam.click() - break - self.ui.send_keys( - subnet['cidr'], - "//input[@placeholder = 'CIDR']", - 'xpath') - else: - self.ui.click_element('btnCommonAddIpam') - self.ui.click_element('select2-drop-mask') - ipam_list = self.ui.find_element( - ['select2-drop', 'ul', 'li'], ['id', 'tag', 'tag'], if_elements=[2]) - for ipam in ipam_list: - ipam_text = ipam.get_attribute("innerHTML") - if ipam_text == self.ipam_fq_name: - ipam.click() - break - self.ui.send_keys( - 
fixture.vn_subnets, - "//input[@placeholder = 'IP Block']", - 'xpath') - if not self.ui.click_on_create('VN', save=True): - result = result and False - else: - fixture.already_present = True - self.logger.info('VN %s already exists, skipping creation ' % - (fixture.vn_name)) - self.logger.debug('VN %s exists, already there' % - (fixture.vn_name)) - fixture.obj = fixture.quantum_h.get_vn_obj_if_present( - fixture.vn_name, fixture.project_id) - fixture.vn_id = fixture.obj['network']['id'] - fixture.vn_fq_name = ':'.join(self.vnc_lib.id_to_fq_name( - fixture.obj['network']['id'])) - except WebDriverException: - self.logger.error("Error while creating %s" % (fixture.vn_name)) - self.ui.screenshot("vn_error") - result = result and False - raise - self.ui.click_on_cancel_if_failure('btnCreateVNCancel') - return result - # end create_vn - - def create_port( - self, - net, - subnet, - mac=None, - state='Up', - port_name=None, - fixed_ip=None, - fip=None, - sg=None, - device_owner=None): - result = True - try: - if not self.ui.click_on_create('Ports', 'ports', port_name): - result = result and False - txt_port = self.ui.find_element('txtPortName') - self.ui.click_on_select2_arrow('s2id_ddVN') - self.ui.select_from_dropdown(net) - self.ui.click_element(['smaller', 'i'], ['class', 'tag']) - if mac: - self.ui.send_keys(mac, 'txtMacAddress') - if port_name: - self.ui.send_keys(port_name, 'txtPortName') - if subnet: - self.ui.click_on_select2_arrow('FixedIPTuples') - self.ui.select_from_dropdown(subnet) - if fixed_ip: - self.ui.send_keys( - fixed_ip, - "//input[@placeholder='Fixed IP']", - 'xpath') - if device_owner: - self.ui.click_on_select2_arrow('s2id_ddDeviceOwnerName') - self.ui.select_from_dropdown(device_owner) - if not self.ui.click_on_create('Ports', save=True): - result = result and False - except WebDriverException: - self.logger.error("Error while creating %s" % (port_name)) - self.ui.screenshot("port_error") - result = result and False - raise - 
self.ui.click_on_cancel_if_failure('btnCreatePortCancel') - return result - # end create_port - - def update_port(self, net_name, subnet, new_ip): - result = True - rows = self.ui.get_rows() - self.logger.info("Updating port...") - self.logger.info("Adding one more ip address...") - for port in rows: - port_net = self.ui.get_slick_cell_text(port, 3) - if (port_net == net_name): - port.find_element_by_class_name('icon-cog').click() - self.ui.wait_till_ajax_done(self.browser) - self.browser.find_element_by_class_name( - 'tooltip-success').find_element_by_tag_name('i').click() - self.ui.wait_till_ajax_done(self.browser) - self.ui.click_element('icon-plus', 'class') - if subnet: - self.ui.click_on_select2_arrow('FixedIPTuples') - self.ui.select_from_dropdown(subnet) - self.ui.send_keys( - new_ip, - "//input[@placeholder='Fixed IP']", - 'xpath') - self.ui.wait_till_ajax_done(self.browser) - if not self.ui.click_on_create('Ports', save=True): - result = result and False - break - self.ui.click_on_cancel_if_failure('btnCreatePortCancel') - return result - # end update_port - - def create_router( - self, - router_name, - networks, - state='Up', - gateway=None, - snat=True): - result = True - try: - project_name = 'admin' - if not self.ui.click_on_create( - 'LogicalRouter', - 'routers', - router_name, - prj_name=project_name): - result = result and False - self.ui.send_keys(router_name, 'txtRouterName') - self.ui.click_on_select2_arrow('s2id_ddRouterStatus') - self.ui.select_from_dropdown(state) - if gateway: - self.ui.click_on_select2_arrow('s2id_ddExtGateway') - self.ui.select_from_dropdown(gateway) - if not snat: - self.ui.click_element('chkSNAT') - for vn in networks: - self.ui.click_element( - ['s2id_msConnectedNetworks', 'input'], ['id', 'tag']) - xpath4_vn_select2match_objs = "//*[@class = 'select2-match']/.." 
- vn_elements = self.ui.find_xpath_elements( - xpath4_vn_select2match_objs) - self._click_if_element_found(vn, vn_elements) - if not self.ui.click_on_create('LR', save=True): - result = result and False - except WebDriverException: - self.logger.error("Error while creating %s" % (router_name)) - self.ui.screenshot("router_error") - result = result and False - raise - return result - # end create_router - - def create_physical_router( - self, - router_name, - model, - mgmt_ip, - tunnel_ip, - vendor='Juniper'): - result = True - try: - project_name = 'admin' - if not self.ui.click_on_create( - 'PhysicalRouter', - 'physical_routers', - router_name, - prj_name=project_name): - result = result and False - self.ui.send_keys(router_name, 'txtPhysicalRouterName') - self.ui.send_keys(vendor, 'txtVendor') - self.ui.send_keys(model, 'txtModel') - self.ui.send_keys(mgmt_ip, 'txtMgmtIPAddress') - self.ui.send_keys(tunnel_ip, 'txtDataIPAddress') - icon_carets = self.ui.find_element( - 'grey icon-caret-right', - 'class', - elements=True) - icon_caret[1].click() - # To be implemented - if not self.ui.click_on_create('PhysicalRouter', save=True): - result = result and False - except WebDriverException: - self.logger.error( - "Error while creating %s physical router" % - (router_name)) - self.ui.screenshot("physical_router_error") - result = result and False - raise - return result - # end create_physical_router - - def create_dns_server( - self, - server_name, - domain_name, - rr_order='Random', - fip_record='Dashed IP Tenant', - ipam_list=None, - ttl=None, - dns_forwarder=None): - project_name = 'admin' - if ipam_list: - ipam_list = [project_name + ':' + ipam for ipam in ipam_list] - result = True - try: - if not self.ui.click_on_create( - 'DNSServer', - 'dns_servers', - server_name, - prj_name=project_name): - result = result and False - self.ui.send_keys(server_name, 'txtDNSServerName') - self.ui.send_keys(domain_name, 'txtDomainName') - if ttl: - self.ui.send_keys(ttl, 
'txtTimeLive') - if dns_forwarder: - self.ui.send_keys( - dns_forwarder, - 'custom-combobox-input', - 'class') - if rr_order: - self.ui.dropdown('s2id_ddLoadBal', rr_order) - if fip_record: - self.ui.dropdown('s2id_ddType', fip_record) - if ipam_list: - self.ui.click_select_multiple('s2id_msIPams', ipam_list) - if not self.ui.click_on_create('DNSServer', save=True): - result = result and False - except WebDriverException: - self.logger.error( - "Error while creating DNS server %s" % - (server_name)) - self.ui.screenshot("DNS_server_error") - result = result and False - raise - return result - # end create_dns_server - - def create_dns_record( - self, - server_name, - host_name, - ip_address, - type=None, - dns_class=None, - ttl=None): - project_name = 'admin' - result = True - try: - if not self.ui.click_on_create( - 'DNSRecord', - 'dns_records', - server_name, - prj_name=server_name): - result = result and False - self.ui.send_keys(host_name, 'txtRecordName') - self.ui.send_keys(ip_address, 'txtRecordData') - if ttl: - self.ui.send_keys(ttl, 'txtRecordTTL') - if type: - self.ui.dropdown('s2id_cmbRecordType', type) - if dns_class: - self.ui.dropdown('s2id_cmbRecordClass', dns_class) - self.ui.click_element('btnAddDNSRecordOk') - if not self.ui.check_error_msg("create dns record"): - result = result and False - raise Exception("DNS Record creation failed") - except WebDriverException: - self.logger.error( - "Error while creating dns record in dns server %s" % - (server_name)) - self.ui.screenshot("dns_record_error") - result = result and False - raise - return result - # end create_dns_records - - def create_svc_template(self, fixture): - result = True - try: - if not self.ui.click_on_create( - 'Service Template', - 'service_template', - fixture.st_name, - select_project=False): - result = result and False - self.ui.send_keys(fixture.st_name, 'name', 'name') - self.browser.find_element_by_id( - 's2id_service_mode_dropdown').find_element_by_class_name( - 
'select2-choice').click() - service_mode_list = self.browser.find_element_by_id( - "select2-drop").find_elements_by_tag_name('li') - for service_mode in service_mode_list: - service_mode_text = service_mode.text - if service_mode_text.lower() == fixture.svc_mode: - service_mode.click() - break - self.browser.find_element_by_id( - 's2id_service_type_dropdown').find_element_by_class_name( - 'select2-choice').click() - service_type_list = self.browser.find_element_by_id( - "select2-drop").find_elements_by_tag_name('li') - for service_type in service_type_list: - service_type_text = service_type.text - if service_type_text.lower() == fixture.svc_type: - service_type.click() - break - self.browser.find_element_by_id( - 's2id_image_name_dropdown').find_element_by_class_name( - 'select2-choice').click() - image_name_list = self.browser.find_element_by_id( - "select2-drop").find_elements_by_tag_name('li') - for image_name in image_name_list: - image_name_text = image_name.text - if image_name_text.lower() == fixture.image_name: - image_name.click() - break - static_route = self.browser.find_element_by_id( - 'advanced_options').find_element_by_tag_name('div').click() - for index, intf_element in enumerate(fixture.if_list): - intf_text = intf_element[0] - shared_ip = intf_element[1] - static_routes = intf_element[2] - self.ui.click_element('editable-grid-add-link', 'class') - self.browser.find_element_by_id( - 'interfaces').find_elements_by_class_name( - 'data-row')[index].click() - if shared_ip: - self.browser.find_elements_by_id( - 'shared_ip')[index].click() - if static_routes: - self.browser.find_elements_by_id( - 'static_route_enable')[index].click() - intf_types = self.browser.find_elements_by_class_name( - 'select2-results-dept-0') - intf_dropdown = [element.find_element_by_tag_name('div') - for element in intf_types] - for intf in intf_dropdown: - if intf.text.lower() == intf_text: - intf.click() - break - self.browser.find_element_by_id( - 
's2id_flavor_dropdown').find_element_by_class_name( - 'select2-choice').click() - flavors_list = self.browser.find_elements_by_xpath( - "//span[@class = 'select2-match']/..") - for flavor in flavors_list: - flavor_text = flavor.text - if flavor_text.find(fixture.flavor) != -1: - flavor.click() - break - if fixture.svc_scaling: - self.browser.find_element_by_xpath( - "//input[@type = 'checkbox' and \ - @name = 'service_scaling']").click() - if not self.ui.click_on_create('Service Template', - 'service_template', save=True): - result = result and False - self.logger.info("Running verify_on_setup..") - fixture.verify_on_setup() - except WebDriverException: - self.logger.error( - "Error while creating svc template %s" % - (fixture.st_name)) - self.ui.screenshot("svc template creation failed") - result = result and False - self.ui.click_on_cancel_if_failure('cancelBtn') - return result - # end create_svc_template - - def create_svc_instance(self, fixture): - try: - result = True - if not self.ui.click_on_create( - 'svcInstances', - 'service_instance', - fixture.si_name, prj_name=fixture.project_name): - result = result and False - txt_instance_name = self.ui.find_element('txtsvcInstanceName') - txt_instance_name.send_keys(fixture.si_name) - self.browser.find_element_by_id( - 's2id_ddsvcTemplate').find_element_by_class_name('select2-choice').click() - service_template_list = self.browser.find_element_by_id( - 'select2-drop').find_elements_by_tag_name('li') - service_temp_list = [ - element.find_element_by_tag_name('div') for element in service_template_list] - for service_temp in service_temp_list: - service_temp_text = service_temp.text - if service_temp_text.find(fixture.st_name) != -1: - service_temp.click() - break - intfs = self.browser.find_element_by_id( - 'instanceDiv').find_elements_by_tag_name('a') - if not self.ui.click_on_create('svcInstences', save=True): - result = result and False - time.sleep(40) - self.logger.info("Running verify_on_setup..") - 
fixture.verify_on_setup() - self.logger.info("Svc instance %s creation successful" % - (fixture.si_name)) - except WebDriverException: - self.logger.error( - "Error while creating svc instance %s" % - (fixture.si_name)) - self.ui.screenshot("svc instance creation failed") - result = result and False - self.ui.click_on_cancel_if_failure('btnCreatesvcInstencesCancel') - return result - # end create_svc_instance - - def create_ipam(self, fixture): - result = True - ip_blocks = False - if not self.ui.click_on_create( - 'IPAM', - 'ipam', - fixture.name, - prj_name=fixture.project_name): - result = result and False - self.ui.send_keys(fixture.name, 'name', 'name') - ''' - self.ui.click_element(['s2id_dns_method_dropdown', \ - 'select2-choice'], ['id', 'class']) - dns_method_list = self.ui.find_element([ - 'select2-drop', 'li'], ['id', 'tag']) - dns_list = [ element.find_element_by_tag_name( - 'div') for element in dns_method_list] - - for dns in dns_list : - dns_text = dns.text - if dns_text.find('Tenant') != -1 : - dns.click() - if dns_text == 'Tenant': - self.ui.click_element('editable-grid-add-link', 'class') - self.ui.find_element( - "input[name$='ip_addr']", - 'css').send_keys('189.23.2.3/21') - self.ui.find_element( - "input[name$='ntpServer']", - 'css').send_keys('32.24.53.45/28') - self.ui.find_element( - "input[name$='domainName']", - 'css').send_keys('domain_1') - elif dns_text == 'Default' or dns.text == 'None': - self.ui.find_element( - "input[name$='ntpServer']", - 'css').send_keys('32.24.53.45/28') - self.ui.find_element( - "input[name$='domainName']", - 'css').send_keys('domain_1') - elif dns_text == 'Virtual DNS': - self.ui.click_element([ - 'virtual_dns_server_name', 'a'], ['id', 'tag']) - self.ui.wait_till_ajax_done(self.browser) - virtual_dns_list = self.ui.find_element([ - 'select2-drop', 'li'], ['id', 'tag']) - vdns_list = [ element.find_element_by_tag_name( - 'div') for element in virtual_dns_list] - for vdns in vdns_list : - vdns_text = vdns.text - 
if vdns_text == 'default-domain:'+'dns': - vdns.click() - break - break - - for net in range(len(net_list)): - self.browser.find_element_by_id("btnCommonAddVN").click() - self.browser.find_element_by_id('vnTuples').find_element_by_tag_name('a').click() - self.ui.wait_till_ajax_done(self.browser) - vn_list = self.browser.find_element_by_id('select2-drop').find_elements_by_tag_name('li') - virtual_net_list = [ element.find_element_by_tag_name('div') for element in vn_list] - for vns in virtual_net_list : - vn_text = vns.text - if vn_text == net_list[net] : - vns.click() - break - - self.browser.find_element_by_xpath("//*[contains(@placeholder, 'IP Block')]").send_keys('187.23.2.'+str(net+1)+'/21') - ''' - if not self.ui.click_on_create('IPAM','ipam', save=True): - result = result and False - return result - # end create_ipam - - def create_policy(self, fixture): - result = True - line = 0 - try: - fixture.policy_obj = fixture.quantum_h.get_policy_if_present( - fixture.project_name, fixture.policy_name) - if not fixture.policy_obj: - if not self.ui.click_on_create( - 'Policy', - 'policies', - fixture.policy_name, - prj_name=fixture.project_name): - result = result and False - self.ui.send_keys(fixture.policy_name, 'txtPolicyName') - for index, rule in enumerate(fixture.rules_list): - action = rule['simple_action'] - protocol = rule['protocol'] - source_net = rule['source_network'] - direction = rule['direction'] - dest_net = rule['dest_network'] - if rule['src_ports']: - if isinstance(rule['src_ports'], list): - src_port = ','.join(str(num) - for num in rule['src_ports']) - else: - src_port = str(rule['src_ports']) - if rule['dst_ports']: - if isinstance(rule['dst_ports'], list): - dst_port = ','.join(str(num) - for num in rule['dst_ports']) - else: - dst_port = str(rule['dst_ports']) - self.ui.click_element( - 'btnCommonAddRule', - jquery=False, - wait=2) - rules = self.ui.find_element( - ['ruleTuples', 'rule-item'], ['id', 'class'], if_elements=[1])[line] - 
textbox_rule_items = self.ui.find_element( - 'span1', - 'class', - rules, - elements=True) - src_textbox_element = textbox_rule_items[ - 0].find_element_by_tag_name('input') - dst_textbox_element = textbox_rule_items[ - 2].find_element_by_tag_name('input') - src_textbox_element.clear() - src_textbox_element.send_keys(src_port) - dst_textbox_element.clear() - dst_textbox_element.send_keys(dst_port) - dropdown_rule_items = self.ui.find_element( - "div[class$='pull-left']", - 'css', - rules, - elements=True) - self.ui.click_on_dropdown(dropdown_rule_items[3]) - self.ui.select_from_dropdown(direction) - li = self.browser.find_elements_by_css_selector( - "ul[class^='ui-autocomplete']") - if len(li) == 4 and index == 0: - lists = 0 - elif index == 0: - lists = 1 - for item in range(len(dropdown_rule_items)): - if item == 3: - continue - self.ui.click_on_dropdown(dropdown_rule_items[item]) - if item == 0: - self.ui.select_from_dropdown(action.upper()) - elif item == 1: - self.ui.select_from_dropdown(protocol.upper()) - elif item == 2: - self.ui.select_from_dropdown(source_net) - elif item == 4: - self.ui.select_from_dropdown(dest_net) - lists = lists + 1 - if not self.ui.click_on_create('Policy', save=True): - result = result and False - fixture.policy_obj = fixture.quantum_h.get_policy_if_present( - fixture.project_name, - fixture.policy_name) - else: - fixture.already_present = True - self.logger.info( - 'Policy %s already exists, skipping creation ' % - (fixture.policy_name)) - self.logger.debug('Policy %s exists, already there' % - (fixture.policy_name)) - except WebDriverException: - self.logger.error( - "Error while creating %s" % - (fixture.policy_name)) - self.ui.screenshot("policy_create_error") - result = result and False - raise - return result - # end create_policy - - def create_security_group(self, fixture): - result = True - try: - if not self.ui.click_on_create( - 'Security Group', - 'security_groups', - fixture.secgrp_name, - 
prj_name=fixture.project_name): - result = result and False - self.ui.send_keys(fixture.secgrp_name, 'display_name', 'name') - for index, rule in enumerate(fixture.secgrp_entries): - direction = rule['direction'] - ether_type = rule['eth_type'] - src_addresses = rule['src_addresses'][0] - dst_addresses = rule['dst_addresses'][0] - src_start_port = str(rule['src_ports'][0]['start_port']) - src_end_port = str(rule['src_ports'][0]['end_port']) - dst_start_port = str(rule['dst_ports'][0]['start_port']) - dst_end_port = str(rule['dst_ports'][0]['end_port']) - protocol = rule['protocol'].upper() - if 'security_group' in dst_addresses and dst_addresses[ - 'security_group'] == 'local': - direction = 'Ingress' - port_range = dst_start_port + '-' + dst_end_port - addresses = src_addresses['subnet'] - else: - direction = 'Egress' - port_range = src_start_port + '-' + src_end_port - addresses = dst_addresses['subnet'] - addresses = addresses['ip_prefix'] + \ - '/' + str(addresses['ip_prefix_len']) - self.ui.click_element('editable-grid-add-link', 'class') - sg_grp_tuple = self.browser.find_elements_by_class_name( - 'data-row')[index] - self.ui.dropdown( - "td[id$='direction']", - direction, - element_type='css', - browser_obj=sg_grp_tuple) - self.ui.dropdown( - "td[id$='protocol']", - protocol, - element_type='css', - browser_obj=sg_grp_tuple) - self.ui.dropdown( - "td[id$='ethertype']", - ether_type, - element_type='css', - browser_obj=sg_grp_tuple) - text_box = self.ui.find_element( - "input[name$='remotePorts']", - 'css', - browser=sg_grp_tuple) - text_box.clear() - text_box.send_keys(port_range) - self.ui.click_element("td[id$='remoteAddr']", - 'css', browser=sg_grp_tuple) - self.ui.send_keys( - addresses, - "input[id^='s2id_autogen']", - 'css') - elements = self.ui.find_element( - 'select2-result-label', - 'class', - elements=True) - for element in elements: - if element.text == addresses: - element.click() - break - if not self.ui.click_on_create('Security Group', - 
'security_groups', save=True): - result = result and False - self.logger.info( - "Security group %s creation successful" % - (fixture.secgrp_name)) - except WebDriverException: - self.logger.error( - "Error while creating %s" % - (fixture.secgrp_name)) - self.ui.screenshot("security_group_create_error") - result = result and False - raise - return result - # end create_security_group - - def delete_security_group(self, fixture): - if not self.ui.delete_element(fixture, 'security_group_delete'): - self.logger.info("Security group deletion failed") - return False - return True - # delete_security_group - - def verify_analytics_nodes_ops_basic_data(self): - self.logger.info( - "Verifying analytics node opserver basic data on Monitor->Infra->Analytics Nodes(Basic view) page.") - self.logger.debug(self.dash) - if not self.ui.click_monitor_analytics_nodes(): - result = result and False - rows = self.ui.get_rows() - analytics_nodes_list_ops = self.ui.get_collectors_list_ops() - result = True - for n in range(len(analytics_nodes_list_ops)): - ops_analytics_node_name = analytics_nodes_list_ops[n]['name'] - self.logger.info( - "Vn host name %s exists in opserver..checking if exists in webui as well" % - (ops_analytics_node_name)) - if not self.ui.click_monitor_analytics_nodes(): - result = result and False - rows = self.ui.get_rows() - for i in range(len(rows)): - match_flag = 0 - obj_text = self.ui.get_slick_cell_text(rows[i], index=0) - if obj_text == ops_analytics_node_name: - self.logger.info( - "Analytics_node name %s found in webui...Verifying basic details" % - (ops_analytics_node_name)) - self.logger.debug(self.dash) - match_index = i - match_flag = 1 - break - if not match_flag: - self.logger.error( - "Analytics node name %s not found in webui" % - (ops_analytics_node_name)) - self.logger.debug(self.dash) - else: - self.logger.info("Verify analytics node basic view details for \ - analytics_node-name %s " % (ops_analytics_node_name)) - 
self.ui.click_monitor_analytics_nodes_basic( - match_index) - dom_basic_view = self.ui.get_basic_view_infra() - # special handling for overall node status value - node_status = self.browser.find_element_by_id('allItems').find_element_by_tag_name( - 'p').get_attribute('innerHTML').replace('\n', '').strip() - for i, item in enumerate(dom_basic_view): - if item.get('key') == 'Overall Node Status': - dom_basic_view[i]['value'] = node_status - # filter analytics_node basic view details from opserver data - analytics_nodes_ops_data = self.ui.get_details( - analytics_nodes_list_ops[n]['href']) - ops_basic_data = [] - host_name = analytics_nodes_list_ops[n]['name'] - ip_address = analytics_nodes_ops_data.get( - 'CollectorState').get('self_ip_list') - ip_address = ', '.join(ip_address) - generators_count = str( - len(analytics_nodes_ops_data.get('CollectorState').get('generator_infos'))) - version = json.loads(analytics_nodes_ops_data.get('CollectorState').get( - 'build_info')).get('build-info')[0].get('build-id') - version = self.ui.get_version_string(version) - module_cpu_info_len = len( - analytics_nodes_ops_data.get('ModuleCpuState').get('module_cpu_info')) - for i in range(module_cpu_info_len): - if analytics_nodes_ops_data.get('ModuleCpuState').get( - 'module_cpu_info')[i]['module_id'] == 'contrail-collector': - cpu_mem_info_dict = analytics_nodes_ops_data.get( - 'ModuleCpuState').get('module_cpu_info')[i] - break - cpu = self.ui.get_cpu_string(cpu_mem_info_dict) - memory = self.ui.get_memory_string(cpu_mem_info_dict) - modified_ops_data = [] - - process_state_list = analytics_nodes_ops_data.get( - 'NodeStatus').get('process_info') - process_down_stop_time_dict = {} - process_up_start_time_dict = {} - redis_uve_string = None - redis_query_string = None - exclude_process_list = [ - 'contrail-config-nodemgr', - 'contrail-analytics-nodemgr', - 'contrail-control-nodemgr', - 'contrail-vrouter-nodemgr', - 'openstack-nova-compute', - 'contrail-svc-monitor', - 
'contrail-discovery:0', - 'contrail-zookeeper', - 'contrail-schema'] - for i, item in enumerate(process_state_list): - if item['process_name'] == 'redis-query': - redis_query_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'contrail-query-engine': - contrail_qe_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'contrail-analytics-nodemgr': - contrail_analytics_nodemgr_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'redis-uve': - redis_uve_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'contrail-analytics-api': - contrail_opserver_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'contrail-collector': - contrail_collector_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - reduced_process_keys_dict = {} - for k, v in process_down_stop_time_dict.items(): - if k not in exclude_process_list: - reduced_process_keys_dict[k] = v - if not reduced_process_keys_dict: - for process in exclude_process_list: - process_up_start_time_dict.pop(process, None) - recent_time = min(process_up_start_time_dict.values()) - overall_node_status_time = self.ui.get_node_status_string( - str(recent_time)) - overall_node_status_string = [ - 'Up since ' + - status for status in overall_node_status_time] - else: - overall_node_status_down_time = self.ui.get_node_status_string( - str(max(reduced_process_keys_dict.values()))) - process_down_count = len(reduced_process_keys_dict) - overall_node_status_string = str( - process_down_count) + ' Process down' - - 
modified_ops_data.extend( - [ - { - 'key': 'Hostname', 'value': host_name}, { - 'key': 'Generators', 'value': generators_count}, { - 'key': 'IP Address', 'value': ip_address}, { - 'key': 'CPU', 'value': cpu}, { - 'key': 'Memory', 'value': memory}, { - 'key': 'Version', 'value': version}, { - 'key': 'Collector', 'value': contrail_collector_string}, { - 'key': 'Query Engine', 'value': contrail_qe_string}, { - 'key': 'OpServer', 'value': contrail_opserver_string}, { - 'key': 'Overall Node Status', 'value': overall_node_status_string}]) - if redis_uve_string: - modified_ops_data.append( - {'key': 'Redis UVE', 'value': redis_uve_string}) - if redis_query_string: - modified_ops_data.append( - {'key': 'Redis Query', 'value': redis_query_string}) - if self.ui.match_ui_kv( - modified_ops_data, - dom_basic_view): - self.logger.info( - "Analytics node %s basic view details data matched" % - (ops_analytics_node_name)) - else: - self.logger.error( - "Analytics node %s basic view details data match failed" % - (ops_analytics_node_name)) - result = result and False - ops_data = [] - ops_data.extend( - [ - { - 'key': 'Hostname', 'value': host_name}, { - 'key': 'IP Address', 'value': ip_address}, { - 'key': 'CPU', 'value': cpu}, { - 'key': 'Memory', 'value': memory}, { - 'key': 'Version', 'value': version}, { - 'key': 'Status', 'value': overall_node_status_string}, { - 'key': 'Generators', 'value': generators_count}]) - - if self.verify_analytics_nodes_ops_grid_page_data( - host_name, - ops_data): - self.logger.info( - "Analytics node %s main page data matched" % - (ops_analytics_node_name)) - else: - self.logger.error( - "Analytics nodes %s main page data match failed" % - (ops_analytics_node_name)) - result = result and False - return result - # end verify_analytics_nodes_ops_basic_data_in_webui - - def verify_config_nodes_ops_basic_data(self): - self.logger.info( - "Verifying config node api server basic data on Monitor->Infra->Config Nodes->Details(basic view) page ...") - 
self.logger.debug(self.dash) - if not self.ui.click_monitor_config_nodes(): - result = result and False - rows = self.ui.get_rows() - config_nodes_list_ops = self.ui.get_config_nodes_list_ops() - result = True - for n in range(len(config_nodes_list_ops)): - ops_config_node_name = config_nodes_list_ops[n]['name'] - self.logger.info( - "Vn host name %s exists in opserver..checking if exists in webui as well" % - (ops_config_node_name)) - if not self.ui.click_monitor_config_nodes(): - result = result and False - rows = self.ui.get_rows() - for i in range(len(rows)): - match_flag = 0 - obj_text = self.ui.get_slick_cell_text(rows[i], index=0) - if obj_text == ops_config_node_name: - self.logger.info( - "Config node name %s found in webui..Verifying basic details..." % - (ops_config_node_name)) - self.logger.debug(self.dash) - match_index = i - match_flag = 1 - break - if not match_flag: - self.logger.error("Config node name %s not found in webui" % ( - ops_config_node_name)) - self.logger.debug(self.dash) - else: - self.logger.info("Verify config node basic view details for \ - config_node-name %s " % (ops_config_node_name)) - # filter config_node basic view details from opserver data - config_nodes_ops_data = self.ui.get_details( - config_nodes_list_ops[n]['href']) - self.ui.click_monitor_config_nodes_basic(match_index) - ops_basic_data = [] - host_name = config_nodes_list_ops[n]['name'] - ip_address = config_nodes_ops_data.get( - 'ModuleCpuState').get('config_node_ip') - if not ip_address: - ip_address = '--' - else: - ip_address = ', '.join(ip_address) - process_state_list = config_nodes_ops_data.get( - 'NodeStatus').get('process_info') - process_down_stop_time_dict = {} - process_up_start_time_dict = {} - exclude_process_list = [ - 'contrail-config-nodemgr', - 'contrail-analytics-nodemgr', - 'contrail-control-nodemgr', - 'contrail-vrouter-nodemgr', - 'openstack-nova-compute', - 'contrail-svc-monitor', - 'contrail-discovery:0', - 'contrail-zookeeper', - 
'contrail-schema'] - for i, item in enumerate(process_state_list): - if item['process_name'] == 'contrail-api:0': - api_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'ifmap': - ifmap_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'contrail-discovery:0': - discovery_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'contrail-schema': - schema_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'contrail-svc-monitor': - monitor_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - reduced_process_keys_dict = {} - for k, v in process_down_stop_time_dict.items(): - if k not in exclude_process_list: - reduced_process_keys_dict[k] = v - if not reduced_process_keys_dict: - for process in exclude_process_list: - process_up_start_time_dict.pop(process, None) - recent_time = max(process_up_start_time_dict.values()) - overall_node_status_time = self.ui.get_node_status_string( - str(recent_time)) - overall_node_status_string = [ - 'Up since ' + - status for status in overall_node_status_time] - else: - overall_node_status_down_time = self.ui.get_node_status_string( - str(max(reduced_process_keys_dict.values()))) - process_down_count = len(reduced_process_keys_dict) - overall_node_status_string = str( - process_down_count) + ' Process down' - version = config_nodes_ops_data.get( - 'ModuleCpuState').get('build_info') - if not version: - version = '--' - else: - version = json.loads(config_nodes_ops_data.get('ModuleCpuState').get( - 'build_info')).get('build-info')[0].get('build-id') - version = self.ui.get_version_string(version) - module_cpu_info_len = 
len( - config_nodes_ops_data.get('ModuleCpuState').get('module_cpu_info')) - cpu_mem_info_dict = {} - for i in range(module_cpu_info_len): - if config_nodes_ops_data.get('ModuleCpuState').get( - 'module_cpu_info')[i]['module_id'] == 'contrail-api': - cpu_mem_info_dict = config_nodes_ops_data.get( - 'ModuleCpuState').get('module_cpu_info')[i] - break - # special handling for overall node status value - dom_basic_view = self.ui.get_basic_view_infra() - node_status = self.browser.find_element_by_id('allItems').find_element_by_tag_name( - 'p').get_attribute('innerHTML').replace('\n', '').strip() - for i, item in enumerate(dom_basic_view): - if item.get('key') == 'Overall Node Status': - dom_basic_view[i]['value'] = node_status - if not cpu_mem_info_dict: - cpu = '--' - memory = '--' - else: - cpu = self.ui.get_cpu_string(cpu_mem_info_dict) - memory = self.ui.get_memory_string( - cpu_mem_info_dict) - modified_ops_data = [] - generator_list = self.ui.get_generators_list_ops() - for element in generator_list: - if element['name'] == ops_config_node_name + \ - ':Config:Contrail-Config-Nodemgr:0': - analytics_data = element['href'] - generators_vrouters_data = self.ui.get_details( - element['href']) - analytics_data = generators_vrouters_data.get( - 'ModuleClientState').get('client_info') - if analytics_data['status'] == 'Established': - analytics_primary_ip = analytics_data[ - 'primary'].split(':')[0] + ' (Up)' - - modified_ops_data.extend( - [ - { - 'key': 'Hostname', 'value': host_name}, { - 'key': 'IP Address', 'value': ip_address}, { - 'key': 'CPU', 'value': cpu}, { - 'key': 'Memory', 'value': memory}, { - 'key': 'Version', 'value': version}, { - 'key': 'API Server', 'value': api_string}, { - 'key': 'Discovery', 'value': discovery_string}, { - 'key': 'Service Monitor', 'value': monitor_string}, { - 'key': 'Ifmap', 'value': ifmap_string}, { - 'key': 'Schema Transformer', 'value': schema_string}, { - 'key': 'Overall Node Status', 'value': overall_node_status_string}]) - 
if self.ui.match_ui_kv( - modified_ops_data, - dom_basic_view): - self.logger.info( - "Config nodes %s basic view details data matched" % - (ops_config_node_name)) - else: - self.logger.error( - "Config node %s basic view details data match failed" % - (ops_config_node_name)) - result = result and False - ops_data = [] - self.logger.info( - "Verifying opserver basic data on Monitor->Infra->Config Nodes main page") - ops_data.extend( - [ - { - 'key': 'Hostname', 'value': host_name}, { - 'key': 'IP Address', 'value': ip_address.split(',')[0]}, { - 'key': 'CPU', 'value': cpu}, { - 'key': 'Memory', 'value': memory}, { - 'key': 'Version', 'value': version}, { - 'key': 'Status', 'value': overall_node_status_string}]) - if self.verify_config_nodes_ops_grid_page_data( - host_name, - ops_data): - self.logger.info( - "Config node %s main page data matched" % - (ops_config_node_name)) - else: - self.logger.error( - "Config node %s main page data match failed" % - (ops_config_node_name)) - result = result and False - return result - # end verify_config_nodes_ops_basic_data_in_webui - - def verify_vrouter_ops_basic_data(self): - result = True - self.logger.info( - "Verifying opserver basic data on Monitor->Infra->Virtual routers->Details(basic view)...") - self.logger.debug(self.dash) - if not self.ui.click_monitor_vrouters(): - result = result and False - rows = self.ui.get_rows() - vrouters_list_ops = self.ui.get_vrouters_list_ops() - for n in range(len(vrouters_list_ops)): - ops_vrouter_name = vrouters_list_ops[n]['name'] - self.logger.info( - "Vn host name %s exists in opserver..checking if exists in webui as well" % - (ops_vrouter_name)) - if not self.ui.click_monitor_vrouters(): - result = result and False - rows = self.ui.get_rows() - for i in range(len(rows)): - match_flag = 0 - obj_text = self.ui.get_slick_cell_text(rows[i], index=0) - if obj_text == ops_vrouter_name: - self.logger.info( - "Vrouter name %s found in webui..Verifying basic details..." 
% - (ops_vrouter_name)) - self.logger.debug(self.dash) - match_index = i - match_flag = 1 - break - if not match_flag: - self.logger.error( - "Vrouter name %s not found in webui" % (ops_vrouter_name)) - self.logger.debug(self.dash) - else: - self.logger.info( - "Verifying vrouter basic view details for vrouter-name %s " % - (ops_vrouter_name)) - self.ui.click_monitor_vrouters_basic(match_index) - dom_basic_view = self.ui.get_basic_view_infra() - # special handling for overall node status value - node_status = self.browser.find_element_by_id( - 'allItems').find_element_by_tag_name('p').get_attribute('innerHTML') - if node_status.find("") != -1: - node_status = node_status.split("")[1] - node_status = node_status.replace('\n', '').strip() - - for i, item in enumerate(dom_basic_view): - if item.get('key') == 'Overall Node Status': - dom_basic_view[i]['value'] = node_status - # special handling for control nodes - control_nodes = self.browser.find_element_by_class_name( - 'table-cell').text - for i, item in enumerate(dom_basic_view): - if item.get('key') == 'Control Nodes': - dom_basic_view[i]['value'] = control_nodes - # filter vrouter basic view details from opserver data - vrouters_ops_data = self.ui.get_details( - vrouters_list_ops[n]['href']) - ops_basic_data = [] - host_name = vrouters_list_ops[n]['name'] - ip_address = vrouters_ops_data.get( - 'VrouterAgent').get('self_ip_list')[0] - version = json.loads(vrouters_ops_data.get('VrouterAgent').get( - 'build_info')).get('build-info')[0].get('build-id') - version = self.ui.get_version_string(version) - xmpp_messages = vrouters_ops_data.get( - 'VrouterStatsAgent').get('xmpp_stats_list') - for i, item in enumerate(xmpp_messages): - if item['ip'] == ip_address: - xmpp_in_msgs = item['in_msgs'] - xmpp_out_msgs = item['out_msgs'] - xmpp_msgs_string = str(xmpp_in_msgs) + \ - ' In, ' + \ - str(xmpp_out_msgs) + ' Out' - break - total_flows = vrouters_ops_data.get( - 'VrouterStatsAgent').get('total_flows') - active_flows = 
vrouters_ops_data.get( - 'VrouterStatsAgent').get('active_flows') - flow_count_string = str(active_flows) + \ - ' Active, ' + \ - str(total_flows) + ' Total' - if vrouters_ops_data.get('VrouterAgent').get( - 'connected_networks'): - networks = str( - len(vrouters_ops_data.get('VrouterAgent').get('connected_networks'))) - else: - networks = '0' - interfaces = str(vrouters_ops_data.get('VrouterAgent') - .get('total_interface_count')) - if not interfaces: - interfaces = '0 Total' - else: - interfaces = interfaces + ' Total' - if vrouters_ops_data.get('VrouterAgent').get( - 'virtual_machine_list'): - instances = str( - len(vrouters_ops_data.get('VrouterAgent').get('virtual_machine_list'))) - else: - instances = '0' - vrouter_stats_agent = vrouters_ops_data.get( - 'VrouterStatsAgent') - if not vrouter_stats_agent: - cpu = '--' - memory = '--' - else: - cpu = self.ui.get_cpu_string(vrouter_stats_agent) - memory = self.ui.get_memory_string(vrouter_stats_agent) - last_log = vrouters_ops_data.get( - 'VrouterAgent').get('total_interface_count') - modified_ops_data = [] - process_state_list = vrouters_ops_data.get( - 'NodeStatus').get('process_info') - process_down_stop_time_dict = {} - process_up_start_time_dict = {} - exclude_process_list = [ - 'contrail-config-nodemgr', - 'contrail-analytics-nodemgr', - 'contrail-control-nodemgr', - 'contrail-vrouter-nodemgr', - 'openstack-nova-compute', - 'contrail-svc-monitor', - 'contrail-discovery:0', - 'contrail-zookeeper', - 'contrail-schema'] - for i, item in enumerate(process_state_list): - if item['process_name'] == 'contrail-vrouter-agent': - contrail_vrouter_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'contrail-vrouter-nodemgr': - contrail_vrouter_nodemgr_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'openstack-nova-compute': - 
openstack_nova_compute_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - reduced_process_keys_dict = {} - for k, v in process_down_stop_time_dict.items(): - if k not in exclude_process_list: - reduced_process_keys_dict[k] = v - ''' - if not reduced_process_keys_dict : - recent_time = max(process_up_start_time_dict.values()) - overall_node_status_time = self.ui.get_node_status_string(str(recent_time)) - overall_node_status_string = ['Up since ' + status for status in overall_node_status_time] - else: - overall_node_status_down_time = self.ui.get_node_status_string(str(max(reduced_process_keys_dict.values()))) - overall_node_status_string = ['Down since ' + status for status in overall_node_status_down_time] - ''' - if not reduced_process_keys_dict: - for process in exclude_process_list: - process_up_start_time_dict.pop(process, None) - recent_time = max(process_up_start_time_dict.values()) - overall_node_status_time = self.ui.get_node_status_string( - str(recent_time)) - down_intf = vrouters_ops_data.get( - 'VrouterAgent').get('down_interface_count') - if down_intf > 0: - overall_node_status_string = str( - down_intf) + ' Interfaces down' - else: - overall_node_status_string = [ - 'Up since ' + - status for status in overall_node_status_time] - else: - overall_node_status_down_time = self.ui.get_node_status_string( - str(max(reduced_process_keys_dict.values()))) - process_down_count = len(reduced_process_keys_dict) - process_down_list = reduced_process_keys_dict.keys() - overall_node_status_string = str( - process_down_count) + ' Process down' - - generator_list = self.ui.get_generators_list_ops() - for element in generator_list: - if element['name'] == ops_vrouter_name + \ - ':Compute:contrail-vrouter-agent:0': - analytics_data = element['href'] - break - generators_vrouters_data = self.ui.get_details( - element['href']) - analytics_data = generators_vrouters_data.get( - 
'ModuleClientState').get('client_info') - if analytics_data['status'] == 'Established': - analytics_primary_ip = analytics_data[ - 'primary'].split(':')[0] + ' (Up)' - tx_socket_bytes = analytics_data.get( - 'tx_socket_stats').get('bytes') - tx_socket_size = self.ui.get_memory_string( - int(tx_socket_bytes)) - analytics_messages_string = self.ui.get_analytics_msg_count_string( - generators_vrouters_data, - tx_socket_size) - control_nodes_list = vrouters_ops_data.get( - 'VrouterAgent').get('xmpp_peer_list') - control_nodes_string = '' - for node in control_nodes_list: - if node['status'] and node['primary']: - control_ip = node['ip'] - control_nodes_string = control_ip + '* (Up)' - index = control_nodes_list.index(node) - del control_nodes_list[index] - for node in control_nodes_list: - node_ip = node['ip'] - if node['status']: - control_nodes_string = control_nodes_string + \ - ', ' + node_ip + ' (Up)' - else: - control_nodes_string = control_nodes_string + \ - ', ' + node_ip + ' (Down)' - modified_ops_data.extend( - [ - { - 'key': 'Flow Count', 'value': flow_count_string}, { - 'key': 'Hostname', 'value': host_name}, { - 'key': 'IP Address', 'value': ip_address}, { - 'key': 'Networks', 'value': networks}, { - 'key': 'Instances', 'value': instances}, { - 'key': 'CPU', 'value': cpu}, { - 'key': 'Memory', 'value': memory}, { - 'key': 'Version', 'value': version}, { - 'key': 'vRouter Agent', 'value': contrail_vrouter_string}, { - 'key': 'Overall Node Status', 'value': overall_node_status_string}, { - 'key': 'Analytics Node', 'value': analytics_primary_ip}, { - 'key': 'Analytics Messages', 'value': analytics_messages_string}, { - 'key': 'Control Nodes', 'value': control_nodes_string}, { - 'key': 'XMPP Messages', 'value': xmpp_msgs_string}, { - 'key': 'Interfaces', 'value': interfaces}]) - if self.ui.match_ui_kv( - modified_ops_data, - dom_basic_view): - self.logger.info( - "Vrouter %s basic view details data matched" % - (ops_vrouter_name)) - else: - self.logger.error( 
- "Vrouter %s basic view details data match failed" % - (ops_vrouter_name)) - result = result and False - ops_data = [] - self.logger.info( - "Verifying Vrouter opserver basic data on Monitor->Infra->Virtual Routers main page") - ops_data.extend( - [ - { - 'key': 'Hostname', 'value': host_name}, { - 'key': 'IP Address', 'value': ip_address}, { - 'key': 'Networks', 'value': networks}, { - 'key': 'Instances', 'value': instances}, { - 'key': 'CPU', 'value': cpu}, { - 'key': 'Memory', 'value': memory}, { - 'key': 'Version', 'value': version}, { - 'key': 'Status', 'value': overall_node_status_string}, { - 'key': 'Interfaces', 'value': interfaces}]) - - if self.verify_vrouter_ops_grid_page_data(host_name, ops_data): - self.logger.info( - "Vrouter %s main page data matched" % - (ops_vrouter_name)) - else: - self.logger.error( - "Vrouter %s main page data match failed" % - (ops_vrouter_name)) - result = result and False - - return result - # end verify_vrouter_ops_basic_data_in_webui - - def verify_vrouter_ops_advance_data(self): - self.logger.info( - "Verifying vrouter Opserver advance data on Monitor->Infra->Virtual Routers->Details(advance view) page......") - self.logger.debug(self.dash) - if not self.ui.click_monitor_vrouters(): - result = result and False - rows = self.ui.get_rows() - vrouters_list_ops = self.ui.get_vrouters_list_ops() - result = True - for n in range(len(vrouters_list_ops)): - ops_vrouter_name = vrouters_list_ops[n]['name'] - self.logger.info( - "Vn host name %s exists in opserver..checking if exists in webui as well" % - (ops_vrouter_name)) - if not self.ui.click_monitor_vrouters(): - result = result and False - rows = self.ui.get_rows() - for i in range(len(rows)): - match_flag = 0 - obj_text = self.ui.get_slick_cell_text(rows[i], index=0) - if obj_text == ops_vrouter_name: - self.logger.info( - "Vrouter name %s found in webui..Verifying advance details..." 
% - (ops_vrouter_name)) - self.logger.debug(self.dash) - match_index = i - match_flag = 1 - break - if not match_flag: - self.logger.error( - "Vrouter name %s not found in webui" % (ops_vrouter_name)) - self.logger.debug(self.dash) - else: - self.logger.info( - "Verfiying vrouter advance details for vrouter-name %s " % - (ops_vrouter_name)) - self.ui.click_monitor_vrouters_advance(match_index) - vrouters_ops_data = self.ui.get_details( - vrouters_list_ops[n]['href']) - dom_arry = self.ui.parse_advanced_view() - dom_arry_str = self.ui.get_advanced_view_str() - dom_arry_num = self.ui.get_advanced_view_num() - dom_arry_num_new = [] - for item in dom_arry_num: - dom_arry_num_new.append( - {'key': item['key'].replace('\\', '"').replace(' ', ''), 'value': item['value']}) - dom_arry_num = dom_arry_num_new - merged_arry = dom_arry + dom_arry_str + dom_arry_num - if 'VrouterStatsAgent' in vrouters_ops_data: - ops_data = vrouters_ops_data['VrouterStatsAgent'] - history_del_list = [ - 'total_in_bandwidth_utilization', - 'cpu_share', - 'used_sys_mem', - 'one_min_avg_cpuload', - 'virt_mem', - 'total_out_bandwidth_utilization'] - for item in history_del_list: - if ops_data.get(item): - for element in ops_data.get(item): - if element.get('history-10'): - del element['history-10'] - if element.get('s-3600-topvals'): - del element['s-3600-topvals'] - - modified_ops_data = [] - self.ui.extract_keyvalue( - ops_data, modified_ops_data) - if 'VrouterAgent' in vrouters_ops_data: - ops_data_agent = vrouters_ops_data['VrouterAgent'] - modified_ops_data_agent = [] - self.ui.extract_keyvalue( - ops_data_agent, modified_ops_data_agent) - complete_ops_data = modified_ops_data + \ - modified_ops_data_agent - for k in range(len(complete_ops_data)): - if isinstance(complete_ops_data[k]['value'], list): - for m in range(len(complete_ops_data[k]['value'])): - complete_ops_data[k]['value'][m] = str( - complete_ops_data[k]['value'][m]) - elif isinstance(complete_ops_data[k]['value'], unicode): - 
complete_ops_data[k]['value'] = str( - complete_ops_data[k]['value']) - else: - complete_ops_data[k]['value'] = str( - complete_ops_data[k]['value']) - if self.ui.match_ui_kv( - complete_ops_data, - merged_arry): - self.logger.info( - "Vrouter %s advance view details matched" % - (ops_vrouter_name)) - else: - self.logger.error( - "Vrouter %s advance details match failed" % - (ops_vrouter_name)) - result = result and False - return result - # end verify_vrouter_ops_advance_data_in_webui - - def verify_bgp_routers_ops_basic_data(self): - self.logger.info( - "Verifying Control Nodes opserver basic data on Monitor->Infra->Control Nodes->Details(basic view) page......") - self.logger.debug(self.dash) - if not self.ui.click_monitor_control_nodes(): - result = result and False - rows = self.ui.get_rows() - bgp_routers_list_ops = self.ui.get_bgp_routers_list_ops() - result = True - for n in range(len(bgp_routers_list_ops)): - ops_bgp_routers_name = bgp_routers_list_ops[n]['name'] - self.logger.info("Control node host name %s exists in opserver..checking if exists \ - in webui as well" % (ops_bgp_routers_name)) - if not self.ui.click_monitor_control_nodes(): - result = result and False - rows = self.ui.get_rows() - for i in range(len(rows)): - match_flag = 0 - obj_text = self.ui.get_slick_cell_text(rows[i], index=0) - if obj_text == ops_bgp_routers_name: - self.logger.info( - "Bgp routers name %s found in webui..Verifying basic details..." 
% - (ops_bgp_routers_name)) - self.logger.debug(self.dash) - match_index = i - match_flag = 1 - break - if not match_flag: - self.logger.error("Bgp routers name %s not found in webui" % ( - ops_bgp_routers_name)) - self.logger.debug(self.dash) - else: - self.logger.info("Verify control nodes basic view details for \ - control node name %s " % (ops_bgp_routers_name)) - self.ui.click_monitor_control_nodes_basic( - match_index) - dom_basic_view = self.ui.get_basic_view_infra() - # special handling for overall node status value - node_status = self.browser.find_element_by_id('allItems').find_element_by_tag_name( - 'p').get_attribute('innerHTML').replace('\n', '').strip() - for i, item in enumerate(dom_basic_view): - if item.get('key') == 'Overall Node Status': - dom_basic_view[i]['value'] = node_status - # filter bgp_routers basic view details from opserver data - bgp_routers_ops_data = self.ui.get_details( - bgp_routers_list_ops[n]['href']) - ops_basic_data = [] - host_name = bgp_routers_list_ops[n]['name'] - ip_address = bgp_routers_ops_data.get( - 'BgpRouterState').get('bgp_router_ip_list')[0] - if not ip_address: - ip_address = '--' - version = json.loads(bgp_routers_ops_data.get('BgpRouterState').get( - 'build_info')).get('build-info')[0].get('build-id') - version = self.ui.get_version_string(version) - bgp_peers_count = bgp_routers_ops_data.get( - 'BgpRouterState').get('num_bgp_peer') - if not bgp_peers_count: - bgp_peers_count = '0 Total' - else: - bpg_peers_count = str(bpg_peers_count) + ' Total' - bgp_peers_string = 'BGP Peers: ' + bgp_peers_count - vrouters = 'vRouters: ' + \ - str(bgp_routers_ops_data.get('BgpRouterState') - .get('num_up_xmpp_peer')) + ' Established in Sync' - - cpu = bgp_routers_ops_data.get('BgpRouterState') - memory = bgp_routers_ops_data.get('BgpRouterState') - if not cpu: - cpu = '--' - memory = '--' - else: - cpu = self.ui.get_cpu_string(cpu) - memory = self.ui.get_memory_string(memory) - generator_list = 
self.ui.get_generators_list_ops() - for element in generator_list: - if element['name'] == ops_bgp_routers_name + \ - ':Control:contrail-control:0': - analytics_data = element['href'] - generators_vrouters_data = self.ui.get_details( - element['href']) - analytics_data = generators_vrouters_data.get( - 'ModuleClientState').get('client_info') - if analytics_data['status'] == 'Established': - analytics_primary_ip = analytics_data[ - 'primary'].split(':')[0] + ' (Up)' - tx_socket_bytes = analytics_data.get( - 'tx_socket_stats').get('bytes') - tx_socket_size = self.ui.get_memory_string( - int(tx_socket_bytes)) - analytics_messages_string = self.ui.get_analytics_msg_count_string( - generators_vrouters_data, tx_socket_size) - ifmap_ip = bgp_routers_ops_data.get('BgpRouterState').get( - 'ifmap_info').get('url').split(':')[0] - ifmap_connection_status = bgp_routers_ops_data.get( - 'BgpRouterState').get('ifmap_info').get('connection_status') - ifmap_connection_status_change = bgp_routers_ops_data.get( - 'BgpRouterState').get('ifmap_info').get('connection_status_change_at') - ifmap_connection_string = [ - ifmap_ip + - ' (' + - ifmap_connection_status + - ' since ' + - time + - ')' for time in self.ui.get_node_status_string(ifmap_connection_status_change)] - process_state_list = bgp_routers_ops_data.get( - 'NodeStatus').get('process_info') - process_down_stop_time_dict = {} - process_up_start_time_dict = {} - exclude_process_list = [ - 'contrail-config-nodemgr', - 'contrail-analytics-nodemgr', - 'contrail-control-nodemgr', - 'contrail-vrouter-nodemgr', - 'openstack-nova-compute', - 'contrail-svc-monitor', - 'contrail-discovery:0', - 'contrail-zookeeper', - 'contrail-schema'] - for i, item in enumerate(process_state_list): - if item['process_name'] == 'contrail-control': - control_node_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'contrail-control-nodemgr': - control_nodemgr_string 
= self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'contrail-dns': - contrail_dns_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - if item['process_name'] == 'contrail-named': - contrail_named_string = self.ui.get_process_status_string( - item, - process_down_stop_time_dict, - process_up_start_time_dict) - reduced_process_keys_dict = {} - for k, v in process_down_stop_time_dict.items(): - if k not in exclude_process_list: - reduced_process_keys_dict[k] = v - - if not reduced_process_keys_dict: - for process in exclude_process_list: - process_up_start_time_dict.pop(process, None) - recent_time = max(process_up_start_time_dict.values()) - overall_node_status_time = self.ui.get_node_status_string( - str(recent_time)) - overall_node_status_string = [ - 'Up since ' + - status for status in overall_node_status_time] - else: - overall_node_status_down_time = self.ui.get_node_status_string( - str(max(reduced_process_keys_dict.values()))) - process_down_list = reduced_process_keys_dict.keys() - process_down_count = len(reduced_process_keys_dict) - overall_node_status_string = str( - process_down_count) + ' Process down' - modified_ops_data = [] - modified_ops_data.extend( - [ - { - 'key': 'Peers', 'value': bgp_peers_string}, { - 'key': 'Hostname', 'value': host_name}, { - 'key': 'IP Address', 'value': ip_address}, { - 'key': 'CPU', 'value': cpu}, { - 'key': 'Memory', 'value': memory}, { - 'key': 'Version', 'value': version}, { - 'key': 'Analytics Node', 'value': analytics_primary_ip}, { - 'key': 'Analytics Messages', 'value': analytics_messages_string}, { - 'key': 'Ifmap Connection', 'value': ifmap_connection_string}, { - 'key': 'Control Node', 'value': control_node_string}, { - 'key': 'Overall Node Status', 'value': overall_node_status_string}]) - if self.ui.match_ui_kv( - modified_ops_data, - dom_basic_view): - 
self.logger.info( - "Control node %s basic view details matched" % - (ops_bgp_routers_name)) - else: - self.logger.error( - "Control node %s basic view details match failed" % - (ops_bgp_routers_name)) - result = result and False - ops_data = [] - ops_data.extend( - [ - { - 'key': 'Peers', 'value': bgp_peers_count}, { - 'key': 'Hostname', 'value': host_name}, { - 'key': 'IP Address', 'value': ip_address}, { - 'key': 'CPU', 'value': cpu}, { - 'key': 'Memory', 'value': memory}, { - 'key': 'Version', 'value': version}, { - 'key': 'Status', 'value': overall_node_status_string}, { - 'key': 'vRouters', 'value': vrouters.split()[1] + ' Total'}]) - - if self.verify_bgp_routers_ops_grid_page_data( - host_name, - ops_data): - self.logger.info( - "Control node %s main page data matched" % - (ops_bgp_routers_name)) - else: - self.logger.error( - "Control node %s main page data match failed" % - (ops_bgp_routers_name)) - result = result and False - return result - # end verify_bgp_routers_ops_basic_data_in_webui - - def verify_bgp_routers_ops_advance_data(self): - self.logger.info( - "Verifying Control Nodes opserver advance data on Monitor->Infra->Control Nodes->Details(advance view) page ......") - self.logger.debug(self.dash) - if not self.ui.click_monitor_control_nodes(): - result = result and False - rows = self.ui.get_rows() - bgp_routers_list_ops = self.ui.get_bgp_routers_list_ops() - result = True - for n in range(len(bgp_routers_list_ops)): - ops_bgp_router_name = bgp_routers_list_ops[n]['name'] - self.logger.info( - "Control node %s exists in opserver..checking if exists in webui " % - (ops_bgp_router_name)) - self.logger.info( - "Clicking on Monitor->Control Nodes") - if not self.ui.click_monitor_control_nodes(): - result = result and False - rows = self.ui.get_rows() - for i in range(len(rows)): - match_flag = 0 - obj_text = self.ui.get_slick_cell_text(rows[i], index=0) - if obj_text == ops_bgp_router_name: - self.logger.info( - "Control Node name %s found in 
webui..Verifying advance details..." % - (ops_bgp_router_name)) - match_flag = 1 - match_index = i - break - if not match_flag: - self.logger.error("Control Node name %s not found in webui" % - (ops_bgp_router_name)) - self.logger.debug(self.dash) - else: - self.logger.info( - "verify control node advance view details for control node %s " % - (ops_bgp_router_name)) - self.ui.click_monitor_control_nodes_advance( - match_index) - dom_arry = self.ui.parse_advanced_view() - dom_arry_str = self.ui.get_advanced_view_str() - dom_arry_num = self.ui.get_advanced_view_num() - dom_arry_num_new = [] - for item in dom_arry_num: - dom_arry_num_new.append( - {'key': item['key'].replace('\\', '"').replace(' ', ''), 'value': item['value']}) - dom_arry_num = dom_arry_num_new - merged_arry = dom_arry + dom_arry_str + dom_arry_num - bgp_routers_ops_data = self.ui.get_details( - bgp_routers_list_ops[n]['href']) - bgp_router_state_ops_data = bgp_routers_ops_data[ - 'BgpRouterState'] - history_del_list = [ - 'total_in_bandwidth_utilization', - 'cpu_share', - 'used_sys_mem', - 'one_min_avg_cpuload', - 'virt_mem', - 'total_out_bandwidth_utilization'] - for item in history_del_list: - if bgp_router_state_ops_data.get(item): - for element in bgp_router_state_ops_data.get(item): - if element.get('history-10'): - del element['history-10'] - if element.get('s-3600-topvals'): - del element['s-3600-topvals'] - if 'BgpRouterState' in bgp_routers_ops_data: - bgp_router_state_ops_data = bgp_routers_ops_data[ - 'BgpRouterState'] - - modified_bgp_router_state_ops_data = [] - self.ui.extract_keyvalue( - bgp_router_state_ops_data, - modified_bgp_router_state_ops_data) - complete_ops_data = modified_bgp_router_state_ops_data - for k in range(len(complete_ops_data)): - if isinstance(complete_ops_data[k]['value'], list): - for m in range(len(complete_ops_data[k]['value'])): - complete_ops_data[k]['value'][m] = str( - complete_ops_data[k]['value'][m]) - elif isinstance(complete_ops_data[k]['value'], 
unicode): - complete_ops_data[k]['value'] = str( - complete_ops_data[k]['value']) - else: - complete_ops_data[k]['value'] = str( - complete_ops_data[k]['value']) - if self.ui.match_ui_kv( - complete_ops_data, - merged_arry): - self.logger.info( - "Control node advanced view data matched") - else: - self.logger.error( - "Control node advanced view data match failed") - result = result and False - return result - # end verify_bgp_routers_ops_advance_data_in_webui - - def verify_analytics_nodes_ops_advance_data(self): - self.logger.info( - "Verifying analytics_nodes(collectors) opserver advance data on Monitor->Infra->Analytics Nodes->Details(advanced view) page......") - self.logger.debug(self.dash) - if not self.ui.click_monitor_analytics_nodes(): - result = result and False - rows = self.ui.get_rows() - analytics_nodes_list_ops = self.ui.get_collectors_list_ops() - result = True - for n in range(len(analytics_nodes_list_ops)): - ops_analytics_node_name = analytics_nodes_list_ops[n]['name'] - self.logger.info( - "Analytics node %s exists in opserver..checking if exists in webui " % - (ops_analytics_node_name)) - self.logger.info( - "Clicking on analytics nodes on Monitor->Infra->Analytics Nodes...") - if not self.ui.click_monitor_analytics_nodes(): - result = result and False - rows = self.ui.get_rows() - for i in range(len(rows)): - match_flag = 0 - obj_text = self.ui.get_slick_cell_text(rows[i], index=0) - if obj_text == ops_analytics_node_name: - self.logger.info( - "Analytics node name %s found in webui..Verifying advance details..." 
% - (ops_analytics_node_name)) - match_flag = 1 - match_index = i - break - if not match_flag: - self.logger.error("Analytics node name %s not found in webui" % - (ops_analytics_node_name)) - self.logger.debug(self.dash) - else: - self.logger.info( - "Verify analytics node advance view details for analytics node-name %s " % - (ops_analytics_node_name)) - self.ui.click_monitor_analytics_nodes_advance( - match_index) - analytics_nodes_ops_data = self.ui.get_details( - analytics_nodes_list_ops[n]['href']) - dom_arry = self.ui.parse_advanced_view() - dom_arry_str = self.ui.get_advanced_view_str() - dom_arry_num = self.ui.get_advanced_view_num() - dom_arry_num_new = [] - for item in dom_arry_num: - dom_arry_num_new.append( - {'key': item['key'].replace('\\', '"').replace(' ', ''), 'value': item['value']}) - dom_arry_num = dom_arry_num_new - merged_arry = dom_arry + dom_arry_str + dom_arry_num - modified_query_perf_info_ops_data = [] - modified_module_cpu_state_ops_data = [] - modified_analytics_cpu_state_ops_data = [] - modified_collector_state_ops_data = [] - history_del_list = [ - 'opserver_mem_virt', - 'queryengine_cpu_share', - 'opserver_cpu_share', - 'collector_cpu_share', - 'collector_mem_virt', - 'queryengine_mem_virt', - 'enq_delay'] - if 'QueryPerfInfo' in analytics_nodes_ops_data: - query_perf_info_ops_data = analytics_nodes_ops_data[ - 'QueryPerfInfo'] - for item in history_del_list: - if query_perf_info_ops_data.get(item): - for element in query_perf_info_ops_data.get(item): - if element.get('history-10'): - del element['history-10'] - if element.get('s-3600-topvals'): - del element['s-3600-topvals'] - if element.get('s-3600-summary'): - del element['s-3600-summary'] - self.ui.extract_keyvalue( - query_perf_info_ops_data, - modified_query_perf_info_ops_data) - if 'ModuleCpuState' in analytics_nodes_ops_data: - module_cpu_state_ops_data = analytics_nodes_ops_data[ - 'ModuleCpuState'] - for item in history_del_list: - if module_cpu_state_ops_data.get(item): - 
for element in module_cpu_state_ops_data.get(item): - if element.get('history-10'): - del element['history-10'] - if element.get('s-3600-topvals'): - del element['s-3600-topvals'] - if element.get('s-3600-summary'): - del element['s-3600-summary'] - - self.ui.extract_keyvalue( - module_cpu_state_ops_data, - modified_module_cpu_state_ops_data) - if 'AnalyticsCpuState' in analytics_nodes_ops_data: - analytics_cpu_state_ops_data = analytics_nodes_ops_data[ - 'AnalyticsCpuState'] - modified_analytics_cpu_state_ops_data = [] - self.ui.extract_keyvalue( - analytics_cpu_state_ops_data, - modified_analytics_cpu_state_ops_data) - if 'CollectorState' in analytics_nodes_ops_data: - collector_state_ops_data = analytics_nodes_ops_data[ - 'CollectorState'] - self.ui.extract_keyvalue( - collector_state_ops_data, - modified_collector_state_ops_data) - complete_ops_data = modified_query_perf_info_ops_data + modified_module_cpu_state_ops_data + \ - modified_analytics_cpu_state_ops_data + \ - modified_collector_state_ops_data - for k in range(len(complete_ops_data)): - if isinstance(complete_ops_data[k]['value'], list): - for m in range(len(complete_ops_data[k]['value'])): - complete_ops_data[k]['value'][m] = str( - complete_ops_data[k]['value'][m]) - elif isinstance(complete_ops_data[k]['value'], unicode): - complete_ops_data[k]['value'] = str( - complete_ops_data[k]['value']) - else: - complete_ops_data[k]['value'] = str( - complete_ops_data[k]['value']) - if self.ui.match_ui_kv( - complete_ops_data, - merged_arry): - self.logger.info( - "Analytics node advance view data matched") - else: - self.logger.error( - "Analytics node match failed") - result = result and False - return result - # end verify_analytics_nodes_ops_advance_data_in_webui - - def verify_vm_ops_basic_data(self): - self.logger.info( - "Verifying instances opserver data on Monitor->Networking->Instances summary (basic view) page ..") - self.logger.debug(self.dash) - if not self.ui.click_monitor_instances(): - result 
= result and False - rows = self.ui.get_rows() - vm_list_ops = self.ui.get_vm_list_ops() - vmi_list_ops = self.ui.get_vmi_list_ops() - result = True - for k in range(len(vm_list_ops)): - ops_uuid = vm_list_ops[k]['name'] - vm_ops_data = self.ui.get_details( - vm_list_ops[k]['href']) - ops_data = vm_ops_data['UveVirtualMachineAgent'] - vmname = ops_data['vm_name'] - if not self.ui.click_monitor_instances(): - result = result and False - rows = self.ui.get_rows() - self.logger.info( - "Vm uuid %s exists in opserver..checking if exists in webui as well" % - (ops_uuid)) - for i in range(len(rows)): - match_flag = 0 - ui_vm_name = self.ui.find_element( - 'instance', - 'name', - browser=rows[i]).text - if ui_vm_name == vmname: - self.logger.info( - "Vm name %s matched in webui..Verifying basic view details..." % - (vmname)) - self.logger.debug(self.dash) - match_index = i - match_flag = 1 - vm_name = self.ui.get_slick_cell_text(rows[i]) - break - if not match_flag: - self.logger.error( - "Vm exists in opserver but vm %s not found in webui..." 
% - (vmname)) - self.logger.debug(self.dash) - else: - self.ui.click_monitor_instances_basic( - match_index, - length=len(vm_list_ops)) - self.logger.info( - "Verify instances basic view details for vm %s " % - (vmname)) - dom_arry_basic = {} - ui_list = [] - item_list = self.ui.find_element( - 'item-list', - 'class', - elements=True) - for index in range(len(item_list)): - intf_dict = {} - label = self.ui.find_element( - 'label', - 'tag', - browser=item_list[index], - elements=True) - for lbl in label: - key = self.ui.find_element('key', 'class', browser=lbl) - value = self.ui.find_element( - 'value', - 'class', - browser=lbl) - intf_dict[key.text] = value.text - self.ui.extract_keyvalue(intf_dict, ui_list) - self.ui.type_change(ui_list) - - intf_dict = {} - intf_dict['CPU Utilization (%)'] = vm_ops_data['VirtualMachineStats'][ - 'cpu_stats'][0]['cpu_one_min_avg'] - intf_dict['Used Memory'] = self.ui.get_memory_string( - vm_ops_data['VirtualMachineStats']['cpu_stats'][0]['rss'], - 'KB') - intf_dict['Total Memory'] = self.ui.get_memory_string( - vm_ops_data['VirtualMachineStats']['cpu_stats'][0]['vm_memory_quota'], - 'KB') - - vn_names = None - ip_addresses = None - for k in range(len(vmi_list_ops)): - vmi_ops_data = self.ui.get_details( - vmi_list_ops[k]['href']) - ops_data_interface_list = vmi_ops_data[ - 'UveVMInterfaceAgent'] - vmname_vmi = ops_data_interface_list['vm_name'] - if vmname_vmi == vmname: - vn_name = ops_data_interface_list['virtual_network'] - vn_name = vn_name.split(':') - vnname = vn_name[2] + ' (' + vn_name[1] + ')' - vn_names = self.ui.append_to_string(vn_names, vnname, ',') - ip_addr = ops_data_interface_list['ip_address'] - ip_addresses = self.ui.append_to_string(ip_addresses, ip_addr, ',') - - ops_list = [] - intf_dict['UUID'] = ops_data['uuid'] - intf_dict['Label'] = ops_data['vrouter'] - intf_dict['Interfaces'] = len( - vm_ops_data['UveVirtualMachineAgent']['interface_list']) - intf_dict['IP Address'] = ip_addresses - intf_dict['Virtual 
Networks'] = vn_names - self.ui.extract_keyvalue(intf_dict, ops_list) - self.ui.type_change(ops_list) - - if self.ui.match_ui_values( - ops_list, ui_list): - self.logger.info("VM basic view data matched") - - else: - self.logger.error( - "VM basic data match failed") - result = result and False - - return result - # end verify_vm_ops_basic_data - - def verify_dashboard_details(self): - self.logger.info( - "Verifying dashboard details on Monitor->Infra->Dashboard page") - self.logger.debug(self.dash) - if not self.ui.click_monitor_dashboard(): - result = result and False - dashboard_node_details = self.browser.find_element_by_id( - 'topStats').find_elements_by_class_name('infobox-data-number') - dashboard_data_details = self.browser.find_element_by_id( - 'sparkLineStats').find_elements_by_class_name('infobox-data-number') - dashboard_system_details = self.browser.find_element_by_id( - 'system-info-stat').find_elements_by_tag_name('li') - servers_ver = self.ui.find_element( - ['system-info-stat', 'value'], ['id', 'class'], if_elements=[1]) - servers = servers_ver[0].text - version = servers_ver[2].text - logical_nodes = servers_ver[1].text - dom_data = [] - dom_data.append( - {'key': 'logical_nodes', 'value': logical_nodes}) - dom_data.append( - {'key': 'vrouters', 'value': dashboard_node_details[0].text}) - dom_data.append( - {'key': 'control_nodes', 'value': dashboard_node_details[1].text}) - dom_data.append( - {'key': 'analytics_nodes', 'value': dashboard_node_details[2].text}) - dom_data.append( - {'key': 'config_nodes', 'value': dashboard_node_details[3].text}) - dom_data.append( - {'key': 'database_nodes', 'value': dashboard_node_details[4].text}) - dom_data.append( - {'key': 'instances', 'value': dashboard_data_details[0].text}) - dom_data.append( - {'key': 'interfaces', 'value': dashboard_data_details[1].text}) - dom_data.append( - {'key': 'virtual_networks', 'value': dashboard_data_details[2].text}) - dom_data.append( - { - 'key': 
dashboard_system_details[0].find_element_by_class_name('key').text, - 'value': dashboard_system_details[0].find_element_by_class_name('value').text}) - dom_data.append( - { - 'key': dashboard_system_details[1].find_element_by_class_name('key').text, - 'value': dashboard_system_details[1].find_element_by_class_name('value').text}) - ops_servers = str(len(self.ui.get_config_nodes_list_ops())) - ops_version = self.ui.get_version() - self.ui.append_to_list( - dom_data, [('servers', servers), ('version', version)]) - ops_dashborad_data = [] - if not self.ui.click_configure_networks(): - result = result and False - rows = self.ui.get_rows() - vrouter_total_vm = str(len(self.ui.get_vm_list_ops())) - total_vrouters = str(len(self.ui.get_vrouters_list_ops())) - total_control_nodes = str( - len(self.ui.get_bgp_routers_list_ops())) - total_analytics_nodes = str( - len(self.ui.get_collectors_list_ops())) - total_config_nodes = str( - len(self.ui.get_config_nodes_list_ops())) - total_database_nodes = str( - len(self.ui.get_database_nodes_list_ops())) - vrouters_list_ops = self.ui.get_vrouters_list_ops() - interface_count = 0 - vrouter_total_vn = 0 - for index in range(len(vrouters_list_ops)): - vrouters_ops_data = self.ui.get_details( - vrouters_list_ops[index]['href']) - if vrouters_ops_data.get('VrouterAgent').get( - 'total_interface_count'): - interface_count = interface_count + \ - vrouters_ops_data.get('VrouterAgent').get( - 'total_interface_count') - if vrouters_ops_data.get('VrouterAgent').get('connected_networks'): - vrouter_total_vn = vrouter_total_vn + \ - (len(vrouters_ops_data.get('VrouterAgent') - .get('connected_networks'))) - lnodes = str( - int(total_control_nodes) + - int(total_analytics_nodes) + - int(total_config_nodes) + - int(total_vrouters) + - int(total_database_nodes)) - ops_dashborad_data.append({'key': 'logical_nodes', 'value': lnodes}) - ops_dashborad_data.append({'key': 'vrouters', 'value': total_vrouters}) - ops_dashborad_data.append( - {'key': 
'control_nodes', 'value': total_control_nodes}) - ops_dashborad_data.append( - {'key': 'analytics_nodes', 'value': total_analytics_nodes}) - ops_dashborad_data.append( - {'key': 'config_nodes', 'value': total_config_nodes}) - ops_dashborad_data.append( - {'key': 'database_nodes', 'value': total_database_nodes}) - ops_dashborad_data.append( - {'key': 'instances', 'value': vrouter_total_vm}) - ops_dashborad_data.append( - {'key': 'interfaces', 'value': str(interface_count)}) - ops_dashborad_data.append( - {'key': 'virtual_networks', 'value': str(vrouter_total_vn)}) - self.ui.append_to_list( - ops_dashborad_data, [ - ('servers', ops_servers), ('version', ops_version)]) - result = True - if self.ui.match_ui_kv(ops_dashborad_data, dom_data): - self.logger.info("Monitor dashborad details matched") - else: - self.logger.error("Monitor dashborad details not matched") - result = result and False - return result - # end verify_dashboard_details_in_webui - - def verify_vn_ops_basic_data(self): - self.logger.info( - "Verifying vn opserver data on Monitor->Networking->Networks page(basic view)") - self.logger.debug(self.dash) - error = 0 - if not self.ui.click_monitor_networks(): - result = result and False - rows = self.ui.get_rows() - vn_list_ops = self.ui.get_vn_list_ops() - for k in range(len(vn_list_ops)): - ops_fq_name = vn_list_ops[k]['name'] - if not self.ui.click_monitor_networks(): - result = result and False - rows = self.browser.find_element_by_class_name('grid-canvas') - rows = self.ui.get_rows(rows) - self.logger.info( - "Vn fq_name %s exists in opserver..checking if exists in webui as well" % - (ops_fq_name)) - for i in range(len(rows)): - match_flag = 0 - obj_text = self.ui.get_slick_cell_text(rows[i]) - if obj_text == ops_fq_name: - self.logger.info( - "Vn fq_name %s matched in webui..Verifying basic view details..." 
% - (ops_fq_name)) - self.logger.debug(self.dash) - match_index = i - match_flag = 1 - vn_fq_name = self.ui.get_slick_cell_text(rows[i], 1) - break - if not match_flag: - self.logger.error( - "Vn fq name exists in opserver but %s not found in webui..." % - (ops_fq_name)) - self.logger.debug(self.dash) - else: - self.ui.click_monitor_networks_basic(match_index) - self.logger.info( - "Verify VN basic view details for VN fq_name %s " % - (ops_fq_name)) - # get vn basic details excluding basic interface details - dom_arry_basic = {} - item_list = self.ui.find_element( - 'item-list', - 'class', - elements=True) - for item in item_list: - label = self.ui.find_element( - 'label', - 'tag', - browser=item, - elements=True) - for lbl in label: - key = self.ui.find_element('key', 'class', browser=lbl) - value = self.ui.find_element( - 'value', - 'class', - browser=lbl) - #my_dict = {} - if key.text not in [ - 'Total Throughput', - 'Total In packets', - 'Total Out packets', - 'instances', - 'interfaces', - 'Total ACL Rules']: - dom_arry_basic[key.text] = value.text - len_dom_arry_basic = len(dom_arry_basic) - vn_ops_data = self.ui.get_details( - vn_list_ops[k]['href']) - complete_ops_data = [] - ops_data_ingress = {'key': - 'Ingress Flow Count', 'value': str(0)} - ops_data_egress = {'key': - 'Egress Flow Count', 'value': str(0)} - ops_data_acl_rules = {'key': - 'Total ACL Rules', 'value': str(0)} - vn_name = ops_fq_name.split(':')[2] - ops_data_instances = {'key': 'Instances', 'value': '0'} - ops_data_connected_networks = { - 'key': 'Connected Networks', - 'value': '-'} - ops_data_interfaces_count = { - 'key': 'Interfaces', 'value': str(0)} - if 'UveVirtualNetworkAgent' in vn_ops_data: - # creating a list of basic view items retrieved from - # opserver - ops_data_basic = vn_ops_data.get('UveVirtualNetworkAgent') - if ops_data_basic.get('ingress_flow_count'): - ops_data_ingress = { - 'key': 'Ingress Flow Count', - 'value': ops_data_basic.get('ingress_flow_count')} - if 
ops_data_basic.get('egress_flow_count'): - ops_data_egress = { - 'key': 'Egress Flow Count', - 'value': ops_data_basic.get('egress_flow_count')} - if ops_data_basic.get('total_acl_rules'): - ops_data_acl_rules = { - 'key': 'Total ACL Rules', - 'value': ops_data_basic.get('total_acl_rules')} - if ops_data_basic.get('interface_list'): - ops_data_interfaces_count = { - 'key': 'Interfaces', - 'value': len( - ops_data_basic.get('interface_list'))} - if ops_data_basic.get('vrf_stats_list'): - vrf_stats_list = ops_data_basic['vrf_stats_list'] - vrf_stats_list_new = [vrf['name'] - for vrf in vrf_stats_list] - vrf_list_joined = ','.join(vrf_stats_list_new) - ops_data_vrf = {'key': 'vrf_stats_list', - 'value': vrf_list_joined} - if ops_data_basic.get('acl'): - ops_data_acl = {'key': 'ACL', 'value': - ops_data_basic.get('acl')} - if ops_data_basic.get('virtualmachine_list'): - ops_data_instances = { - 'key': 'Instances', - 'value': ', '.join( - ops_data_basic.get('virtualmachine_list'))} - complete_ops_data.extend( - [ops_data_connected_networks]) - if ops_fq_name.find('__link_local__') != -1 or ops_fq_name.find( - 'default-virtual-network') != -1 or ops_fq_name.find('ip-fabric') != -1: - for i, item in enumerate(complete_ops_data): - if complete_ops_data[i]['key'] == 'vrf_stats_list': - del complete_ops_data[i] - if 'UveVirtualNetworkConfig' in vn_ops_data: - ops_data_basic = vn_ops_data.get('UveVirtualNetworkConfig') - if ops_data_basic.get('connected_networks'): - connected_networks = ops_data_basic.get( - 'connected_networks') - networks = '' - for index, net in enumerate(connected_networks): - if index == 0: - networks = networks + net - else: - networks = networks + ',' + net - ops_data_connected_networks['value'] = networks - if ops_data_basic.get('attached_policies'): - ops_data_policies = ops_data_basic.get( - 'attached_policies') - if ops_data_policies: - pol_name_list = [pol['vnp_name'] - for pol in ops_data_policies] - pol_list_joined = ', '.join(pol_name_list) - 
ops_data_policies = { - 'key': 'attached_policies', - 'value': pol_list_joined} - # complete_ops_data.extend([ops_data_policies]) - self.ui.type_change(complete_ops_data) - complete_ops_data.extend([ops_data_connected_networks]) - dom_list = [] - self.ui.extract_keyvalue(dom_arry_basic, dom_list) - if self.ui.match_ui_values( - complete_ops_data, - dom_list): - self.logger.info( - "VN basic view data matched in webui") - else: - self.logger.error( - "VN basic view data match failed in webui") - error = 1 - return not error - # end verify_vn_ops_basic_data_in_webui - - def verify_config_nodes_ops_advance_data(self): - self.logger.info( - "Verifying config nodes opserver data on Monitor->Infra->Config Nodes->Details(advance view) page") - self.logger.debug(self.dash) - if not self.ui.click_monitor_config_nodes(): - result = result and False - rows = self.ui.get_rows() - config_nodes_list_ops = self.ui.get_config_nodes_list_ops() - result = True - for n in range(len(config_nodes_list_ops)): - ops_config_node_name = config_nodes_list_ops[n]['name'] - self.logger.info( - "Config node host name %s exists in opserver..checking if exists in webui as well" % - (ops_config_node_name)) - if not self.ui.click_monitor_config_nodes(): - result = result and False - rows = self.ui.get_rows() - for i in range(len(rows)): - match_flag = 0 - obj_text = self.ui.get_slick_cell_text(rows[i], index=0) - if obj_text == ops_config_node_name: - self.logger.info( - "Config node name %s found in webui..Verifying advance view details..." 
% - (ops_config_node_name)) - match_flag = 1 - match_index = i - break - if not match_flag: - self.logger.error( - "Config node name %s not found in webui" % - (ops_config_node_name)) - self.logger.debug(self.dash) - else: - self.logger.info( - "Verify config nodes advance view details in webui for config node-name %s " % - (ops_config_node_name)) - self.ui.click_monitor_config_nodes_advance( - match_index) - config_nodes_ops_data = self.ui.get_details( - config_nodes_list_ops[n]['href']) - dom_arry = self.ui.parse_advanced_view() - dom_arry_str = self.ui.get_advanced_view_str() - dom_arry_num = self.ui.get_advanced_view_num() - dom_arry_num_new = [] - for item in dom_arry_num: - dom_arry_num_new.append( - {'key': item['key'].replace('\\', '"').replace(' ', ''), 'value': item['value']}) - dom_arry_num = dom_arry_num_new - merged_arry = dom_arry + dom_arry_str + dom_arry_num - if 'ModuleCpuState' in config_nodes_ops_data: - ops_data = config_nodes_ops_data['ModuleCpuState'] - history_del_list = [ - 'api_server_mem_virt', - 'service_monitor_cpu_share', - 'schema_xmer_mem_virt', - 'service_monitor_mem_virt', - 'api_server_cpu_share', - 'schema_xmer_cpu_share'] - for item in history_del_list: - if ops_data.get(item): - for element in ops_data.get(item): - if element.get('history-10'): - del element['history-10'] - if element.get('s-3600-topvals'): - del element['s-3600-topvals'] - modified_ops_data = [] - self.ui.extract_keyvalue( - ops_data, modified_ops_data) - complete_ops_data = modified_ops_data - for k in range(len(complete_ops_data)): - if isinstance(complete_ops_data[k]['value'], list): - for m in range(len(complete_ops_data[k]['value'])): - complete_ops_data[k]['value'][m] = str( - complete_ops_data[k]['value'][m]) - elif isinstance(complete_ops_data[k]['value'], unicode): - complete_ops_data[k]['value'] = str( - complete_ops_data[k]['value']) - else: - complete_ops_data[k]['value'] = str( - complete_ops_data[k]['value']) - if self.ui.match_ui_kv( - 
complete_ops_data, - merged_arry): - self.logger.info( - "Config node advance view data matched in webui") - else: - self.logger.error( - "Config node advance view data match failed in webui") - result = result and False - return result - # end verify_config_nodes_ops_advance_data_in_webui - - def verify_vn_ops_advance_data(self): - self.logger.info( - "Verifying vn opserver advance data on Monitor->Networking->Networks Summary(Advanced view) page .....") - self.logger.debug(self.dash) - if not self.ui.click_monitor_networks(): - result = result and False - rows = self.ui.get_rows() - vn_list_ops = self.ui.get_vn_list_ops() - result = True - for n in range(len(vn_list_ops)): - ops_fqname = vn_list_ops[n]['name'] - self.logger.info( - "Vn fq name %s exists in opserver..checking if exists in webui as well" % - (ops_fqname)) - if not self.ui.click_monitor_networks(): - result = result and False - rows = self.browser.find_element_by_class_name('grid-canvas') - rows = self.ui.get_rows(rows) - for i in range(len(rows)): - match_flag = 0 - obj_text = self.ui.get_slick_cell_text(rows[i]) - if obj_text == ops_fqname: - self.logger.info( - "Vn fq name %s found in webui..Verifying advance view details..." 
% - (ops_fqname)) - self.logger.debug(self.dash) - match_index = i - match_flag = 1 - break - if not match_flag: - self.logger.error( - "Vn fqname %s not found in webui" % (ops_fqname)) - self.logger.debug(self.dash) - else: - self.logger.info( - "Verify advance view details for fqname %s " % - (ops_fqname)) - self.ui.click_monitor_networks_advance(match_index) - vn_ops_data = self.ui.get_details( - vn_list_ops[n]['href']) - self.ui.expand_advance_details() - dom_arry = self.ui.parse_advanced_view() - dom_arry_str = self.ui.get_advanced_view_str() - merged_arry = dom_arry + dom_arry_str - if 'UveVirtualNetworkConfig' in vn_ops_data: - ops_data = vn_ops_data['UveVirtualNetworkConfig'] - modified_ops_data = [] - self.ui.extract_keyvalue( - ops_data, modified_ops_data) - - if 'UveVirtualNetworkAgent' in vn_ops_data: - ops_data_agent = vn_ops_data['UveVirtualNetworkAgent'] - if 'udp_sport_bitmap' in ops_data_agent: - del ops_data_agent['udp_sport_bitmap'] - if 'udp_dport_bitmap' in ops_data_agent: - del ops_data_agent['udp_dport_bitmap'] - self.logger.info( - "Verifying VN %s details: \n %s \n " % - (vn_list_ops[i]['href'], ops_data_agent)) - modified_ops_data_agent = [] - self.ui.extract_keyvalue( - ops_data_agent, modified_ops_data_agent) - complete_ops_data = modified_ops_data + \ - modified_ops_data_agent - for k in range(len(complete_ops_data)): - if isinstance(complete_ops_data[k]['value'], list): - for m in range(len(complete_ops_data[k]['value'])): - complete_ops_data[k]['value'][m] = str( - complete_ops_data[k]['value'][m]) - elif isinstance(complete_ops_data[k]['value'], unicode): - complete_ops_data[k]['value'] = str( - complete_ops_data[k]['value']) - else: - complete_ops_data[k]['value'] = str( - complete_ops_data[k]['value']) - if self.ui.match_ui_kv( - complete_ops_data, - merged_arry): - self.logger.info( - "VN advance view data matched in webui") - else: - self.logger.error( - "VN advance view data match failed in webui") - result = result and False - 
return result - # end verify_vn_ops_advance_data_in_webui - - def verify_vm_ops_advance_data(self): - self.logger.info( - "Verifying instance opsserver advance data on Monitor->Networking->Instances->Instances summary(Advance view) page......") - self.logger.debug(self.dash) - if not self.ui.click_monitor_instances(): - result = result and False - rows = self.ui.get_rows() - vm_list_ops = self.ui.get_vm_list_ops() - result = True - for k in range(len(vm_list_ops)): - ops_uuid = vm_list_ops[k]['name'] - vm_ops_data = self.ui.get_details(vm_list_ops[k]['href']) - if not self.ui.click_monitor_instances(): - result = result and False - rows = self.ui.get_rows() - self.logger.info( - "Vm %s exists in opserver..checking if exists in webui as well" % - (ops_uuid)) - for i in range(len(rows)): - if not self.ui.click_monitor_instances(): - result = result and False - rows = self.ui.get_rows() - self.ui.click_element( - ('slick-cell', 0), 'class', rows[i], elements=True) - ui_list = [] - self.ui.get_item_list(ui_list) - match_flag = 0 - obj_text = ui_list[0] - if obj_text == ops_uuid: - self.logger.info( - "Vm %s matched in webui..Verifying advance view details..." % - (ops_uuid)) - self.logger.debug(self.dash) - match_index = i - match_flag = 1 - break - if not match_flag: - self.logger.error( - "VM exists in opserver but uuid %s not found in webui..." 
% - (ops_vm_name)) - self.logger.debug(self.dash) - else: - self.ui.click_monitor_instances_advance( - match_index, - length=len(vm_list_ops)) - self.logger.info( - "Verify advance view details for uuid %s " % (ops_uuid)) - plus_objs = self.ui.find_element( - 'i.node-2.icon-plus.expander', - 'css', - elements=True) - self.ui.click(plus_objs) - dom_arry = self.ui.parse_advanced_view() - dom_arry_str = [] - dom_arry_str = self.ui.get_advanced_view_str() - merged_arry = dom_arry + dom_arry_str - vm_ops_data = self.ui.get_details( - vm_list_ops[k]['href']) - if vm_ops_data and 'UveVirtualMachineAgent' in vm_ops_data: - ops_data = vm_ops_data['UveVirtualMachineAgent'] - modified_ops_data = [] - self.ui.extract_keyvalue( - ops_data, modified_ops_data) - complete_ops_data = modified_ops_data - for t in range(len(complete_ops_data)): - if isinstance(complete_ops_data[t]['value'], list): - for m in range(len(complete_ops_data[t]['value'])): - complete_ops_data[t]['value'][m] = str( - complete_ops_data[t]['value'][m]) - elif isinstance(complete_ops_data[t]['value'], unicode): - complete_ops_data[t]['value'] = str( - complete_ops_data[t]['value']) - else: - complete_ops_data[t]['value'] = str( - complete_ops_data[t]['value']) - for element in complete_ops_data: - if element['key'] in ['interface_list']: - index = complete_ops_data.index(element) - del complete_ops_data[index] - if self.ui.match_ui_kv( - complete_ops_data, - merged_arry): - self.logger.info( - "VM advance view data matched in webui") - else: - self.logger.error( - "VM advance data match failed in webui") - result = result and False - return result - # end verify_vm_ops_advance_data_in_webui - - def verify_vn_api_data(self): - self.logger.info( - "Verifying vn api server data on Config->Networking->Networks page...") - self.logger.debug(self.dash) - result = True - vn_list_api = self.ui.get_vn_list_api() - for vns in range(len(vn_list_api['virtual-networks'])): - pol_list, pol_list1, ip_block_list, ip_block, 
pool_list, floating_pool, route_target_list, host_route_main = [ - [] for _ in range(8)] - api_fq = vn_list_api['virtual-networks'][vns]['fq_name'] - api_fq_name = api_fq[2] - project_name = api_fq[1] - if project_name == 'default-project': - continue - self.ui.click_configure_networks() - # if project_name == 'default-project': - # continue - self.ui.select_project(project_name) - rows = self.ui.get_rows() - skip_net_list = [ - 'ip-fabric', - 'default-virtual-network', - '__link_local__'] - if api_fq_name in skip_net_list: - continue - self.logger.info( - "Vn fq_name %s exists in api server..checking if exists in webui as well" % - (api_fq_name)) - for i in range(len(rows)): - match_flag = 0 - dom_arry_basic = [] - if rows[i].find_elements_by_tag_name( - 'div')[2].text == api_fq_name: - self.logger.info( - "Vn fq_name %s matched in webui..Verifying basic view details..." % - (api_fq_name)) - self.logger.debug(self.dash) - match_index = i - match_flag = 1 - vn_fq_name = rows[ - i].find_elements_by_tag_name('div')[2].text - policies = rows[i].find_elements_by_tag_name( - 'div')[4].text.splitlines() - dom_arry_basic.append( - {'key': 'Attached Policies', 'value': policies}) - dom_arry_basic.append( - {'key': 'Network', 'value': rows[i].find_elements_by_tag_name('div')[2].text}) - dom_arry_basic.append({'key': 'ip_blocks_grid_row', 'value': rows[ - i].find_elements_by_tag_name('div')[3].text.split()}) - dom_arry_basic.append( - {'key': 'shared_grid_row', 'value': rows[i].find_elements_by_tag_name('div')[5].text}) - dom_arry_basic.append({'key': 'admin_state_grid_row', 'value': rows[ - i].find_elements_by_tag_name('div')[6].text}) - break - if not match_flag: - self.logger.error( - "Vn fq_name exists in apiserver but %s not found in webui..." 
% - (api_fq_name)) - self.logger.debug(self.dash) - else: - self.ui.click_configure_networks_basic(match_index) - rows = self.ui.get_rows(canvas=True) - self.logger.info( - "Verify basic view details for VN fq_name %s " % - (api_fq_name)) - span_obj = rows[match_index + 1] - rows_detail = span_obj.find_element_by_class_name( - 'slick-row-detail-container').find_elements_by_class_name('row-fluid') - span10_obj = self.ui.find_element( - ['slick-row-detail-container', 'span10'], ['class', 'class'], if_elements=[1], browser=span_obj) - len_span10 = len(span10_obj) - rows_elements = rows_detail[-len_span10:] - no_ipams = len(rows_detail) - len_span10 - 3 - ipam_list = [] - for ipam in range(no_ipams): - elements = rows_detail[ - ipam + - 3].find_elements_by_tag_name('div') - ipam = elements[0].text - cidr = elements[2].text - gateway = elements[3].text - dhcp = elements[5].text - alloc_pool = elements[7].text - dns = elements[6].text - ipam_list.append( - ipam + - ':' + - cidr + - ':' + - gateway + - ':' + - dhcp + - ':' + - dns + - ':' + - alloc_pool) - dom_arry_basic.append({'key': 'IP Blocks', 'value': ipam_list}) - for element in rows_elements: - span_element = element.find_elements_by_tag_name('span') - key = span_element[ - 0].find_element_by_tag_name('label').text - if key == 'Floating IP Pools': - value = span_element[1].text.split( - ': ')[1].split(' ')[0] - elif key == 'Attached Network Policies': - value = span_element[1].text.split( - ': ')[1].splitlines() - else: - value = span_element[1].text.split(': ')[1] - dom_arry_basic.append({'key': key, 'value': value}) - vn_api_data = self.ui.get_details( - vn_list_api['virtual-networks'][vns]['href']) - complete_api_data = [] - if 'virtual-network' in vn_api_data: - api_data_basic = vn_api_data.get('virtual-network') - if api_data_basic.get('name'): - complete_api_data.append( - {'key': 'Network', 'value': api_data_basic['name']}) - if 'network_policy_refs' in api_data_basic: - for ass_pol in range( - 
len(api_data_basic['network_policy_refs'])): - pol_list.append( - str(api_data_basic['network_policy_refs'][ass_pol]['to'][2])) - if len(pol_list) > 2: - for item in range(len(policies)): - for items in range(len(pol_list)): - if policies[item] == pol_list[items]: - pol_list1.append(pol_list[items]) - pol_string = '(' + str(len(pol_list) - 2) + ' more)' - pol_list1.append(pol_string) - else: - pol_list1 = policies - complete_api_data.append( - {'key': 'Attached Network Policies', 'value': pol_list}) - complete_api_data.append( - {'key': 'Attached Policies', 'value': pol_list1}) - if 'network_ipam_refs' in api_data_basic: - for ip in range(len(api_data_basic['network_ipam_refs'])): - dom_arry_basic.append({'key': 'Attached Policies', 'value': rows[ - i].find_elements_by_tag_name('div')[3].text.split()}) - default_net_ipam = api_data_basic[ - 'network_ipam_refs'][ip]['to'][2] - len_ipams = len( - api_data_basic['network_ipam_refs'][ip]['attr']['ipam_subnets']) - net_ipam_refs = api_data_basic['network_ipam_refs'][ip] - net_domain = net_ipam_refs['to'][0] - net_project = net_ipam_refs['to'][1] - net_ipam = net_ipam_refs['to'][2] - for ip_sub in range(len_ipams): - if default_net_ipam == 'default-network-ipam': - prefix = default_net_ipam + \ - ' (' + net_domain + ':' + net_project + ')' - prefix = prefix.strip().split('\n')[0] - else: - prefix = default_net_ipam - if 'enable_dhcp' in net_ipam_refs[ - 'attr']['ipam_subnets'][ip_sub]: - dhcp_api = net_ipam_refs['attr'][ - 'ipam_subnets'][ip_sub]['enable_dhcp'] - else: - dhcp_api = False - if dhcp_api: - dhcp_api = 'Enabled' - else: - dhcp_api = 'Disabled' - if 'dns_server_address' in net_ipam_refs[ - 'attr']['ipam_subnets'][ip_sub]: - dns_server_address = net_ipam_refs['attr'][ - 'ipam_subnets'][ip_sub]['dns_server_address'] - else: - dns_server_address = False - if dns_server_address: - dns_server_address = 'Enabled' - else: - dns_server_address = 'Disabled' - cidr_ip_prefix = net_ipam_refs['attr'][ - 
'ipam_subnets'][ip_sub]['subnet']['ip_prefix'] - cidr_ip_prefix_len = str( - net_ipam_refs['attr']['ipam_subnets'][ip_sub]['subnet']['ip_prefix_len']) - cidr_default_gateway = net_ipam_refs['attr'][ - 'ipam_subnets'][ip_sub]['default_gateway'] - cidr_prefix_and_len = cidr_ip_prefix + \ - '/' + cidr_ip_prefix_len - cidr_string = cidr_prefix_and_len + \ - ':' + cidr_default_gateway - alloc_pool = net_ipam_refs['attr'][ - 'ipam_subnets'][ip_sub]['allocation_pools'] - if alloc_pool: - alloc_pool_string = alloc_pool - else: - alloc_pool_string = '' - ip_block_list.append( - prefix + - ':' + - cidr_string + - ':' + - dns_server_address + - ':' + - dhcp_api + - ':' + - alloc_pool_string) - if ip_sub in range(2): - ip_block.append(cidr_prefix_and_len) - if len(ip_block_list) > 2: - ip_string = '(' + \ - str(len(ip_block_list) - 2) + ' more)' - ip_block.append(ip_string) - complete_api_data.append( - {'key': 'IP Blocks', 'value': ip_block_list}) - complete_api_data.append( - {'key': 'ip_blocks_grid_row', 'value': ip_block}) - if 'route_target_list' in api_data_basic and api_data_basic[ - 'route_target_list']: - if 'route_target' in api_data_basic['route_target_list']: - for route in range( - len(api_data_basic['route_target_list']['route_target'])): - route_target_list.append( - str(api_data_basic['route_target_list']['route_target'][route]).strip('target:')) - complete_api_data.append( - {'key': 'Route Targets', 'value': route_target_list}) - else: - complete_api_data.append( - {'key': 'Route Targets', 'value': '-'}) - if 'floating_ip_pools' in api_data_basic: - for fip in range(len(api_data_basic['floating_ip_pools'])): - fip_api = api_data_basic[ - 'floating_ip_pools'][fip]['to'] - fip_string = fip_api[3] - floating_pool.append(fip_string) - complete_api_data.append( - {'key': 'Floating IP Pools', 'value': floating_pool}) - else: - complete_api_data.append( - {'key': 'Floating IP Pools', 'value': '-'}) - exists = ['true', True] - if api_data_basic['id_perms']['enable'] 
in exists: - api_admin_state = 'Up' - else: - api_admin_state = 'Down' - complete_api_data.append( - {'key': 'Admin State', 'value': api_admin_state}) - complete_api_data.append( - {'key': 'admin_state_grid_row', 'value': api_admin_state}) - if api_data_basic.get('is_shared'): - shared = 'Enabled' - else: - shared = 'Disabled' - complete_api_data.append( - {'key': 'Shared', 'value': shared}) - complete_api_data.append( - {'key': 'shared_grid_row', 'value': shared}) - if 'router_external' in api_data_basic: - if not api_data_basic.get('router_external'): - external = 'Disabled' - elif api_data_basic.get('router_external'): - external = 'Enabled' - else: - external = 'Disabled' - complete_api_data.append( - {'key': 'External', 'value': external}) - display_name = api_data_basic.get('display_name') - complete_api_data.append( - {'key': 'Display Name', 'value': display_name}) - if 'network_ipam_refs' in api_data_basic: - for ipams in range( - len(api_data_basic['network_ipam_refs'])): - if api_data_basic['network_ipam_refs'][ - ipams]['attr'].get('host_routes'): - host_route_value = api_data_basic['network_ipam_refs'][ - ipams]['attr']['host_routes']['route'] - ipam_refs_to = api_data_basic[ - 'network_ipam_refs'][ipams]['to'] - if api_data_basic['network_ipam_refs'][ - ipams]['to'][2] == 'default-network-ipam': - host_route_sub = [] - for host_route in range(len(host_route_value)): - host_route_sub.append( - str(host_route_value[host_route]['prefix'])) - host_route_string = ",".join(host_route_sub) - ipam_refs_to = api_data_basic[ - 'network_ipam_refs'][ipams]['to'] - ipam_refs_fq = ipam_refs_to[ - 0] + ':' + ipam_refs_to[1] + ':' + ipam_refs_to[2] - host_route_main.append( - ipam_refs_fq + - ' ' + - host_route_string) - else: - host_route_sub = [] - for host_route1 in range( - len(host_route_value)): - host_route_sub.append( - str(host_route_value[host_route1]['prefix'])) - host_route_string = ", ".join(host_route_sub) - host_route_main.append( - str(ipam_refs_to[2]) 
+ ' ' + host_route_string) - if(len(host_route_main) > 0): - complete_api_data.append( - {'key': 'Host Routes', 'value': host_route_main}) - else: - complete_api_data.append( - {'key': 'Host Routes', 'value': '-'}) - - if 'virtual_network_properties' in api_data_basic: - if 'forwarding_mode' in api_data_basic[ - 'virtual_network_properties']: - forwarding_mode = api_data_basic[ - 'virtual_network_properties']['forwarding_mode'] - if forwarding_mode == 'l2': - forwarding_mode = forwarding_mode.title() + ' Only' - elif forwarding_mode == 'l2_l3': - forwarding_mode = 'L2 and L3' - else: - forwarding_mode = 'L2 and L3' - if 'virtual_network_network_id' in api_data_basic: - vnet_id = str(api_data_basic['virtual_network_network_id']) - if 'virtual_network_properties' in api_data_basic and 'vxlan_network_identifier' in api_data_basic[ - 'virtual_network_properties']: - vxlan_net_identifier = str( - api_data_basic['virtual_network_properties']['vxlan_network_identifier']) - if vxlan_net_identifier == 'None': - vxlan_net_identifier = 'Automatic' - else: - vxlan_net_identifier = 'Automatic' - vxlan_net_identifier = vxlan_net_identifier + \ - ' ( ' + vnet_id + ' )' - complete_api_data.append( - { - 'key': 'VxLAN Identifier', - 'value': vxlan_net_identifier - }) - if self.ui.match_ui_kv( - complete_api_data, - dom_arry_basic): - self.logger.info( - "VN config details matched on Config->Networking->Networks page") - else: - self.logger.error( - "VN config details not match on Config->Networking->Networks page") - result = result and False - return result - # end verify_vn_api_basic_data_in_webui - - def verify_service_template_api_basic_data(self): - self.logger.info( - "Verifying service template api server data on Config->Services->Service Templates page...") - self.logger.debug(self.dash) - result = True - service_temp_list_api = self.ui.get_service_template_list_api( - ) - for temp in range(len(service_temp_list_api['service-templates']) - 1): - interface_list = [] - 
api_fq_name = service_temp_list_api[ - 'service-templates'][temp + 1]['fq_name'][1] - if api_fq_name == 'analyzer-template': - continue - self.ui.click_configure_service_template() - rows = self.ui.get_rows() - self.logger.info( - "Service template fq_name %s exists in api server..checking if exists in webui as well" % - (api_fq_name)) - for i in range(len(rows)): - dom_arry_basic = [] - match_flag = 0 - j = 0 - if rows[i].find_elements_by_tag_name( - 'div')[2].text == api_fq_name: - self.logger.info( - "Service template fq_name %s matched in webui..Verifying basic view details..." % - (api_fq_name)) - self.logger.debug(self.dash) - match_index = i - match_flag = 1 - rows_div = rows[i].find_elements_by_tag_name('div') - dom_arry_basic.append( - {'key': 'Name_grid_row', 'value': rows_div[2].text}) - dom_arry_basic.append( - {'key': 'Mode_grid_row', 'value': rows_div[3].text}) - dom_arry_basic.append( - {'key': 'Type_grid_row', 'value': rows_div[4].text}) - dom_arry_basic.append( - {'key': 'Scaling_grid_row', 'value': rows_div[5].text}) - dom_arry_basic.append( - {'key': 'Interface_grid_row', 'value': rows_div[6].text}) - dom_arry_basic.append( - {'key': 'Image_grid_row', 'value': rows_div[7].text}) - dom_arry_basic.append( - {'key': 'Flavor_grid_row', 'value': rows_div[8].text}) - break - if not match_flag: - self.logger.error( - "Service template fq_name exists in apiserver but %s not found in webui..." 
% - (api_fq_name)) - self.logger.debug(self.dash) - else: - self.ui.click_configure_service_template_basic( - match_index) - rows = self.ui.get_rows() - self.logger.info( - "Verify basic view details for service templatefq_name %s " % - (api_fq_name)) - rows_detail = rows[match_index + 1].find_element_by_class_name( - 'slick-row-detail-container').find_element_by_class_name('row-fluid').find_elements_by_class_name('row-fluid') - for detail in range(len(rows_detail)): - text1 = rows_detail[ - detail].find_element_by_tag_name('label').text - if text1 == 'Interface Type': - dom_arry_basic.append({'key': str(text1), 'value': rows_detail[ - detail].find_element_by_class_name('span10').text}) - else: - dom_arry_basic.append({'key': str(text1), 'value': rows_detail[ - detail].find_element_by_class_name('span10').text}) - - service_temp_api_data = self.ui.get_details( - service_temp_list_api['service-templates'][temp + 1]['href']) - complete_api_data = [] - if 'service-template' in service_temp_api_data: - api_data_basic = service_temp_api_data.get( - 'service-template') - if 'fq_name' in api_data_basic: - complete_api_data.append( - {'key': 'Template', 'value': str(api_data_basic['fq_name'][1])}) - complete_api_data.append( - {'key': 'Name_grid_row', 'value': str(api_data_basic['fq_name'][1])}) - svc_temp_properties = api_data_basic[ - 'service_template_properties'] - if 'service_mode' in svc_temp_properties: - if svc_temp_properties.get('service_mode'): - svc_mode_value = str( - svc_temp_properties['service_mode']).capitalize() - else: - svc_mode_value = '-' - complete_api_data.append( - {'key': 'Mode', 'value': svc_mode_value}) - complete_api_data.append( - {'key': 'Mode_grid_row', 'value': svc_mode_value}) - if 'service_type' in api_data_basic[ - 'service_template_properties']: - svc_type_value = str( - svc_temp_properties['service_type']).capitalize() - complete_api_data.append( - {'key': 'Type', 'value': svc_type_value}) - complete_api_data.append( - {'key': 
'Type_grid_row', 'value': svc_type_value}) - if 'service_scaling' in svc_temp_properties: - if svc_temp_properties['service_scaling']: - complete_api_data.append( - { - 'key': 'Scaling', - 'value': str( - svc_temp_properties['service_scaling']).replace( - 'True', - 'Enabled')}) - complete_api_data.append( - { - 'key': 'Scaling_grid_row', - 'value': str( - svc_temp_properties['service_scaling']).replace( - 'True', - 'Enabled')}) - else: - complete_api_data.append( - { - 'key': 'Scaling', - 'value': str( - svc_temp_properties['service_scaling']).replace( - 'False', - 'Disabled')}) - complete_api_data.append( - { - 'key': 'Scaling_grid_row', - 'value': str( - svc_temp_properties['service_scaling']).replace( - 'False', - 'Disabled')}) - if 'interface_type' in svc_temp_properties: - len_svc_temp_properties = len( - svc_temp_properties['interface_type']) - for interface in range(len_svc_temp_properties): - svc_shared_ip = svc_temp_properties[ - 'interface_type'][interface]['shared_ip'] - svc_static_route_enable = svc_temp_properties[ - 'interface_type'][interface]['static_route_enable'] - if svc_shared_ip and svc_static_route_enable: - interface_type = svc_temp_properties['interface_type'][interface][ - 'service_interface_type'].title() + '(' + 'Shared IP' + ', ' + 'Static Route' + ')' - elif not svc_shared_ip and svc_static_route_enable: - interface_type = svc_temp_properties['interface_type'][interface][ - 'service_interface_type'].title() + '(' + 'Static Route' + ')' - elif svc_shared_ip and not svc_static_route_enable: - interface_type = svc_temp_properties['interface_type'][interface][ - 'service_interface_type'].title() + '(' + 'Shared IP' + ')' - else: - interface_type = svc_temp_properties['interface_type'][ - interface]['service_interface_type'].title() - interface_list.append(interface_type) - interface_string = ", ".join(interface_list) - complete_api_data.append( - {'key': 'Interface Type', 'value': interface_string}) - complete_api_data.append( - {'key': 
'Interface_grid_row', 'value': interface_string}) - if 'image_name' in svc_temp_properties: - if not svc_temp_properties['image_name']: - image_value = '-' - else: - image_value = str(svc_temp_properties['image_name']) - complete_api_data.append( - {'key': 'Image', 'value': image_value}) - complete_api_data.append( - {'key': 'Image_grid_row', 'value': image_value}) - if 'service_instance_back_refs' in api_data_basic: - service_instances = api_data_basic[ - 'service_instance_back_refs'] - si_text = '' - for index, si in enumerate(service_instances): - if index == 0: - si_text = si['to'][1] + ':' + si['to'][2] - else: - si_text = si_text + ', ' + \ - si['to'][1] + ':' + si['to'][2] - complete_api_data.append( - {'key': 'Instances', 'value': si_text}) - else: - complete_api_data.append( - {'key': 'Instances', 'value': '-'}) - if 'flavor' in svc_temp_properties: - if not svc_temp_properties['flavor']: - flavor_value = '-' - else: - flavor_value = str(svc_temp_properties['flavor']) - complete_api_data.append( - {'key': 'Flavor', 'value': flavor_value}) - complete_api_data.append( - {'key': 'Flavor_grid_row', 'value': flavor_value}) - if self.ui.match_ui_kv( - complete_api_data, - dom_arry_basic): - self.logger.info( - "Service template config details matched on Config->Service Templates page") - else: - self.logger.error( - "Service template config details match failed on Config->Service Templates page") - result = result and False - return result - # end verify_service_template_api_basic_data_in_webui - - def verify_floating_ip_api_data(self): - self.logger.info( - "Verifying fip api server data on Config->Networking->Manage Floating IPs page...") - self.logger.info(self.dash) - result = True - fip_list_api = self.ui.get_fip_list_api() - for fips in range(len(fip_list_api['floating-ips'])): - api_fq_id = fip_list_api['floating-ips'][fips]['uuid'] - self.ui.click_configure_fip() - project_name = fip_list_api.get( - 'floating-ips')[fips].get('fq_name')[1] - if 
project_name == 'default-project': - continue - self.ui.select_project(project_name) - rows = self.ui.get_rows() - self.logger.info( - "fip fq_id %s exists in api server..checking if exists in webui as well" % - (api_fq_id)) - for i in range(len(rows)): - match_flag = 0 - j = 0 - if rows[i].find_elements_by_tag_name( - 'div')[4].text == api_fq_id: - self.logger.info( - "fip fq_id %s matched in webui..Verifying basic view details now" % - (api_fq_id)) - self.logger.info(self.dash) - match_index = i - match_flag = 1 - dom_arry_basic = [] - dom_arry_basic.append( - {'key': 'IP Address', 'value': rows[i].find_elements_by_tag_name('div')[1].text}) - dom_arry_basic.append( - {'key': 'Instance', 'value': rows[i].find_elements_by_tag_name('div')[2].text}) - dom_arry_basic.append({'key': 'Floating IP and Pool', 'value': rows[ - i].find_elements_by_tag_name('div')[3].text}) - dom_arry_basic.append( - {'key': 'UUID', 'value': rows[i].find_elements_by_tag_name('div')[4].text}) - break - if not match_flag: - self.logger.error( - "fip fq_id exists in apiserver but %s not found in webui..." 
% - (api_fq_id)) - self.logger.info(self.dash) - else: - fip_api_data = self.ui.get_details( - fip_list_api['floating-ips'][fips]['href']) - complete_api_data = [] - if 'floating-ip' in fip_api_data: - # creating a list of basic view items retrieved from - # opserver - api_data_basic = fip_api_data.get('floating-ip') - if api_data_basic.get('floating_ip_address'): - complete_api_data.append( - {'key': 'IP Address', 'value': api_data_basic['floating_ip_address']}) - if api_data_basic.get('virtual_machine_interface_refs'): - vm_api_data = self.ui.get_details( - api_data_basic['virtual_machine_interface_refs'][0]['href']) - if 'virtual-machine-interface' in vm_api_data: - if vm_api_data[ - 'virtual-machine-interface'].get('virtual_machine_refs'): - complete_api_data.append({'key': 'Instance', 'value': vm_api_data[ - 'virtual-machine-interface']['virtual_machine_refs'][0]['to']}) - else: - complete_api_data.append( - {'key': 'Instance', 'value': '-'}) - if api_data_basic.get('fq_name'): - complete_api_data.append( - { - 'key': 'Floating IP and Pool', - 'value': api_data_basic['fq_name'][2] + - ':' + - api_data_basic['fq_name'][3]}) - if api_data_basic.get('fq_name'): - complete_api_data.append( - {'key': 'UUID', 'value': api_data_basic['fq_name'][4]}) - if self.ui.match_ui_kv( - complete_api_data, - dom_arry_basic): - self.logger.info( - "FIP config data matched on Config->Networking->Manage Floating IPs page") - else: - self.logger.error( - "FIP config data match failed on Config->Networking->Manage Floating IPs page") - result = False - return result - # end verify_floating_ip_api_data_in_webui - - def verify_policy_api_data(self): - self.logger.info( - "Verifying policy api server data on Config->Networking->Policies page ...") - self.logger.debug(self.dash) - result = True - policy_list_api = self.ui.get_policy_list_api() - for policy in range(len(policy_list_api['network-policys']) - 1): - pol_list = [] - net_list = [] - service_list = [] - api_fq_name = 
policy_list_api[ - 'network-policys'][policy]['fq_name'][2] - project_name = policy_list_api[ - 'network-policys'][policy]['fq_name'][1] - self.ui.click_configure_policies() - if project_name == 'default-project': - continue - self.ui.select_project(project_name) - rows = self.ui.get_rows() - self.logger.info( - "Policy fq_name %s exists in api server..checking if exists in webui as well" % - (api_fq_name)) - for i in range(len(rows)): - dom_arry_basic = [] - match_flag = 0 - detail = 0 - if rows[i].find_elements_by_tag_name( - 'div')[2].text == api_fq_name: - self.logger.info( - "Policy fq_name %s matched in webui..Verifying basic view details..." % - (api_fq_name)) - self.logger.debug(self.dash) - match_index = i - match_flag = 1 - dom_arry_basic.append( - {'key': 'Policy', 'value': rows[i].find_elements_by_tag_name('div')[2].text}) - net_grid_row_value = rows[i].find_elements_by_tag_name( - 'div')[3].text.splitlines() - dom_arry_basic.append( - {'key': 'Associated_Networks_grid_row', 'value': net_grid_row_value}) - dom_arry_basic.append({'key': 'Rules_grid_row', 'value': rows[ - i].find_elements_by_tag_name('div')[4].text.splitlines()}) - break - if not match_flag: - self.logger.error( - "Policy fq name exists in apiserver but %s not found in webui..." 
% - (api_fq_name)) - self.logger.debug(self.dash) - else: - self.ui.click_configure_policies_basic(match_index) - rows = self.ui.get_rows() - self.logger.info( - "Verify basic view details for policy fq_name %s " % - (api_fq_name)) - rows_detail = rows[match_index + 1].find_element_by_class_name( - 'slick-row-detail-container').find_element_by_class_name('row-fluid').find_elements_by_class_name('row-fluid') - while(detail < len(rows_detail)): - text1 = rows_detail[ - detail].find_element_by_tag_name('label').text - if text1 == 'Associated Networks': - dom_arry_basic.append({'key': str(text1), 'value': rows_detail[ - detail].find_element_by_class_name('span11').text.split()}) - elif text1 == 'Rules': - dom_arry_basic.append({'key': str(text1), 'value': rows_detail[ - detail].find_element_by_class_name('span11').text.splitlines()}) - detail = detail + 2 - policy_api_data = self.ui.get_details( - policy_list_api['network-policys'][policy]['href']) - complete_api_data = [] - if 'network-policy' in policy_api_data: - api_data_basic = policy_api_data.get('network-policy') - if 'fq_name' in api_data_basic: - complete_api_data.append( - {'key': 'Policy', 'value': api_data_basic['fq_name'][2]}) - if 'virtual_network_back_refs' in api_data_basic: - for net in range( - len(api_data_basic['virtual_network_back_refs'])): - api_project = api_data_basic[ - 'virtual_network_back_refs'][net]['to'][1] - if project_name == api_project: - fq = api_data_basic[ - 'virtual_network_back_refs'][net]['to'][2] - else: - fq = ':'.join( - api_data_basic['virtual_network_back_refs'][net]['to']) - net_list.append(fq) - complete_api_data.append( - {'key': 'Associated Networks', 'value': net_list}) - net_list_len = len(net_list) - if net_list_len > 2: - net_list_grid_row = net_list[:2] - more_string = '(' + str(net_list_len - 2) + ' more)' - net_list_grid_row.append(more_string) - complete_api_data.append( - {'key': 'Associated_Networks_grid_row', 'value': net_list_grid_row}) - else: - 
complete_api_data.append( - {'key': 'Associated_Networks_grid_row', 'value': net_list}) - if 'network_policy_entries' in api_data_basic: - for rules in range( - len(api_data_basic['network_policy_entries']['policy_rule'])): - dst_ports = api_data_basic['network_policy_entries'][ - 'policy_rule'][rules]['dst_ports'] - src_ports = api_data_basic['network_policy_entries'][ - 'policy_rule'][rules]['src_ports'] - source_port = [] - desti_port = [] - if dst_ports[0]['start_port'] == -1: - desti_port = 'any' - else: - for item in dst_ports: - if item['start_port'] == item['end_port']: - desti_port.append(item['start_port']) - else: - port_range = str(item['start_port']) + \ - '-' + \ - str(item['end_port']) - desti_port.append(port_range) - if isinstance(desti_port, list): - desti_port = str(desti_port) - desti_port = '[ ' + desti_port[1:-1] + ' ]' - - if src_ports[0]['start_port'] == -1: - source_port = 'any' - else: - for item in src_ports: - if item['start_port'] == item['end_port']: - source_port.append(item['start_port']) - else: - port_range = str(item['start_port']) + \ - '-' + \ - str(item['end_port']) - source_port.append(port_range) - if isinstance(source_port, list): - source_port = str(source_port) - source_port = '[ ' + source_port[1:-1] + ' ]' - - api_src_vnet = api_data_basic['network_policy_entries'][ - 'policy_rule'][rules]['src_addresses'][0]['virtual_network'] - api_dst_vnet = api_data_basic['network_policy_entries'][ - 'policy_rule'][rules]['dst_addresses'][0]['virtual_network'] - api_vnet_match_list = [ - 'default-domain:default-project:default-virtual-network', - 'any', - 'default-domain:default-project:__link_local__', - 'default-domain:default-project:ip-fabric'] - if api_src_vnet: - if api_src_vnet in api_vnet_match_list: - source_network = api_src_vnet - else: - src_vnet_split = api_src_vnet.split(':') - if project_name == src_vnet_split[1]: - source_network = src_vnet_split[2] - else: - source_network = src_vnet_split[ - 2] + ' (' + 
src_vnet_split[0] + ':' + src_vnet_split[1] + ')' - else: - api_src_vnet = '' - if api_dst_vnet: - if api_dst_vnet in api_vnet_match_list: - dest_network = api_dst_vnet - else: - dst_vnet_split = api_dst_vnet.split(':') - if project_name == dst_vnet_split[1]: - dest_network = dst_vnet_split[2] - else: - dest_network = dst_vnet_split[ - 2] + ' (' + dst_vnet_split[0] + ':' + dst_vnet_split[1] + ')' - else: - api_dst_vnet = '' - action_list = api_data_basic['network_policy_entries'][ - 'policy_rule'][rules]['action_list'] - protocol = api_data_basic['network_policy_entries'][ - 'policy_rule'][rules]['protocol'] - direction = api_data_basic['network_policy_entries'][ - 'policy_rule'][rules]['direction'] - if action_list.get('apply_service'): - for service in range( - len(action_list['apply_service'])): - service_list.append( - action_list['apply_service'][service]) - service_string = ",".join(service_list) - policy_text = 'protocol' + ' ' + protocol + ' ' + 'network' + ' ' + source_network + ' ' + 'ports' + ' ' + source_port + ' ' + \ - direction + ' ' + 'network' + ' ' + dest_network + ' ' + 'ports' + \ - ' ' + desti_port + ' ' + \ - 'apply_service' + ' ' + service_string - pol_list.append(policy_text) - else: - - policy_text = action_list['simple_action'] + ' ' + 'protocol' + ' ' + protocol + ' ' + 'network' + ' ' + source_network + \ - ' ' + 'ports' + ' ' + source_port + ' ' + direction + ' ' + \ - 'network' + ' ' + dest_network + \ - ' ' + 'ports' + ' ' + desti_port - pol_list.append(policy_text) - complete_api_data.append( - {'key': 'Rules', 'value': pol_list}) - if len(pol_list) > 2: - more_count = len(pol_list) - 2 - pol_list_grid_row = pol_list[:2] - more_text = '(' + str(more_count) + ' more)' - pol_list_grid_row.append(more_text) - else: - pol_list_grid_row = pol_list - complete_api_data.append( - {'key': 'Rules_grid_row', 'value': pol_list_grid_row}) - if self.ui.match_ui_kv( - complete_api_data, - dom_arry_basic): - self.logger.info( - "Policy config 
details matched on Config->Networking->Policies page") - else: - self.logger.error( - "Policy config details match failed on Config->Networking->Policies page") - result = result and False - return result - # end verify_policy_api_basic_data_in_webui - - def verify_ipam_api_data(self): - self.logger.info( - "Verifying ipam config data on Config->Networking->IPAMs page") - self.logger.debug(self.dash) - result = True - ipam_list_api = self.ui.get_ipam_list_api() - for ipam in range(len(ipam_list_api['network-ipams'])): - net_list = [] - api_fq_name = ipam_list_api['network-ipams'][ipam]['fq_name'][2] - project_name = ipam_list_api['network-ipams'][ipam]['fq_name'][1] - if project_name == 'default-project': - continue - self.ui.click_configure_ipam() - self.ui.select_project(project_name) - rows = self.ui.get_rows() - self.logger.info( - "Ipam fq_name %s exists in api server..checking if exists in webui as well" % - (api_fq_name)) - for i in range(len(rows)): - match_flag = 0 - j = 0 - dom_arry_basic = [] - if rows[i].find_elements_by_tag_name( - 'div')[2].text == api_fq_name: - self.logger.info( - "Ipam fq name %s matched in webui..Verifying basic view details..." % - (api_fq_name)) - self.logger.debug(self.dash) - match_index = i - match_flag = 1 - ipam_fq_name = rows[ - i].find_elements_by_tag_name('div')[2].text - dom_arry_basic.append( - {'key': 'Name_grid_row', 'value': rows[i].find_elements_by_tag_name('div')[2].text}) - ip_grid_row_value = ' '.join( - rows[i].find_elements_by_tag_name('div')[3].text.splitlines()) - dom_arry_basic.append( - {'key': 'IP_grid_row', 'value': ip_grid_row_value}) - dom_arry_basic.append( - {'key': 'DNS_grid_row', 'value': rows[i].find_elements_by_tag_name('div')[4].text}) - dom_arry_basic.append( - {'key': 'NTP_grid_row', 'value': rows[i].find_elements_by_tag_name('div')[5].text}) - break - if not match_flag: - self.logger.error( - "Ipam fq_name exists in apiserver but %s not found in webui..." 
% - (api_fq_name)) - self.logger.debug(self.dash) - else: - self.ui.click_configure_ipam_basic(match_index) - rows = self.ui.get_rows() - self.logger.info( - "Verify basic view details for ipam fq_name %s " % - (api_fq_name)) - rows_detail = rows[match_index + 1].find_element_by_class_name( - 'slick-row-detail-container').find_element_by_class_name('row-fluid').find_elements_by_class_name('row-fluid') - for detail in range(len(rows_detail)): - text1 = rows_detail[ - detail].find_element_by_tag_name('label').text - if text1 == 'IP Blocks': - dom_arry_basic.append({'key': str(text1), 'value': rows_detail[ - detail].find_element_by_class_name('span10').text}) - else: - dom_arry_basic.append({'key': str(text1), 'value': rows_detail[ - detail].find_element_by_class_name('span10').text}) - - ipam_api_data = self.ui.get_details( - ipam_list_api['network-ipams'][ipam]['href']) - complete_api_data = [] - if 'network-ipam' in ipam_api_data: - api_data_basic = ipam_api_data.get('network-ipam') - if 'fq_name' in api_data_basic: - complete_api_data.append( - {'key': 'IPAM Name', 'value': str(api_data_basic['fq_name'][2])}) - complete_api_data.append( - {'key': 'Name_grid_row', 'value': str(api_data_basic['fq_name'][2])}) - if api_data_basic.get('network_ipam_mgmt'): - if api_data_basic['network_ipam_mgmt'].get( - 'ipam_dns_method'): - if api_data_basic['network_ipam_mgmt'][ - 'ipam_dns_method'] == 'default-dns-server': - complete_api_data.append( - {'key': 'DNS Server', 'value': '-'}) - complete_api_data.append( - {'key': 'DNS_grid_row', 'value': '-'}) - elif api_data_basic['network_ipam_mgmt']['ipam_dns_method'] == 'none': - complete_api_data.append( - {'key': 'DNS Server', 'value': 'DNS Mode : None'}) - complete_api_data.append( - {'key': 'DNS_grid_row', 'value': 'DNS Mode : None'}) - elif api_data_basic['network_ipam_mgmt']['ipam_dns_method'] == 'virtual-dns-server': - complete_api_data.append( - { - 'key': 'DNS Server', - 'value': 'Virtual DNS:' + - ' ' + - 
api_data_basic['network_ipam_mgmt']['ipam_dns_server']['virtual_dns_server_name']}) - complete_api_data.append( - { - 'key': 'DNS_grid_row', - 'value': 'Virtual DNS:' + - ' ' + - api_data_basic['network_ipam_mgmt']['ipam_dns_server']['virtual_dns_server_name']}) - elif api_data_basic['network_ipam_mgmt']['ipam_dns_method'] == 'tenant-dns-server': - dns_server_value = str( - api_data_basic['network_ipam_mgmt']['ipam_dns_method']).split('-')[0].title() + ' ' + 'Managed' + ' ' + 'DNS' + ':' + ' ' + str( - api_data_basic['network_ipam_mgmt']['ipam_dns_server']['tenant_dns_server_address']['ip_address'][0]) - complete_api_data.append( - {'key': 'DNS Server', 'value': dns_server_value}) - complete_api_data.append( - {'key': 'DNS_grid_row', 'value': dns_server_value}) - else: - complete_api_data.append( - {'key': 'DNS Server', 'value': '-'}) - complete_api_data.append( - {'key': 'DNS_grid_row', 'value': '-'}) - if api_data_basic.get('network_ipam_mgmt'): - if api_data_basic['network_ipam_mgmt'].get( - 'dhcp_option_list'): - if api_data_basic['network_ipam_mgmt'][ - 'dhcp_option_list'].get('dhcp_option'): - if len( - api_data_basic['network_ipam_mgmt']['dhcp_option_list']['dhcp_option']) > 1: - ntp_server_value = str(api_data_basic['network_ipam_mgmt']['dhcp_option_list'][ - 'dhcp_option'][0]['dhcp_option_value']) - complete_api_data.append({'key': 'Domain Name', 'value': str(api_data_basic[ - 'network_ipam_mgmt']['dhcp_option_list']['dhcp_option'][1]['dhcp_option_value'])}) - complete_api_data.append( - {'key': 'NTP Server', 'value': ntp_server_value}) - complete_api_data.append( - {'key': 'NTP_grid_row', 'value': ntp_server_value}) - - elif api_data_basic['network_ipam_mgmt']['dhcp_option_list']['dhcp_option'][0]['dhcp_option_name'] == '4': - ntp_server_value = str(api_data_basic['network_ipam_mgmt']['dhcp_option_list'][ - 'dhcp_option'][0]['dhcp_option_value']) - complete_api_data.append( - {'key': 'NTP Server', 'value': ntp_server_value}) - complete_api_data.append( - 
{'key': 'NTP_grid_row', 'value': ntp_server_value}) - elif api_data_basic['network_ipam_mgmt']['dhcp_option_list']['dhcp_option'][0]['dhcp_option_name'] == '15': - complete_api_data.append({'key': 'Domain Name', 'value': str(api_data_basic[ - 'network_ipam_mgmt']['dhcp_option_list']['dhcp_option'][0]['dhcp_option_value'])}) - else: - complete_api_data.append( - {'key': 'NTP Server', 'value': '-'}) - complete_api_data.append( - {'key': 'NTP_grid_row', 'value': '-'}) - complete_api_data.append( - {'key': 'Domain Name', 'value': '-'}) - else: - complete_api_data.append( - {'key': 'NTP Server', 'value': '-'}) - complete_api_data.append( - {'key': 'NTP_grid_row', 'value': '-'}) - complete_api_data.append( - {'key': 'Domain Name', 'value': '-'}) - if 'virtual_network_back_refs' in api_data_basic: - for net in range( - len(api_data_basic['virtual_network_back_refs'])): - for ip_sub in range( - len(api_data_basic['virtual_network_back_refs'][net]['attr']['ipam_subnets'])): - api_project = api_data_basic[ - 'virtual_network_back_refs'][net]['to'][1] - if project_name == api_project: - fq = str( - api_data_basic['virtual_network_back_refs'][net]['to'][2]) - else: - fq = ':'.join( - api_data_basic['virtual_network_back_refs'][net]['to']) - ip_prefix = str(api_data_basic['virtual_network_back_refs'][net][ - 'attr']['ipam_subnets'][ip_sub]['subnet']['ip_prefix']) - ip_prefix_len = str(api_data_basic['virtual_network_back_refs'][net]['attr'][ - 'ipam_subnets'][ip_sub]['subnet']['ip_prefix_len']) - default_gateway = str(api_data_basic['virtual_network_back_refs'][net][ - 'attr']['ipam_subnets'][ip_sub]['default_gateway']) - net_list.append( - fq + - ' - ' + - ip_prefix + - '/' + - ip_prefix_len + - '(' + - default_gateway + - ')') - net_string = ' '.join(net_list) - complete_api_data.append( - {'key': 'IP Blocks', 'value': net_string}) - if len(net_list) > 2: - net_string_grid_row = ' '.join( - net_list[:2]) + ' (' + str(len(net_list) - 2) + ' more)' - else: - net_string_grid_row 
= net_string - complete_api_data.append( - {'key': 'IP_grid_row', 'value': net_string_grid_row}) - if self.ui.match_ui_kv( - complete_api_data, - dom_arry_basic): - self.logger.info( - "Ipam config data matched on Config->Networking->IPAM") - else: - self.logger.error( - "Ipam config data match failed on Config->Networking->IPAM") - result = result and False - return result - # end verify_ipam_api_data_in_webui - - def verify_vm_ops_data_in_webui(self, fixture): - self.logger.info( - "Verifying vn %s opserver data on Monitor->Networking->Instances page" % - (fixture.vn_name)) - vm_list = self.ui.get_vm_list_ops() - - if not self.ui.click_monitor_instances(): - result = result and False - rows = self.ui.get_rows() - if len(rows) != len(vm_list): - self.logger.error(" VM count not matched in webui") - else: - self.logger.info(" VM count matched in webui") - for i in range(len(vm_list)): - vm_name = vm_list[i]['name'] - # end verify_vm_ops_data_in_webui - - def verify_vn_ops_data_in_webui(self, fixture): - vn_list = self.ui.get_vn_list_ops(fixture) - self.logger.info( - "VN details for %s got from ops server and Verifying in webui : " % - (vn_list)) - if not self.ui.click_configure_networks(): - result = result and False - rows = self.ui.get_rows() - ln = len(vn_list) - for i in range(ln): - vn_name = vn_list[i]['name'] - details = self.ui.get_vn_details(vn_list[i]['href']) - UveVirtualNetworkConfig - if 'UveVirtualNetwokConfig' in details: - total_acl_rules_ops - if 'UveVirtualNetworkAgent' in details: - UveVirtualNetworkAgent_dict = details['UveVirtualNetworkAgent'] - egress_flow_count_api = details[ - 'UveVirtualNetworkAgent']['egress_flow_count'] - ingress_flow_count_api = details[ - 'UveVirtualNetworkAgent']['ingress_flow_count'] - interface_list_count_api = len( - details['UveVirtualNetworkAgent']['interface_list_count']) - total_acl_rules_count = details[ - 'UveVirtualNetworkAgent']['total_acl_rules'] - if self.ui.check_element_exists_by_xpath( - row[j + 1], 
"//label[contains(text(), 'Ingress Flows')]"): - for n in range(floating_ip_length_api): - fip_api = details[ - 'virtual-network']['floating_ip_pools'][n]['to'] - if fip_ui[n] == fip_api[3] + \ - ' (' + fip_api[0] + ':' + fip_api[1] + ')': - self.logger.info(" Fip matched ") - if not self.ui.click_monitor_networks(): - result = result and False - for j in range(len(rows)): - rows = self.browser.find_element_by_class_name( - 'k-grid-content').find_element_by_tag_name('tbody').find_elements_by_tag_name('tr') - fq_name = rows[j].find_elements_by_tag_name('a')[1].text - if(fq_name == vn_list[i]['name']): - self.logger.info(" %s VN verified in monitor page " % - (fq_name)) - rows[j].find_elements_by_tag_name( - 'td')[0].find_element_by_tag_name('a').click() - rows = self.ui.get_rows() - expanded_row = rows[ - j + - 1].find_element_by_class_name('inline row-fluid position-relative pull-right margin-0-5') - expanded_row.find_element_by_class_name( - 'icon-cog icon-only bigger-110').click() - expanded_row.find_elements_by_tag_name('a')[1].click() - basicdetails_ui_data = rows[ - j + - 1].find_element_by_xpath("//*[contains(@id, 'basicDetails')]").find_elements_by_class_name("row-fluid") - ingress_ui = basicdetails_ui_data[0].text.split('\n')[1] - egress_ui = basicdetails_ui_data[1].text.split('\n')[1] - acl_ui = basicdetails_ui_data[2].text.split('\n')[1] - intf_ui = basicdetails_ui_data[3].text.split('\n')[1] - vrf_ui = basicdetails_ui_data[4].text.split('\n')[1] - break - else: - self.logger.error(" %s VN not found in monitor page " % - (fq_name)) - details = self.ui.get_vn_details_api(vn_list[i]['href']) - j = 0 - for j in range(len(rows)): - if not self.ui.click_monitor_networks(): - result = result and False - rows = self.browser.find_element_by_class_name( - 'k-grid-content').find_element_by_tag_name('tbody').find_elements_by_tag_name('tr') - if (rows[j].find_elements_by_tag_name('td')[2].get_attribute( - 'innerHTML') == details['virtual-network']['fq_name'][2]): - 
if rows[j].find_elements_by_tag_name( - 'td')[4].text == ip_block: - self.logger.info("Ip blocks verified ") - rows[j].find_elements_by_tag_name( - 'td')[0].find_element_by_tag_name('a').click() - rows = self.ui.get_rows() - ui_ip_block = rows[ - j + - 1].find_element_by_class_name('span11').text.split('\n')[1] - if (ui_ip_block.split(' ')[0] == ':'.join(details['virtual-network']['network_ipam_refs'][0]['to']) and ui_ip_block.split(' ')[ - 1] == ip_block and ui_ip_block.split(' ')[2] == details['virtual-network']['network_ipam_refs'][0]['attr']['ipam_subnets'][0]['default_gateway']): - self.logger.info( - "Subnets details matched") - else: - self.logger.error("Ip block not matched") - forwarding_mode = rows[ - j + - 1].find_elements_by_class_name('span2')[0].text.split('\n')[1] - vxlan = rows[ - j + - 1].find_elements_by_class_name('span2')[1].text.split('\n')[1] - network_dict = {'l2_l3': 'L2 and L3'} - if network_dict[details[ - 'virtual-network']['virtual_network_properties']['forwarding_mode']] == forwarding_mode: - self.logger.info(" Forwarding mode matched ") - else: - self.logger.error("Forwarding mode not matched ") - if details[ - 'virtual-network']['virtual_network_properties']['vxlan_network_identifier'] is None: - vxlan_api = 'Automatic' - else: - vxlan_api = details[ - 'virtual-network']['virtual_network_properties']['vxlan_network_identifier'] - if vxlan_api == vxlan: - self.logger.info(" Vxlan matched ") - else: - self.logger.info(" Vxlan not matched ") - rows[j].find_elements_by_tag_name( - 'td')[0].find_element_by_tag_name('a').click() - break - elif (j == range(len(rows))): - self.logger.info( - "Vn name %s : %s is not matched in webui " % - (fixture.vn_name, details['virtual-network']['fq_name'][2])) - # end verify_vn_ops_data_in_webui - - def verify_vn_in_webui(self, fixture): - self.ui.screenshot('vm_verify') - if not self.ui.click_configure_networks(): - result = result and False - time.sleep(2) - rows = self.ui.get_rows() - ln = len(rows) - 
vn_flag = 0 - for i in range(len(rows)): - if (rows[i].find_elements_by_tag_name('div')[2].get_attribute('innerHTML') == fixture.vn_name and rows[ - i].find_elements_by_tag_name('div')[4].text == fixture.vn_subnets[0]): - vn_flag = 1 - rows[i].find_elements_by_tag_name( - 'div')[0].find_element_by_tag_name('i').click() - rows = self.ui.get_rows() - ip_blocks = rows[ - i + - 1].find_element_by_class_name('span11').text.split('\n')[1] - if (ip_blocks.split(' ')[0] == ':'.join( - fixture.ipam_fq_name) and ip_blocks.split(' ')[1] == fixture.vn_subnets[0]): - self.logger.info( - "Vn name %s and ip block %s verified in configure page " % - (fixture.vn_name, fixture.vn_subnets)) - else: - self.logger.error( - "Ip block details failed to verify in configure page %s " % - (fixture.vn_subnets)) - self.ui.screenshot('Verify_vn_configure_page_ip_block') - vn_flag = 0 - break - if not self.ui.click_monitor_networks(): - result = result and False - time.sleep(3) - rows = self.ui.get_rows() - vn_entry_flag = 0 - for i in range(len(rows)): - fq_name = rows[i].find_elements_by_tag_name( - 'div')[1].find_element_by_tag_name('div').text - if(fq_name == fixture.ipam_fq_name[0] + ":" + fixture.project_name + ":" + fixture.vn_name): - self.logger.info(" %s VN verified in monitor page " % - (fq_name)) - vn_entry_flag = 1 - break - if not vn_entry_flag: - self.logger.error("VN %s Verification failed in monitor page" % - (fixture.vn_name)) - self.ui.screenshot('verify_vn_monitor_page') - if vn_entry_flag: - self.logger.info( - " VN %s and subnet verified on config/monitor pages" % - (fixture.vn_name)) - # if self.ui.verify_uuid_table(fixture.vn_id): - # self.logger.info( "VN %s UUID verified in webui table " %(fixture.vn_name)) - # else: - # self.logger.error( "VN %s UUID Verification failed in webui table " %(fixture.vn_name)) - # self.browser.get_screenshot_as_file('verify_vn_configure_page_ip_block.png') - fixture.obj = fixture.quantum_h.get_vn_obj_if_present( - fixture.vn_name, 
fixture.project_id) - fq_type = 'virtual_network' - full_fq_name = fixture.vn_fq_name + ':' + fixture.vn_id - # if self.ui.verify_fq_name_table(full_fq_name, fq_type): - # self.logger.info( "fq_name %s found in fq Table for %s VN" %(fixture.vn_fq_name,fixture.vn_name)) - # else: - # self.logger.error( "fq_name %s failed in fq Table for %s VN" %(fixture.vn_fq_name,fixture.vn_name)) - # self.browser.get_screenshot_as_file('setting_page_configure_fq_name_error.png') - return True - # end verify_vn_in_webui - - def delete_policy(self, fixture): - if not self.ui.delete_element(fixture, 'policy_delete'): - self.logger.info("Policy deletion failed") - return False - return True - # end delete_policy_in_webui - - def delete_svc_instance(self, fixture): - self.ui.delete_element(fixture, 'svc_instance_delete') - time.sleep(25) - # end svc_instance_delete - - def delete_svc_template(self, fixture): - self.ui.delete_element(fixture, 'svc_template_delete') - # end svc_template_delete - - def delete_vn(self, fixture): - self._delete_port(fixture) - self._delete_router(fixture) - if not self.ui.delete_element(fixture, 'vn_delete'): - self.logger.info("Vn deletion failed") - return False - return True - # end vn_delete - - def _delete_router(self, fixture): - self.ui.delete_element(fixture, 'router_delete') - - def _delete_port(self, fixture): - self.ui.delete_element(fixture, 'port_delete') - - def delete_ipam(self, fixture): - if not self.ui.click_configure_ipam(): - result = result and False - rows = self.ui.get_rows() - for ipam in range(len(rows)): - tdArry = rows[ipam].find_elements_by_class_name('slick-cell') - if (len(tdArry) > 2): - if (tdArry[2].text == fixture.name): - tdArry[1].find_element_by_tag_name('input').click() - self.browser.find_element_by_id( - 'btnDeleteIpam').find_element_by_tag_name('i').click() - self.browser.find_element_by_id( - 'btnCnfRemoveMainPopupOK').click() - if not self.ui.check_error_msg("Delete ipam"): - raise Exception("Ipam deletion failed") 
- break - self.ui.wait_till_ajax_done(self.browser) - self.logger.info( - "%s got deleted using contrail-webui" % (fixture.name)) - break - # end ipam_delete - - def cleanup(self): - self.detach_ipam_from_dns_server() - return True - # end cleanup - - def delete_dns_server_and_record(self): - self.detach_ipam_from_dns_server() - self.delete_dns_record() - self.dns_server() - - def delete_dns_server(self): - self.ui.delete_element('dns_server_delete') - - def delete_dns_record(self): - self.ui.delete_element('dns_record_delete') - - def detach_ipam_from_dns_server(self): - self.logger.info( - "Detaching ipams from dns servers...") - result = True - try: - if not self.ui.click_configure_dns_servers(): - result = result and False - rows = self.ui.get_rows(canvas=True) - for index in range(len(rows)): - self.ui.click_element('icon-cog', 'class', browser=rows[index]) - self.ui.click_element('tooltip-success', 'class') - try: - ipams = self.ui.find_element( - ['s2id_msIPams', 'select2-search-choice-close'], ['id', 'class'], if_elements=[1]) - except: - ipams = None - pass - for ipam in ipams: - ipam.click() - if not self.ui.click_on_create('DNSServer', save=True): - result = result and False - self.ui.check_error_msg("Detach ipams") - except WebDriverException: - if len(rows): - result = result and False - self.logger.warning("ipam detach from router failed") - return result - # end detach_ipam_from_dns_server - - def service_template_delete_in_webui(self, fixture): - if not self.ui.click_configure_service_template(): - result = result and False - rows = self.ui.get_rows() - for temp in range(len(rows)): - tdArry = rows[temp].find_elements_by_class_name('slick-cell') - if (len(tdArry) > 2): - if (tdArry[2].text == fixture.st_name): - tdArry[1].find_element_by_tag_name('input').click() - self.browser.find_element_by_id( - 'btnDeletesvcTemplate').find_element_by_tag_name('i').click() - self.browser.find_element_by_id('btnCnfDelPopupOK').click() - if not 
self.ui.check_error_msg("Delete service template"): - raise Exception("Service template deletion failed") - break - self.ui.wait_till_ajax_done(self.browser) - self.logger.info("%s got deleted using contrail-webui" % - (fixture.st_name)) - break - # end service_template_delete_in_webui - - def service_instance_delete_in_webui(self, fixture): - if not self.ui.click_configure_service_instance(): - result = result and False - rows = self.ui.get_rows() - for inst in range(len(rows)): - tdArry = rows[inst].find_elements_by_class_name('slick-cell') - if (len(tdArry) > 2): - if (tdArry[2].text == fixture.si_name): - tdArry[1].find_element_by_tag_name('input').click() - self.browser.find_element_by_id( - 'btnDeletesvcInstances').find_element_by_tag_name('i').click() - self.browser.find_element_by_id( - 'btnCnfDelSInstPopupOK').click() - if not self.ui.check_error_msg("Delete service instance"): - raise Exception("Service instance deletion failed") - break - self.ui.wait_till_ajax_done(self.browser) - self.logger.info("%s got deleted using contrail-webui" % - (fixture.si_name)) - break - # end service_instance_delete_in_webui - - def dns_server_delete(self, name): - if not self.ui.click_configure_dns_server(): - result = result and False - rows = self.ui.get_rows() - for server in range(len(rows)): - tdArry = rows[server].find_elements_by_class_name('slick-cell') - if (len(tdArry) > 2): - if (tdArry[2].text == name): - tdArry[1].find_element_by_tag_name('input').click() - self.browser.find_element_by_id( - 'btnDeleteDNSServer').click() - self.browser.find_element_by_id('btnCnfDelPopupOK').click() - if not self.ui.check_error_msg("Delete dns server"): - raise Exception("Dns server deletion failed") - break - self.ui.wait_till_ajax_done(self.browser) - self.logger.info( - "%s got deleted using contrail-webui" % (name)) - break - # end dns_server_delete_in_webui - - def dns_record_delete(self, name): - if not self.ui.click_configure_dns_record(): - result = result and False - 
rows = self.ui.get_rows() - for record in range(len(rows)): - tdArry = rows[record].find_elements_by_class_name('slick-cell') - if (len(tdArry) > 2): - if (tdArry[2].text == name): - tdArry[1].find_element_by_tag_name('input').click() - self.browser.find_element_by_id( - 'btnDeleteDNSRecord').click() - self.browser.find_element_by_id( - 'btnCnfDelMainPopupOK').click() - if not self.ui.check_error_msg("Delete dns record"): - raise Exception("Dns record deletion failed") - break - self.ui.wait_till_ajax_done(self.browser) - self.logger.info( - "%s got deleted using contrail-webui" % (name)) - break - # end dns_record_delete_in_webui - - def create_vm(self, fixture): - result = True - if not WebuiTest.os_release: - WebuiTest.os_release = self.os_release - try: - self.browser_openstack = fixture.browser_openstack - con = self.connections.ui_login - con.login( - self.browser_openstack, - con.os_url, - con.username, - con.password) - self.ui.select_project_in_openstack( - fixture.project_name, - self.browser_openstack, self.os_release) - self.ui.click_instances(self.browser_openstack) - fixture.image_name = 'ubuntu' - fixture.nova_h.get_image(image_name=fixture.image_name) - time.sleep(2) - self.ui.click_element( - 'Launch Instance', - 'link_text', - self.browser_openstack, - jquery=False, - wait=5) - self.logger.info( - 'Creating instance name %s with image name %s using openstack horizon' % - (fixture.vm_name, fixture.image_name)) - xpath_image_type = "//select[@name='source_type']/option[contains(text(), 'image') or contains(text(),'Image')]" - self.ui.click_element( - xpath_image_type, - 'xpath', - self.browser_openstack, - jquery=False, - wait=2) - xpath_image_name = "//select[@name='image_id']/option[contains(text(), '" + \ - fixture.image_name + "')]" - self.ui.click_element( - xpath_image_name, - 'xpath', - self.browser_openstack, - jquery=False, - wait=2) - self.ui.find_element( - 'id_name', - browser=self.browser_openstack).send_keys( - fixture.vm_name) - 
self.browser_openstack.find_element_by_xpath( - "//select[@name='availability_zone']/option[text()='nova']").click() - self.browser_openstack.find_element_by_xpath( - "//select[@name='flavor']/option[text()='m1.small']").click() - self.ui.click_element( - "//input[@value='Launch']", - 'xpath', - self.browser_openstack, - jquery=False, - wait=4) - networks = self.ui.find_element( - ['available_network', 'li'], ['id', 'tag'], self.browser_openstack, [1]) - for net in networks: - vn_match = net.text.split('(')[0] - if (vn_match == fixture.vn_name): - net.find_element_by_class_name('btn').click() - break - self.ui.click_element( - "//input[@value='Launch']", - 'xpath', - self.browser_openstack) - self.ui.wait_till_ajax_done(self.browser_openstack) - self.logger.debug('VM %s launched using openstack horizon' % - (fixture.vm_name)) - self.logger.info('Waiting for VM %s to come into active state' % - (fixture.vm_name)) - time.sleep(10) - rows_os = self.ui.find_element( - ['form', 'tbody', 'tr'], ['tag', 'tag', 'tag'], self.browser_openstack, [2]) - for i in range(len(rows_os)): - rows_os = self.ui.find_element( - ['form', 'tbody', 'tr'], ['tag', 'tag', 'tag'], self.browser_openstack, [2]) - if(rows_os[i].find_elements_by_tag_name('td')[1].text == fixture.vm_name): - counter = 0 - vm_active = False - while not vm_active: - vm_active_status1 = self.browser_openstack.find_element_by_tag_name('form').find_element_by_tag_name( - 'tbody').find_elements_by_tag_name('tr')[i].find_elements_by_tag_name( - 'td')[6].text - vm_active_status2 = self.browser_openstack.find_element_by_tag_name('form').find_element_by_tag_name( - 'tbody').find_elements_by_tag_name('tr')[i].find_elements_by_tag_name('td')[5].text - - if(vm_active_status1 == 'Active' or vm_active_status2 == 'Active'): - self.logger.info( - "%s status changed to Active state in Horizon" % - (fixture.vm_name)) - vm_active = True - time.sleep(5) - elif(vm_active_status1 == 'Error' or vm_active_status2 == 'Error'): - 
self.logger.error( - "%s state went into Error state in horizon" % - (fixture.vm_name)) - self.ui.screenshot( - 'verify_vm_state_openstack_' + - fixture.vm_name, - self.browser_openstack) - return "Error" - else: - self.logger.info( - "%s state is not yet Active in horizon, waiting for more time..." % - (fixture.vm_name)) - counter = counter + 1 - time.sleep(3) - self.browser_openstack.find_element_by_link_text( - 'Instances').click() - self.ui.wait_till_ajax_done( - self.browser_openstack) - time.sleep(3) - if(counter >= 100): - fixuture.logger.error( - "VM %s failed to come into active state" % - (fixture.vm_name)) - self.ui.screenshot( - 'verify_vm_not_active_openstack_' + - fixture.vm_name, - self.browser_openstack) - break - time.sleep(10) - fixture.vm_obj = fixture.nova_h.get_vm_if_present( - fixture.vm_name, fixture.project_fixture.uuid) - fixture.vm_objs = fixture.nova_h.get_vm_list( - name_pattern=fixture.vm_name, - project_id=fixture.project_fixture.uuid) - fixture.verify_on_setup() - except WebDriverException: - self.logger.error( - 'Error while creating VM %s using horizon with image name %s failed' % - (fixture.vm_name, fixture.image_name)) - self.ui.screenshot( - 'verify_vm_error_openstack_' + - fixture.vm_name, - self.browser_openstack) - result = result and False - raise - return result - # end create_vm - - def delete_vm(self, fixture): - self.browser_openstack = fixture.browser_openstack - con = self.connections.ui_login - con.login( - self.browser_openstack, - con.os_url, - con.username, - con.password) - project_name = fixture.project_name - self.ui.select_project_in_openstack( - project_name, - self.browser_openstack, self.os_release) - self.ui.click_instances(self.browser_openstack) - rows = self.ui.find_element( - ['instances', 'tbody'], ['id', 'tag'], self.browser_openstack) - rows = rows.find_elements_by_tag_name('tr') - for instance in rows: - if fixture.vm_name == instance.find_element_by_tag_name('a').text: - 
instance.find_elements_by_tag_name( - 'td')[0].find_element_by_tag_name('input').click() - break - ln = len(rows) - launch_instance = self.ui.click_element( - 'instances__action_terminate', - browser=self.browser_openstack) - self.ui.click_element( - 'Terminate Instances', - 'link_text', - self.browser_openstack) - time.sleep(8) - self.ui.click_instances(self.browser_openstack) - if not self.verify_vm_in_openstack(fixture.vm_name): - self.logger.info("VM %s got deleted using openstack horizon" % - (fixture.vm_name)) - else: - self.logger.error("VM %s exists" % (fixture.vm_name)) - # end vm_delete_in_openstack - - def verify_vm_in_openstack(self, vm_name): - rows = self.ui.find_element( - ['instances', 'tbody', 'tr'], ['id', 'tag', 'tag'], self.browser_openstack, [2]) - len_td = len(rows[0].find_elements_by_tag_name('td')) - if len_td == 1: - self.logger.info("No vm found") - return False - else: - for instance in rows: - if vm_name == instance.find_element_by_tag_name('a').text: - self.logger.info("%s vm exists" % (vm_name)) - return True - return False - # end verify_vm_in_openstack - - def verify_vm(self, fixture): - result = True - try: - if not self.ui.click_monitor_instances(): - result = result and False - rows = self.ui.get_rows() - ln = len(rows) - vm_flag = 0 - for i in range(len(rows)): - rows_count = len(rows) - vm_name = self.ui.find_element( - 'instance', - 'name', - browser=rows[i]).text - vm_vn = self.ui.get_slick_cell_text(rows[i], 2).split(' ')[0] - if(vm_name == fixture.vm_name and fixture.vn_name == vm_vn): - self.logger.info( - "VM %s vm exists..will verify row expansion basic details" % - (fixture.vm_name)) - retry_count = 0 - while True: - self.logger.debug("Count is" + str(retry_count)) - if retry_count > 20: - self.logger.error('Vm details failed to load') - break - self.browser.find_element_by_xpath( - "//*[@id='mon_net_instances']").find_element_by_tag_name('a').click() - time.sleep(1) - rows = self.ui.get_rows() - 
rows[i].find_elements_by_tag_name( - 'div')[0].find_element_by_tag_name('i').click() - try: - retry_count = retry_count + 1 - rows = self.ui.get_rows() - rows[ - i + - 1].find_elements_by_class_name('row-fluid')[0].click() - self.ui.wait_till_ajax_done(self.browser) - break - except WebDriverException: - pass - rows = self.ui.get_rows() - row_details = rows[i + 1].find_element_by_xpath( - "//*[contains(@id, 'basicDetails')]").find_elements_by_class_name('row-fluid')[5] - vm_status = row_details.find_elements_by_tag_name( - 'div')[8].text - vm_ip_and_mac = row_details.find_elements_by_tag_name( - 'div')[2].text - assert vm_status == 'Active' - assert vm_ip_and_mac.splitlines()[0].split( - ':')[1].strip() == fixture.vm_ip - vm_flag = 1 - break - assert vm_flag, "VM name or VM uuid or VM ip or VM status verifications in WebUI for VM %s failed" % ( - fixture.vm_name) - self.logger.info( - "Vm name,vm uuid,vm ip and vm status,vm network verification in WebUI for VM %s passed" % - (fixture.vm_name)) - mon_net_networks = self.ui.find_element('mon_net_networks') - self.ui.click_element('Networks', 'link_text', mon_net_networks) - time.sleep(4) - self.ui.wait_till_ajax_done(self.browser) - rows = self.ui.get_rows() - for i in range(len(rows)): - if(self.ui.get_slick_cell_text(rows[i], 1) == fixture.vn_fq_name.split(':')[0] + ":" + fixture.project_name + ":" + fixture.vn_name): - rows[i].find_elements_by_tag_name( - 'div')[0].find_element_by_tag_name('i').click() - time.sleep(2) - self.ui.wait_till_ajax_done(self.browser) - rows = self.ui.get_rows() - vm_ids = rows[i + 1].find_element_by_xpath("//div[contains(@id, 'basicDetails')]").find_elements_by_class_name( - 'row-fluid')[7].find_elements_by_tag_name('div')[1].text - if fixture.vm_id in vm_ids: - self.logger.info( - "Vm id matched on Monitor->Netoworking->Networks basic details page %s" % - (fixture.vn_name)) - else: - self.logger.error( - "Vm id not matched on Monitor->Netoworking->Networks basic details page %s" % - 
(fixture.vm_name)) - self.ui.screenshot( - 'vm_create_check' + - fixture.vm_name + - fixture.vm_id) - result = result and False - break - self.logger.info("VM verification in webui %s passed" % - (fixture.vm_name)) - except WebDriverException: - self.logger.error("vm %s test error " % (fixture.vm_name)) - self.ui.screenshot( - 'verify_vm_test_openstack_error' + - fixture.vm_name) - result = result and False - return result - # end verify_vm - - def create_floatingip_pool(self, fixture, pool_name, vn_name): - try: - if not self.ui.click_configure_networks(): - result = result and False - self.ui.select_project(fixture.project_name) - rows = self.ui.get_rows() - self.logger.info( - "Creating floating ip pool %s using contrail-webui" % - (pool_name)) - for net in rows: - if (self.ui.get_slick_cell_text(net, 2) == fixture.vn_name): - net.find_element_by_class_name('icon-cog').click() - time.sleep(3) - self.browser.find_element_by_class_name( - 'tooltip-success').find_element_by_tag_name('i').click() - time.sleep(2) - self.ui.click_element( - "//span[contains(text(), 'Floating IP Pools')]", - 'xpath') - time.sleep(2) - icon = self.ui.find_element( - "//div[@title='Add Floating IP Pool below']", - 'xpath') - icon.find_element_by_tag_name('i').click() - self.ui.send_keys( - fixture.pool_name, - "//input[@placeholder='Pool Name']", - 'xpath') - self.browser.find_element_by_id( - 'fipTuples').find_elements_by_tag_name('input')[1].click() - project_elements = self.browser.find_elements_by_xpath( - "//*[@class = 'select2-match']/..") - self._click_if_element_found( - fixture.project_name, project_elements) - self.ui.wait_till_ajax_done(self.browser) - self.browser.find_element_by_xpath( - "//button[@id = 'btnCreateVNOK']").click() - self.ui.wait_till_ajax_done(self.browser) - time.sleep(2) - if not self.ui.check_error_msg("Creating fip pool"): - raise Exception("Create fip pool failed") - self.logger.info( - "Fip pool %s created using contrail-webui" % - (fixture.pool_name)) 
- break - except WebDriverException: - self.logger.error("Fip %s Error while creating floating ip pool " % - (fixture.pool_name)) - self.ui.screenshot("fip_create_error") - raise - # end create_floatingip_pool - - def bind_policies(self, fixture): - result = True - policy_fq_names = [ - fixture.quantum_h.get_policy_fq_name(x) for x in fixture.policy_obj[ - fixture.vn]] - result = True - try: - if not self.ui.click_configure_networks(): - result = result and False - self.ui.select_project(fixture.project_name) - rows = self.ui.get_rows() - self.logger.info("Binding policies %s using contrail-webui" % - (policy_fq_names)) - for net in rows: - if (self.ui.get_slick_cell_text(net, 2) == fixture.vn): - net.find_element_by_class_name('icon-cog').click() - self.ui.wait_till_ajax_done(self.browser) - self.browser.find_element_by_class_name( - 'tooltip-success').find_element_by_tag_name('i').click() - self.ui.wait_till_ajax_done(self.browser) - for policy in policy_fq_names: - self.ui.click_element( - ['s2id_msNetworkPolicies', 'input'], ['id', 'tag']) - pol = policy[2] - self.ui.select_from_dropdown(pol) - self.browser.find_element_by_xpath( - "//button[@id = 'btnCreateVNOK']").click() - self.ui.wait_till_ajax_done(self.browser) - time.sleep(2) - if not self.ui.check_error_msg("Binding policies"): - result = result and False - raise Exception("Policy association failed") - self.logger.info( - "Associated Policy %s using contrail-webui" % - (policy_fq_names)) - time.sleep(5) - break - except WebDriverException: - self.logger.error( - "Error while %s binding polices " % - (policy_fq_names)) - self.ui.screenshot("policy_bind_error") - result = result and False - raise - return result - # end bind_policies - - def detach_policies(self, fixture): - policy_fq_names = [ - fixture.quantum_h.get_policy_fq_name(x) for x in fixture.policy_obj[ - fixture.vn]] - result = True - try: - if not self.ui.click_configure_networks(): - result = result and False - 
self.ui.select_project(fixture.project_name) - rows = self.ui.get_rows() - self.logger.info("Detaching policies %s using contrail-webui" % - (policy_fq_names)) - for net in rows: - if (self.ui.get_slick_cell_text(net, 2) == fixture.vn): - self.ui.click_element('icon-cog', 'class', net) - self.ui.wait_till_ajax_done(self.browser) - self.browser.find_element_by_class_name( - 'tooltip-success').find_element_by_tag_name('i').click() - self.ui.wait_till_ajax_done(self.browser) - for policy in policy_fq_names: - ui_policies_obj = self.ui.find_element( - ['s2id_msNetworkPolicies', 'li'], ['id', 'tag'], if_elements=[1]) - pol = policy[2] - for indx in range(len(ui_policies_obj) - 1): - if ui_policies_obj[indx].find_element_by_tag_name( - 'div').text == pol: - ui_policies_obj[ - indx].find_element_by_tag_name('a').click() - break - # self.ui.select_from_dropdown(pol) - self.browser.find_element_by_xpath( - "//button[@id = 'btnCreateVNOK']").click() - self.ui.wait_till_ajax_done(self.browser) - time.sleep(2) - if not self.ui.check_error_msg("Detaching policies"): - raise Exception("Policy detach failed") - self.logger.info( - "Detached Policies %s using contrail-webui" % - (policy_fq_names)) - break - except WebDriverException: - self.logger.error( - "Error while %s detaching polices " % - (policy_fq_names)) - self.ui.screenshot("policy_detach_error") - # end detach_policies - - def create_and_assoc_fip( - self, - fixture, - fip_pool_vn_id, - vm_id, - vm_name, - project=None): - result = True - try: - fixture.vm_name = vm_name - fixture.vm_id = vm_id - if not self.ui.click_configure_networks(): - result = result and False - self.ui.select_project(fixture.project_name) - rows = self.ui.get_rows() - self.logger.info( - "Creating and associating fip %s using contrail-webui" % - (fip_pool_vn_id)) - for net in rows: - if (self.ui.get_slick_cell_text(net, 2) == fixture.vn_name): - self.ui.click_element( - ['config_net_fip', 'a'], ['id', 'tag']) - 
self.ui.select_project(fixture.project_name) - self.ui.click_element('btnCreatefip') - self.ui.click_element( - ["//div[@id='s2id_ddFipPool']", 'a'], ['xpath', 'tag']) - fip_fixture_fq = fixture.project_name + ':' + \ - fixture.vn_name + ':' + fixture.pool_name - if not self.ui.select_from_dropdown( - fip_fixture_fq, - grep=True): - self.logger.error( - "Fip %s not found in dropdown " % - (fip_fixture_fq)) - self.ui.click_element('btnCreatefipCancel') - else: - self.ui.click_element('btnCreatefipOK') - if not self.ui.check_error_msg("Creating Fip"): - raise Exception("Create fip failed") - fip_rows = self.ui.find_element('grid-canvas', 'class') - rows1 = self.ui.get_rows(fip_rows) - fixture_vn_pool = fixture.vn_name + ':' + fixture.pool_name - for element in rows1: - fip_ui_fq = self.ui.get_slick_cell_text(element, 3) - if fip_ui_fq == fixture_vn_pool: - element.find_element_by_class_name( - 'icon-cog').click() - self.ui.wait_till_ajax_done(self.browser) - element.find_element_by_xpath( - "//a[@class='tooltip-success']").click() - self.ui.wait_till_ajax_done(self.browser) - pool = self.browser.find_element_by_xpath( - "//div[@id='s2id_ddAssociate']").find_element_by_tag_name('a').click() - time.sleep(1) - self.ui.wait_till_ajax_done(self.browser) - if self.ui.select_from_dropdown(vm_id, grep=True): - self.ui.click_element('btnAssociatePopupOK') - else: - self.ui.click_element( - 'btnAssociatePopupCancel') - self.logger.error( - "not able to associate vm id %s as it is not found in dropdown " % - (vm_id)) - break - if not self.ui.check_error_msg("Fip Associate"): - raise Exception("Fip association failed") - time.sleep(1) - break - except WebDriverException: - self.logger.error( - "Error while creating floating ip and associating it.") - self.ui.screenshot("fip_assoc_error") - result = result and False - raise - return result - # end create_and_assoc_fip - - def disassoc_floatingip(self, fixture, vm_id): - try: - if not self.ui.click_configure_fip(): - result = 
result and False - self.ui.select_project(fixture.project_name) - gridfip = self.ui.find_element('gridfip') - rows = self.ui.get_rows(gridfip) - self.logger.info("Disassociating fip %s using contrail-webui" % - (fixture.pool_name)) - for element in rows: - if self.ui.get_slick_cell_text(element, 2) == vm_id: - element.find_element_by_class_name('icon-cog').click() - self.ui.wait_till_ajax_done(self.browser) - element.find_elements_by_xpath( - "//a[@class='tooltip-success']")[1].click() - self.ui.wait_till_ajax_done(self.browser) - self.ui.click_element('btnDisassociatePopupOK') - self.ui.check_error_msg('disassociate_vm') - self.ui.delete_element(fixture, 'disassociate_fip') - break - except WebDriverException: - self.logger.error( - "Error while disassociating fip.") - self.ui.screenshot("fip_disassoc_error") - # end disassoc_floatingip - - def delete_floatingip_pool(self, fixture): - result = True - try: - if not self.ui.click_configure_networks(): - result = result and False - self.ui.select_project(fixture.project_name) - rows = self.ui.get_rows() - self.logger.info("Deleting fip pool %s using contrail-webui" % - (fixture.pool_name)) - for net in rows: - if (self.ui.get_slick_cell_text(net, 2) == fixture.vn_name): - net.find_element_by_class_name('icon-cog').click() - self.ui.wait_till_ajax_done(self.browser) - self.browser.find_element_by_class_name( - 'tooltip-success').find_element_by_tag_name('i').click() - self.ui.wait_till_ajax_done(self.browser) - fip_text = net.find_element_by_xpath( - "//span[contains(text(), 'Floating IP Pools')]") - fip_text.find_element_by_xpath( - '..').find_element_by_tag_name('i').click() - self.ui.click_element( - ['fipTuples', 'icon-minus'], ['id', 'class']) - self.browser.find_element_by_xpath( - "//button[@id = 'btnCreateVNOK']").click() - self.ui.wait_till_ajax_done(self.browser) - time.sleep(2) - if not self.ui.check_error_msg("Deleting_fip"): - raise Exception("Delete fip failed") - self.logger.info( - "Deleted fip pool %s 
using contrail-webui" % - (fixture.pool_name)) - time.sleep(20) - break - except WebDriverException: - self.logger.error( - "Error while %s deleting fip" % - (fixture.pool_name)) - self.ui.screenshot("fip_delete_error") - # end delete_fip - - def verify_fip_in_webui(self, fixture): - if not self.ui.click_configure_networks(): - result = result and False - rows = self.ui.find_element(['gridVN', 'tbody', 'tr'], [ - 'id', 'tag', 'tag'], if_elements=[2]) - for i in range(len(rows)): - vn_name = rows[i].find_elements_by_tag_name('td')[2].text - if vn_name == fixture.vn_name: - rows[i].find_elements_by_tag_name( - 'td')[0].find_element_by_tag_name('a').click() - rows = self.ui.get_rows() - fip_check = rows[ - i + 1].find_elements_by_xpath("//td/div/div/div")[1].text - if fip_check.split('\n')[1].split(' ')[0] == fixture.pool_name: - self.logger.info( - "Fip pool %s verified in contrail-webui configure network page" % - (fixture.pool_name)) - break - self.ui.click_element("//*[@id='config_net_fip']/a", 'xpath') - self.ui.wait_till_ajax_done(self.browser) - rows = self.browser.find_element_by_xpath( - "//div[@id='gridfip']/table/tbody").find_elements_by_tag_name('tr') - for i in range(len(rows)): - fip = rows[i].find_elements_by_tag_name('td')[3].text.split(':')[1] - vn = rows[i].find_elements_by_tag_name('td')[3].text.split(':')[0] - fip_ip = self.ui.get_slick_cell_text(rows[i], 1) - if rows[i].find_elements_by_tag_name( - 'td')[2].text == fixture.vm_id: - if vn == fixture.vn_name and fip == fixture.pool_name: - self.logger.info("Fip is found attached with vm %s " % - (fixture.vm_name)) - self.logger.info("VM %s is found associated with FIP %s " % - (fixture.vm_name, fip)) - else: - self.logger.info( - "Association of %s VM failed with FIP %s " % - (fixture.vm_name, fip)) - break - if not self.ui.click_monitor_instances(): - result = result and False - rows = self.browser.find_element_by_class_name( - 
'k-grid-content').find_element_by_tag_name('tbody').find_elements_by_tag_name('tr') - ln = len(rows) - vm_flag = 0 - for i in range(len(rows)): - vm_name = rows[i].find_elements_by_tag_name( - 'td')[1].find_element_by_tag_name('div').text - vm_uuid = rows[i].find_elements_by_tag_name('td')[2].text - vm_vn = rows[i].find_elements_by_tag_name( - 'td')[3].text.split(' ')[0] - if(vm_name == fixture.vm_name and fixture.vm_id == vm_uuid and vm_vn == fixture.vn_name): - rows[i].find_elements_by_tag_name( - 'td')[0].find_element_by_tag_name('a').click() - self.ui.wait_till_ajax_done(self.browser) - rows = self.browser.find_element_by_class_name( - 'k-grid-content').find_element_by_tag_name('tbody').find_elements_by_tag_name('tr') - fip_check_vm = rows[i + 1].find_element_by_xpath("//*[contains(@id, 'basicDetails')]").find_elements_by_tag_name( - 'div')[0].find_elements_by_tag_name('div')[1].text - if fip_check_vm.split(' ')[0] == fip_ip and fip_check_vm.split(' ')[ - 1] == '\(' + 'default-domain' + ':' + fixture.project_name + ':' + fixture.vn_name + '\)': - self.logger.info( - "FIP verified in monitor instance page for vm %s " % - (fixture.vm_name)) - else: - self.logger.info( - "FIP failed to verify in monitor instance page for vm %s" % - (fixture.vm_name)) - break - # end verify_fip_in_webui - - def verify_project_quotas(self): - self.logger.info( - "Verifying project quotas api server data on Config->Nwetworking->project quotas page ...") - result = True - const_str = ['Not Set', 'Unlimited'] - fip_list_api = self.ui.get_fip_list_api() - ipam_list_api = self.ui.get_ipam_list_api() - policy_list_api = self.ui.get_policy_list_api() - svc_instance_list_api = self.ui.get_service_instance_list_api() - floating_ip_pool_list_api = self.ui.get_floating_pool_list_api() - security_grp_list_api = self.ui.get_security_group_list_api() - vn_list_api = self.ui.get_vn_list_api() - project_list_api = self.ui.get_project_list_api() - vm_intf_refs_list_api = 
self.ui.get_vm_intf_refs_list_api() - routers_list_api = self.ui.get_routers_list_api() - routers_count_dict = self.ui.count_quotas( - routers_list_api.get('logical-routers')) - subnets_count_dict = self.ui.subnets_count_quotas( - vn_list_api['virtual-networks']) - security_grp_rules_count_dict = self.ui.security_grp_rules_count_quotas( - security_grp_list_api.get('security-groups')) - vn_count_dict = self.ui.count_quotas( - vn_list_api.get('virtual-networks')) - fips_count_dict = self.ui.count_quotas( - fip_list_api.get('floating-ips')) - policy_count_dict = self.ui.count_quotas( - policy_list_api.get('network-policys')) - ipam_count_dict = self.ui.count_quotas( - ipam_list_api.get('network-ipams')) - fip_pool_count_dict = self.ui.count_quotas( - floating_ip_pool_list_api.get('floating-ip-pools')) - svc_instance_count_dict = self.ui.count_quotas( - svc_instance_list_api.get('service-instances')) - security_grp_count_dict = self.ui.count_quotas( - security_grp_list_api.get('security-groups')) - ports_count_dict = self.ui.count_quotas( - vm_intf_refs_list_api.get('virtual-machine-interfaces')) - for index, project in enumerate(project_list_api['projects']): - prj = project.get('fq_name')[1] - if prj == 'default-project': - continue - api_data = [] - prj_quotas_dict = self.ui.get_details( - project_list_api['projects'][index]['href']).get('project').get('quota') - if not prj_quotas_dict: - self.logger.warning( - "Project quotas details not found for %s" % - (prj)) - result = True - continue - not_found = [-1, None] - if prj_quotas_dict.get('subnet') in not_found: - subnets_limit_api = const_str - else: - subnets_limit_api = prj_quotas_dict.get('subnet') - if prj_quotas_dict.get('virtual_machine_interface') in not_found: - ports_limit_api = const_str - else: - ports_limit_api = prj_quotas_dict.get( - 'virtual_machine_interface') - if prj_quotas_dict.get('security_group_rule') in not_found: - security_grp_rules_limit_api = 'Unlimited' - else: - 
security_grp_rules_limit_api = prj_quotas_dict.get( - 'security_group_rule') - if prj_quotas_dict.get('security_group') in not_found: - security_grps_limit_api = 'Unlimited' - else: - security_grps_limit_api = prj_quotas_dict.get('security_group') - if prj_quotas_dict.get('virtual_network') in not_found: - vnets_limit_api = const_str - else: - vnets_limit_api = prj_quotas_dict.get('virtual_network') - if not prj_quotas_dict.get('floating_ip_pool'): - pools_limit_api = 'Not Set' - else: - pools_limit_api = prj_quotas_dict.get('floating_ip_pool') - if prj_quotas_dict.get('floating_ip') in not_found: - fips_limit_api = const_str - else: - fips_limit_api = prj_quotas_dict.get('floating_ip') - if not prj_quotas_dict.get('network_ipam'): - ipams_limit_api = 'Not Set' - else: - ipams_limit_api = prj_quotas_dict.get('network_ipam') - if prj_quotas_dict.get('logical_router') in not_found: - routers_limit_api = const_str - else: - routers_limit_api = prj_quotas_dict.get('logical_router') - if not prj_quotas_dict.get('access_control_list'): - policies_limit_api = 'Not Set' - else: - policies_limit_api = prj_quotas_dict.get('access_control_list') - if not prj_quotas_dict.get('service_instance'): - svc_instances_limit_api = 'Not Set' - else: - svc_instances_limit_api = prj_quotas_dict.get( - 'service_instance') - if not vn_count_dict.get(prj): - vn_count_dict[prj] = '0' - if not fip_pool_count_dict.get(prj): - fip_pool_count_dict[prj] = '0' - if not policy_count_dict.get(prj): - policy_count_dict[prj] = '0' - if not ipam_count_dict.get(prj): - ipam_count_dict[prj] = '0' - if not svc_instance_count_dict.get(prj): - svc_instance_count_dict[prj] = '0' - if not security_grp_count_dict.get(prj): - security_grp_count_dict[prj] = '0' - if not fips_count_dict.get(prj): - fips_count_dict[prj] = '0' - if not ports_count_dict.get(prj): - ports_count_dict[prj] = '0' - if not subnets_count_dict.get(prj): - subnets_count_dict[prj] = '0' - if not security_grp_rules_count_dict.get(prj): - 
security_grp_rules_count_dict[prj] = '0' - if not routers_count_dict.get(prj): - routers_count_dict[prj] = '0' - self.logger.info( - "Verifying project quotas for project %s ..." % - (prj)) - self.ui.keyvalue_list( - api_data, - vnets=vn_count_dict[prj], - pools=fip_pool_count_dict[prj], - policies=policy_count_dict[prj], - ipams=ipam_count_dict[prj], - svc_instances=svc_instance_count_dict[prj], - security_grps=security_grp_count_dict[prj], - fips=fips_count_dict[prj], - ports=ports_count_dict[prj], - subnets=subnets_count_dict[prj], - security_grp_rules=security_grp_rules_count_dict[prj], - routers=routers_count_dict[prj], - vnets_limit=vnets_limit_api, - subnets_limit=subnets_limit_api, - ports_limit=ports_limit_api, - fips_limit=fips_limit_api, - pools_limit=pools_limit_api, - policies_limit=policies_limit_api, - ipams_limit=ipams_limit_api, - svc_instances_limit=svc_instances_limit_api, - security_grps_limit=security_grps_limit_api, - security_grp_rules_limit=security_grp_rules_limit_api, - routers_limit=routers_limit_api) - if not self.ui.click_configure_project_quotas(): - result = result and False - self.ui.select_project(prj) - rows = self.ui.find_element('grid-canvas', 'class') - rows = self.ui.get_rows(rows) - used = [] - limit = [] - for row in rows: - used.append( - self.ui.find_element( - ('div', 2), 'tag', row, elements=True).text) - limit.append( - self.ui.find_element( - ('div', 1), 'tag', row, elements=True).text) - vnets, subnets, ports, fips, pools, policies, routers, ipams, svc_instances, security_grps, security_grp_rules = used[ - 0], used[1], used[2], used[3], used[4], used[5], used[6], used[7], used[8], used[9], used[10] - vnets_limit, subnets_limit, ports_limit, fips_limit, pools_limit, policies_limit, routers_limit, ipams_limit, svc_instances_limit, security_grps_limit, security_grp_rules_limit = limit[ - 0], limit[1], limit[2], limit[3], limit[4], limit[5], limit[6], limit[7], limit[8], limit[9], limit[10] - if vnets_limit in const_str: - 
vnets_limit = const_str - if ports_limit in const_str: - ports_limit = const_str - if subnets_limit in const_str: - subnets_limit = const_str - if fips_limit in const_str: - fips_limit = const_str - - ui_data = [] - self.ui.keyvalue_list( - ui_data, - vnets=vnets, - pools=pools, - policies=policies, - ipams=ipams, - svc_instances=svc_instances, - security_grps=security_grps, - fips=fips, - security_grp_rules=security_grp_rules, - subnets=subnets, - ports=ports, - routers=routers, - vnets_limit=vnets_limit, - subnets_limit=subnets_limit, - ports_limit=ports_limit, - fips_limit=fips_limit, - pools_limit=pools_limit, - policies_limit=policies_limit, - ipams_limit=ipams_limit, - svc_instances_limit=svc_instances_limit, - security_grps_limit=security_grps_limit, - security_grp_rules_limit=security_grp_rules_limit, - routers_limit=routers_limit) - if self.ui.match_ui_kv(api_data, ui_data): - self.logger.info("Project quotas matched for %s" % (prj)) - else: - self.logger.info("Project quotas not matched for %s" % (prj)) - return result - # end verify_project_quota - - def verify_service_instance_api_basic_data(self): - self.logger.info( - "Verifying service instances api server data on Config->services->service instances...") - self.logger.info(self.dash) - result = True - service_instance_list_api = self.ui.get_service_instance_list_api() - for instance in range( - len(service_instance_list_api['service-instances'])): - net_list, network_lists1, network_lists3, inst_net_list, power_list, vm_list, status_list, power1_list, status1_list, vm1_list, dom_arry_basic = [ - [] for _ in range(11)] - template_string, image, flavor, status_main_row = [ - '' for _ in range(4)] - svc_fq_name = service_instance_list_api[ - 'service-instances'][instance] - api_fq_name = svc_fq_name['fq_name'][2] - self.ui.click_configure_service_instance() - project = svc_fq_name['fq_name'][1] - self.ui.select_project(project) - time.sleep(30) - rows = self.ui.get_rows(canvas=True) - self.logger.info( 
- "service instance fq_name %s exists in api server..checking if exists in webui as well" % - (api_fq_name)) - for i in range(len(rows)): - not_match_count = 0 - match_flag = 0 - if rows[i].find_elements_by_tag_name( - 'div')[2].text == api_fq_name: - self.logger.info( - "service instance fq name %s matched in webui..Verifying basic view details now" % - (api_fq_name)) - self.logger.info(self.dash) - match_index = i - match_flag = 1 - div_ele = rows[i].find_elements_by_tag_name('div') - self.ui.keyvalue_list( - dom_arry_basic, - Name_main_row=div_ele[2].text, - Template_main_row=div_ele[3].text, - Status_main_row=div_ele[4].text.strip(), - no_of_instances_main_row=div_ele[6].text, - Networks_main_row=div_ele[7].text.split(',')) - break - if not match_flag: - self.logger.error( - "service instance fq_name exists in apiserver but %s not found in webui..." % - (api_fq_name)) - self.logger.info(self.dash) - else: - self.logger.info( - "Click and retrieve basic view details in webui for service instance fq_name %s " % - (api_fq_name)) - self.ui.click_configure_service_instance_basic(match_index) - rows = self.ui.get_rows(canvas=True) - rows_detail = rows[match_index + 1].find_element_by_class_name( - 'slick-row-detail-container').find_element_by_class_name('row-fluid').find_elements_by_class_name('row-fluid') - for detail in range(len(rows_detail)): - text1 = rows_detail[ - detail].find_element_by_tag_name('label').text - if text1 == 'Instance Details': - continue - elif text1 == 'Networks': - network_lists = rows_detail[detail].find_element_by_class_name( - 'span10').text.split(',') - dom_arry_basic.append( - {'key': str(text1), 'value': network_lists}) - elif text1 == 'Virtual Machine': - keys_text = rows_detail[detail].find_element_by_class_name( - 'span10').find_elements_by_tag_name('div') - count = 7 - for keys in range((len(keys_text) / 7) - 1): - dom_arry_basic1 = [] - complete_api_data1 = [] - network_list = [] - virtual_net_list = [] - status = 
keys_text[count].find_elements_by_class_name( - 'span2')[1].text - vm_name = keys_text[ - count].find_elements_by_class_name('span2')[0].text - power = keys_text[count].find_elements_by_class_name( - 'span2')[2].text - network_list = keys_text[count].find_element_by_class_name( - 'span10').text.split() - count = count + 7 - self.ui.keyvalue_list( - dom_arry_basic1, - Virtual_machine=vm_name, - Status=status, - Power_State=power, - Networkss=network_list) - self.ui.click_monitor_instances() - rows = self.ui.get_rows(canvas=True) - vmi_list_ops = self.ui.get_vmi_list_ops() - for insta in range(len(rows)): - if self.ui.get_slick_cell_text( - rows[insta], - 1) == vm_name: - uuid = self.ui.get_slick_cell_text( - rows[insta], - 2) - for vm_inst in range(len(vmi_list_ops)): - vmi_inst_ops_data = self.ui.get_details( - vmi_list_ops[vm_inst]['href']) - ops_data_basic_intf = vmi_inst_ops_data.get( - 'UveVMInterfaceAgent') - if ops_data_basic_intf[ - 'vm_name'] == vm_name: - vmi_inst_ops_data = self.ui.get_details( - vmi_list_ops[vm_inst]['href']) - if 'UveVMInterfaceAgent' in vmi_inst_ops_data: - ops_data_basic = vmi_inst_ops_data.get( - 'UveVMInterfaceAgent') - vm1 = ops_data_basic['vm_name'] - if ops_data_basic.get( - 'active'): - status1 = 'ACTIVE' - power1 = 'RUNNING' - status_main_row = 'Active' - else: - status_main_row = 'Inactive' - if ops_data_basic.get( - 'virtual_network'): - if ops_data_basic['virtual_network'].split( - ':')[1] == project: - if ops_data_basic.get( - 'ip_address'): - virtual_net_list.append(ops_data_basic['virtual_network'].split( - ':')[2] + ':' + str(ops_data_basic['ip_address'])) - break - break - self.ui.keyvalue_list( - complete_api_data1, - Networkss=virtual_net_list, - Virtual_machine=vm1, - Status=status1, - Power_State=power1) - self.logger.info( - "Matching the instance details of service instance %s " % - (vm1)) - if self.ui.match_ui_kv( - complete_api_data1, - dom_arry_basic1): - self.logger.info( - "Service instance %s config details 
matched on Config->Services->Service Instances page" % - (vm1)) - else: - self.logger.error( - "Service instance %s config details not matched on Config->Services->Service Instances page" % - (vm1)) - if keys != ((len(keys_text) / 7) - 1) - 1: - self.ui.click_configure_service_instance() - self.ui.click_configure_service_instance_basic( - match_index) - rows = self.ui.get_rows(canvas=True) - rows_detail = rows[match_index + 1].find_element_by_class_name( - 'slick-row-detail-container').find_element_by_class_name('row-fluid').find_elements_by_class_name('row-fluid') - keys_text = rows_detail[detail].find_element_by_class_name( - 'span10').find_elements_by_tag_name('div') - else: - break - else: - dom_arry_basic.append({'key': str(text1), 'value': rows_detail[ - detail].find_element_by_class_name('span10').text}) - service_inst_api_data = self.ui.get_details( - service_instance_list_api['service-instances'][instance]['href']) - complete_api_data = [] - service_temp_list_api = self.ui.get_service_template_list_api() - if 'service-instance' in service_inst_api_data: - api_data_basic = service_inst_api_data.get( - 'service-instance') - if 'fq_name' in api_data_basic: - project = api_data_basic['fq_name'][1] - complete_api_data.append( - {'key': 'Instance Name', 'value': api_data_basic['fq_name'][2]}) - complete_api_data.append( - {'key': 'Name_main_row', 'value': api_data_basic['fq_name'][2]}) - if 'display_name' in api_data_basic: - complete_api_data.append( - {'key': 'Display Name', 'value': api_data_basic['display_name']}) - if api_data_basic.get('service_template_refs'): - template_string = api_data_basic[ - 'service_template_refs'][0]['to'][1] - for temp in range( - len(service_temp_list_api['service-templates']) - 1): - if template_string == service_temp_list_api[ - 'service-templates'][temp + 1]['fq_name'][1]: - service_temp_api_data = self.ui.get_details( - service_temp_list_api['service-templates'][temp + 1]['href']) - if 'service-template' in 
service_temp_api_data: - api1_data_basic = service_temp_api_data.get( - 'service-template') - if 'service_mode' in api1_data_basic[ - 'service_template_properties']: - attached_temp = api1_data_basic[ - 'service_template_properties']['service_mode'].capitalize() - svc_prop = api1_data_basic[ - 'service_template_properties'] - if 'image_name' in svc_prop: - image = svc_prop['image_name'] - if 'flavor' in svc_prop: - flavor = svc_prop['flavor'] - break - self.ui.keyvalue_list( - complete_api_data, - Template=template_string + ' ' + - '(' + attached_temp + ')', - Template_main_row=template_string + - ' ' + '(' + attached_temp + ')', - Status_main_row=status_main_row) - if api_data_basic.get('service_instance_properties'): - serv_inst_list = api_data_basic[ - 'service_instance_properties'] - for key in serv_inst_list: - key_list = [ - 'left_virtual_network', - 'right_virtual_network', - 'management_virtual_network'] - if key == 'scale_out': - if serv_inst_list.get('scale_out'): - inst_value = str( - serv_inst_list['scale_out']['max_instances']) + ' ' + 'Instances' - complete_api_data.append( - {'key': 'Number of instances', 'value': inst_value}) - complete_api_data.append( - {'key': 'no_of_instances_main_row', 'value': inst_value}) - elif key == 'interface_list': - inst_net_list1 = serv_inst_list['interface_list'] - if len(inst_net_list1) != len(inst_net_list): - for inst_nets1 in range(len(inst_net_list1)): - for inst_nets in range(len(inst_net_list)): - if inst_net_list1[inst_nets1].get( - 'virtual_network') != inst_net_list[inst_nets]: - not_match_count = not_match_count + \ - 1 - if not_match_count == len( - inst_net_list): - other_net = inst_net_list1[ - inst_nets1].get('virtual_network') - if other_net == '': - pass - # net_list.append( - # 'Other Network : Automatic') - elif other_net.split(':')[1] == project: - net_list.append( - 'Other Network : ' + - other_net.split(':')[2]) - else: - net_list.append( - net + - ' : ' + - other_net.split(':')[2] + - '(' + - 
other_net.split(':')[0] + - ':' + - other_net.split(':')[1] + - ')') - else: - break - elif key in key_list: - net = key - net_value = serv_inst_list.get(net) - net = net.replace('_virtual_', ' ').title() - inst_net_list.append(net_value) - if net_value == '' or net_value is None: - net_list.append(net + ' : Automatic') - elif net_value.split(':')[1] == project: - net_list.append( - net + - ' : ' + - net_value.split(':')[2]) - else: - net_list.append( - net + - ' : ' + - net_value.split(':')[2] + - '(' + - net_value.split(':')[0] + - ':' + - net_value.split(':')[1] + - ')') - self.ui.keyvalue_list( - complete_api_data, - Networks=net_list, - Networks_main_row=net_list, - Image=image, - Flavor=flavor) - if self.ui.match_ui_kv( - complete_api_data, - dom_arry_basic): - self.logger.info( - "Service instance config data matched on Config->Services->Service Instances page") - else: - self.logger.error( - "Service instance config data match failed on Config->Services->Service Instances page") - result = result and False - return result - # end verify_service_instance_data - - def verify_config_nodes_ops_grid_page_data(self, host_name, ops_data): - webui_data = [] - self.ui.click_monitor_config_nodes() - rows = self.browser.find_element_by_class_name('grid-canvas') - base_indx = 0 - rows = self.ui.get_rows(rows) - for hosts in range(len(rows)): - if self.ui.get_slick_cell_text( - rows[hosts], - base_indx) == host_name: - webui_data.append( - {'key': 'Hostname', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx)}) - webui_data.append({'key': 'IP Address', 'value': self.ui.get_slick_cell_text( - rows[hosts], base_indx + 1)}) - webui_data.append( - {'key': 'Version', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx + 2)}) - webui_data.append( - {'key': 'Status', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx + 3)}) - webui_data.append({'key': 'CPU', 'value': self.ui.get_slick_cell_text( - rows[hosts], base_indx + 4) + ' %'}) - 
webui_data.append( - {'key': 'Memory', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx + 5)}) - if self.ui.match_ui_kv(ops_data, webui_data): - return True - else: - return False - # end verify_config_nodes_ops_grid_page_data - - def verify_analytics_nodes_ops_grid_page_data(self, host_name, ops_data): - webui_data = [] - self.ui.click_monitor_analytics_nodes() - rows = self.ui.get_rows() - for hosts in range(len(rows)): - base_indx = 0 - if self.ui.get_slick_cell_text( - rows[hosts], - base_indx) == host_name: - webui_data.append( - {'key': 'Hostname', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx)}) - webui_data.append({'key': 'IP Address', 'value': self.ui.get_slick_cell_text( - rows[hosts], base_indx + 1)}) - webui_data.append( - {'key': 'Version', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx + 2)}) - webui_data.append( - {'key': 'Status', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx + 3)}) - webui_data.append({'key': 'CPU', 'value': self.ui.get_slick_cell_text( - rows[hosts], base_indx + 4) + ' %'}) - webui_data.append( - {'key': 'Memory', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx + 5)}) - webui_data.append({'key': 'Generators', 'value': self.ui.get_slick_cell_text( - rows[hosts], base_indx + 6)}) - if self.ui.match_ui_kv(ops_data, webui_data): - return True - else: - return False - # end verify_analytics_nodes_ops_grid_page_data - - def verify_vrouter_ops_grid_page_data(self, host_name, ops_data): - webui_data = [] - self.ui.click_monitor_vrouters() - rows = self.ui.get_rows() - base_indx = 0 - for hosts in range(len(rows)): - if self.ui.get_slick_cell_text( - rows[hosts], - base_indx) == host_name: - webui_data.append( - {'key': 'Hostname', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx)}) - webui_data.append({'key': 'IP Address', 'value': self.ui.get_slick_cell_text( - rows[hosts], base_indx + 1)}) - webui_data.append( - {'key': 'Version', 'value': 
self.ui.get_slick_cell_text(rows[hosts], base_indx + 2)}) - webui_data.append( - {'key': 'Status', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx + 3)}) - webui_data.append({'key': 'CPU', 'value': self.ui.get_slick_cell_text( - rows[hosts], base_indx + 5) + ' %'}) - webui_data.append( - {'key': 'Memory', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx + 6)}) - webui_data.append({'key': 'Networks', 'value': self.ui.get_slick_cell_text( - rows[hosts], base_indx + 7)}) - webui_data.append({'key': 'Instances', 'value': self.ui.get_slick_cell_text( - rows[hosts], base_indx + 8)}) - webui_data.append({'key': 'Interfaces', 'value': self.ui.get_slick_cell_text( - rows[hosts], base_indx + 9)}) - if self.ui.match_ui_kv(ops_data, webui_data): - return True - else: - return False - # end verify_vrouter_ops_grid_page_data - - def verify_bgp_routers_ops_grid_page_data(self, host_name, ops_data): - webui_data = [] - self.ui.click_monitor_control_nodes() - rows = self.ui.get_rows() - base_indx = 0 - for hosts in range(len(rows) - 1): - if self.ui.get_slick_cell_text( - rows[hosts], - base_indx) == host_name: - webui_data.append( - {'key': 'Hostname', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx)}) - webui_data.append({'key': 'IP Address', 'value': self.ui.get_slick_cell_text( - rows[hosts], base_indx + 1)}) - webui_data.append( - {'key': 'Version', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx + 2)}) - webui_data.append( - {'key': 'Status', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx + 3)}) - webui_data.append({'key': 'CPU', 'value': self.ui.get_slick_cell_text( - rows[hosts], base_indx + 4) + ' %'}) - webui_data.append( - {'key': 'Memory', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx + 5)}) - webui_data.append( - {'key': 'Peers', 'value': self.ui.get_slick_cell_text(rows[hosts], base_indx + 6)}) - webui_data.append({'key': 'vRouters', 'value': self.ui.get_slick_cell_text( - rows[hosts], base_indx 
+ 7)}) - if self.ui.match_ui_kv(ops_data, webui_data): - return True - else: - return False - # end verify_bgp_routers_ops_grid_page_data - - def check_alerts(self): - self.logger.info("Capturing screenshot for alerts...") - self.logger.debug(self.dash) - if not self.ui.click_monitor_dashboard(): - result = result and False - text = self.ui.find_element( - ['alerts-box', 'text'], ['id', 'class']).text - if text: - self.logger.warning("Alerts found %s" % (text)) - self.ui.click_element( - ['moreAlertsLink', 'More'], ['id', 'link_text']) - self.ui.screenshot("Alerts") - self.ui.click_element('alertsClose') - # end check_alerts - - def verify_vm_ops_basic_grid_data(self, vm_name, vm_ops_data, ops_uuid): - if not self.ui.click_monitor_instances(): - result = result and False - dom_arry_basic = [] - vm_name1 = '' - net = '' - network_list = [] - network_grid_list = [] - ip_grid_list = [] - ip_list = [] - fip_list = [] - vrouter = '' - rows = self.ui.get_rows() - for inst in range(len(rows)): - if rows[inst].find_elements_by_class_name( - 'slick-cell')[1].text == vm_name: - dom_arry_basic.append( - {'key': 'Instance_name_grid_row', 'value': vm_name}) - dom_arry_basic.append({'key': 'uuid_grid_row', 'value': rows[ - inst].find_elements_by_class_name('slick-cell')[2].text}) - dom_arry_basic.append({'key': 'vn_grid_row', 'value': rows[ - inst].find_elements_by_class_name('slick-cell')[3].text.splitlines()}) - dom_arry_basic.append({'key': 'interface_grid_row', 'value': rows[ - inst].find_elements_by_class_name('slick-cell')[4].text}) - dom_arry_basic.append({'key': 'vrouter_grid_row', 'value': rows[ - inst].find_elements_by_class_name('slick-cell')[5].text}) - dom_arry_basic.append({'key': 'ip_address_grid_row', 'value': rows[ - inst].find_elements_by_class_name('slick-cell')[6].text.splitlines()}) - dom_arry_basic.append({'key': 'fip_grid_row', 'value': rows[ - inst].find_elements_by_class_name('slick-cell')[7].text.splitlines()}) - break - if 
vm_ops_data.get('UveVirtualMachineAgent'): - ops_data = vm_ops_data['UveVirtualMachineAgent'] - if ops_data.get('interface_list'): - for interface in range(len(ops_data['interface_list'])): - if ops_data['interface_list'][interface].get('vm_name'): - vm_name1 = ops_data['interface_list'][ - interface]['vm_name'] - if ops_data['interface_list'][ - interface].get('virtual_network'): - net = ops_data['interface_list'][ - interface]['virtual_network'] - network_list.append( - net.split(':')[2] + ' (' + net.split(':')[1] + ')') - - if ops_data['interface_list'][interface].get('ip_address'): - ip_list.append( - ops_data['interface_list'][interface]['ip_address']) - if ops_data['interface_list'][ - interface].get('floating_ips'): - for fips in range( - len(ops_data['interface_list'][interface]['floating_ips'])): - fip_list.append( - ops_data['interface_list'][interface]['floating_ips'][fips]['ip_address'] + - ' (0 B/0 B)') - if len(network_list) > 2: - network_grid_list.extend( - [network_list[0], network_list[1]]) - network_grid_list.append( - '(' + str((len(network_list) - 2)) + ' more' + ')') - else: - network_grid_list = network_list - if len(fip_list) > 2: - fip_grid_list.extend([fip_list[0], fip_list[1]]) - fip_grid_list.append( - '(' + str((len(fip_list) - 2)) + ' more' + ')') - else: - fip_grid_list = fip_list - if len(ip_list) > 2: - ip_grid_list.extend([ip_list[0], ip_list[1]]) - ip_grid_list.append( - '(' + str((len(ip_list) - 2)) + ' more' + ')') - else: - ip_grid_list = ip_list - - if ops_data.get('vrouter'): - vrouter = ops_data['vrouter'] - complete_ops_data = [] - complete_ops_data.append( - {'key': 'Instance_name_grid_row', 'value': vm_name1}) - complete_ops_data.append( - {'key': 'uuid_grid_row', 'value': ops_uuid}) - complete_ops_data.append( - {'key': 'vn_grid_row', 'value': network_grid_list}) - complete_ops_data.append( - {'key': 'interface_grid_row', 'value': str(len(network_list))}) - complete_ops_data.append( - {'key': 'vrouter_grid_row', 'value': 
vrouter}) - complete_ops_data.append( - {'key': 'ip_address_grid_row', 'value': ip_grid_list}) - complete_ops_data.append( - {'key': 'fip_grid_row', 'value': fip_grid_list}) - if self.ui.match_ui_kv(complete_ops_data, dom_arry_basic): - return True - else: - return False diff --git a/junit-noframes.xsl b/junit-noframes.xsl deleted file mode 100644 index c0807ed55..000000000 --- a/junit-noframes.xsl +++ /dev/null @@ -1,554 +0,0 @@ - - - - - - - - - - Test Results - - - - - - - - - - -
- - - -
- - - -
- - - -
- - - - - - -
- - - - - - - - -

Packages

- Note: package statistics are not computed recursively, they only sum up all of its testsuites numbers. - - - - - - - - - - - - - - - - - - Failure - Error - - - - - - - - - - -
- - - -
-
- - <br> - - -

Properties

- - - : - -
- - - - - - - - - - - - -

Package

- - - - - - -
- Back to top -

-

- - - - - - - - -

TestCase

- - - - - - - - - - -
- -

- - Back to top - - - - -

Summary

- - - - - - - - - - - - - - - - - - - Failure - Error - - - - - - - - - - -
TestsFailuresErrorsSkippedSuccess rateTime
- - - - - - - -
- - - - -
- Note: failures are anticipated and checked for with assertions while errors are unanticipated. -
-
- - - - cur = TestCases['.'] = new Array(); - - - cur[''] = ''; - - - - - -

Test Results

- - - - - -
Designed for use with JUnit and Ant.
-
-
- - - - Name - Tests - Errors - Failures - Skipped - Time(s) - - - - - - - Name - Tests - Errors - Failures - Skipped - Time(s) - - - - - - - Name - Status - Type - Time(s) - - - - - - - - - - - Failure - Error - - - - - - - - - - - - - - - - - - - - - - Error - - - - - - Failure - - - - Error - - - - Skipped - - - - Success - - - - - - - - - - - - - - - - - - - - - - - - N/A - - - - - - -

- - - -
- - -
- - - - - - \' - - - - - \\ - - - - - - - - - - - - - - - - -
- - - -
- - - -
-
- - - - - - - - - - - -
- diff --git a/locust/trial_1.py b/locust/trial_1.py deleted file mode 100644 index 78c64fd80..000000000 --- a/locust/trial_1.py +++ /dev/null @@ -1,18 +0,0 @@ -from locust import HttpLocust, TaskSet, task - -def projects_query(l): - response = l.client.request(method="GET", url="/projects", - auth=('admin','contrail123')) - -class UserTasks(TaskSet): - # one can specify tasks like this - tasks = [projects_query] - -class WebsiteUser(HttpLocust): - """ - Locust user class that does requests to the locust web server running on localhost - """ - host = "http://127.0.0.1:8095" - min_wait = 100 - max_wait = 100 - task_set = UserTasks diff --git a/log_conf.ini b/log_conf.ini deleted file mode 100755 index 3a7e63ffc..000000000 --- a/log_conf.ini +++ /dev/null @@ -1,51 +0,0 @@ -[log_screen] -# set if log redirection to console needed -log_to_console= yes - -[loggers] -keys=root,log01 - -[logger_root] -handlers=screen -#qualname=(root) -level=DEBUG - -[logger_log01] -handlers=file -qualname=log01 -level=DEBUG -propagate=0 - - -[formatters] -keys=std - -[formatter_std] -format=%(asctime)s %(levelname)s %(message)s -datefmt=%a, %d %b %Y %H:%M:%S -#datefmt=%m-%d-%Y -#format=%(asctime)s [ %(levelname)5s ] %(message)s - - -[handlers] -keys=file,screen -#keys=file - -[handler_file] -#class= handlers.MemoryHandler -class= handlers.MemoryHandler -formatter=std -level=DEBUG -target= -args=(1000, ERROR) -#args=( 'test_details.log.2014-04-20-23:31:35','a') -#args is of the form : ( log-file-name , write-mode) - -[handler_screen] -#class=handlers.StreamHandler -class=StreamHandler -formatter=std -level=DEBUG -stream=sys.stdout -args=(sys.stdout,) - diff --git a/logging.conf.sample b/logging.conf.sample deleted file mode 100644 index e63ac26c0..000000000 --- a/logging.conf.sample +++ /dev/null @@ -1,35 +0,0 @@ -[loggers] -keys=root - -[handlers] -keys=file,devel,syslog - -[formatters] -keys=simple,tests - -[logger_root] -level=DEBUG -handlers=file - -[handler_file] -class=FileHandler 
-level=DEBUG -args=('test_run.log', 'w+') -formatter=tests - -[handler_syslog] -class=handlers.SysLogHandler -level=ERROR -args = ('/dev/log', handlers.SysLogHandler.LOG_USER) - -[handler_devel] -class=StreamHandler -level=DEBUG -args=(sys.stdout,) -formatter=simple - -#[formatter_tests] -#class = tempest.openstack.common.log.ContextFormatter - -[formatter_simple] -format=%(asctime)s.%(msecs)03d %(process)d %(levelname)s: %(message)s diff --git a/misc/sample_param_test.py b/misc/sample_param_test.py deleted file mode 100644 index 282b49f5b..000000000 --- a/misc/sample_param_test.py +++ /dev/null @@ -1,75 +0,0 @@ -# Need to import path to test/fixtures and test/scripts/ -# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/' -# -# To run tests, you can do 'python -m testtools.run tests'. To run specific tests, -# You can do 'python -m testtools.run -l tests' -# Set the env variable PARAMS_FILE to point to your ini file. Else it will try to pick params.ini in PWD -# -import os -from common.openstack_libs import nova_client as mynovaclient -from common.openstack_libs import nova_exception as novaException -import unittest -import fixtures -import testtools -import traceback - -from common.contrail_test_init import ContrailTestInit -from vn_test import * -from quantum_test import * -from vnc_api_test import * -from nova_test import * -from vm_test import * -from common.connections import ContrailConnections -from floating_ip import * -from policy_test import * -from multiple_vn_vm_test import * -from contrail_fixtures import * -from tcutils.wrappers import prepost_wrapper -from tcutils.poc import (TemplateTestCase, template, Call) -from test_arguments import * - - -class TestSanityFixture(testtools.TestCase, fixtures.TestWithFixtures): - - __metaclass__ = TemplateTestCase - -# @classmethod - def setUp(self): - super(TestSanityFixture, self).setUp() - if 'PARAMS_FILE' in os.environ: - self.ini_file = os.environ.get('PARAMS_FILE') - else: - 
self.ini_file = 'params.ini' - self.inputs = self.useFixture(ContrailTestInit(self.ini_file)) - self.connections = ContrailConnections(self.inputs) - self.quantum_h = self.connections.quantum_h - self.nova_h = self.connections.nova_h - self.vnc_lib = self.connections.vnc_lib - self.logger = self.inputs.logger - self.agent_inspect = self.connections.agent_inspect - self.cn_inspect = self.connections.cn_inspect - # end setUpClass - - def cleanUp(self): - super(TestSanityFixture, self).cleanUp() - # end cleanUp - - def runTest(self): - pass - # end runTest - - @template(env.test_vn_add_delete_params) - @preposttest_wrapper - def test_vn_add_delete(self, vn_name, vn_subnets): - '''Test to validate VN creation and deletion. - ''' - vn_obj = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_name=vn_name, inputs=self.inputs, subnets=vn_subnets)) - assert vn_obj.verify_on_setup() - assert vn_obj - return True - # end - -# end TestSanityFixture diff --git a/misc/test_arguments.py b/misc/test_arguments.py deleted file mode 100644 index becb61293..000000000 --- a/misc/test_arguments.py +++ /dev/null @@ -1,6 +0,0 @@ -from fabric.api import env -from tcutils.poc import Call - -env.test_vn_add_delete_params = { - "TestSet1": Call(vn_name="vn10", vn_subnets=['22.1.1.0/24']), - "TestSet2": Call(vn_name="vn20", vn_subnets=['20.1.1.0/24'])} diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index dd2e06cfe..000000000 --- a/requirements.txt +++ /dev/null @@ -1,16 +0,0 @@ -fixtures==1.0.0 -testtools==1.7.1 -testresources==0.2.7 -discover -testrepository -junitxml -pytun -requests==2.3.0 -pexpect -pyvirtualdisplay -selenium -unittest2 -xmltodict -junos-eznc==1.2.2 -pyvmomi==5.5.0 -linecache2 diff --git a/run_tests.sh b/run_tests.sh index db794d2d6..fb48595d5 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -1,8 +1,19 @@ #!/usr/bin/env bash + +source tools/common.sh + +function die +{ + local message=$1 + [ -z 
"$message" ] && message="Died" + echo "${BASH_SOURCE[1]}: line ${BASH_LINENO[0]}: ${FUNCNAME[1]}: $message." >&2 + exit 1 +} function usage { echo "Usage: $0 [OPTION]..." echo "Run Contrail test suite" echo "" + echo " -p, --prepare Only prepare the system and exit. This is useable when somebody want to run the tests manually." echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" echo " -n, --no-site-packages Isolate the virtualenv from the global Python environment" @@ -21,6 +32,7 @@ function usage { echo " -T, --tags Only run tests taged with tags" echo " -c, --concurrency Number of threads to be spawned" echo " --contrail-fab-path Contrail fab path, default to /opt/contrail/utils" + echo " --test-failure-threshold Contrail test failure threshold" echo " -- [TESTROPTIONS] After the first '--' you can pass arbitrary arguments to testr " } testrargs="" @@ -42,9 +54,11 @@ logging_config=logging.conf send_mail=0 concurrency="" parallel=0 +failure_threshold='' contrail_fab_path='/opt/contrail/utils' +export SCRIPT_TS=${SCRIPT_TS:-$(date +"%Y_%m_%d_%H_%M_%S")} -if ! options=$(getopt -o VNnfuUsthdC:lLmF:T:c: -l virtual-env,no-virtual-env,no-site-packages,force,update,upload,sanity,parallel,help,debug,config:logging,logging-config,send-mail,features:tags:concurrency:contrail-fab-path: -- "$@") +if ! 
options=$(getopt -o pVNnfuUsthdC:lLmF:T:c: -l test-failure-threshold:,prepare,virtual-env,no-virtual-env,no-site-packages,force,update,upload,sanity,parallel,help,debug,config:,logging,logging-config,send-mail,features:,tags:,concurrency:,contrail-fab-path: -- "$@") then # parse error usage @@ -56,6 +70,7 @@ first_uu=yes while [ $# -gt 0 ]; do case "$1" in -h|--help) usage; exit;; + -p|--prepare) prepare; exit;; -V|--virtual-env) always_venv=1; never_venv=0;; -N|--no-virtual-env) always_venv=0; never_venv=1;; -n|--no-site-packages) no_site_packages=1;; @@ -73,26 +88,14 @@ while [ $# -gt 0 ]; do -m|--send-mail) send_mail=1;; -c|--concurrency) concurrency=$2; shift;; --contrail-fab-path) contrail_fab_path=$2; shift;; + --test-failure-threshold) failure_threshold=$2; shift;; --) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;; *) testrargs+=" $1";; esac shift done -#if [ -n $tags ];then -# testrargs+=$tags -#fi - -#export SCRIPT_TS=$(date +"%F_%T") -if [ -n "$config_file" ]; then - config_file=`readlink -f "$config_file"` - export TEST_CONFIG_DIR=`dirname "$config_file"` - export TEST_CONFIG_FILE=`basename "$config_file"` -fi - -if [ ! -f "$config_file" ]; then - python tools/configure.py $(readlink -f .) -p $contrail_fab_path -fi +prepare if [ $logging -eq 1 ]; then if [ ! -f "$logging_config" ]; then @@ -309,6 +312,21 @@ if [ $? -eq 0 ];then fi } +function stop_on_failure { + files='result*' + if [[ $failure_threshold ]];then + limit=$failure_threshold + result=`python tools/stop_on_fail.py --files ${files} --threshold ${limit}` + if [[ $result =~ 'Failures within limit' ]]; then + return 0 + else + return $(echo $a | awk '{printf "%d", $3}') + fi + fi + return 0 +} + + export PYTHONPATH=$PATH:$PWD/scripts:$PWD/fixtures:$PWD apply_patches export TEST_DELAY_FACTOR=${TEST_DELAY_FACTOR:-1} @@ -330,6 +348,10 @@ setup_tors if [[ ! 
-z $path ]];then for p in $path do + export REPORT_DETAILS_FILE=report_details_${SCRIPT_TS}.ini + echo $REPORT_DETAILS_FILE + export EMAIL_SUBJECT_PREFIX=$p + rm -rf result*.xml run_tests $p run_tests_serial $p python tools/report_gen.py $TEST_CONFIG_FILE $REPORT_DETAILS_FILE @@ -359,11 +381,21 @@ if [[ -z $path ]] && [[ -z $testrargs ]];then fi sleep 2 -python tools/report_gen.py $TEST_CONFIG_FILE $REPORT_DETAILS_FILE -generate_html +python tools/report_gen.py $TEST_CONFIG_FILE +echo "Generated report_details* file: $REPORT_DETAILS_FILE" +generate_html upload_to_web_server sleep 2 send_mail $TEST_CONFIG_FILE $REPORT_FILE $REPORT_DETAILS_FILE retval=$? - -exit $retval +stop_on_failure ; rv_stop_on_fail=$? +if [[ $rv_stop_on_fail > 0 ]]; then + exit $rv_stop_on_fail +else + # exit value more than 300 or so will revert the exit value in bash to a lower number, so checking that. + if [ $retval -lt 101 ]; then + exit $((100+$retval)) + else + exit $retval + fi +fi diff --git a/sanity-test-cases-details.txt b/sanity-test-cases-details.txt deleted file mode 100644 index e83e386c4..000000000 --- a/sanity-test-cases-details.txt +++ /dev/null @@ -1,584 +0,0 @@ - -List of Sanity Test Cases: -========================== - - 1.TEST : test_all_publishers_registered_to_discovery_service - ----------------------------------------------------------------------- - - Description:Validate all services are registered to discovery service - - Steps: - 1.Gets expected services to be published to discovery from testbed.py - 2.Gets actually published services to discovery from :5998/services.json - 3.Find out any diff between expected and actual list of publishers - fails test case if there is any diff - 4.Checkes all the published services are up from discovery - fails if any of them down - - Pass: Step 3 and 4 Should pass - - 2.TEST : test_agent_gets_control_nodes_from_discovery - ----------------------------------------------------------------------- - - Description:Validate agents 
subscribed to control node service - - Steps: - 1.Get all xmpp-clients from connected to a xmpp server from discovery - 2.From introspect of each of those xmpp-clients,verify if that client connected to the same xmpp server and connection established - - Pass : Step2 should pass - - 3. TEST : test_control_nodes_subscribed_to_ifmap_service - ----------------------------------------------------------------------- - - Description: Validate control nodes subscribed to ifmap service - - Steps: - 1.Verify that control-node subscribed to ifmap server and the get the ifmap server info from discovery - fails otherwise - 2.Go to control node introspect to verify if control node actually connected to that ifmap - fails otherwise - - Pass: Verification of step1 and 2 should pass. - - 4. TEST : test_agents_connected_to_collector_service - ----------------------------------------------------------------------- - Description: Validate agents subscribed to collector service - - Steps: - 1.Verify all agents subscribed to collector service from discovery. - - Pass: Setp 1 verification should pass. - - 5. TEST : test_vn_add_delete - ----------------------------------------------------------------------- - DESCRIPTION : Test to validate VN creation and deletion. - - Steps: - 1. Create VN with subnet - 2. Verify VN against control-node, collector and API - 3. Delete VN and verify - Pass criteria: Step 2 and 3 should pass. - - 6. TEST : test_vm_add_delete - ----------------------------------------------------------------------- - - Steps: - 1. Create a VN and launch VM within it - 2. Verify VN and VM against control-node, collector and API - 3. Delete VM & VN and verify - Pass criteria: Step 2 and 3 should pass - - - 7. TEST : test_sec_group_add_delete - ----------------------------------------------------------------------- - - DESCRIPTION : Verify security group add delete - - Steps: - 1. Create custom security group with rule in it - 2. 
Delete custom security group - Pass criteria: Step 1 and 2 should pass - - 8. TEST : test_vm_with_sec_group - ----------------------------------------------------------------------- - DESCRIPTION : Verify attach dettach security group in VM - - Steps: - 1. Create VN with subnet - 2. Create security group with custom rules - 3. Launch VM in custom created security group and verify - 4. Remove secuity group association with VM - 5. Add back custom security group to VM and verify - 6. Try to delete security group with association to VM. It should fail. - Pass criteria: Step 2,3,4,5 and 6 should pass - - 9. TEST : test_floating_ip - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to validate floating-ip Assignment to a VM. - - Steps: - - 1. Pick VN from resource pool which has VM'in it - 2. Create FIP pool for resource FIP VN fvn - 3. Associate FIP from pool to test VM and verify - 4. Ping to FIP from test VM - Pass criteria: Step 2,3 and 4 should pass - - - 10. TEST : test_ping_within_vn - ----------------------------------------------------------------------- - TEST DESCRIPTION : Validate Ping between two VMs within a VN. - - Steps: - 1. Pick VN from resource pool which has 2 VM's within it - 2. Verify VN & VM against control-node, collector and API - 3. Ping from one VM to another which are launched in same network - Pass criteria: Step 2 and 3 should pass - - 11. TEST : test_policy_to_deny - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to validate that with policy having rule to disable icmp within the VN, ping between VMs should fail - - Steps: - 1. Pick 2 VN from resource pool which have one VM in each - 2. Create policy with icmp deny rule - 3. Associate policy to both VN - 4. Ping from one VM to another. Ping should fail - Pass criteria: Step 2,3 and 4 should pass - - 12. 
TEST : test_remove_policy_with_ref - ----------------------------------------------------------------------- - TEST DESCRIPTION : This tests the following scenarios. - - Steps: - 1. Test to validate that policy removal will fail when it referenced with VN. - 2. validate vn_policy data in api-s against quantum-vn data, when created and unbind policy from VN thru quantum APIs. - 3. validate policy data in api-s against quantum-policy data, when created and deleted thru quantum APIs. - - - 13. TEST : test_ipam_add_delete - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to validate IPAM creation, association of a VN and creating VMs in the VN. Ping b/w the VMs should be successful. - - Steps: - 1. Create non-default IPAM - 2. Create VN with user-created IPAM and verify - 3. Launch 2 VM's within VN which is using non-default IPAM - 4. Ping between these 2 VM's - Pass criteria: Step 1,2,3 and 4 should pass - - 14. TEST : test_project_add_delete - ----------------------------------------------------------------------- - TEST DESCRIPTION : Validate that a new project can be added and deleted - 1. Create new tenant using keystone and verify it and default SG - 2. Delete tenant and verify - Pass criteria: Step 1 and 2 should pass - - 15. TEST : test_policy - ----------------------------------------------------------------------- - TEST DESCRIPTION : Configure policies based on topology and run policy related verifications. - 1. Create 4 virtual-networks - 2. Create multiple policy with different options and attach to networks - 3. Launch virtual-machines in virtual-network created - 4. Verify below items: - For each vn present in compute [vn has vm in compute] - -whats the expected policy list for the vn - -derive expected system rules for vn in vna - -get actual system rules for vn in vna - -compare - - 16. 
TEST : test_policy_modify_vn_policy - ----------------------------------------------------------------------- - TEST DESCRIPTION : This test verifies different policy attached to VN - - Steps: - 1. Create VN with two policy attached - 2. Launch instance in above network - 3. Verify configured policy in agent - 4. Try adding third policy and verify same in agent - 5. Verify unbind policy from network and verify - - 17. TEST : test_repeated_policy_modify - ----------------------------------------------------------------------- - TEST DESCRIPTION : Configure policies based on topology; Replace VN's existing policy - [same policy name but with different rule set] multiple times and verify. - Steps: - 1. Create network with 10 policy attached with 'X' rules specified - 2. Keeping policy name intact change rules to 'Y' - 3. Verify 'Y' rules in agent - - 18. TEST : test_multi_vn_repeated_policy_update_with_ping - ----------------------------------------------------------------------- - TEST DESCRIPTION : Call repeated_policy_update_test_with_ping with multi VN scenario - - Steps: - 1. Create 2 networks and launch single instance in each - 2. Create multiple policy with rules and attached to network - 3. Send ping and verify expected result - 4. Modify rules and verify ping result based on action - - 19. TEST : test_process_restart_in_policy_between_vns - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to validate that with policy having rule to check icmp fwding between - VMs on different VNs , ping between VMs should pass with process restarts - Steps: - 1. Pick 2 VN's from resource pool which has one VM each - 2. Create policy with icmp allow rule between those VN's and bind it networks - 3. Ping from one VM to another VM - 4. Restart process 'vrouter' and 'control' on setup - 5. Ping again between VM's after process restart - Pass criteria: Step 2,3,4 and 5 should pass - - 20. 
TEST : test_vm_file_trf_tftp_tests - ----------------------------------------------------------------------- - TEST DESCRIPTION: Test to validate File Transfer using tftp between VMs. Files of different sizes. - steps: - 1. Creating vm's - vm1 and vm2 and a Vn - vn222 - 2. Transfer file from vm1 to vm2 with diferrent file sizes using tftp - 3. file sizes - 1000,1101,1202,1303,1373, 1374,2210, 2845, 3000, 10000, 10000003 - 4. verify files present in vm2 match with the size of the file sent. - Pass criteria: File in vm2 should match with the transferred file size from vm1 - - 21. TEST : test_vm_file_trf_scp_tests - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to validate File Transfer using scp between VMs. Files of different sizes. - Steps: - 1. Creating vm's - vm1 and vm2 and a Vn - vn222 - 2. Transfer file from vm1 to vm2 with diferrent file sizes using scp - 3. file sizes - 1000,1101,1202,1303,1373, 1374,2210, 2845, 3000, 10000, 10000003 - 4. verify files present in vm2 match with the size of the file sent. - Pass criteria: File in vm2 should match with the transferred file size from vm1 - - 22. TEST : test_ping_on_broadcast_multicast - ----------------------------------------------------------------------- - TEST DESCRIPTION : Validate Ping on subnet broadcast,link local multucast,network broadcast. - - Steps: - 1. Create network and launch 4 instances - 2. On each ubuntu VM disable flag of icmp_echo_ignore_broadcasts - 3. Verify ping to VM metadata from corresponding compute nodes - 4. From VM ping to subnet broadcast IP and verify no loss - - 23. TEST : test_ping_within_vn_two_vms_two_different_subnets - ----------------------------------------------------------------------- - TEST DESCRIPTION : Validate Ping between two VMs within a VN-2 vms in 2 different subnets. - - Steps: - 1. Validate Ping between two VMs within a VN-2 vms in 2 different subnets. - 2. 
Validate ping to subnet broadcast not responded back by other vm - 3. Validate ping to network broadcast (all 255) is responded back by other vm - - 24. TEST : test_mx_gateway - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to validate floating-ip from a public pool assignment to a VM. - It creates a VM, assigns a FIP to it and pings to outside the cluster. - 1.Check env variable MX_GW_TEST is set to 1. This confirm the MX present in Setup. - 2.Create 2 Vns. One public100 and other vn200. VN public100 created with IP pool accessible from outside network. - 3.VM vm200 launched under vn200. - 4.VM vm200 get floating ip from public100 network - 5.Configure the control with MX peering if not present. - 6.Try to ping outside network and check connecivity - - Pass criteria: Step 6 should pass - - 25. TEST : test_change_of_rt_in_vn - ----------------------------------------------------------------------- - TEST DESCRIPTION : Verify the impact of change in route target of a vn - Steps: - 1.Test configuration is simillar with (test_mx_gateway) - 2.In this test, first configure the public100 VN with wrong route target value (Mismatch with MX) - 3.Check the communication outside virtual network cluster fails - 4.Modify the route target value(Matching with MX) - 5.Communication should pass - Pass criteria: Step 3 and 5 should pass. - - 26. TEST : test_control_node_switchover - ----------------------------------------------------------------------- - TEST DESCRIPTION : Stop the control node and check peering with agent fallback to other control node. - - Steps: - - 1. Pick one VN from respource pool which has 2 VM's in it - 2. Verify ping between VM's - 3. Find active control node in cluster by agent inspect - 4. Stop control service on active control node - 5. Verify agents are connected to new active control-node using xmpp connections - 6. Bring back control service on previous active node - 7. 
Verify ping between VM's again after bringing up control serveice - Pass criteria: Step 2,5 and 7 should pass - - 27. TEST : test_svc_monitor_datapath - ----------------------------------------------------------------------- - TEST DESCRIPTION : Validate the service chaining transparent/bridge datapath. - steps: - 1. Create two VN's and launch a VM on each VN - 2. Create transparent service template and service instance. - 3. Create a policy to allow traffic from VN1 to VN2 via/appy_service transparent serivce instacnce - 4. Send ICMP traffic from VN1 to VN2 - Pass criteria: Traffic should go through VN1 to VN2. - - 28. TEST : test_svc_in_network_datapath - ----------------------------------------------------------------------- - TEST DESCRIPTION : Validate the service chaining in network datapath. - steps: - 1. Create two VN's and launch a VM on each VN - 2. Create in-network service template and service instance. - 3. Create a policy to allow traffic from VN1 to VN2 via/appy_service in-network serivce instacnce - 4. Send ICMP traffic from VN1 to VN2 - Pass criteria: Traffic should go through VN1 to VN2 - - 29 TEST : test_svc_transparent_with_3_instance - ----------------------------------------------------------------------- - TEST DESCRIPTION : Validate the service chaining transparent/bridge datapath with 3 service instance. - steps: - 1. Create two VN's and launch a VM on each VN - 2. Create transparent service template and 3 service instance. - 3. Create a policy to allow traffic from VN1 to VN2 via/appy_service 3 transparent serivce instacnce - 4. Send ICMP traffic from VN1 to VN2 - Pass criteria: Traffic should go through VN1 to VN2. - - 30 TEST : test_process_restart_with_multiple_vn_vm - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to validate that multiple VM creation and deletion passes. 
- - Steps: - 1.Create 32 vn and 1 vm in each vn - 2.Restart vrouter service in each compute node - 3.Verify all vns /vms are fine after restart - fails otherwise - - 31 TEST : test_metadata_service - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to validate metadata service on VM creation. - - Steps: - 1.Verify from global-vrouter-config if metadata configures or not - fails otherwise - 2.Create a shell script which writes 'hello world ' in a file in /tmp and save the script on the nova api node - 3.Create a vm with userdata pointing to that script - script should get executed during vm boot up - 4.Go to the vm and verify if the file with 'hello world ' written saved in /tmp of the vm - fails otherwise - - 32 TEST : test_generic_link_local_service - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to validate generic linklocal service - running nova list from vm. - - Steps: - 1.Create generic link local service to be able to wget to jenkins - 2.Create a vm - 3.Try wget to jenkins - passes if successful else fails - - 33 TEST : test_dns_resolution_for_link_local_service - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to verify DNS resolution for link local service - Steps: - 1. Create instance - 2. Configure few link service using IP/DNS option - 3. Verify DNS resolution for services created - 4. Perform ssh,curl and wget operation using services - - 34 TEST : test_svc_mirroring - ----------------------------------------------------------------------- - - Validate the service chain mirroring - Test steps: - 1. Create the SI/ST in svc_mode specified. - 2. Create vn11/vm1, vn21/vm2 - 3. Create the policy rule for ICMP/UDP and attach to vn's - 4. Send the traffic from vm1 to vm2 and verify if the packets gets mirrored to the analyzer - 5. 
If its a single analyzer only ICMP(5 pkts) will be sent else ICMP and UDP traffic will be sent. - Pass criteria : - count = sent - single node : Pkts mirrored to the analyzer should be equal to 'count' - multinode :Pkts mirrored to the analyzer should be equal to '2xcount' - - 35 TEST : test_verify_generator_collector_connections - ----------------------------------------------------------------------- - TEST DESCRIPTION : Verify generator:module connections to collector - - Steps: - - 1.Verify all generators connected to collector - fails otherwise - 2.Get the xmpp peers in vrouter uve and get the active xmpp peer out of it - 3.Verify from agent introspect that active xmpp matches with step 2 - fails otherwise - 4.Get bgp peers from bgp-peer uve and verify from control node introspect that that matches - fails otherwise - - 36 TEST : test_apply_policy_fip_on_same_vn_gw_mx - ----------------------------------------------------------------------- - TEST DESCRIPTION : A particular VN is configure with policy to talk accross VN's and FIP to access outside - - Steps: - - 1. Set encap priority before starting test - 2. Create two networks and launch instance each - 3. Conifgure policy to allow traffic between networks - 4. Send ICMP traffic, verify traffic & encap type using tcpdump on compute - 5. Assign floating up to VM and check public connectivity from VM - - 37 TEST : test_vdns_ping_same_vn - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test vdns functionality. On VM launch agent should dynamically update dns records to dns agent - - Steps: - 1. Create vDNS server - 2. Create IPAM using above vDNS data - 3. Create VN using above IPAM and launch 2 VM's within it - 4. Ping between these 2 VM's using dns name - 5. Try to delete vDNS server which has IPAM back-ref[Negative case] - 6. 
Add CNAME VDNS record for vm1-test and - verify we able to ping by alias name - Pass criteria: Step 4,5 and 6 should pass - - 38 TEST : test_verify_object_logs - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to validate object logs - 1.Create vn/vm and verify object log tables updated with those vn/vm - fails otherwise - - 39 TEST : test_verify_flow_tables - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to validate flow tables - - Steps: - 1.Creat 2 vn and 1 vm in each vn - 2.Create policy between vns - 3.send 100 udp packets from vn1 to vn2 - 4.Verify in vrouter uve that active flow matches with the agent introspect - fails otherwise - 5.Query flowrecord table for the flow and verify packet count mtches 100 - fails otherwise - 6.Query flow series table or the flow and verify packet count mtches 100 - fails otherwise - - 40 TEST : test_create_delete_vpc - ----------------------------------------------------------------------- - TEST DESCRIPTION : Validate create VPC - - Steps: - 1.Create vpc - 2.Verify the created vpc - 3.Delete the vpc - 4.Verify the deleted vpc - - 41 TEST : test_subnet_create_delete - ----------------------------------------------------------------------- - TEST DESCRIPTION : Validate create subnet in vpc with valid CIDR - - Steps: - 1.Create a VPC and verify - 2.Add subnet to vpc and verify - - 42 TEST : test_ping_between_instances - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test ping between instances in subnet - - Steps: - 1.Create VPC with subnet - 2.Launch two instances - 3.Do a ping test b/w the two instances - - 43 TEST : test_acl_with_association - ----------------------------------------------------------------------- - TEST DESCRIPTION : Create ACL, associate it with a subnet, add and replace rules - - Steps: - 1.Create an ACL - 2 Associate it with a subnet - 3 Add acl rules and 
replace rules - 4 Do ping test with rules applied - - 44 TEST : test_security_group - ----------------------------------------------------------------------- - TEST DESCRIPTION : Create Security Groups, Add and Delete Rules - - Steps: - 1.Create security groups for VPC - 2.Add new rules to the security group - 3.Launch VM and do ping test to verify the rules - 4.Delete the rules - - 45 TEST : test_allocate_floating_ip - ----------------------------------------------------------------------- - TEST DESCRIPTION : Allocate a floating IP - - Steps: - 1.Create an FIP pool for VN -public under admin and launch an instance - 2.Launch instance under VPC - 3.Associate FIP to thie instance - 4.Ping test to and from FIP - - - 46 TEST : test_ecmp_svc_in_network_with_static_route_no_policy - ----------------------------------------------------------------------- - TEST DESCRIPTION : Validate service chaining in-network mode datapath having a static route entries of the either virtual - virtual networks pointing to the corresponding interfaces of the service instance. We will not configure any policy. - Test steps: - 1. Creating vm's - vm1 and vm2 in networks vn1 and vn2. - 2. Creating a service instance in in-network mode with 1 instance and left-interface - of the service instance sharing the IP and both the left and the right interfaces enabled for static route. - 3. Delete the policy. - 4. Checking for ping and tcp traffic between vm1 and vm2. - Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 from vm1. - - - 47 TEST : test_ecmp_svc_in_network_nat_with_3_instance - ----------------------------------------------------------------------- - TEST DESCRIPTION : Validate ECMP with service chaining in-network-nat mode datapath having service instance - - Test steps: - 1.Creating vm's - vm1 and vm2 in networks vn1 and vn2. - 2.Creating a service instance in transparent mode with 3 instances. 
- 3.Creating a service chain by applying the service instance as a service in a policy between the VNs. - 4.Checking for ping and bidirectional tcp traffic between vm1 and vm2. - Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 from vm1 and vice-versa. - - 48 TEST : test_ecmp_svc_transparent_with_3_instance - ----------------------------------------------------------------------- - TEST DESCRIPTION : Validate ECMP with service chaining transparent mode datapath having service instance - - Test steps: - 1.Creating vm's - vm1 and vm2 in networks vn1 and vn2. - 2.Creating a service instance in transparent mode with 3 instances. - 3.Creating a service chain by applying the service instance as a service in a policy between the VNs. - 4.Checking for ping and bidirectional tcp traffic between vm1 and vm2. - Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 from vm1 and vice-versa. - - 49 TEST : test_all - ----------------------------------------------------------------------- - TEST DESCRIPTION : - Steps: - 1.Create 2 users - alice ,bob as staff role - 2.Create a poject - myProj - 3 .Disable write on myProj and try to create a ipam on the project as user alice/bob - should fail ; else test fails - 4.Chage the permission on myProj to 777.Try to create ipam on myProj as bob/alice - should be successful; else test fails - 5.Disable read on myProj and try to read a ipam on the project as user alice/bob - should fail ; else test fails - 6.Disable write on myProj and try to delete a ipam on the project as user alice/bob - should fail ; else test fails - 7.Disable execute on a IPAM and try to link that ipam to a vn as user alice/bob - should fail ; else test fails - 8.On Ipam Set IPAM perms such that only owner has read/write permissions;try to read as other user;should fail;else test fails - 9.Set IPAM perms such that only owner has write permissions;try to update ipam with other user;should 
fail;else test fails - 10.Set IPAM perms such that owner/group has read/write permissions.try to update/read ipam with other user;should pass;else test fails - - 50 TEST : test_netperf_within_vn - ----------------------------------------------------------------------- - TEST DESCRIPTION : Check the throughput between the VM's within the same VN - - Steps: - - 1. Create VN and launch two instance within network - 2. Set CPU to highest performance in compute nodes before running test - 3. Run netperf command for fixed duration to find throughput - - 51 TEST : test_with_vxlan_l2_mode - ----------------------------------------------------------------------- - TEST DESCRIPTION : - Description: Verify IPv6 (non IP communication) between 2 VM which under a VN configured in L2 only mode - - Steps: - 1.VXLAN configured as highest encapsulation priority. - 2.Configured 2 VN . EVPN-MGMT-VN(configured with default l2-l3 mode ) and EVPN-L2-VN (configured with L2 only mode) - 3.Create 2 Vms. Both connected to all 2 VN. Connection with EVPN-MGMT-VN is only to access to VM - 4.Configured IPv6 address on interface which is connected L2 only vn - 5.Check the IPv6 communication between them. - - Pass criteria: Step 5 should pass - - 52 TEST : test_with_vxlan_encap_agent_restart - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test agent restart with VXLAN Encap - - Steps: - 1. Configure VXLAN as highest priority - 2. Configure 2 VM under a VN configured with l2-l3 mode - 3. Check IPV6 (non ip) communication between them - 4. Restart the contrail-grouter service. - 5. Again check the IPV6 (non ip) communication between them. - - 53 TEST : test_with_vxlan_encap_to_verify_l2_vm_file_trf_by_scp - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to verify scp of a file with vxlan encap - - Steps: - 1. Launch three vms dhcp-vm, vm1 and vm2 with 2 interfaces (l2-l3 and l2 only) - 2. 
For dhcp-vm configure dhcp-server on eth1 - 3. Bring up eth1 of other 2 vms and get ip from dhcp-server configured - 4. Do scp for different file sizes using this ip - - 54 TEST : test_with_vxlan_encap_to_verify_l2_vm_file_trf_by_tftp - ----------------------------------------------------------------------- - TEST DESCRIPTION : Test to verify tftp of a file with vxlan encap - - Steps: - 1. Launch three vms dhcp-vm, vm1 and vm2 with 2 interfaces (l2-l3 and l2 only) - 2. For dhcp-vm configure dhcp-server on eth1 - 3. Bring up eth1 of other 2 vms and get ip from dhcp-server configured - 4. Do tftp for different file sizes using this ip diff --git a/sanity_params.ini.sample b/sanity_params.ini.sample deleted file mode 100755 index 13f9b8f96..000000000 --- a/sanity_params.ini.sample +++ /dev/null @@ -1,121 +0,0 @@ -[Basic] -# Orchestrator could be openstack, vcenter -orchestrator=$__orch__ -# JSON file describing the test setup(individual hosts roles and credentials) -provFile=$__testbed_json_file__ - -# Specify the endpoint_type that will be used for the openstack client (publicURL/internalURL) -# endpoint_type=publicURL - -# Nova Keypair -key=$__nova_keypair_name__ - -# Admin tenant credentials -stackUser=$__stack_user__ -stackPassword=$__stack_password__ -stackTenant=$__stack_tenant__ -stackDomain=$__stack_domain__ - -# Keystone IP can be VIP ip if HA setup -auth_ip=$__auth_ip__ -auth_port=$__auth_port__ - -# To enable multi tenancy set the below to True, default False -multiTenancy=$__multi_tenancy__ - -# Set the default stack to test. Can be 'v4', 'v6' or 'dual' -AddressFamily=$__address_family__ - -# A short description about the test run. eg. "R2.0: Ubuntu havana high availability test" -logScenario=$__log_scenario__ - -# If you dont want fixture cleanups to remove the objects which are created as part of setUp of the fixture, set fixtureCleanup to 'no'. Default value should be 'yes'. If objects are already present before start of tests, they are not deleted. 
To clean them up forcefully, set fixtureCleanup to 'force' -fixtureCleanup=$__fixture_cleanup__ - -# Host IP for public network reachability tests -# Default is 10.204.216.50 -public_host= - -[WebServer] -# The URL to which the test log file and HTML report would be uploaded to. -# path is the local filesystem path to which the files will get copied to -# Ex: http://10.204.216.50/Docs/ -host=$__webserver__ -username=$__webserver_user__ -password=$__webserver_password__ - -# Absolute path in the webserver where the logs and reports has to be copied -logPath=$__webserver_log_dir__ -reportPath=$__webserver_report_dir__ -webRoot=$__webroot__ - -[Mail] -server=$__mail_server__ -port=$__mail_port__ -# Can specify multiple comma separated receiver mailIDs -mailTo=$__receiver_mail_id__ -mailSender=$__sender_mail_id__ - - -# If the test setup is behind proxy server then provide the url for the same -# eg: http://username:password@foo.bar:8080 -[proxy] -proxy_url=$__http_proxy__ - -[use_devicemanager_for_md5] -use_devicemanager_for_md5=$__use_devicemanager_for_md5__ - -[ui] -# Provide webui=True to run webui verification testcases, Default = False -webui=$__webui__ -# # # Provide horizon=True to run horizon verification testcases, Default = False -horizon=$__horizon__ -# # # Provide browser details for gui based testing, 'chrome' or 'firefox' or None -ui_browser=$__ui_browser__ -# # # if object creation has to be through gui set the config flag to 'contrail' or 'horizon' or False . 
default False -ui_config=$__ui_config__ -# # - -# Set the same to True if devstack env -[devstack] -devstack=$__devstack__ - - -[router] -#Route Target and ASN details -route_target=$__public_vn_rtgt__ -asn=$__router_asn__ - -#List of Router name and IP tuples -#eg: [('mx1', '1.1.1.1'), ('mx2', '1.1.1.2')] -router_info=$__router_name_ip_tuples__ - -# Floating IP pool subnet and name info -fip_pool=$__public_vn_subnet__ -fip_pool_name=$__public_vn_name__ -public_virtual_network=$__public_virtual_network__ -public_tenant_name=$__public_tenant_name__ - - -[HA] -# HA config -ha_setup=$__ha_setup__ -ipmi_username=$__ipmi_username__ -ipmi_password=$__ipmi_password__ - -[vcenter] -# VCenter configuration -vcenter_dc=$__vcenter_dc__ -vcenter_server=$__vcenter_server__ -vcenter_port=$__vcenter_port__ -vcenter_username=$__vcenter_username__ -vcenter_password=$__vcenter_password__ -vcenter_datacenter=$__vcenter_datacenter__ -vcenter_compute=$__vcenter_compute__ - -[debug] -# To pause the test execution, in pdb prompt, on failure set stop_on_fail to True, default False -stop_on_fail=$__stop_on_fail__ - -# set to False to skip verification of VM launch, default True -verify_on_setup=$__test_verify_on_setup__ diff --git a/sanity_testbed.json.sample b/sanity_testbed.json.sample deleted file mode 100644 index 9a19c8dd1..000000000 --- a/sanity_testbed.json.sample +++ /dev/null @@ -1,120 +0,0 @@ -{ - "hosts": [ - { - "control-ip": "10.204.216.63", - "data-ip": "10.204.216.63", - "ip": "10.204.216.63", - "name": "nodec6", - "password": "c0ntrail123", - "roles": [ - { - "type": "database" - }, - { - "type": "collector" - }, - { - "type": "webui" - } - ], - "username": "root" - }, - { - "control-ip": "10.204.216.64", - "data-ip": "10.204.216.64", - "ip": "10.204.216.64", - "name": "nodec7", - "password": "c0ntrail123", - "roles": [ - { - "type": "openstack" - }, - { - "type": "cfgm" - }, - { - "type": "bgp" - } - ], - "username": "root" - }, - { - "control-ip": "10.204.216.65", - 
"data-ip": "10.204.216.65", - "ip": "10.204.216.65", - "name": "nodec8", - "password": "c0ntrail123", - "roles": [ - { - "type": "cfgm" - }, - { - "type": "bgp" - } - ], - "username": "root" - }, - { - "control-ip": "10.204.216.66", - "data-ip": "10.204.216.66", - "ip": "10.204.216.66", - "name": "nodec9", - "password": "c0ntrail123", - "roles": [ - { - "type": "compute" - } - ], - "username": "root" - }, - { - "control-ip": "10.204.216.67", - "data-ip": "10.204.216.67", - "ip": "10.204.216.67", - "name": "nodec10", - "password": "c0ntrail123", - "roles": [ - { - "type": "compute" - } - ], - "username": "root" - } - ], - "vgw": [], - "hosts_ipmi": [{"10.204.216.67": "10.204.6.67", - "10.204.216.66": "10.204.6.66" - }], - "tor_agent": {"root@node6":[{ - "tor_ip":"10.204.217.191", - "tor_id":"1", - "tor_type":"ovs", - "tor_ovs_port":"9998", - "tor_ovs_protocol":"pssl", - "tor_tsn_ip":"192.168.192.2", - "tor_tsn_name":"nodec10", - "tor_name":"bng-contrail-qfx51-10", - "tor_tunnel_ip":"9.0.0.1", - "tor_vendor_name":"Juniper", - "tor_http_server_port": "9010"}, - ]}, - "physical_routers": {"bng-contrail-qfx51-10":{ - "vendor": "juniper", - "model" : "mx", - "asn" : "64512", - "name" : "bng-contrail-qfx51-10", - "ssh_username" : "root", - "ssh_password" : "password", - "mgmt_ip" : "10.204.217.191", - "tunnel_ip" : "7.0.0.1", - "ports" : [], - "type" : "router"}, - }, - "tor_hosts": {"10.204.217.213": [{ - "tor_port": "ge-0/0/42", - "host_port" : "em1", - "mgmt_ip" : "10.204.217.153", - "username" : "root", - "password" : "c0ntrail123"}, - ]} -} diff --git a/scripts/__init__.py b/scripts/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/scripts/analytics/__init__.py b/scripts/analytics/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/scripts/analytics/analytics_tests.py b/scripts/analytics/analytics_tests.py index 8c8b6f5f8..d6c5175a9 100644 --- a/scripts/analytics/analytics_tests.py +++ 
b/scripts/analytics/analytics_tests.py @@ -2235,7 +2235,7 @@ def join_threads(self,thread_objects=[]): def get_value_from_query_threads(self): while not self.que.empty(): - self.logger.info("******** Verifying resutlts *************") + self.logger.info("%%%%%%%% Verifying results %%%%%%%%%%%%%") try: assert self.que.get() except Exception as e: diff --git a/scripts/analytics/base.py b/scripts/analytics/base.py deleted file mode 100644 index d4a2e7a46..000000000 --- a/scripts/analytics/base.py +++ /dev/null @@ -1,185 +0,0 @@ -import test -from common import isolated_creds -from vn_test import * -from vm_test import * -import fixtures - -class AnalyticsBaseTest(test.BaseTestCase): - - @classmethod - def setUpClass(cls): - super(AnalyticsBaseTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, cls.inputs, ini_file = cls.ini_file, logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - cls.orch = cls.connections.orch - cls.quantum_h= cls.connections.quantum_h - cls.nova_h = cls.connections.nova_h - cls.vnc_lib= cls.connections.vnc_lib - cls.agent_inspect= cls.connections.agent_inspect - cls.cn_inspect= cls.connections.cn_inspect - cls.analytics_obj=cls.connections.analytics_obj - resource_class = cls.__name__ + 'Resource' - cls.res = ResourceFactory.createResource(resource_class) - #end setUpClass - - @classmethod - def tearDownClass(cls): - cls.res.cleanUp() - cls.isolated_creds.delete_tenant() - super(AnalyticsBaseTest, cls).tearDownClass() - #end tearDownClass - -class ResourceFactory: - factories = {} - def createResource(id): - if not ResourceFactory.factories.has_key(id): - ResourceFactory.factories[id] = \ - eval(id + '.Factory()') - return ResourceFactory.factories[id].create() - createResource = 
staticmethod(createResource) - - -class BaseResource(fixtures.Fixture): - - def setUp(self,inputs,connections): - super(BaseResource , self).setUp() - self.inputs = inputs - self.connections = connections - self.setup_common_objects(self.inputs , self.connections) - - def cleanUp(self): - super(BaseResource, self).cleanUp() - - def setup_common_objects(self, inputs , connections): - - self.inputs = inputs - self.connections = connections - (self.vn1_name, self.vn1_subnets)= ("vn1", ["192.168.1.0/24"]) - (self.vn2_name, self.vn2_subnets)= ("vn2", ["192.168.2.0/24"]) - (self.fip_vn_name, self.fip_vn_subnets)= ("fip_vn", ['100.1.1.0/24']) - (self.vn1_vm1_name, self.vn1_vm2_name)=( 'vn1_vm1', 'vn1_vm2') - self.vn2_vm1_name= 'vn2_vm1' - self.vn2_vm2_name= 'vn2_vm2' - self.fvn_vm1_name= 'fvn_vm1' - - # Configure 3 VNs, one of them being Floating-VN - self.vn1_fixture=self.useFixture( VNFixture(project_name= self.inputs.project_name, - connections= self.connections, inputs= self.inputs, - vn_name= self.vn1_name, subnets= self.vn1_subnets)) - - self.vn2_fixture=self.useFixture( VNFixture(project_name= self.inputs.project_name, - connections= self.connections, inputs= self.inputs, - vn_name= self.vn2_name, subnets= self.vn2_subnets)) - - self.fvn_fixture=self.useFixture( VNFixture(project_name= self.inputs.project_name, - connections= self.connections, inputs= self.inputs, - vn_name= self.fip_vn_name, subnets= self.fip_vn_subnets)) - - # Making sure VM falls on diffrent compute host - host_list = self.connections.orch.get_hosts() - compute_1 = host_list[0] - compute_2 = host_list[0] - if len(host_list) > 1: - compute_1 = host_list[0] - compute_2 = host_list[1] - # Configure 6 VMs in VN1, 1 VM in VN2, and 1 VM in FVN - self.vn1_vm1_fixture=self.useFixture(VMFixture(project_name= self.inputs.project_name, - connections= self.connections, vn_obj= self.vn1_fixture.obj, - vm_name= self.vn1_vm1_name,image_name='ubuntu-traffic', - flavor='contrail_flavor_medium', 
node_name=compute_1)) - - self.vn1_vm2_fixture=self.useFixture(VMFixture(project_name= self.inputs.project_name, - connections= self.connections, vn_obj= self.vn1_fixture.obj, - vm_name= self.vn1_vm2_name , image_name='ubuntu-traffic', - flavor='contrail_flavor_medium')) - - self.vn2_vm2_fixture=self.useFixture(VMFixture(project_name= self.inputs.project_name, - connections= self.connections, vn_obj= self.vn2_fixture.obj, - vm_name= self.vn2_vm2_name, image_name='ubuntu-traffic', flavor='contrail_flavor_medium', - node_name=compute_2)) -# - self.fvn_vm1_fixture=self.useFixture(VMFixture(project_name= self.inputs.project_name, - connections= self.connections, vn_obj= self.fvn_fixture.obj, - vm_name= self.fvn_vm1_name)) - - self.verify_common_objects() - #end setup_common_objects - - def verify_common_objects(self): - assert self.vn1_fixture.verify_on_setup() - assert self.vn2_fixture.verify_on_setup() - assert self.fvn_fixture.verify_on_setup() - assert self.vn1_vm1_fixture.verify_on_setup() - assert self.vn2_vm2_fixture.verify_on_setup() - #end verify_common_objects - -class AnalyticsTestSanityResource (BaseResource): - - def setUp(self,inputs,connections): - pass - #super(AnalyticsTestSanityResource , self).setUp(inputs,connections) - - def cleanUp(self): - pass - #super(AnalyticsTestSanityResource, self).cleanUp() - - class Factory: - def create(self): return AnalyticsTestSanityResource() - -class AnalyticsTestSanity1Resource (BaseResource): - - def setUp(self,inputs,connections): - pass - #super(AnalyticsTestSanity1Resource , self).setUp(inputs,connections) - - def cleanUp(self): - pass - #super(AnalyticsTestSanity1Resource, self).cleanUp() - - class Factory: - def create(self): return AnalyticsTestSanity1Resource() - - -class AnalyticsTestSanity2Resource (BaseResource): - - def setUp(self,inputs,connections): - pass - #super(AnalyticsTestSanity2Resource , self).setUp(inputs,connections) - - def cleanUp(self): - pass - #super(AnalyticsTestSanity2Resource, 
self).cleanUp() - - class Factory: - def create(self): return AnalyticsTestSanity2Resource() - -class AnalyticsTestSanity3Resource (BaseResource): - - def setUp(self,inputs,connections): - pass - #super(AnalyticsTestSanity3Resource , self).setUp(inputs,connections) - - def cleanUp(self): - pass - #super(AnalyticsTestSanity3Resource, self).cleanUp() - - class Factory: - def create(self): return AnalyticsTestSanity3Resource() - -class AnalyticsTestSanityWithResourceResource(BaseResource): - - def setUp(self,inputs,connections): - super(AnalyticsTestSanityWithResourceResource , self).setUp(inputs,connections) - - def cleanUp(self): - super(AnalyticsTestSanityWithResourceResource, self).cleanUp() - - class Factory: - def create(self): return AnalyticsTestSanityWithResourceResource() -#End resource - - diff --git a/scripts/analytics/test_analytics.py b/scripts/analytics/test_analytics.py index 4801b3179..658e145b1 100644 --- a/scripts/analytics/test_analytics.py +++ b/scripts/analytics/test_analytics.py @@ -40,7 +40,7 @@ def test_contrail_status(self): assert self.inputs.verify_state() return True - @test.attr(type=['sanity', 'vcenter']) + @test.attr(type=['vcenter']) @preposttest_wrapper def test_contrail_alarms(self): ''' Test to check if alarms are present @@ -51,6 +51,52 @@ def test_contrail_alarms(self): assert False, "alarms generated %s" % (alarms) return True + @test.attr(type=['vcenter']) + def test_cfgm_alarms(self): + ''' Test whether contrail config alarms are generated + after executing alarms triggering operations + + ''' + assert self.analytics_obj.verify_cfgm_alarms() + return True + + @test.attr(type=['vcenter']) + def test_db_alarms(self): + ''' Test whether contrail database alarms are generated + after executing alarms triggering operations + + ''' + assert self.analytics_obj.verify_db_alarms() + return True + + @test.attr(type=['vcenter']) + def test_analytics_alarms(self): + ''' Test whether contrail analytics alarms are generated + after executing 
alarms triggering operations + + ''' + assert self.analytics_obj.verify_analytics_alarms() + return True + + @test.attr(type=['vcenter']) + def test_control_alarms(self): + ''' Test whether contrail control alarms are generated + after executing alarms triggering operations + + ''' + assert self.analytics_obj.verify_control_alarms() + return True + + @test.attr(type=['vcenter']) + def test_vrouter_alarms(self): + ''' Test whether contrail vrouter alarms are generated + after executing alarms triggering operations + + ''' + assert self.analytics_obj.verify_vrouter_alarms() + return True + + @preposttest_wrapper def test_bgprouter_uve_for_xmpp_and_bgp_peer_count(self): ''' Test bgp-router uve for active xmp/bgpp connections count @@ -94,82 +140,6 @@ def test_config_node_uve_states(self): assert result return True - @test.attr(type=['sanity', 'ci_sanity', 'vcenter']) - @preposttest_wrapper - def test_verify_object_logs(self): - ''' - Description: Test to validate object logs - 1.Create vn/vm and verify object log tables updated with those vn/vm - fails otherwise - Maintainer: sandipd@juniper.net - ''' - vn_name='vn22' - vn_subnets=['22.1.1.0/24'] - vm1_name='vm_test' - start_time=self.analytics_obj.getstarttime(self.inputs.cfgm_ip) - vn_fixture= self.useFixture(VNFixture(project_name= self.inputs.project_name, connections= self.connections, - vn_name=vn_name, inputs= self.inputs, subnets= vn_subnets)) - vn_obj= vn_fixture.obj - vm1_fixture= self.useFixture(VMFixture(connections= self.connections, - vn_obj=vn_obj, vm_name= vm1_name, project_name= self.inputs.project_name)) - #getting vm uuid - assert vm1_fixture.verify_on_setup() - vm_uuid=vm1_fixture.vm_id - self.logger.info("Waiting for logs to be updated in the database...") - time.sleep(20) - query='('+'ObjectId=%s)'%vn_fixture.vn_fq_name - result=True - self.logger.info("Verifying ObjectVNTable through opserver %s.."%(self.inputs.collector_ips[0])) - 
res2=self.analytics_obj.ops_inspect[self.inputs.collector_ips[0]].post_query('ObjectVNTable', - start_time=start_time,end_time='now' - ,select_fields=['ObjectId', 'Source', - 'ObjectLog', 'SystemLog','Messagetype', - 'ModuleId','MessageTS'], - where_clause=query) - self.logger.info("query output : %s"%(res2)) - if not res2: - st=self.analytics_obj.ops_inspect[self.inputs.collector_ips[0]].send_trace_to_database\ - (node= self.inputs.collector_names[0], module= 'QueryEngine',trace_buffer_name= 'QeTraceBuf') - self.logger.info("status: %s"%(st)) - assert res2 - - self.logger.info("Getting object logs for vm") - query='('+'ObjectId='+ vm_uuid +')' - self.logger.info("Verifying ObjectVMTable through opserver %s.."%(self.inputs.collector_ips[0])) - res1=self.analytics_obj.ops_inspect[self.inputs.collector_ips[0]].post_query('ObjectVMTable', - start_time=start_time,end_time='now' - ,select_fields=['ObjectId', 'Source', - 'ObjectLog', 'SystemLog','Messagetype', - 'ModuleId','MessageTS'], - where_clause=query) - self.logger.info("query output : %s"%(res1)) - if not res1: - st=self.analytics_obj.ops_inspect[self.inputs.collector_ips[0]].send_trace_to_database\ - (node= self.inputs.collector_names[0], module= 'QueryEngine',trace_buffer_name= 'QeTraceBuf') - self.logger.info("status: %s"%(st)) - assert res1 - - self.logger.info("Getting object logs for ObjectRoutingInstance table") -# object_id=self.inputs.project_fq_name[0]+':'+self.inputs.project_fq_name[1]+vn_name+':'+vn_name - object_id='%s:%s:%s:%s'%(self.inputs.project_fq_name[0],self.inputs.project_fq_name[1],vn_name,vn_name) -# query='('+'ObjectId=default-domain:admin:'+vn_name+')' - query='(ObjectId=%s)'%(object_id) - - self.logger.info("Verifying ObjectRoutingInstance through opserver %s.."%(self.inputs.collector_ips[0])) - res1=self.analytics_obj.ops_inspect[self.inputs.collector_ips[0]].post_query('ObjectRoutingInstance', - start_time=start_time,end_time='now' - ,select_fields=['ObjectId', 'Source', - 'ObjectLog', 
'SystemLog','Messagetype', - 'ModuleId','MessageTS'], - where_clause=query) - self.logger.info("query output : %s"%(res1)) - if not res1: - self.logger.warn("ObjectRoutingInstance query did not return any output") - st=self.analytics_obj.ops_inspect[self.inputs.collector_ips[0]].send_trace_to_database\ - (node= self.inputs.collector_names[0], module= 'QueryEngine',trace_buffer_name= 'QeTraceBuf') - self.logger.info("status: %s"%(st)) - assert res1 - return True - @preposttest_wrapper def test_verify_hrefs(self): ''' Test all hrefs for collector/agents/bgp-routers etc @@ -285,7 +255,7 @@ def itest_object_tables_parallel_query(self): vm_fixture= self.useFixture(create_multiple_vn_and_multiple_vm_fixture (connections= self.connections, vn_name=vn_name, vm_name=vm1_name, inputs= self.inputs,project_name= self.inputs.project_name, subnets= vn_subnets,vn_count=vn_count_for_test,vm_count=1,subnet_count=1, - image_name='cirros-0.3.0-x86_64-uec',ram='512')) + image_name='cirros',ram='512')) compute_ip=[] time.sleep(100) @@ -390,14 +360,14 @@ def test_verify_generator_connections_to_collector_node(self): ''' self.analytics_obj.verify_generator_connection_to_collector() - @test.attr(type=['sanity']) @preposttest_wrapper def test_db_purge(self): ''' Test to db purge ''' + start_time = self.analytics_obj.getstarttime(self.inputs.collector_ip) purge_id = self.analytics_obj.get_purge_id(20) - assert self.analytics_obj.verify_purge_info_in_database_uve(purge_id) + assert self.analytics_obj.verify_purge_info_in_database_uve(purge_id,start_time) @test.attr(type=['sanity', 'vcenter']) @preposttest_wrapper @@ -407,11 +377,10 @@ def test_db_nodemgr_status(self): ''' assert self.analytics_obj.verify_database_process_running('contrail-database-nodemgr') - @test.attr(type=['sanity', 'vcenter']) @preposttest_wrapper def test_contrail_database_status(self): ''' Test to verify contrail database status ''' - assert self.analytics_obj.verify_database_process_running('contrail-database') + 
assert self.analytics_obj.verify_database_process_running('kafka') diff --git a/scripts/ceilometer_tests/base.py b/scripts/ceilometer_tests/base.py index 185eb6f65..a6b4a38e7 100644 --- a/scripts/ceilometer_tests/base.py +++ b/scripts/ceilometer_tests/base.py @@ -1,4 +1,4 @@ -import test +import test_v1 from common import isolated_creds from vn_test import * from vm_test import * @@ -7,7 +7,7 @@ from common import create_public_vn from openstack import OpenstackAuth -class CeilometerBaseTest(test.BaseTestCase): +class CeilometerBaseTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): @@ -19,24 +19,21 @@ def setUpClass(cls): inst = cls() raise inst.skipTest( "Skipping Test.Ceilometer not enabled in the setup") - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, cls.inputs, ini_file = cls.ini_file, logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib cls.agent_inspect= cls.connections.agent_inspect cls.cn_inspect= cls.connections.cn_inspect cls.analytics_obj=cls.connections.analytics_obj + if cls.inputs.admin_username: + public_creds = cls.admin_isolated_creds + else: + public_creds = cls.isolated_creds cls.public_vn_obj = create_public_vn.PublicVn( - cls.__name__, - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) + public_creds, + cls.inputs, + ini_file=cls.ini_file, + logger=cls.logger) cls.public_vn_obj.configure_control_nodes() resource_class = cls.__name__ + 'Resource' @@ -46,7 +43,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.res.cleanUp() - cls.isolated_creds.delete_tenant() super(CeilometerBaseTest, cls).tearDownClass() #end tearDownClass @@ -110,6 +106,8 @@ def 
setup_common_objects(self, inputs , connections): assert self.vm1_fixture.verify_on_setup() # Adding further projects to floating IP. + if not getattr(self.public_vn_obj, 'fip_fixture', None): + return self.logger.info('Adding project %s to FIP pool %s' % (self.inputs.project_name, fip_pool_name)) project_obj = self.public_vn_obj.fip_fixture.assoc_project\ diff --git a/scripts/ceilometer_tests/test_ceilometer.py b/scripts/ceilometer_tests/test_ceilometer.py index 5fe28f85a..418191ac0 100644 --- a/scripts/ceilometer_tests/test_ceilometer.py +++ b/scripts/ceilometer_tests/test_ceilometer.py @@ -23,7 +23,7 @@ def setUpClass(cls): cls.cclient = ceilometer_client.CeilometerClient(cls.auth_url, cls.inputs.stack_user, cls.inputs.stack_password, - 'admin', + cls.inputs.project_name, cls.c_url, insecure = True) cls.cclient = cls.cclient.get_cclient() @@ -89,6 +89,12 @@ def test_sample_floating_ip_transmit_packets(self): Verifying ceilometer sample - ip.floating.transmit.bytes Verifying ceilometer sample - ip.floating.receive.bytes""" + if os.environ.get('MX_GW_TEST', 0) != '1': + self.logger.info( + "Skipping Test. Env variable MX_GW_TEST is not set. Skipping the test") + raise self.skipTest( + "Skipping Test. Env variable MX_GW_TEST is not set. 
Skipping the test") + self.logger.info('Sleeping for 1 mins for sample to be collected...') time.sleep(60) self.logger.info('Starting verification...') diff --git a/scripts/discovery_regression/__init__.py b/scripts/discovery_regression/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/scripts/discovery_regression/base.py b/scripts/discovery_regression/base.py deleted file mode 100644 index 826b0ec0b..000000000 --- a/scripts/discovery_regression/base.py +++ /dev/null @@ -1,32 +0,0 @@ -import test -from common.connections import ContrailConnections -from common import isolated_creds - -class BaseDiscoveryTest(test.BaseTestCase): - - @classmethod - def setUpClass(cls): - super(BaseDiscoveryTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - cls.quantum_h= cls.connections.quantum_h - cls.nova_h = cls.connections.nova_h - cls.vnc_lib= cls.connections.vnc_lib - cls.agent_inspect= cls.connections.agent_inspect - cls.cn_inspect= cls.connections.cn_inspect - cls.analytics_obj=cls.connections.analytics_obj - cls.ds_obj = cls.connections.ds_verification_obj - #end setUpClass - - @classmethod - def tearDownClass(cls): - cls.isolated_creds.delete_tenant() - super(BaseDiscoveryTest, cls).tearDownClass() - #end tearDownClass - diff --git a/scripts/discovery_regression/test_discovery.py b/scripts/discovery_regression/test_discovery.py index c7c4166aa..c256c07d8 100644 --- a/scripts/discovery_regression/test_discovery.py +++ b/scripts/discovery_regression/test_discovery.py @@ -25,40 +25,6 @@ def runTest(self): pass # end runTest - @test.attr(type=['sanity', 'ci_sanity', 'vcenter']) - @preposttest_wrapper - def 
test_all_publishers_registered_to_discovery_service(self): - ''' - Description:Validate all services are registered to discovery service - Steps: - 1.Gets expected services to be published to discovery from testbed.py - 2.Gets actually published services to discovery from :5998/services.json - 3.Find out any diff between expected and actual list of publishers - fails test case if there is any diff - 4.Checkes all the published services are up from discovery - fails if any of them down - Maintainer: sandipd@juniper.net - ''' - for ip in self.inputs.cfgm_ips: - self.logger.info("Verifying for ip %s" % (ip)) - assert self.ds_obj.verify_registered_services_to_discovery_service( - ip) - return True - - - @test.attr(type=['sanity', 'ci_sanity', 'vcenter']) - @preposttest_wrapper - def test_agent_gets_control_nodes_from_discovery(self): - ''' - Description:Validate agents subscribed to control node service - Steps: - 1.Get all xmpp-clients from connected to a xmpp server from discovery - 2.From introspect of each of those xmpp-clients,verify if that client connected to the same xmpp server and connection established- fails otherwise - Maintainer: sandipd@juniper.net - ''' - for ip in self.inputs.cfgm_ips: - self.logger.info("Verifying for ip %s" % (ip)) - assert self.ds_obj.verify_bgp_connection(ip) - return True - @preposttest_wrapper def test_agents_connected_to_dns_service(self): ''' Validate agents subscribed to dns service @@ -70,7 +36,6 @@ def test_agents_connected_to_dns_service(self): return True @test.attr(type=['sanity', 'vcenter']) - #@test.attr(type=['sanity', 'ci_sanity']) @preposttest_wrapper def test_agents_connected_to_collector_service(self): ''' @@ -106,22 +71,6 @@ def test_control_nodes_connected_to_collector_service(self): ip) return True - @test.attr(type=['sanity', 'ci_sanity', 'vcenter']) - @preposttest_wrapper - def test_control_nodes_subscribed_to_ifmap_service(self): - ''' - Description: Validate control nodes subscribed to ifmap service - 
1.Verify that control-node subscribed to ifmap server and the get the ifmap server info from discovery - fails otherwise - 2.Go to control node introspect to verify if control node actually connected to that ifmap - fails otherwise - - Maintainer: sandipd@juniper.net - ''' - for ip in self.inputs.cfgm_ips: - self.logger.info("Verifying for ip %s" % (ip)) - assert self.ds_obj.verify_control_nodes_subscribed_to_ifmap_service( - ip) - return True - @preposttest_wrapper def test_dns_agents_subscribed_to_ifmap_service(self): ''' Validate dns agents subscribed to ifmap service @@ -168,56 +117,6 @@ def test_ServiceMonitor_subscribed_to_collector_service(self): ) return True - @preposttest_wrapper - def itest_control_node_restart_and_validate_status_of_the_service(self): - ''' Validate restart of control node services - - ''' - result = True - svc_lst = [] - svc_lst = self.ds_obj.get_all_control_services(self.inputs.cfgm_ip) - for elem in svc_lst: - if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) == 'up'): - self.logger.info("Service %s is up" % (elem,)) - result = result and True - else: - self.logger.warn("Service %s is down" % (elem,)) - result = result and False - svc_lst.remove(elem) - # Stopping the control node service - for elem in svc_lst: - ip = elem[0] - self.logger.info("Stopping service %s.." % (elem,)) - self.inputs.stop_service('contrail-control', [ip]) - time.sleep(20) - for elem in svc_lst: - ip = elem[0] - if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) == 'up'): - self.logger.warn("Service %s is still up" % (elem,)) - result = result and False - else: - self.logger.info("Service %s is down" % (elem,)) - result = result and True - # Starting the control node service - for elem in svc_lst: - ip = elem[0] - self.logger.info("Starting service %s.." 
% (elem,)) - self.inputs.start_service('contrail-control', [ip]) - time.sleep(6) - for elem in svc_lst: - ip = elem[0] - if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) == 'up'): - self.logger.info( - "Service %s came up after service was started" % (elem,)) - result = result and True - else: - self.logger.info( - "Service %s is down even after service was started" % (elem,)) - result = result and False - - assert result - return True - @preposttest_wrapper def test_cleanup(self): ''' cleanup service from discovery @@ -229,21 +128,90 @@ def test_cleanup(self): @test.attr(type=['sanity', 'vcenter']) @preposttest_wrapper - def test_webui_subscribed_to_opserver_service(self): - ''' Validate webui subscribed to opserver service + def test_control_nodes_subscribed_to_ifmap_service(self): + ''' + Description: Validate control nodes subscribed to ifmap service + 1.Verify that control-node subscribed to ifmap server and the get the ifmap server info from discovery - fails otherwise + 2.Go to control node introspect to verify if control node actually connected to that ifmap - fails otherwise + Maintainer: sandipd@juniper.net ''' - assert self.ds_obj.verify_webui_subscribed_to_opserver_service( - ) + for ip in self.inputs.cfgm_ips: + self.logger.debug("Verifying for ip %s" % (ip)) + assert self.ds_obj.verify_control_nodes_subscribed_to_ifmap_service( + ip) return True - @test.attr(type=['sanity', 'vcenter']) @preposttest_wrapper - def test_webui_subscribed_to_apiserver_service(self): - ''' Validate webui subscribed to apiserver service - + def test_rule_create_delete(self): + ''' Validate rules get created and deleted successfully. + Also verify that created rules are found in the display. + Read all the rules together. + Steps: + 1. This test case creates multiple rules for Xmpp-Server and DNS-server + 2. Then it searches for the created rules to check if they are configured properly or not + 3. Read all the rules that are present. + 4. 
Delete all the configured rules. + 5. Search for the rules if they have been deleted properly or not. ''' - assert self.ds_obj.verify_webui_subscribed_to_apiserver_service( - ) - return True + result = True + ds_ip = self.ds_obj.inputs.cfgm_ip + if len(self.inputs.cfgm_control_ip) > 0: + self.logger.info("Creating rules corresponding to xmpp-server and dns-server running on all config nodes for vrouter agent running in same subnets") + for i in range(0,len(self.inputs.cfgm_control_ips)): + cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.') + cfgm_control_ip[3] = '0' + cfgm_control_ip = ".".join(cfgm_control_ip) + "/24" + self.ds_obj.discovery_rule_config("add_rule",\ + 'default-discovery-service-assignment', cfgm_control_ip,\ + 'xmpp-server', cfgm_control_ip, 'contrail-vrouter-agent:0') + self.ds_obj.discovery_rule_config("add_rule",\ + 'default-discovery-service-assignment', cfgm_control_ip,\ + 'dns-server', cfgm_control_ip, 'contrail-vrouter-agent:0') + self.ds_obj.read_rule('default-discovery-service-assignment') + for i in range(0,len(self.inputs.cfgm_control_ips)): + cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.') + cfgm_control_ip[3] = '0' + cfgm_control_ip = ".".join(cfgm_control_ip) + "/24" + result1 = self.ds_obj.discovery_rule_config("find_rule",\ + 'default-discovery-service-assignment',cfgm_control_ip,\ + 'xmpp-server', cfgm_control_ip,'contrail-vrouter-agent:0') + if result1 == False: + self.logger.error("While searching for the configured rule, it was not found. Configuration failed") + result = False + result2 = self.ds_obj.discovery_rule_config("find_rule",\ + 'default-discovery-service-assignment',cfgm_control_ip,\ + 'dns-server', cfgm_control_ip,'contrail-vrouter-agent:0') + if result2 == False: + self.logger.error("While searching for the configured rule, it was not found. 
Configuration failed") + result = False + for i in range(0,len(self.inputs.cfgm_control_ips)): + cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.') + cfgm_control_ip[3] = '0' + cfgm_control_ip = ".".join(cfgm_control_ip) + "/24" + self.ds_obj.discovery_rule_config('del_rule',\ + 'default-discovery-service-assignment', cfgm_control_ip,\ + 'xmpp-server', cfgm_control_ip,'contrail-vrouter-agent:0') + self.ds_obj.discovery_rule_config('del_rule',\ + 'default-discovery-service-assignment', cfgm_control_ip,\ + 'dns-server', cfgm_control_ip,'contrail-vrouter-agent:0') + self.ds_obj.read_rule("default-discovery-service-assignment") + for i in range(0,len(self.inputs.cfgm_control_ips)): + cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.') + cfgm_control_ip[3] = '0' + cfgm_control_ip = ".".join(cfgm_control_ip) + "/24" + result1 = self.ds_obj.discovery_rule_config("find_rule",\ + 'default-discovery-service-assignment',cfgm_control_ip,\ + 'xmpp-server', cfgm_control_ip,'contrail-vrouter-agent:0') + if result1 == True: + self.logger.error("While searching for the deleted rule, it was found. Deletion failed") + result = False + result2 = self.ds_obj.discovery_rule_config("find_rule",\ + 'default-discovery-service-assignment',cfgm_control_ip,\ + 'dns-server', cfgm_control_ip,'contrail-vrouter-agent:0') + if result2 == True: + self.logger.error("While searching for the deleted rule, it was found. 
Deletion failed") + result = False + assert result + # end TestDiscoveryFixture diff --git a/scripts/ecmp/base.py b/scripts/ecmp/base.py index 27a62d8da..96861ed6c 100644 --- a/scripts/ecmp/base.py +++ b/scripts/ecmp/base.py @@ -1,34 +1,23 @@ -import test +import test_v1 from common.connections import ContrailConnections from common import isolated_creds -class BaseECMPTest(test.BaseTestCase): +class BaseECMPTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseECMPTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - #cls.connections= ContrailConnections(cls.inputs) cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h - cls.vnc_lib= cls.connections.vnc_lib -# cls.logger= cls.inputs.logger - cls.agent_inspect= cls.connections.agent_inspect - cls.cn_inspect= cls.connections.cn_inspect - cls.analytics_obj=cls.connections.analytics_obj - #end setUpClass + cls.vnc_lib = cls.connections.vnc_lib + cls.agent_inspect = cls.connections.agent_inspect + cls.orch = cls.connections.orch + cls.cn_inspect = cls.connections.cn_inspect + cls.analytics_obj = cls.connections.analytics_obj + # end setUpClass @classmethod def tearDownClass(cls): - #cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(BaseECMPTest, cls).tearDownClass() #end tearDownClass diff --git a/scripts/ecmp/sanity.py b/scripts/ecmp/sanity.py index e41dd9fbe..881a6a535 100644 --- a/scripts/ecmp/sanity.py +++ b/scripts/ecmp/sanity.py @@ -1589,7 +1589,7 @@ def test_ecmp_bw_two_vms_same_fip(self): # Get the project_fixture self.project_fixture = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, 
project_name=self.inputs.project_name, connections=self.connections)) + project_name=self.inputs.project_name, connections=self.connections)) # Read the project obj and set to the floating ip object. fip_obj.set_project(self.project_fixture.project_obj) diff --git a/scripts/ecmp/sanity_with_setup.py b/scripts/ecmp/sanity_with_setup.py index 102b0770c..3be25fb35 100644 --- a/scripts/ecmp/sanity_with_setup.py +++ b/scripts/ecmp/sanity_with_setup.py @@ -108,14 +108,14 @@ def test_ecmp_svc_in_network_with_static_route_no_policy(self): self.verify_traffic_flow(self.vm1_fixture, self.vm2_fixture) self.logger.info( - '***** Will Detach the policy from the networks and delete it *****') + '%%%%% Will Detach the policy from the networks and delete it %%%%%') self.detach_policy(self.vn1_policy_fix) self.detach_policy(self.vn2_policy_fix) self.unconfig_policy(self.policy_fixture) sleep(30) self.logger.info( - '***** Ping and traffic between the networks should go thru fine because of the static route configuration *****') + '%%%%% Ping and traffic between the networks should go thru fine because of the static route configuration %%%%%') assert self.vm1_fixture.ping_with_certainty(self.vm2_fixture.vm_ip) return True diff --git a/scripts/ecmp/test_ecmp.py b/scripts/ecmp/test_ecmp.py index deb04f2e3..149d47d58 100644 --- a/scripts/ecmp/test_ecmp.py +++ b/scripts/ecmp/test_ecmp.py @@ -53,10 +53,28 @@ def test_ecmp_svc_transparent_with_3_instance(self): Maintainer : ganeshahv@juniper.net """ self.verify_svc_transparent_datapath( - si_count=1, svc_scaling=True, max_inst=2, svc_img_name='tiny_trans_fw', ci=True) + si_count=1, svc_scaling=True, max_inst=2, svc_mode='transparent', ci=True) return True # end test_ecmp_svc_transparent_with_3_instance + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_ecmp_svc_v2_transparent_with_3_instance(self): + """ + Description: Validate ECMP with version 2 service chaining transparent mode datapath having service instance + Test 
steps: + 1.Creating vm's - vm1 and vm2 in networks vn1 and vn2. + 2.Creating a service instance in transparent mode with 3 instances. + 3.Creating a service chain by applying the service instance as a service in a policy between the VNs. + 4.Checking for ping and bidirectional tcp traffic between vm1 and vm2. + Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 from vm1 and vice-versa. + Maintainer : ganeshahv@juniper.net + """ + self.verify_svc_transparent_datapath( + si_count=1, svc_scaling=True, max_inst=2, svc_mode='transparent', ci=True, st_version=2) + return True + # end test_ecmp_svc_v2_transparent_with_3_instance + @test.attr(type=['sanity']) @preposttest_wrapper def test_ecmp_svc_in_network_with_3_instance(self): @@ -74,7 +92,7 @@ def test_ecmp_svc_in_network_with_3_instance(self): Maintainer : ganeshahv@juniper.net """ self.verify_svc_in_network_datapath( - si_count=1, svc_scaling=True, max_inst=3) + si_count=1, svc_scaling=True, max_inst=3, svc_mode='in-network') svm_ids = self.si_fixtures[0].svm_ids self.get_rt_info_tap_intf_list( self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids) @@ -99,10 +117,19 @@ def test_ecmp_svc_in_network_with_static_route_no_policy(self): Maintainer : ganeshahv@juniper.net """ - vn1_subnet_list = ['100.1.1.0/24'] - vn2_subnet_list = ['200.1.1.0/24'] - self.verify_svc_in_network_datapath(si_count=1, svc_scaling=True, max_inst=1, static_route=[ - 'None', vn2_subnet_list[0], vn1_subnet_list[0]], vn1_subnets=vn1_subnet_list, vn2_subnets=vn2_subnet_list) + vn1_subnet_list = [] + vn2_subnet_list = [] + static_route = None + if self.inputs.get_af() == 'v6': + vn1_subnet_list += ['2100::/64'] + vn2_subnet_list += ['2200::/64'] + static_route = ['None', vn2_subnet_list[0], vn1_subnet_list[0]] + vn1_subnet_list += ['100.1.1.0/24'] + vn2_subnet_list += ['200.1.1.0/24'] + if not static_route: + static_route = ['None', vn2_subnet_list[0], vn1_subnet_list[0]] + 
self.verify_svc_in_network_datapath(si_count=1, svc_mode='in-network', svc_scaling=True, max_inst=1, static_route=static_route, + vn1_subnets=vn1_subnet_list, vn2_subnets=vn2_subnet_list) svm_ids = self.si_fixtures[0].svm_ids self.get_rt_info_tap_intf_list( self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids) @@ -110,13 +137,13 @@ def test_ecmp_svc_in_network_with_static_route_no_policy(self): self.verify_traffic_flow( self.vm1_fixture, dst_vm_list, self.si_fixtures[0], self.vn1_fixture) self.logger.info( - '***** Will Detach the policy from the networks and delete it *****') + '%%%%% Will Detach the policy from the networks and delete it %%%%%') self.detach_policy(self.vn1_policy_fix) self.detach_policy(self.vn2_policy_fix) self.unconfig_policy(self.policy_fixture) sleep(30) self.logger.info( - '***** Ping and traffic between the networks should go thru fine because of the static route configuration *****') + '%%%%% Ping and traffic between the networks should go thru fine because of the static route configuration %%%%%') assert self.vm1_fixture.ping_with_certainty(self.vm2_fixture.vm_ip) # Cleaning up @@ -153,7 +180,7 @@ def test_ecmp_in_pol_based_svc(self): Maintainer : ganeshahv@juniper.net """ self.verify_svc_transparent_datapath( - si_count=1, svc_scaling=True, max_inst=3, proto='tcp') + svc_mode='transparent', si_count=1, svc_scaling=True, max_inst=3, proto='tcp') self.vm1_fixture.put_pub_key_to_vm() self.vm2_fixture.put_pub_key_to_vm() # TFTP from Left VM to Right VM is expected to fail @@ -186,7 +213,7 @@ def test_ecmp_in_pol_based_svc_pol_update(self): Maintainer : ganeshahv@juniper.net """ self.verify_svc_transparent_datapath( - si_count=1, svc_scaling=True, max_inst=3, proto='tcp') + svc_mode='transparent', si_count=1, svc_scaling=True, max_inst=3, proto='tcp') self.vm1_fixture.put_pub_key_to_vm() self.vm2_fixture.put_pub_key_to_vm() # TFTP from Left VM to Right VM is expected to fail @@ -229,7 +256,7 @@ def 
test_ecmp_in_pol_based_svc_pol_update(self): @preposttest_wrapper def test_multi_SC_with_ecmp(self): """ - Description: Validate Multiple Service Instances with ECMP. + Description: Validate Multiple Service Instances with ECMP. Test steps: 1. Creating vm's - vm1 and vm2 in networks vn1 and vn2. 2. Creating 3 service instances in transparent mode with 3 instances each. @@ -241,10 +268,30 @@ def test_multi_SC_with_ecmp(self): Maintainer : ganeshahv@juniper.net """ self.verify_svc_transparent_datapath( - si_count=3, svc_scaling=True, max_inst=3) + svc_mode='transparent', si_count=3, svc_scaling=True, max_inst=3) return True # end test_multi_SC_with_ecmp + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_ecmp_svc_v2_in_network_nat_with_3_instance(self): + """ + Description: Validate ECMP with v2 service chaining in-network-nat mode datapath having service instance + Test steps: + 1. Creating vm's - vm1 and vm2 in networks vn1 and vn2. + 2. Creating a service instance in in-network-nat mode with 3 instances and + left-interface of the service instances sharing the IP and enabled for static route. + + 3. Creating a service chain by applying the service instance as a service in a policy between the VNs. + 4. Checking for ping and tcp traffic between vm1 and vm2. + Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 from vm1. 
+ Maintainer : ganeshahv@juniper.net + """ + self.verify_svc_in_network_datapath( + si_count=1, svc_scaling=True, max_inst=2, svc_mode='in-network-nat', ci=True, st_version=2) + return True + # end test_ecmp_svc_v2_in_network_nat_with_3_instance + @test.attr(type=['ci_sanity_WIP']) @preposttest_wrapper def test_ecmp_svc_in_network_nat_with_3_instance(self): @@ -261,7 +308,7 @@ def test_ecmp_svc_in_network_nat_with_3_instance(self): Maintainer : ganeshahv@juniper.net """ self.verify_svc_in_network_datapath( - si_count=1, svc_scaling=True, max_inst=2, svc_mode='in-network-nat', svc_img_name='tiny_nat_fw', ci=True) + si_count=1, svc_scaling=True, max_inst=2, svc_mode='in-network-nat', ci=True) return True # end test_ecmp_svc_in_network_nat_with_3_instance @@ -274,7 +321,7 @@ def test_ecmp_svc_in_network_with_3_instance_add_flows(self): 1. Creating vm's - vm1 and vm2 in networks vn1 and vn2. 2. Creating a service instance in in-network-nat mode with 3 instances and left-interface of the service instances sharing the IP and enabled for static route. - 3. Start traffic and and more flows. + 3. Start traffic and and more flows. 4. Creating a service chain by applying the service instance as a service in a policy b etween the VNs. 5. Checking for ping and tcp traffic between vm1 and vm2. 
@@ -283,7 +330,7 @@ def test_ecmp_svc_in_network_with_3_instance_add_flows(self): Maintainer : ganeshahv@juniper.net """ self.verify_svc_in_network_datapath( - si_count=1, svc_scaling=True, max_inst=3) + svc_mode='in-network', si_count=1, svc_scaling=True, max_inst=3) svm_ids = self.si_fixtures[0].svm_ids self.get_rt_info_tap_intf_list( self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids) @@ -291,13 +338,13 @@ def test_ecmp_svc_in_network_with_3_instance_add_flows(self): for vm in vm_list: vm.install_pkg("Traffic") old_stream1 = Stream( - protocol="ip", proto="icmp", src=self.vm1_fixture.vm_ip, + proto="icmp", src=self.vm1_fixture.vm_ip, dst=self.vm2_fixture.vm_ip, sport=unicode(10000), dport=unicode(11000)) old_stream2 = Stream( - protocol="ip", proto="udp", src=self.vm1_fixture.vm_ip, + proto="udp", src=self.vm1_fixture.vm_ip, dst=self.vm2_fixture.vm_ip, sport=unicode(10000), dport=unicode(11000)) old_stream3 = Stream( - protocol="ip", proto="tcp", src=self.vm1_fixture.vm_ip, + proto="tcp", src=self.vm1_fixture.vm_ip, dst=self.vm2_fixture.vm_ip, sport=unicode(10000), dport=unicode(11000)) self.old_stream_list = [old_stream1, old_stream2, old_stream3] @@ -325,7 +372,7 @@ def test_ecmp_svc_in_network_with_3_instance_diff_proto(self): 1. Creating vm's - vm1 and vm2 in networks vn1 and vn2. 2. Creating a service instance in in-network-nat mode with 3 instances and left-interface of the service instances sharing the IP and enabled for static route. - 3. Start traffic and send 3 different protocol traffic to the same destination. + 3. Start traffic and send 3 different protocol traffic to the same destination. 4. Creating a service chain by applying the service instance as a service in a policy b etween the VNs. 5. Checking for ping and tcp traffic between vm1 and vm2. 
@@ -334,7 +381,7 @@ def test_ecmp_svc_in_network_with_3_instance_diff_proto(self): Maintainer : ganeshahv@juniper.net """ self.verify_svc_in_network_datapath( - si_count=1, svc_scaling=True, max_inst=3) + svc_mode='in-network', si_count=1, svc_scaling=True, max_inst=3) svm_ids = self.si_fixtures[0].svm_ids self.get_rt_info_tap_intf_list( self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids) @@ -343,13 +390,13 @@ def test_ecmp_svc_in_network_with_3_instance_diff_proto(self): vm.install_pkg("Traffic") stream1 = Stream( - protocol="ip", proto="icmp", src=self.vm1_fixture.vm_ip, + proto="icmp", src=self.vm1_fixture.vm_ip, dst=self.vm2_fixture.vm_ip, sport=unicode(8000), dport=unicode(9000)) stream2 = Stream( - protocol="ip", proto="udp", src=self.vm1_fixture.vm_ip, + proto="udp", src=self.vm1_fixture.vm_ip, dst=self.vm2_fixture.vm_ip, sport=unicode(8000), dport=unicode(9000)) stream3 = Stream( - protocol="ip", proto="tcp", src=self.vm1_fixture.vm_ip, + proto="tcp", src=self.vm1_fixture.vm_ip, dst=self.vm2_fixture.vm_ip, sport=unicode(8000), dport=unicode(9000)) self.stream_list = [stream1, stream2, stream3] @@ -386,7 +433,7 @@ def test_ecmp_svc_in_network_with_3_instance_incr_dip(self): Maintainer : ganeshahv@juniper.net """ self.verify_svc_in_network_datapath( - si_count=1, svc_scaling=True, max_inst=3) + svc_mode='in-network', si_count=1, svc_scaling=True, max_inst=3) svm_ids = self.si_fixtures[0].svm_ids self.get_rt_info_tap_intf_list( self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids) @@ -405,13 +452,13 @@ def test_ecmp_svc_in_network_with_3_instance_incr_dip(self): vm.install_pkg("Traffic") stream1 = Stream( - protocol="ip", proto="udp", src=self.vm1_fixture.vm_ip, + proto="udp", src=self.vm1_fixture.vm_ip, dst=self.vm2_fixture.vm_ip, sport=unicode(8000), dport=unicode(9000)) stream2 = Stream( - protocol="ip", proto="udp", src=self.vm1_fixture.vm_ip, + proto="udp", src=self.vm1_fixture.vm_ip, dst=dest_vm2.vm_ip, sport=unicode(8000), 
dport=unicode(9000)) stream3 = Stream( - protocol="ip", proto="udp", src=self.vm1_fixture.vm_ip, + proto="udp", src=self.vm1_fixture.vm_ip, dst=dest_vm3.vm_ip, sport=unicode(8000), dport=unicode(9000)) self.stream_list = [stream1, stream2, stream3] @@ -445,7 +492,7 @@ def test_ecmp_svc_in_network_with_policy_bind_unbind(self): Maintainer : ganeshahv@juniper.net """ self.verify_svc_in_network_datapath( - si_count=1, svc_scaling=True, max_inst=3) + svc_mode='in-network', si_count=1, svc_scaling=True, max_inst=3) svm_ids = self.si_fixtures[0].svm_ids self.get_rt_info_tap_intf_list( self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids) @@ -534,7 +581,7 @@ def test_ecmp_with_svc_with_fip_dest(self): # Get the project_fixture self.project_fixture = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) + project_name=self.inputs.project_name, connections=self.connections)) # Read the project obj and set to the floating ip object. 
self.fip_obj.set_project(self.project_fixture.project_obj) @@ -619,11 +666,11 @@ def test_ecmp_bw_three_vms_same_fip_incr_sport(self): """ self.setup_common_objects() vm_list = [self.vm1, self.vm2, self.vm3] - stream1 = Stream(protocol="ip", proto="udp", src=self.fvn_vm1.vm_ip, + stream1 = Stream(proto="udp", src=self.fvn_vm1.vm_ip, dst=self.my_fip, sport=unicode(8000), dport=self.dport1) - stream2 = Stream(protocol="ip", proto="udp", src=self.fvn_vm1.vm_ip, + stream2 = Stream(proto="udp", src=self.fvn_vm1.vm_ip, dst=self.my_fip, sport=unicode(11000), dport=self.dport1) - stream3 = Stream(protocol="ip", proto="udp", src=self.fvn_vm1.vm_ip, + stream3 = Stream(proto="udp", src=self.fvn_vm1.vm_ip, dst=self.my_fip, sport=unicode(12000), dport=self.dport1) stream_list = [stream1, stream2, stream3] @@ -650,11 +697,11 @@ def test_ecmp_bw_three_vms_same_fip_incr_sip(self): """ self.setup_common_objects() vm_list = [self.vm1, self.vm2, self.vm3] - stream1 = Stream(protocol="ip", proto="udp", src=self.fvn_vm1.vm_ip, + stream1 = Stream(proto="udp", src=self.fvn_vm1.vm_ip, dst=self.my_fip, sport=self.udp_src, dport=self.dport1) - stream2 = Stream(protocol="ip", proto="udp", src=self.fvn_vm2.vm_ip, + stream2 = Stream(proto="udp", src=self.fvn_vm2.vm_ip, dst=self.my_fip, sport=self.udp_src, dport=self.dport1) - stream3 = Stream(protocol="ip", proto="udp", src=self.fvn_vm3.vm_ip, + stream3 = Stream(proto="udp", src=self.fvn_vm3.vm_ip, dst=self.my_fip, sport=self.udp_src, dport=self.dport1) stream_list = [stream1, stream2, stream3] @@ -690,8 +737,8 @@ def test_ecmp_with_svm_deletion(self): Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 from vm1 and vice-versa. 
Maintainer : ganeshahv@juniper.net """ - self.verify_svc_transparent_datapath( - si_count=1, svc_scaling=True, max_inst=3) + self.verify_svc_in_network_datapath( + svc_mode='in-network', si_count=1, svc_scaling=True, max_inst=3) svms = self.get_svms_in_si( self.si_fixtures[0], self.inputs.project_name) self.logger.info('The Service VMs in the Service Instance %s are %s' % ( @@ -704,13 +751,16 @@ def test_ecmp_with_svm_deletion(self): self.vm1_fixture, dst_vm_list, self.vm1_fixture.vm_ip, self.vm2_fixture.vm_ip) self.sender, self.receiver = self.start_traffic( self.vm1_fixture, dst_vm_list, self.stream_list, self.vm1_fixture.vm_ip, self.vm2_fixture.vm_ip) - self.verify_flow_thru_si(self.si_fixtures[0]) + self.verify_flow_thru_si(self.si_fixtures[0], self.vn1_fixture) while(len(svms) > 1): - self.logger.info('Will reduce the SVM count to %s' %(len(svms)-1)) - si_id = self.vnc_lib.service_instances_list()['service-instances'][0]['uuid'] - si_obj = self.vnc_lib.service_instance_read(id=si_id) + old_count = len(svms) + self.logger.info( + 'Will reduce the SVM count from %s to %s' % (old_count, len(svms) - 1)) + si_obj = self.vnc_lib.service_instance_read( + fq_name=self.si_fixtures[0].si_fq_name) si_prop = si_obj.get_service_instance_properties() - scale_out = my_vnc_api.ServiceScaleOutType(max_instances=(len(svms)-1)) + scale_out = my_vnc_api.ServiceScaleOutType( + max_instances=(len(svms) - 1)) si_prop.set_scale_out(scale_out) si_obj.set_service_instance_properties(si_prop) self.vnc_lib.service_instance_update(si_obj) @@ -721,11 +771,19 @@ def test_ecmp_with_svm_deletion(self): svms = sorted(set(svms)) if None in svms: svms.remove(None) + new_count = len(svms) + errmsg = 'The SVMs count has not decreased' + assert new_count < old_count, errmsg self.logger.info('The Service VMs in the Service Instance %s are %s' % ( self.si_fixtures[0].si_name, svms)) + svm_ids = [] + for svm in svms: + svm_ids.append(svm.id) + self.get_rt_info_tap_intf_list( + self.vn1_fixture, 
self.vm1_fixture, self.vm2_fixture, svm_ids) self.verify_flow_records( self.vm1_fixture, self.vm1_fixture.vm_ip, self.vm2_fixture.vm_ip) - self.verify_flow_thru_si(self.si_fixtures[0]) + self.verify_flow_thru_si(self.si_fixtures[0], self.vn1_fixture) return True # end test_ecmp_with_svm_deletion @@ -743,7 +801,7 @@ def test_ecmp_with_svm_suspend_start(self): Maintainer : ganeshahv@juniper.net """ self.verify_svc_transparent_datapath( - si_count=1, svc_scaling=True, max_inst=3) + svc_mode='transparent', si_count=1, svc_scaling=True, max_inst=3) svms = self.get_svms_in_si( self.si_fixtures[0], self.inputs.project_name) self.logger.info('The Service VMs in the Service Instance %s are %s' % ( @@ -759,7 +817,7 @@ def test_ecmp_with_svm_suspend_start(self): self.verify_flow_thru_si(self.si_fixtures[0]) self.logger.info( - '****** Will suspend the SVMs and check traffic flow ******') + '%%%%%% Will suspend the SVMs and check traffic flow %%%%%%') for i in range(len(svms) - 1): self.logger.info('Will Suspend SVM %s' % svms[i].name) svms[i].suspend() @@ -769,7 +827,7 @@ def test_ecmp_with_svm_suspend_start(self): self.verify_flow_thru_si(self.si_fixtures[0]) self.logger.info( - '****** Will resume the suspended SVMs and check traffic flow ******') + '%%%%%% Will resume the suspended SVMs and check traffic flow %%%%%%') for i in range(len(svms)): svms = self.get_svms_in_si( self.si_fixtures[0], self.inputs.project_name) @@ -812,11 +870,37 @@ def test_three_stage_SC(self): Pass criteria: Ping between the VMs should be successful. 
Maintainer : ganeshahv@juniper.net """ - self.verify_multi_inline_svc( - si_list=[('bridge', 1), ('in-net', 1), ('nat', 1)]) + si_list = [ + ('transparent', 1), ('in-network', 1), ('in-network-nat', 1)] + if self.inputs.get_af() == 'v6': + si_list = [('transparent', 1), ('in-network', 1)] + self.verify_multi_inline_svc(si_list=si_list) return True # end test_three_stage_SC + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_three_stage_v2_SC(self): + """ + Description: Validate multi-Inline SVC version 2. + Test steps: + 1.Creating vm's - vm1 and vm2 in networks vn1 and vn2. + 2.Creating 3 service instances. + 3.Creating a service chain by applying the 3 service instances in a policy between t + he VNs. + 4.There should be no traffic loss. + Pass criteria: Ping between the VMs should be successful. + Maintainer : ganeshahv@juniper.net + """ + if self.inputs.orchestrator == 'vcenter': + self.verify_multi_inline_svc( + si_list=[('in-network', 1), ('in-network-nat', 1)], st_version=2) + else: + self.verify_multi_inline_svc( + si_list=[('transparent', 1), ('in-network-nat', 1)], st_version=2) + return True + # end test_three_stage_v2_SC + @preposttest_wrapper def test_three_stage_SC_with_ECMP(self): """ @@ -831,8 +915,11 @@ def test_three_stage_SC_with_ECMP(self): from vm1 and vice-versa. Maintainer : ganeshahv@juniper.net """ - self.verify_multi_inline_svc( - si_list=[('bridge', 2), ('in-net', 2), ('nat', 2)]) + si_list = [ + ('transparent', 2), ('in-network', 2), ('in-network-nat', 2)] + if self.inputs.get_af() == 'v6': + si_list = [('transparent', 2), ('in-network', 2)] + self.verify_multi_inline_svc(si_list=si_list) return True # end test_three_stage_SC_with_ECMP @@ -850,8 +937,11 @@ def test_three_stage_SC_with_traffic(self): from vm1 and vice-versa. 
Maintainer : ganeshahv@juniper.net """ - self.verify_multi_inline_svc( - si_list=[('in-net', 2), ('bridge', 2), ('nat', 2)]) + si_list = [ + ('transparent', 2), ('in-network', 2), ('in-network-nat', 2)] + if self.inputs.get_af() == 'v6': + si_list = [('transparent', 2), ('in-network', 2)] + self.verify_multi_inline_svc(si_list=si_list) tap_list = [] si_list = self.si_list svm_ids = self.si_fixtures[0].svm_ids @@ -863,3 +953,472 @@ def test_three_stage_SC_with_traffic(self): return True # end test_three_stage_SC_with_traffic + +class TestECMPSanityIPv6(TestECMPSanity): + + @classmethod + def setUpClass(cls): + super(TestECMPSanityIPv6, cls).setUpClass() + cls.inputs.set_af('v6') + +class TestECMPFeatureIPv6(TestECMPFeature): + + @classmethod + def setUpClass(cls): + super(TestECMPFeatureIPv6, cls).setUpClass() + cls.inputs.set_af('v6') + +class TestECMPwithSVMChangeIPv6(TestECMPwithSVMChange): + + @classmethod + def setUpClass(cls): + super(TestECMPwithSVMChangeIPv6, cls).setUpClass() + cls.inputs.set_af('v6') + +class TestMultiInlineSVCIPv6(TestMultiInlineSVC): + + @classmethod + def setUpClass(cls): + super(TestMultiInlineSVCIPv6, cls).setUpClass() + cls.inputs.set_af('v6') + +class TestECMPConfigHashFeature(BaseECMPTest, VerifySvcFirewall, ECMPSolnSetup, ECMPTraffic, ECMPVerify): + + @classmethod + def setUpClass(cls): + super(TestECMPConfigHashFeature, cls).setUpClass() + + def setUp(self): + super(TestECMPConfigHashFeature, self).setUp() + # end setUp + + @test.attr(type=['ci_sanity_WIP', 'sanity']) + @preposttest_wrapper + def test_ecmp_hash_src_ip(self): + """ + Validates ecmp hash when only source ip is configured + Maintainer : cmallam@juniper.net + """ + # Bringing up the basic service chain setup. 
+ max_inst = 2 + svc_mode = 'in-network-nat' + st_version = 2 + ecmp_hash = 'default' + config_level = "vn" + + ret_dict = self.setup_ecmp_config_hash_svc(si_count=1, svc_scaling=True, + max_inst=max_inst, svc_mode=svc_mode, + st_version=st_version, + ecmp_hash=ecmp_hash, + config_level=config_level) + + left_vn_fixture = self.vn1_fixture + right_vn_fixture = self.vn2_fixture + left_vm_fixture = self.vm1_fixture + right_vm_fixture = self.vm2_fixture + + # ECMP Hash with only "source_ip" + ecmp_hash = {"source_ip": True} + config_level = "vn" + self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash, + config_level=config_level, + right_vm_fixture=right_vm_fixture, + right_vn_fixture=right_vn_fixture) + + + # Verify ECMP Hash at Agent and control node + self.verify_ecmp_hash(ecmp_hash=ecmp_hash, vn_fixture=left_vn_fixture, + left_vm_fixture=left_vm_fixture, + right_vm_fixture=right_vm_fixture) + + # Verify traffic from vn1 (left) to vn2 (right), with user specified + # flow count + flow_count = 5 + dst_vm_list = [right_vm_fixture] + self.verify_traffic_flow(left_vm_fixture, dst_vm_list, + self.si_fixtures[0], left_vn_fixture, + ecmp_hash=ecmp_hash, flow_count=flow_count) + return True + # end test_ecmp_hash_src_ip + + def test_ecmp_hash_dest_ip(self): + """ + Validates ecmp hash when only destination ip is configured + Maintainer : cmallam@juniper.net + """ + # Bringing up the basic service chain setup. 
+ max_inst = 2 + svc_mode = 'in-network-nat' + st_version = 2 + ecmp_hash = 'default' + config_level = "vn" + + ret_dict = self.setup_ecmp_config_hash_svc(si_count=1, svc_scaling=True, + max_inst=max_inst, svc_mode=svc_mode, + st_version=st_version, + ecmp_hash=ecmp_hash, + config_level=config_level) + + left_vn_fixture = self.vn1_fixture + right_vn_fixture = self.vn2_fixture + left_vm_fixture = self.vm1_fixture + right_vm_fixture = self.vm2_fixture + + # ECMP Hash with only "destination_ip" + ecmp_hash = {"destination_ip": True} + config_level = "vn" + self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash, + config_level=config_level, + right_vm_fixture=right_vm_fixture, + right_vn_fixture=right_vn_fixture) + + # Verify ECMP Hash at Agent and control node + self.verify_ecmp_hash(ecmp_hash=ecmp_hash, vn_fixture=left_vn_fixture, + left_vm_fixture=left_vm_fixture, + right_vm_fixture=right_vm_fixture) + + # Verify traffic from vn1 (left) to vn2 (right), with user specified + # flow count + flow_count = 5 + dst_vm_list = [right_vm_fixture] + self.verify_traffic_flow(left_vm_fixture, dst_vm_list, + self.si_fixtures[0], left_vn_fixture, + ecmp_hash=ecmp_hash, flow_count=flow_count) + return True + # end test_ecmp_hash_dest_ip + + def test_ecmp_hash_src_port(self): + """ + Validates ecmp hash when only source port is configured + Maintainer : cmallam@juniper.net + """ + # Bringing up the basic service chain setup. 
+ max_inst = 2 + svc_mode = 'in-network-nat' + st_version = 2 + ecmp_hash = 'default' + config_level = "vn" + + ret_dict = self.setup_ecmp_config_hash_svc(si_count=1, svc_scaling=True, + max_inst=max_inst, svc_mode=svc_mode, + st_version=st_version, + ecmp_hash=ecmp_hash, + config_level=config_level) + + left_vn_fixture = self.vn1_fixture + right_vn_fixture = self.vn2_fixture + left_vm_fixture = self.vm1_fixture + right_vm_fixture = self.vm2_fixture + + # ECMP Hash with only "source_port" + ecmp_hash = {"source_port": True} + config_level = "vn" + self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash, + config_level=config_level, + right_vm_fixture=right_vm_fixture, + right_vn_fixture=right_vn_fixture) + + # Verify ECMP Hash at Agent and control node + self.verify_ecmp_hash(ecmp_hash=ecmp_hash, vn_fixture=left_vn_fixture, + left_vm_fixture=left_vm_fixture, + right_vm_fixture=right_vm_fixture) + + # Verify traffic from vn1 (left) to vn2 (right), with user specified + # flow count + flow_count = 5 + dst_vm_list = [right_vm_fixture] + self.verify_traffic_flow(left_vm_fixture, dst_vm_list, + self.si_fixtures[0], left_vn_fixture, + ecmp_hash=ecmp_hash, flow_count=flow_count) + return True + # end test_ecmp_hash_src_port + + def test_ecmp_hash_dest_port(self): + """ + Validates ecmp hash when only destination port is configured + Maintainer : cmallam@juniper.net + """ + # Bringing up the basic service chain setup. 
+ max_inst = 2 + svc_mode = 'in-network-nat' + st_version = 2 + ecmp_hash = 'default' + config_level = "vn" + + ret_dict = self.setup_ecmp_config_hash_svc(si_count=1, svc_scaling=True, + max_inst=max_inst, svc_mode=svc_mode, + st_version=st_version, + ecmp_hash=ecmp_hash, + config_level=config_level) + + left_vn_fixture = self.vn1_fixture + right_vn_fixture = self.vn2_fixture + left_vm_fixture = self.vm1_fixture + right_vm_fixture = self.vm2_fixture + + # ECMP Hash with only "destionation_port" + ecmp_hash = {"destination_port": True} + config_level = "vn" + self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash, + config_level=config_level, + right_vm_fixture=right_vm_fixture, + right_vn_fixture=right_vn_fixture) + # Verify ECMP Hash at Agent and control node + self.verify_ecmp_hash(ecmp_hash=ecmp_hash, vn_fixture=left_vn_fixture, + left_vm_fixture=left_vm_fixture, + right_vm_fixture=right_vm_fixture) + + # Verify traffic from vn1 (left) to vn2 (right), with user specified + # flow count + flow_count = 5 + dst_vm_list = [right_vm_fixture] + self.verify_traffic_flow(left_vm_fixture, dst_vm_list, + self.si_fixtures[0], left_vn_fixture, + ecmp_hash=ecmp_hash, flow_count=flow_count) + return True + # end test_ecmp_hash_dest_port + + def test_ecmp_hash_protocol(self): + """ + Validates ecmp hash when only ip protocol is configured + Maintainer : cmallam@juniper.net + """ + # Bringing up the basic service chain setup. 
+ max_inst = 2 + svc_mode = 'in-network-nat' + st_version = 2 + ecmp_hash = 'default' + config_level = "vn" + ret_dict = self.setup_ecmp_config_hash_svc(si_count=1, svc_scaling=True, + max_inst=max_inst, svc_mode=svc_mode, + st_version=st_version, + ecmp_hash=ecmp_hash, + config_level=config_level) + + left_vn_fixture = self.vn1_fixture + right_vn_fixture = self.vn2_fixture + left_vm_fixture = self.vm1_fixture + right_vm_fixture = self.vm2_fixture + + # ECMP Hash with only "ip_protocol" + ecmp_hash = {"ip_protocol": True} + config_level = "vn" + + self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash, + config_level=config_level, + right_vm_fixture=right_vm_fixture, + right_vn_fixture=right_vn_fixture) + # Verify ECMP Hash at Agent and control node + self.verify_ecmp_hash(ecmp_hash=ecmp_hash, vn_fixture=left_vn_fixture, + left_vm_fixture=left_vm_fixture, + right_vm_fixture=right_vm_fixture) + + # Verify traffic from vn1 (left) to vn2 (right), with user specified + # flow count + flow_count = 5 + dst_vm_list = [right_vm_fixture] + self.verify_traffic_flow(left_vm_fixture, dst_vm_list, + self.si_fixtures[0], left_vn_fixture, + ecmp_hash=ecmp_hash, flow_count=flow_count) + return True + # end test_ecmp_hash_protocol + + def test_ecmp_hash_precedence(self): + """ + Validates ecmp hash config precedence levels + Maintainer : cmallam@juniper.net + """ + # Bringing up the basic service chain setup. 
+ max_inst = 2 + svc_mode = 'in-network-nat' + st_version = 2 + ecmp_hash = 'default' + config_level = "vn" + ret_dict = self.setup_ecmp_config_hash_svc(si_count=1, svc_scaling=True, + max_inst=max_inst, svc_mode=svc_mode, + st_version=st_version, + ecmp_hash=ecmp_hash, + config_level=config_level) + + left_vn_fixture = self.vn1_fixture + right_vn_fixture = self.vn2_fixture + left_vm_fixture = self.vm1_fixture + right_vm_fixture = self.vm2_fixture + + # Default ECMP Hash config at Global level + ecmp_hash = "default" + config_level = "global" + self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash, + config_level=config_level, + right_vm_fixture=right_vm_fixture, + right_vn_fixture=right_vn_fixture) + + # Default ECMP Hash config at VN level + ecmp_hash = "default" + config_level = "vn" + self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash, + config_level=config_level, + right_vm_fixture=right_vm_fixture, + right_vn_fixture=right_vn_fixture) + + # "destination_ip" only ECMP Hash config at VMI level. 
VMI should take + # priority over VN and Global + ecmp_hash = {"destination_ip": True} + config_level = "vmi" + self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash, + config_level=config_level, + right_vm_fixture=right_vm_fixture, + right_vn_fixture=right_vn_fixture) + + # Verify ECMP Hash at Agent and control node + self.verify_ecmp_hash(ecmp_hash=ecmp_hash, vn_fixture=left_vn_fixture, + left_vm_fixture=left_vm_fixture, + right_vm_fixture=right_vm_fixture) + + # Verify traffic from vn1 (left) to vn2 (right), with user specified + # flow count + flow_count = 5 + dst_vm_list = [right_vm_fixture] + self.verify_traffic_flow(left_vm_fixture, dst_vm_list, + self.si_fixtures[0], left_vn_fixture, + ecmp_hash=ecmp_hash, flow_count=flow_count) + + # Delete the ECMP Hash config at Global, VN and VMI level + ecmp_hash = "None" + config_level = "all" + self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash, + config_level=config_level, + right_vm_fixture=right_vm_fixture, + right_vn_fixture=right_vn_fixture) + + + return True + # end test_ecmp_hash_precedence + + def test_ecmp_hash_deletion(self): + """ + Validates deletion of ecmp hash configuration. When explicit ecmp hash + is deleted, hashing should happen based upon default hash (5 tuple) + Maintainer : cmallam@juniper.net + """ + # Bringing up the basic service chain setup. 
+ max_inst = 2 + svc_mode = 'in-network-nat' + st_version = 2 + ecmp_hash = 'default' + config_level = "vn" + ret_dict = self.setup_ecmp_config_hash_svc(si_count=1, svc_scaling=True, + max_inst=max_inst, svc_mode=svc_mode, + st_version=st_version, + ecmp_hash=ecmp_hash, + config_level=config_level) + + left_vn_fixture = self.vn1_fixture + right_vn_fixture = self.vn2_fixture + left_vm_fixture = self.vm1_fixture + right_vm_fixture = self.vm2_fixture + + # Explicitly delete the ECMP Hashing config + ecmp_hash = 'None' + config_level = "all" + self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash, + config_level=config_level, + right_vm_fixture=right_vm_fixture, + right_vn_fixture=right_vn_fixture) + + # When explicit ecmp hash config is deleted, default hash should takes + # place. Verifying whether flows are distributed as per default hash or + # not + ecmp_hash = {"source_ip": True, "destination_ip": True, + "source_port": True, "destination_port": True, + "ip_protocol": True} + + # Verify ECMP Hash at Agent and control node + self.verify_ecmp_hash(ecmp_hash=ecmp_hash, vn_fixture=left_vn_fixture, + left_vm_fixture=left_vm_fixture, + right_vm_fixture=right_vm_fixture) + + # Verify traffic from vn1 (left) to vn2 (right), with user specified + # flow count + flow_count = 5 + dst_vm_list = [right_vm_fixture] + self.verify_traffic_flow(left_vm_fixture, dst_vm_list, + self.si_fixtures[0], left_vn_fixture, + ecmp_hash=ecmp_hash, flow_count=flow_count) + return True + # end test_ecmp_hash_deletion + + def test_ecmp_hash_vm_suspend_restart(self): + """ + Validates deletion and addition of VMs with ecmp hash configuration. + Maintainer : cmallam@juniper.net + """ + # Bringing up the basic service chain setup. 
+ max_inst = 3 + svc_mode = 'in-network-nat' + st_version = 2 + ecmp_hash = 'default' + config_level = "vn" + ret_dict = self.setup_ecmp_config_hash_svc(si_count=1, svc_scaling=True, + max_inst=max_inst, svc_mode=svc_mode, + st_version=st_version, + ecmp_hash=ecmp_hash, + config_level=config_level) + + left_vn_fixture = self.vn1_fixture + right_vn_fixture = self.vn2_fixture + left_vm_fixture = self.vm1_fixture + right_vm_fixture = self.vm2_fixture + + # ECMP Hashing config with 'destination_ip' and at VN level + ecmp_hash = {"destination_ip": True} + config_level = "vn" + self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash, + config_level=config_level, + right_vm_fixture=right_vm_fixture, + right_vn_fixture=right_vn_fixture) + + # Verify ECMP Hash at Agent and control node + self.verify_ecmp_hash(ecmp_hash=ecmp_hash, vn_fixture=left_vn_fixture, + left_vm_fixture=left_vm_fixture, + right_vm_fixture=right_vm_fixture) + + svms = self.get_svms_in_si(self.si_fixtures[0], + self.inputs.project_name) + self.logger.info('The Service VMs in the Service Instance %s are %s'% (self.si_fixtures[0].si_name, svms)) + for svm in svms: + self.logger.info('SVM %s is in %s state' % (svm, svm.status)) + self.logger.info('%% Will suspend the SVMs and check traffic flow %%') + + # Verify traffic from vn1 (left) to vn2 (right), with user specified + # flow count + flow_count = 5 + dst_vm_list = [right_vm_fixture] + for i in range(len(svms) - 1): + self.logger.info('Will Suspend SVM %s' % svms[i].name) + svms[i].suspend() + sleep(30) + self.verify_traffic_flow(left_vm_fixture, dst_vm_list, + self.si_fixtures[0], left_vn_fixture, + ecmp_hash=ecmp_hash, flow_count=flow_count) + self.logger.info('%% Will resume the suspended SVMs and check traffic flow %%%%%%') + for i in range(len(svms)): + svms = self.get_svms_in_si(self.si_fixtures[0], + self.inputs.project_name) + if svms[i].status == 'SUSPENDED': + self.logger.info('Will resume the suspended SVM %s' % svms[i].name) + svms[i].resume() + 
sleep(30) + else: + self.logger.info('SVM %s is not SUSPENDED' % svms[i].name) + + self.verify_traffic_flow(left_vm_fixture, dst_vm_list, + self.si_fixtures[0], left_vn_fixture, + ecmp_hash=ecmp_hash, flow_count=flow_count) + + + return True + # end test_ecmp_hash_vm_suspend_restart + + + diff --git a/scripts/encap/base.py b/scripts/encap/base.py index d97501487..618cd5323 100644 --- a/scripts/encap/base.py +++ b/scripts/encap/base.py @@ -1,28 +1,16 @@ -import test +import test_v1 import fixtures from common import isolated_creds -class BaseEncapTest(test.BaseTestCase): +class BaseEncapTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseEncapTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - #cls.connections= ContrailConnections(cls.inputs) cls.quantum_h = cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib = cls.connections.vnc_lib -# cls.logger= cls.inputs.logger cls.agent_inspect = cls.connections.agent_inspect cls.cn_inspect = cls.connections.cn_inspect cls.analytics_obj = cls.connections.analytics_obj @@ -30,7 +18,5 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - # cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(BaseEncapTest, cls).tearDownClass() # end tearDownClass diff --git a/scripts/floatingip/__init__.py b/scripts/floatingip/__init__.py deleted file mode 100644 index 80d48a845..000000000 --- a/scripts/floatingip/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Floating IP test package.""" diff --git a/scripts/floatingip/base.py b/scripts/floatingip/base.py deleted file mode 100644 index f5fdb4c45..000000000 --- 
a/scripts/floatingip/base.py +++ /dev/null @@ -1,125 +0,0 @@ -import test -from common import isolated_creds -from common import create_public_vn -from vn_test import * -from vm_test import * -import fixtures - - -class FloatingIpBaseTest(test.BaseTestCase): - - @classmethod - def setUpClass(cls): - super(FloatingIpBaseTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - cls.admin_inputs = cls.isolated_creds.get_admin_inputs() - cls.admin_connections = cls.isolated_creds.get_admin_connections() - cls.quantum_h = cls.connections.quantum_h - cls.nova_h = cls.connections.nova_h - cls.vnc_lib = cls.connections.vnc_lib - cls.agent_inspect = cls.connections.agent_inspect - cls.cn_inspect = cls.connections.cn_inspect - cls.analytics_obj = cls.connections.analytics_obj - cls.orch = cls.connections.orch - cls.public_vn_obj = create_public_vn.PublicVn( - cls.__name__, - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.public_vn_obj.configure_control_nodes() - # end setUpClass - - @classmethod - def tearDownClass(cls): - cls.isolated_creds.delete_tenant() - super(FloatingIpBaseTest, cls).tearDownClass() - # end tearDownClass - - def setUp(self): - super(FloatingIpBaseTest, self).setUp() - '''self.inputs = inputs - self.connections = connections - self.setup_common_objects()''' - - def cleanUp(self): - super(FloatingIpBaseTest, self).cleanUp() - - def scp_files_to_vm(self, src_vm, dst_vm): - result = True - src_vm.put_pub_key_to_vm() - dst_vm.put_pub_key_to_vm() - dest_vm_ip = dst_vm.vm_ip - file_sizes = ['1000', '1101', '1202'] - for size in file_sizes: - self.logger.info("-" * 80) - self.logger.info("FILE SIZE 
= %sB" % size) - self.logger.info("-" * 80) - - self.logger.info('Transferring the file from %s to %s using scp' % - (src_vm.vm_name, dst_vm.vm_name)) - filename = 'testfile' - - # Create file - cmd = 'dd bs=%s count=1 if=/dev/zero of=%s' % (size, filename) - src_vm.run_cmd_on_vm(cmds=[cmd]) - - # Copy key - dst_vm.run_cmd_on_vm( - cmds=['cp -f ~root/.ssh/authorized_keys ~/.ssh/'], - as_sudo=True) - # Scp file from EVPN_VN_L2_VM1 to EVPN_VN_L2_VM2 using - # EVPN_VN_L2_VM2 vm's eth1 interface ip - src_vm.scp_file_to_vm(filename, vm_ip=dst_vm.vm_ip) - src_vm.run_cmd_on_vm(cmds=['sync']) - # Verify if file size is same in destination vm - out_dict = dst_vm.run_cmd_on_vm( - cmds=['ls -l %s' % (filename)]) - if size in out_dict.values()[0]: - self.logger.info('File of size %s is trasferred successfully to \ - %s by scp ' % (size, dest_vm_ip)) - else: - self.logger.warn('File of size %s is not trasferred fine to %s \ - by scp !! Pls check logs' % (size, dest_vm_ip)) - result = result and False - return result - - def get_two_different_compute_hosts(self): - host_list = self.connections.orch.get_hosts() - self.compute_1 = host_list[0] - self.compute_2 = host_list[0] - if len(host_list) > 1: - self.compute_1 = host_list[0] - self.compute_2 = host_list[1] - - -class CreateAssociateFip(fixtures.Fixture): - - """Create and associate a floating IP to the Virtual Machine.""" - - def __init__(self, inputs, fip_fixture, vn_id, vm_id): - self.inputs = inputs - self.logger = self.inputs.logger - self.fip_fixture = fip_fixture - self.vn_id = vn_id - self.vm_id = vm_id - - def setUp(self): - self.logger.info("Create associate FIP") - super(CreateAssociateFip, self).setUp() - self.fip_id = self.fip_fixture.create_and_assoc_fip( - self.vn_id, self.vm_id) - - def cleanUp(self): - self.logger.info("Disassociationg FIP") - super(CreateAssociateFip, self).cleanUp() - self.fip_fixture.disassoc_and_delete_fip(self.fip_id) diff --git a/scripts/floatingip/test_floatingip.py 
b/scripts/floatingip/test_floatingip.py index 86cc70325..6a022e6ab 100644 --- a/scripts/floatingip/test_floatingip.py +++ b/scripts/floatingip/test_floatingip.py @@ -24,6 +24,7 @@ from policy_test import * from multiple_vn_vm_test import * from contrail_fixtures import * +from common import isolated_creds from tcutils.wrappers import preposttest_wrapper sys.path.append(os.path.realpath('tcutils/pkgs/Traffic')) from tcutils.commands import * @@ -1646,14 +1647,9 @@ def test_fip_pool_shared_across_project(self): vm4_name = get_random_name('vm4') vm5_name = get_random_name('vm5') - self.demo_proj_inputs1 = self.useFixture( - ContrailTestInit( - self.ini_file, - stack_user='admin', - stack_password='contrail123', - project_fq_name=[ - 'default-domain', - 'demo'], logger=self.logger)) + self.demo_proj_inputs1 = ContrailTestInit( + self.ini_file, stack_tenant='demo',logger=self.logger + ) self.demo_proj_connections1 = ContrailConnections( self.demo_proj_inputs1, self.logger) self.connections = ContrailConnections(self.inputs, self.logger) @@ -1729,7 +1725,7 @@ def test_fip_pool_shared_across_project(self): # Adding further projects to floating IP. self.logger.info('Adding project demo to FIP pool %s' % (fip_pool_name)) - project_obj = fip_fixture.assoc_project(fip_fixture, 'demo') + project_obj = fip_fixture.assoc_project('demo') # Asscociating FIP to VMs under demo project and exaust 4 fips available from the /29 subnet self.logger.info( @@ -1789,7 +1785,7 @@ def test_fip_pool_shared_across_project(self): # Removing further projects from floating IP pool. 
For cleanup self.logger.info('Removing project demo to FIP pool %s' % (fip_pool_name)) - project_obj = fip_fixture.deassoc_project(fip_fixture, 'demo') + project_obj = fip_fixture.deassoc_project('demo') if not result: self.logger.error( @@ -1823,65 +1819,38 @@ def test_communication_across__diff_proj(self): self.get_two_different_compute_hosts() self.connections = ContrailConnections(self.inputs, self.logger) # Projects - user1_fixture = self.useFixture( - UserFixture( - connections=self.connections, - username=user_list[0][0], - password=user_list[0][1])) - + user1_fixture= self.useFixture(UserFixture(connections=self.connections, + username=user_list[0][0], password=user_list[0][1])) project_fixture1 = self.useFixture( ProjectFixture( - project_name=projects[ - 0], vnc_lib_h=self.vnc_lib, username=user_list[0][0], + project_name=projects[0], username=user_list[0][0], password=user_list[0][1], connections=self.connections)) - user1_fixture.add_user_to_tenant( - projects[0], - user_list[0][0], - user_list[0][2]) - project_inputs1 = self.useFixture( - ContrailTestInit( - self.ini_file, - stack_user=project_fixture1.username, - stack_password=project_fixture1.password, - project_fq_name=[ - 'default-domain', - projects[0]], logger=self.logger)) - project_connections1 = ContrailConnections( - project_inputs1, - self.logger) + project_fixture1.set_user_creds(project_fixture1.username,project_fixture1.password) + user1_fixture.add_user_to_tenant(projects[0], user_list[0][0] , user_list[0][2]) + project_inputs1 = ContrailTestInit( + self.ini_file, stack_user=project_fixture1.project_username, + stack_password=project_fixture1.project_user_password, + stack_tenant=projects[0], logger = self.logger) + project_connections1 = ContrailConnections(project_inputs1,self.logger) self.connections = ContrailConnections(self.inputs, self.logger) self.logger.info( 'Default SG to be edited for allow all on project: %s' % projects[0]) 
project_fixture1.set_sec_group_for_allow_all(projects[0], 'default') - user2_fixture = self.useFixture( - UserFixture( - connections=self.connections, - username=user_list[1][0], - password=user_list[1][1])) - + user2_fixture= self.useFixture(UserFixture(connections=self.connections, + username=user_list[1][0], password=user_list[1][1])) project_fixture2 = self.useFixture( ProjectFixture( - project_name=projects[ - 1], vnc_lib_h=self.vnc_lib, username=user_list[1][0], + project_name=projects[1], username=user_list[1][0], password=user_list[1][1], connections=self.connections)) - user2_fixture.add_user_to_tenant( - projects[1], - user_list[1][0], - user_list[1][2]) - project_inputs2 = self.useFixture( - ContrailTestInit( - self.ini_file, - stack_user=project_fixture2.username, - stack_password=project_fixture2.password, - project_fq_name=[ - 'default-domain', - projects[1]], logger=self.logger)) - project_connections2 = ContrailConnections( - project_inputs2, - self.logger) - self.connections = ContrailConnections(self.inputs, self.logger) + project_fixture2.set_user_creds(project_fixture2.username,project_fixture2.password) + user2_fixture.add_user_to_tenant(projects[1], user_list[1][0] , user_list[1][2]) + project_inputs2 = ContrailTestInit( + self.ini_file, stack_user=project_fixture2.project_username, + stack_password=project_fixture2.project_user_password, + stack_tenant=projects[1], logger = self.logger) + project_connections2 = ContrailConnections(project_inputs2, self.logger) self.logger.info( 'Default SG to be edited for allow all on project: %s' % projects[1]) @@ -1938,7 +1907,7 @@ def test_communication_across__diff_proj(self): # Adding further projects to floating IP. 
self.logger.info('Adding project demo to FIP pool %s' % (fip_pool_name)) - project_obj = fip_fixture.assoc_project(fip_fixture, projects[0]) + project_obj = fip_fixture.assoc_project(projects[0]) self.logger.info( 'Allocating FIP to VM %s in project %s from VN %s in project %s ' % @@ -1954,7 +1923,7 @@ def test_communication_across__diff_proj(self): # Removing further projects from floating IP pool. For cleanup self.logger.info('Removing project %s from FIP pool %s' % (projects[0], fip_pool_name)) - project_obj = fip_fixture.deassoc_project(fip_fixture, projects[0]) + project_obj = fip_fixture.deassoc_project(projects[0]) if not result: self.logger.error( @@ -2206,7 +2175,7 @@ def test_traffic_to_fip(self): msg = proto + \ " Traffic Stats is not matching with opServer flow series data" self.logger.info( - "***Actual Traffic sent by agent %s \n\n stats shown by Analytics flow series%s" % + "%%%%%%Actual Traffic sent by agent %s \n\n stats shown by Analytics flow series%s" % (traffic_stats[proto], flow_series_data[proto])) print flow_series_data[proto] for i in xrange(len(flow_series_data[proto]) - 1): @@ -2218,7 +2187,7 @@ def test_traffic_to_fip(self): self.logger.info("-" * 80) self.logger.info( - "***Let flows age out and verify analytics still shows the data in the history***") + "%%%Let flows age out and verify analytics still shows the data in the history%%%") self.logger.info("-" * 80) time.sleep(180) for proto in traffic_proto_l: @@ -2279,173 +2248,6 @@ def test_traffic_to_fip(self): return result # end test_fip_with_traffic - @preposttest_wrapper - def test_ping_to_fip_using_diag(self): - '''Test ping to floating IP using diag introspect. 
- ''' - result = True - fip_pool_name = get_random_name('some-pool1') - - (vn1_name, vn1_subnets) = ( - get_random_name("vn1"), [get_random_cidr()]) - (fvn1_name, fvn1_subnets) = ( - get_random_name("fip_vn1"), [get_random_cidr()]) - (vn1_vm1_name) = (get_random_name('vn1_vm1')) - (fvn1_vm1_name) = (get_random_name('fvn1_vm1')) - - # Get all computr hosts - self.get_two_different_compute_hosts() - fvn1_fixture = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, - connections=self.connections, - inputs=self.inputs, - vn_name=fvn1_name, - subnets=fvn1_subnets)) - vn1_fixture = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, - connections=self.connections, - inputs=self.inputs, - vn_name=vn1_name, - subnets=vn1_subnets)) - fvn1_vm1_fixture = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=fvn1_fixture.obj, - vm_name=fvn1_vm1_name, - node_name=self.compute_2)) - - vn1_vm1_fixture = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn1_fixture.obj, - vm_name=vn1_vm1_name, - node_name=self.compute_1)) - - assert fvn1_fixture.verify_on_setup() - assert vn1_fixture.verify_on_setup() - assert vn1_vm1_fixture.verify_on_setup() - assert fvn1_vm1_fixture.verify_on_setup() - - fip_fixture1 = self.useFixture( - FloatingIPFixture( - project_name=self.inputs.project_name, - inputs=self.inputs, - connections=self.connections, - pool_name=fip_pool_name, - vn_id=fvn1_fixture.vn_id)) - assert fip_fixture1.verify_on_setup() - - fip_id1 = fip_fixture1.create_and_assoc_fip( - fvn1_fixture.vn_id, vn1_vm1_fixture.vm_id) - self.addCleanup(fip_fixture1.disassoc_and_delete_fip, fip_id1) - assert fip_fixture1.verify_fip(fip_id1, vn1_vm1_fixture, fvn1_fixture) - - if not fvn1_vm1_fixture.ping_with_certainty(fip_fixture1.fip[fip_id1]): - result = result and False - inspect_h1 = self.agent_inspect[fvn1_vm1_fixture.vm_node_ip] - 
self.logger.info("Pinging using diag introspect from IP %s to IP %s" % - (fvn1_vm1_fixture.vm_ip, fip_fixture1.fip[fip_id1])) - result = inspect_h1.get_vna_verify_diag_ping( - src_ip=fvn1_vm1_fixture.vm_ip, - dst_ip=fip_fixture1.fip[fip_id1], - vrf=fvn1_vm1_fixture.agent_vrf_objs['vrf_list'][0]['name'], - proto='17') - if not result: - self.logger.error( - 'Test to ping uding diag between VMs %s and %s' % - (fvn1_vm1_fixture.vm_ip, fip_fixture1.fip[fip_id1])) - assert result - return result - # end test_ping_to_fip_using_diag - - @test.attr(type=['sanity', 'ci_sanity', 'quick_sanity', 'vcenter']) - @preposttest_wrapper - def test_floating_ip(self): - '''Test to validate floating-ip Assignment to a VM. It creates a VM, assigns a FIP to it and pings to a IP in the FIP VN. - ''' - result = True - fip_pool_name = get_random_name('some-pool') - vn1_vm1_name = get_random_name('vn1_vm1_name') - fvn_vm1_name = get_random_name('fvn_vm1_name') - - (vn1_name, vn1_subnets) = ( - get_random_name("vn1"), [get_random_cidr()]) - (fvn_name, fvn_subnets) = ( - get_random_name("fvn"), [get_random_cidr()]) - - # Get all computes - self.get_two_different_compute_hosts() - - fvn_fixture = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, - connections=self.connections, - inputs=self.inputs, - vn_name=fvn_name, - subnets=fvn_subnets)) - - assert fvn_fixture.verify_on_setup() - - vn1_fixture = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, - connections=self.connections, - inputs=self.inputs, - vn_name=vn1_name, - subnets=vn1_subnets)) - - assert vn1_fixture.verify_on_setup() - - vn1_vm1_fixture = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn1_fixture.obj, - vm_name=vn1_vm1_name, - node_name=self.compute_1 - )) - - fvn_vm1_fixture = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=fvn_fixture.obj, - 
vm_name=fvn_vm1_name, - node_name=self.compute_2 - )) - - assert vn1_vm1_fixture.verify_on_setup() - assert fvn_vm1_fixture.verify_on_setup() - - fip_fixture = self.useFixture( - FloatingIPFixture( - project_name=self.inputs.project_name, - inputs=self.inputs, - connections=self.connections, - pool_name=fip_pool_name, - vn_id=fvn_fixture.vn_id)) - assert fip_fixture.verify_on_setup() - fip_id = fip_fixture.create_and_assoc_fip( - fvn_fixture.vn_id, vn1_vm1_fixture.vm_id) - assert fip_fixture.verify_fip(fip_id, vn1_vm1_fixture, fvn_fixture) - vn1_vm1_fixture.wait_till_vm_up() - fvn_vm1_fixture.wait_till_vm_up() - if not vn1_vm1_fixture.ping_with_certainty(fvn_vm1_fixture.vm_ip): - result = result and False - fip_fixture.disassoc_and_delete_fip(fip_id) - - if not result: - self.logger.error('Test to ping between VMs %s and %s failed' % - (vn1_vm1_name, fvn_vm1_name)) - assert result - - return True - # end test_floating_ip - class FloatingipTestSanity4(base.FloatingIpBaseTest): @@ -2795,7 +2597,7 @@ def test_longest_prefix_match_with_fip_and_staticroute(self): username, self.inputs.cfgm_ips[0]), password=password, warn_only=True, abort_on_prompts=False, debug=True): - status = run('cd /opt/contrail/utils;' + add_static_route_cmd) + status = run('cd /usr/share/contrail-utils/;' + add_static_route_cmd) self.logger.debug("%s" % status) compute_ip = vm2_fixture.vm_node_ip @@ -2807,7 +2609,7 @@ def test_longest_prefix_match_with_fip_and_staticroute(self): vm2_tapintf) execute_cmd(session, cmd, self.logger) assert not(vm1_fixture.ping_to_ip(vm3_fixture.vm_ip, count='20')) - self.logger.info('***** Will check the result of tcpdump *****') + self.logger.info('%%%%% Will check the result of tcpdump %%%%%') output_cmd = 'cat /tmp/%s_out.log' % vm2_tapintf output, err = execute_cmd_out(session, output_cmd, self.logger) print output @@ -2839,7 +2641,7 @@ def test_longest_prefix_match_with_fip_and_staticroute(self): result = result and False self.logger.error( 'Longest prefix 
matched route is not taken floating ip ping is failing') - self.logger.info('***** Will check the result of tcpdump *****') + self.logger.info('%%%%% Will check the result of tcpdump %%%%%') output_cmd = 'cat /tmp/%s_out.log' % vm2_tapintf output, err = execute_cmd_out(session, output_cmd, self.logger) print output @@ -2863,7 +2665,7 @@ def test_longest_prefix_match_with_fip_and_staticroute(self): host_string='%s@%s' % ( username, self.inputs.cfgm_ips[0]), password=password, warn_only=True, abort_on_prompts=False, debug=True): - status = run('cd /opt/contrail/utils;' + del_static_route_cmd) + status = run('cd /usr/share/contrail-utils/;' + del_static_route_cmd) self.logger.debug("%s" % status) assert result return True @@ -3095,7 +2897,7 @@ def test_longest_prefix_match_with_fip_and_native_staticroute(self): host_string='%s@%s' % ( username, self.inputs.cfgm_ips[0]), password=password, warn_only=True, abort_on_prompts=False, debug=True): - status = run('cd /opt/contrail/utils;' + add_static_route_cmd) + status = run('cd /usr/share/contrail-utils/;' + add_static_route_cmd) self.logger.debug("%s" % status) fip_fixture = self.useFixture( @@ -3124,7 +2926,7 @@ def test_longest_prefix_match_with_fip_and_native_staticroute(self): vm3_tapintf, vm3_tapintf) execute_cmd(session, cmd, self.logger) assert not(vm1_fixture.ping_to_ip(vm2_eth1_ip, count='20')) - self.logger.info('***** Will check the result of tcpdump *****\n') + self.logger.info('%%%%% Will check the result of tcpdump %%%%%\n') output_cmd = 'cat /tmp/%s_out.log' % vm3_tapintf output, err = execute_cmd_out(session, output_cmd, self.logger) print output @@ -3152,7 +2954,7 @@ def test_longest_prefix_match_with_fip_and_native_staticroute(self): host_string='%s@%s' % ( username, self.inputs.cfgm_ip), password=password, warn_only=True, abort_on_prompts=False, debug=True): - status = run('cd /opt/contrail/utils;' + add_static_route_cmd) + status = run('cd /usr/share/contrail-utils/;' + add_static_route_cmd) 
self.logger.debug("%s" % status) execute_cmd(session, cmd, self.logger) @@ -3160,7 +2962,7 @@ def test_longest_prefix_match_with_fip_and_native_staticroute(self): result = result and False self.logger.error( 'Longest prefix matched route is not taken ping using native static route is failing \n') - self.logger.info('***** Will check the result of tcpdump *****') + self.logger.info('%%%%% Will check the result of tcpdump %%%%%') output_cmd = 'cat /tmp/%s_out.log' % vm3_tapintf output, err = execute_cmd_out(session, output_cmd, self.logger) print output @@ -3192,9 +2994,9 @@ def test_longest_prefix_match_with_fip_and_native_staticroute(self): host_string='%s@%s' % ( username, self.inputs.cfgm_ip), password=password, warn_only=True, abort_on_prompts=False, debug=True): - status = run('cd /opt/contrail/utils;' + del_static_route_cmd1) + status = run('cd /usr/share/contrail-utils/;' + del_static_route_cmd1) self.logger.debug("%s" % status) - status = run('cd /opt/contrail/utils;' + del_static_route_cmd2) + status = run('cd /usr/share/contrail-utils/;' + del_static_route_cmd2) self.logger.debug("%s" % status) assert result, 'Failed to take route with longest prefix' @@ -3312,10 +3114,10 @@ def test_longest_prefix_match_with_2fip_different_vn_name(self): host_string='%s@%s' % ( username, self.inputs.cfgm_ip), password=password, warn_only=True, abort_on_prompts=False, debug=True): - status1 = run('cd /opt/contrail/utils;' + add_static_route_cmd1) + status1 = run('cd /usr/share/contrail-utils/;' + add_static_route_cmd1) self.logger.debug("%s" % status1) - status2 = run('cd /opt/contrail/utils;' + add_static_route_cmd2) + status2 = run('cd /usr/share/contrail-utils/;' + add_static_route_cmd2) self.logger.debug("%s" % status2) fip_pool_name1 = get_random_name('test-floating-pool1') @@ -3371,7 +3173,7 @@ def test_longest_prefix_match_with_2fip_different_vn_name(self): result = result and False self.logger.error("Ping from vm222 to vm111 failed not expected") - 
self.logger.info('***** Will check the result of tcpdump *****\n') + self.logger.info('%%%%% Will check the result of tcpdump %%%%%\n') output_cmd1 = 'cat /tmp/%s_out.log' % vm1_tapintf_eth1 output_cmd2 = 'cat /tmp/%s_out.log' % vm1_tapintf_eth2 output1, err = execute_cmd_out(session, output_cmd1, self.logger) @@ -3411,9 +3213,9 @@ def test_longest_prefix_match_with_2fip_different_vn_name(self): host_string='%s@%s' % ( username, self.inputs.cfgm_ip), password=password, warn_only=True, abort_on_prompts=False, debug=True): - status1 = run('cd /opt/contrail/utils;' + del_static_route_cmd1) + status1 = run('cd /usr/share/contrail-utils/;' + del_static_route_cmd1) self.logger.debug("%s" % status1) - status2 = run('cd /opt/contrail/utils;' + del_static_route_cmd2) + status2 = run('cd /usr/share/contrail-utils/;' + del_static_route_cmd2) self.logger.debug("%s" % status2) assert result, 'Longest prefix match rule not followed' @@ -3507,7 +3309,7 @@ def test_longest_prefix_match_with_two_fips_from_same_vn(self): result = result and False self.logger.error("Ping from vm222 to vm111 failed not expected") - self.logger.info('***** Will check the result of tcpdump *****\n') + self.logger.info('%%%%% Will check the result of tcpdump %%%%%\n') output_cmd = 'cat /tmp/%s_out.log' % vm1_tapintf output, err = execute_cmd_out(session, output_cmd, self.logger) print output @@ -3541,7 +3343,7 @@ def test_longest_prefix_match_with_two_fips_from_same_vn(self): count='20')): result = result and False self.logger.error("Ping from vm222 to vm111 failed not expected") - self.logger.info('***** Will check the result of tcpdump *****') + self.logger.info('%%%%% Will check the result of tcpdump %%%%%') output_cmd = 'cat /tmp/%s_out.log' % vm1_tapintf output, err = execute_cmd_out(session, output_cmd, self.logger) print output diff --git a/scripts/floatingip/test_mx.py b/scripts/floatingip/test_mx.py index 55187ee85..77b5974a8 100644 --- a/scripts/floatingip/test_mx.py +++ 
b/scripts/floatingip/test_mx.py @@ -182,7 +182,7 @@ def test_apply_policy_fip_on_same_vn(self): self.logger.info('Adding project %s to FIP pool %s' % (self.inputs.project_name, fip_pool_name)) project_obj = self.public_vn_obj.fip_fixture.assoc_project\ - (self.public_vn_obj.fip_fixture, self.inputs.project_name) + (self.inputs.project_name) fip_id = self.public_vn_obj.fip_fixture.create_and_assoc_fip( self.public_vn_obj.public_vn_fixture.vn_id, vm1_fixture.vm_id, project_obj) @@ -301,7 +301,7 @@ def test_ftp_http_with_public_ip(self): self.logger.info('Adding project %s to FIP pool %s' % (self.inputs.project_name, fip_pool_name)) project_obj = self.public_vn_obj.fip_fixture.assoc_project\ - (self.public_vn_obj.fip_fixture, self.inputs.project_name) + (self.inputs.project_name) fip_id = self.public_vn_obj.fip_fixture.create_and_assoc_fip( self.public_vn_obj.public_vn_fixture.vn_id, vm1_fixture.vm_id,project_obj) @@ -500,7 +500,7 @@ def test_fip_with_vm_in_2_vns(self): self.logger.info('Adding project %s to FIP pool %s' % (self.inputs.project_name, fip_pool_name)) project_obj = self.public_vn_obj.fip_fixture.assoc_project\ - (self.public_vn_obj.fip_fixture, self.inputs.project_name) + (self.inputs.project_name) # FIP public self.logger.info( @@ -564,7 +564,7 @@ def test_fip_with_vm_in_2_vns(self): flow_rec1_direction = False flow_rec1_nat = False for iter in range(25): - self.logger.debug('**** Iteration %s *****' % iter) + self.logger.debug('%%%%%%%% Iteration %s %%%%%%%%%%' % iter) flow_rec1 = None flow_rec1 = inspect_h1.get_vna_fetchallflowrecords() for rec in flow_rec1: @@ -602,7 +602,7 @@ def test_fip_with_vm_in_2_vns(self): flow_rec2_direction = False flow_rec2_nat = False for iter in range(25): - self.logger.debug('**** Iteration %s *****' % iter) + self.logger.debug('%%%%%%%% Iteration %s %%%%%%%%%%' % iter) flow_rec2 = None flow_rec2 = inspect_h1.get_vna_fetchallflowrecords() for rec in flow_rec2: diff --git a/scripts/heat/base.py b/scripts/heat/base.py 
deleted file mode 100644 index c8ce7217f..000000000 --- a/scripts/heat/base.py +++ /dev/null @@ -1,258 +0,0 @@ -import time -import test -from common.connections import ContrailConnections -from common import isolated_creds -from vn_test import VNFixture -from heat_test import * -from vm_test import VMFixture -from svc_template_fixture import * -from svc_instance_fixture import * -from project_test import ProjectFixture -from tcutils.util import get_random_name, retry -from fabric.context_managers import settings -from fabric.api import run -from fabric.operations import get, put -from tcutils.commands import ssh, execute_cmd, execute_cmd_out -import template as template -import env as env -import ConfigParser -import re -import copy - -contrail_api_conf = '/etc/contrail/contrail-api.conf' - - -class BaseHeatTest(test.BaseTestCase): - - @classmethod - def setUpClass(cls): - super(BaseHeatTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.admin_connections = cls.isolated_creds.get_admin_connections() - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - cls.admin_inputs = cls.isolated_creds.get_admin_inputs() - cls.admin_connections = cls.isolated_creds.get_admin_connections() - cls.quantum_h = cls.connections.quantum_h - cls.nova_h = cls.connections.nova_h - cls.vnc_lib = cls.connections.vnc_lib - cls.agent_inspect = cls.connections.agent_inspect - cls.cn_inspect = cls.connections.cn_inspect - cls.analytics_obj = cls.connections.analytics_obj - cls.api_s_inspect = cls.connections.api_server_inspect - # end setUpClass - - @classmethod - def tearDownClass(cls): - cls.isolated_creds.delete_tenant() - super(BaseHeatTest, cls).tearDownClass() - # end tearDownClass - - def 
get_template(self, template_name): - template_name = '%s' % template_name - return getattr(template, template_name) - # end get_template - - def get_env(self, env_name): - env_name = '%s' % env_name - return copy.deepcopy(getattr(env, env_name)) - # end get_env - - def verify_vn(self, stack, env, stack_name): - op = stack.stacks.get(stack_name).outputs - time.sleep(5) - for output in op: - if output['output_key'] == 'right_net_id': - vn_id = output['output_value'] - vn_obj = self.vnc_lib.virtual_network_read(id=vn_id) - vn_name = str(env['parameters']['right_net_name']) - subnet = str(env['parameters']['right_net_cidr']) - elif output['output_key'] == 'left_net_id': - vn_id = output['output_value'] - vn_obj = self.vnc_lib.virtual_network_read(id=vn_id) - vn_name = str(env['parameters']['left_net_name']) - subnet = str(env['parameters']['left_net_cidr']) - elif output['output_key'] == 'transit_net_id': - vn_id = output['output_value'] - vn_obj = self.vnc_lib.virtual_network_read(id=vn_id) - vn_name = str(env['parameters']['transit_net_name']) - subnet = str(env['parameters']['transit_net_cidr']) - vn_fix = self.useFixture(VNFixture(project_name=self.inputs.project_name, - vn_name=vn_name, inputs=self.inputs, subnets=[subnet], connections=self.connections)) - if vn_fix.vn_id == vn_id: - self.logger.info('VN %s launched successfully via heat' % vn_name) - assert vn_fix.verify_on_setup() - return vn_fix - # end verify_vn - - def update_stack(self, hs_obj, stack_name=None, change_set=[]): - template = self.get_template(template_name=stack_name + '_template') - env = self.get_env(env_name=stack_name + '_env') - parameters = env['parameters'] - if env['parameters'][change_set[0]] != change_set[1]: - parameters[change_set[0]] = change_set[1] - hs_obj.update(stack_name, parameters) - else: - self.logger.info( - 'No change seen in the Stack %s to update' % stack_name) - # end update_stack - - def config_vn(self, stack_name=None): - template = 
self.get_template(template_name=stack_name + '_template') - env = self.get_env(env_name=stack_name + '_env') - vn_hs_obj = self.config_heat_obj(stack_name, template, env) - stack = vn_hs_obj.heat_client_obj - vn_fix = self.verify_vn(stack, env, stack_name) - self.logger.info( - 'VN %s launched successfully with ID %s' % (vn_fix.vn_name, vn_fix.vn_id)) - return vn_fix, vn_hs_obj - # end config_vn - - def config_heat_obj(self, stack_name, template, env): - return self.useFixture(HeatStackFixture(connections=self.connections, - inputs=self.inputs, stack_name=stack_name, project_fq_name=self.inputs.project_fq_name, template=template, env=env)) - # end config_heat_obj - - def config_vms(self, vn_list): - stack_name = 'vms' - template = self.get_template(template_name='vms_template') - env = self.get_env(env_name='vms_env') - env['parameters']['right_net_id'] = vn_list[1].vn_id - env['parameters']['left_net_id'] = vn_list[0].vn_id - vms_hs_obj = self.config_heat_obj(stack_name, template, env) - stack = vms_hs_obj.heat_client_obj - vm_fix = self.verify_vms(stack, vn_list, stack_name) - return vm_fix - - def verify_vms(self, stack, vn_list, stack_name): - op = stack.stacks.get(stack_name).outputs - time.sleep(5) - vm1_fix = self.useFixture(VMFixture(project_name=self.inputs.project_name, - vn_obj=vn_list[0].obj, vm_name='left_vm', connections=self.connections)) - vm2_fix = self.useFixture(VMFixture(project_name=self.inputs.project_name, - vn_obj=vn_list[1].obj, vm_name='right_vm', connections=self.connections)) - assert vm1_fix.wait_till_vm_is_up() - assert vm2_fix.wait_till_vm_is_up() - for output in op: - if output['output_value'] == vm1_fix.vm_ip: - self.logger.info( - 'VM %s launched successfully' % vm1_fix.vm_name) - elif output['output_value'] == vm2_fix.vm_ip: - self.logger.info( - 'VM %s launched successfully' % vm2_fix.vm_name) - vms_list = [vm1_fix, vm2_fix] - return vms_list - # end verify_vn - - def config_svc_template(self, stack_name=None, scaling=False, 
mode='in-network-nat'): - template = self.get_template(template_name='svc_temp_template') - env = self.get_env(env_name='svc_temp_env') - env['parameters']['mode'] = mode - env['parameters']['name'] = stack_name - if mode == 'transparent': - env['parameters']['image'] = 'vsrx-bridge' - if mode == 'in-network': - env['parameters']['image'] = 'vsrx-fw' - if scaling: - env['parameters']['service_scaling'] = "True" - if mode != 'in-network-nat': - env['parameters']['shared_ip_list'] = 'False,True,True' - else: - env['parameters']['shared_ip_list'] = 'False,True,False' - svc_temp_hs_obj = self.config_heat_obj(stack_name, template, env) - st = self.verify_st(stack_name, env, scaling) - return st - # end config_svc_template - - def verify_st(self, stack_name, env, scaling): - st_name = env['parameters']['name'] - svc_img_name = env['parameters']['image'] - svc_type = env['parameters']['type'] - if_list = env['parameters']['service_interface_type_list'] - svc_mode = env['parameters']['mode'] - svc_scaling = scaling - flavor = env['parameters']['flavor'] - st_fix = self.useFixture(SvcTemplateFixture( - connections=self.connections, inputs=self.inputs, domain_name='default-domain', - st_name=st_name, svc_img_name=svc_img_name, svc_type=svc_type, - if_list=if_list, svc_mode=svc_mode, svc_scaling=svc_scaling, flavor=flavor, ordered_interfaces=True)) - assert st_fix.verify_on_setup() - return st_fix - # end verify_st - - def config_svc_instance(self, stack_name, st_fq_name, st_obj, vn_list, max_inst='1', svc_mode='in-network-nat'): - template = self.get_template(template_name='svc_inst_template') - env = self.get_env(env_name='svc_inst_env') - env['parameters']['service_template_fq_name'] = st_fq_name - if svc_mode != 'transparent': - env['parameters']['right_net_id'] = vn_list[1].vn_id - env['parameters']['left_net_id'] = vn_list[0].vn_id - else: - env['parameters']['right_net_id'] = 'auto' - env['parameters']['left_net_id'] = 'auto' - env['parameters'][ - 
'service_instance_name'] = get_random_name('svc_inst') - env['parameters']['max_instances'] = max_inst - si_hs_obj = self.config_heat_obj(stack_name, template, env) - si_name = env['parameters']['service_instance_name'] - si_fix = self.verify_si(si_name, st_obj, max_inst, svc_mode) - si_fix.verify_on_setup() - return si_fix, si_hs_obj - - # end config_svc_instance - - @retry(delay=2, tries=5) - def verify_svm_count(self, hs_obj, stack_name, svm_count): - result = True - stack = hs_obj.heat_client_obj - op = stack.stacks.get(stack_name).outputs - for output in op: - if output['output_key'] == u'num_active_service_instance_vms': - if int(output['output_value']) != int(svm_count): - self.logger.error('SVM Count mismatch') - result = False - else: - self.logger.info( - 'There are %s Active SVMs in the SI' % output['output_value']) - if output['output_key'] == u'service_instance_vms': - self.logger.info('%s' % output['output_value']) - return result - # end get_svms - - def verify_si(self, si_name, st_obj, max_inst, svc_mode): - if max_inst > 1: - if svc_mode != 'in-network-nat': - if_list = [['management', False, False], - ['left', True, False], ['right', True, False]] - else: - if_list = [['management', False, False], - ['left', True, False], ['right', False, False]] - else: - if_list = [['management', False, False], - ['left', False, False], ['right', False, False]] - svc_inst = self.useFixture(SvcInstanceFixture( - connections=self.connections, inputs=self.inputs, - domain_name='default-domain', project_name=self.inputs.project_name, si_name=si_name, - svc_template=st_obj, if_list=if_list)) - assert svc_inst.verify_on_setup() - return svc_inst - - # end verify_si - - def config_svc_chain(self, si_fq_name, vn_list, stack_name='svc_chain'): - template = self.get_template(template_name='svc_chain_template') - env = self.get_env(env_name='svc_chain_env') - env['parameters']['apply_service'] = si_fq_name - env['parameters']['dst_vn_id'] = vn_list[1].vn_id - 
env['parameters']['src_vn_id'] = vn_list[0].vn_id - env['parameters']['policy_name'] = get_random_name('svc_chain') - svc_hs_obj = self.config_heat_obj(stack_name, template, env) - return svc_hs_obj - # end config_svc_chain diff --git a/scripts/heat/env.py b/scripts/heat/env.py deleted file mode 100644 index 78e25bcdc..000000000 --- a/scripts/heat/env.py +++ /dev/null @@ -1,20 +0,0 @@ -left_net_env = {u'parameters': {u'left_net_gateway': u'10.10.10.1', u'left_net_name': u'vn-left', - u'left_net_pool_end': u'10.10.10.253', u'left_net_pool_start': u'10.10.10.2', u'left_net_cidr': u'10.10.10.0/24'}} - -right_net_env = {u'parameters': {u'right_net_name': u'vn-right', u'right_net_pool_end': u'20.20.20.253', - u'right_net_gateway': u'20.20.20.1', u'right_net_cidr': u'20.20.20.0/24', u'right_net_pool_start': u'20.20.20.2'}} - -vms_env = {u'parameters': {u'right_net_id': u'e6f9e85b-5816-4818-bea6-089262c63f5d', u'left_net_id': - u'3f162ab2-85ff-4ad0-8161-9ae4633e7359', u'image': u'ubuntu-traffic', u'flavor': u'm1.medium'}} - -svc_temp_env = {u'parameters': {u'name': u'st1', u'service_interface_type_list': u'management,left,right', u'image': u'vsrx', u'static_routes_list': - u'False,False,False', u'mode': u'in-network-nat', u'flavor': u'm1.medium', u'service_scaling': 'False', u'type': u'firewall', u'shared_ip_list': u'False,False,False'}} - -svc_inst_env = {u'parameters': {u'service_template_fq_name': u'default-domain:st1', u'left_net_id': u'3f162ab2-85ff-4ad0-8161-9ae4633e7359', - u'service_instance_name': u'si1', u'max_instances': u'1', u'right_net_id': u'e6f9e85b-5816-4818-bea6-089262c63f5d'}} - -svc_chain_env = {u'parameters': {u'direction': u'<>', u'dst_port_end': -1, u'protocol': u'any', u'dst_port_start': -1, u'policy_name': u'pol1', u'dst_vn_id': - u'e6f9e85b-5816-4818-bea6-089262c63f5d', u'src_vn_id': u'3f162ab2-85ff-4ad0-8161-9ae4633e7359', u'apply_service': u'default-domain:admin:si1', u'src_port_end': -1, u'src_port_start': -1}} - -transit_net_env = 
{"parameters": {"transit_net_cidr": "30.30.30.0/24", - "transit_net_name": "transit-vn", "allow_transit": "True"}} diff --git a/scripts/heat/template.py b/scripts/heat/template.py deleted file mode 100644 index 2319d2ca1..000000000 --- a/scripts/heat/template.py +++ /dev/null @@ -1,227 +0,0 @@ -left_net_template = {u'description': u'HOT template to create left network in a service chain. \n', - u'heat_template_version': u'2013-05-23', - u'outputs': {u'left_net_cidr': {u'description': u'CIDR of the left network', - u'value': {u'get_attr': [u'left_subnet', - u'cidr']}}, - u'left_net_id': {u'description': u'ID of the left network', - u'value': {u'get_attr': [u'left_net', - u'show', - u'id']}}, - u'left_net_name': {u'description': u'Name of the left network', - u'value': {u'get_attr': [u'left_net', - u'name']}}}, - u'parameters': {u'left_net_cidr': {u'description': u'Private network address (CIDR notation)', - u'type': u'string'}, - u'left_net_gateway': {u'description': u'Private network gateway address', - u'type': u'string'}, - u'left_net_name': {u'description': u'Name of private network to be created', - u'type': u'string'}, - u'left_net_pool_end': {u'description': u'End of private network IP address allocation pool', - u'type': u'string'}, - u'left_net_pool_start': {u'description': u'Start of private network IP address allocation pool', - u'type': u'string'}}, - u'resources': {u'left_net': {u'properties': {u'name': {u'get_param': u'left_net_name'}}, - u'type': u'OS::Neutron::Net'}, - u'left_subnet': {u'properties': {u'allocation_pools': [{u'end': {u'get_param': u'left_net_pool_end'}, - u'start': {u'get_param': u'left_net_pool_start'}}], - u'cidr': {u'get_param': u'left_net_cidr'}, - u'gateway_ip': {u'get_param': u'left_net_gateway'}, - u'network_id': {u'get_resource': u'left_net'}}, - u'type': u'OS::Neutron::Subnet'}}} - -right_net_template = {u'description': u'HOT template to create right network in a service chain. 
\n', - u'heat_template_version': u'2013-05-23', - u'outputs': {u'right_net_cidr': {u'description': u'CIDR of the right network', - u'value': {u'get_attr': [u'right_subnet', - u'cidr']}}, - u'right_net_id': {u'description': u'ID of the right network', - u'value': {u'get_attr': [u'right_net', - u'show', - u'id']}}, - u'right_net_name': {u'description': u'Name of the right network', - u'value': {u'get_attr': [u'right_net', - u'name']}}}, - u'parameters': {u'right_net_cidr': {u'description': u'Private network address (CIDR notation)', - u'type': u'string'}, - u'right_net_gateway': {u'description': u'Private network gateway address', - u'type': u'string'}, - u'right_net_name': {u'description': u'Name of private network to be created', - u'type': u'string'}, - u'right_net_pool_end': {u'description': u'End of private network IP address allocation pool', - u'type': u'string'}, - u'right_net_pool_start': {u'description': u'Start of private network IP address allocation pool', - u'type': u'string'}}, - u'resources': {u'right_net': {u'properties': {u'name': {u'get_param': u'right_net_name'}}, - u'type': u'OS::Neutron::Net'}, - u'right_subnet': {u'properties': {u'allocation_pools': [{u'end': {u'get_param': u'right_net_pool_end'}, - u'start': {u'get_param': u'right_net_pool_start'}}], - u'cidr': {u'get_param': u'right_net_cidr'}, - u'gateway_ip': {u'get_param': u'right_net_gateway'}, - u'network_id': {u'get_resource': u'right_net'}}, - u'type': u'OS::Neutron::Subnet'}}} - -vms_template = {u'description': u'HOT template to deploy server into an existing neutron tenant network\n', - u'heat_template_version': u'2013-05-23', - u'outputs': {u'server1_ip': {u'description': u'IP address of the left_vm', - u'value': {u'get_attr': [u'server1', - u'first_address']}}, - u'server2_ip': {u'description': u'IP address of the right_vm', - u'value': {u'get_attr': [u'server2', - u'first_address']}}}, - u'parameters': {u'flavor': {u'description': u'Flavor to use for servers', - u'type': 
u'string'}, - u'image': {u'description': u'Name of image to use for servers', - u'type': u'string'}, - u'left_net_id': {u'description': u'ID of the left network', - u'type': u'string'}, - u'right_net_id': {u'description': u'ID of the right network', - u'type': u'string'}}, - u'resources': {u'server1': {u'properties': {u'flavor': {u'get_param': u'flavor'}, - u'image': {u'get_param': u'image'}, - u'name': u'left_vm', - u'networks': [{u'port': {u'get_resource': u'server1_port'}}]}, - u'type': u'OS::Nova::Server'}, - u'server1_port': {u'properties': {u'network_id': {u'get_param': u'left_net_id'}}, - u'type': u'OS::Neutron::Port'}, - u'server2': {u'properties': {u'flavor': {u'get_param': u'flavor'}, - u'image': {u'get_param': u'image'}, - u'name': u'right_vm', - u'networks': [{u'port': {u'get_resource': u'server2_port'}}]}, - u'type': u'OS::Nova::Server'}, - u'server2_port': {u'properties': {u'network_id': {u'get_param': u'right_net_id'}}, - u'type': u'OS::Neutron::Port'}}} - -svc_temp_template = {u'description': u'HOT template to create a service template \n', - u'heat_template_version': u'2013-05-23', - u'outputs': {u'service_template_fq_name': {u'description': u'FQ name of the service template', - u'value': {u'get_attr': [u'service_template', - u'fq_name']}}}, - u'parameters': {u'flavor': {u'description': u'Flavor', u'type': u'string'}, - u'service_scaling': {u'description': u'Flag to enable scaling', u'type': u'string'}, - u'image': {u'description': u'Name of the image', - u'type': u'string'}, - u'mode': {u'description': u'service mode', - u'type': u'string'}, - u'name': {u'description': u'Name of service template', - u'type': u'string'}, - u'service_interface_type_list': {u'description': u'List of interface types', - u'type': u'string'}, - u'shared_ip_list': {u'description': u'List of shared ip enabled-disabled', - u'type': u'string'}, - u'static_routes_list': {u'description': u'List of static routes enabled-disabled', - u'type': u'string'}, - u'type': 
{u'description': u'service type', - u'type': u'string'}}, - u'resources': {u'service_template': {u'properties': {u'flavor': {u'get_param': u'flavor'}, - u'service_scaling': {u'get_param': u'service_scaling'}, - u'image_name': {u'get_param': u'image'}, - u'name': {u'get_param': u'name'}, - u'service_interface_type_list': {u'Fn::Split': [u',', - {u'Ref': u'service_interface_type_list'}]}, - u'service_mode': {u'get_param': u'mode'}, - u'service_type': {u'get_param': u'type'}, - u'shared_ip_list': {u'Fn::Split': [u',', - {u'Ref': u'shared_ip_list'}]}, - u'static_routes_list': {u'Fn::Split': [u',', - {u'Ref': u'static_routes_list'}]}}, - u'type': u'OS::Contrail::ServiceTemplate'}}} - -svc_inst_template = {u'description': u'HOT template to create service instance.\n', - u'heat_template_version': u'2013-05-23', - u'outputs': {u'num_active_service_instance_vms': {u'description': u'Number of active service VMs', - u'value': {u'get_attr': [u'service_instance', - u'active_service_vms']}}, - u'service_instance_fq_name': {u'description': u'FQ name of the service template', - u'value': {u'get_attr': [u'service_instance', - u'fq_name']}}, - u'service_instance_uuid': {u'description': u'UUID of the service template', - u'value': {u'get_attr': [u'service_instance', - u'show']}}, - u'service_instance_vms': {u'description': u'List of service VMs', - u'value': {u'get_attr': [u'service_instance', - u'virtual_machines']}}}, - u'parameters': {u'left_net_id': {u'description': u'ID of the left network \n', - u'type': u'string'}, - u'right_net_id': {u'description': u'ID of the right network \n', - u'type': u'string'}, - u'service_instance_name': {u'description': u'service instance name', - u'type': u'string'}, - u'max_instances': {u'description': u'Number of service VMs', - u'type': u'string'}, - u'service_template_fq_name': {u'description': u'service template name or ID', - u'type': u'string'}}, - u'resources': {u'service_instance': {u'properties': {u'interface_list': [{u'virtual_network': 
u'auto'}, - {u'virtual_network': { - u'get_param': u'left_net_id'}}, - {u'virtual_network': {u'get_param': u'right_net_id'}}], - u'name': {u'get_param': u'service_instance_name'}, - u'scale_out': {u'max_instances': {u'get_param': u'max_instances'}}, - u'service_template': {u'get_param': u'service_template_fq_name'}}, - u'type': u'OS::Contrail::ServiceInstance'}}} - -svc_chain_template = {u'description': u'HOT template to create a policy between two virtual network and apply a service. Attach the network policy to two virtual networks\n', - u'heat_template_version': u'2013-05-23', - u'parameters': {u'apply_service': {u'description': u'service instance id', - u'type': u'string'}, - u'direction': {u'description': u'Direction of Policy', - u'type': u'string'}, - u'dst_port_end': {u'description': u'end of the dst port', - u'type': u'number'}, - u'dst_port_start': {u'description': u'start of the dst port', - u'type': u'number'}, - u'dst_vn_id': {u'description': u'ID of the destination network', - u'type': u'string'}, - u'policy_name': {u'description': u'Policy Name', - u'type': u'string'}, - u'protocol': {u'description': u'Protocol', - u'type': u'string'}, - u'src_port_end': {u'description': u'end of the src port', - u'type': u'number'}, - u'src_port_start': {u'description': u'start of the src port', - u'type': u'number'}, - u'src_vn_id': {u'description': u'ID of the source network', - u'type': u'string'}}, - u'resources': {u'private_policy': {u'properties': {u'entries': {u'policy_rule': [{u'action_list': {u'apply_service': - {u'Fn::Split': [u',', - {u'Ref': u'apply_service'}]}}, - u'direction': {u'get_param': u'direction'}, - u'dst_addresses': [{u'virtual_network': {u'get_param': u'dst_vn_id'}}], - u'dst_ports': [{u'end_port': {u'get_param': u'dst_port_end'}, - u'start_port': {u'get_param': u'dst_port_start'}}], - u'protocol': {u'get_param': u'protocol'}, - u'src_addresses': [{u'virtual_network': {u'get_param': u'src_vn_id'}}], - u'src_ports': [{u'end_port': 
{u'get_param': u'src_port_end'}, - u'start_port': {u'get_param': u'src_port_start'}}]}]}, - u'name': {u'get_param': u'policy_name'}}, - u'type': u'OS::Contrail::NetworkPolicy'}, - u'private_policy_attach_net1': {u'properties': {u'network': {u'get_param': u'src_vn_id'}, - u'policy': {u'get_attr': [u'private_policy', - u'fq_name']}}, - u'type': u'OS::Contrail::AttachPolicy'}, - u'private_policy_attach_net2': {u'properties': {u'network': {u'get_param': u'dst_vn_id'}, - u'policy': {u'get_attr': [u'private_policy', - u'fq_name']}}, - u'type': u'OS::Contrail::AttachPolicy'}}} - -transit_net_template = {'description': 'HOT template to creates a virtual network with allow_transit enabled\n', - 'heat_template_version': '2013-05-23', - 'outputs': {'transit_net_id': {'description': 'ID of the transit network', - 'value': {'get_attr': ['transit_net_horizon', - 'show', - 'id']}}}, - 'parameters': {'allow_transit': {'description': 'flag to set for transit vn', - 'type': 'string'}, - 'transit_net_cidr': {'description': 'Transit network block (CIDR notation)', - 'type': 'string'}, - 'transit_net_name': {'description': 'Name of virtual network to be created', - 'type': 'string'}}, - 'resources': {'transit_net': {'properties': {'allow_transit': {'get_param': 'allow_transit'}, - 'name': {'get_param': 'transit_net_name'}}, - 'type': 'OS::Contrail::VirtualNetwork'}, - 'transit_net_horizon': {'properties': {'name': {'get_resource': 'transit_net'}}, - 'type': 'OS::Neutron::Net'}, - 'transit_subnet': {'properties': {'cidr': {'get_param': 'transit_net_cidr'}, - 'enable_dhcp': 'true', - 'name': {'get_resource': 'transit_net'}, - 'network_id': {'get_resource': 'transit_net'}}, - 'type': 'OS::Neutron::Subnet'}}} diff --git a/scripts/heat/test_heat.py b/scripts/heat/test_heat.py index 9ad0a0b26..1f41f03c5 100644 --- a/scripts/heat/test_heat.py +++ b/scripts/heat/test_heat.py @@ -7,6 +7,7 @@ # import os import fixtures +from vm_test import VMFixture import testtools import time import sys @@ 
-18,9 +19,11 @@ from netaddr import IPNetwork, IPAddress from common.ecmp.ecmp_traffic import ECMPTraffic from common.ecmp.ecmp_verify import ECMPVerify +from common.servicechain.verify import VerifySvcChain + try: from heat_test import * - from base import BaseHeatTest + from common.heat.base import BaseHeatTest class TestHeat(BaseHeatTest, ECMPTraffic, ECMPVerify): @@ -32,7 +35,7 @@ def setUpClass(cls): def tearDownClass(cls): super(TestHeat, cls).tearDownClass() - @test.attr(type=['sanity', 'ci_sanity']) + @test.attr(type=['ci_sanity']) @preposttest_wrapper def test_heat_stacks_list(self): ''' @@ -48,80 +51,24 @@ def test_heat_stacks_list(self): 'The following are the stacks currently : %s' % stacks_list) # end test_heat_stacks_list - @preposttest_wrapper - def test_svc_creation_with_heat(self): - ''' - Validate creation of a in-network-nat service chain using heat - ''' - vn_list = [] - right_net_fix, r_hs_obj = self.config_vn(stack_name='right_net') - left_net_fix, l_h_obj = self.config_vn(stack_name='left_net') - vn_list = [left_net_fix, right_net_fix] - vms = [] - vms = self.config_vms(vn_list) - svc_template = self.config_svc_template(stack_name='svc_template') - st_fq_name = ':'.join(svc_template.st_fq_name) - st_obj = svc_template.st_obj - svc_instance, si_hs_obj = self.config_svc_instance( - 'svc_instance', st_fq_name, st_obj, vn_list) - si_fq_name = (':').join(svc_instance.si_fq_name) - svc_chain = self.config_svc_chain(si_fq_name, vn_list) - assert vms[0].ping_with_certainty(vms[1].vm_ip, expectation=True) - # end test_svc_creation_with_heat - - @test.attr(type=['sanity']) - @preposttest_wrapper - def test_transit_vn_with_svc(self): - ''' - Validate Transit VN with in-network-nat service chain using heat - ''' - vn_list = [] - right_net_fix, r_hs_obj = self.config_vn(stack_name='right_net') - transit_net_fix, t_hs_obj = self.config_vn( - stack_name='transit_net') - left_net_fix, l_hs_obj = self.config_vn(stack_name='left_net') - vn_list1 = 
[left_net_fix, transit_net_fix] - vn_list2 = [transit_net_fix, right_net_fix] - end_vn_list = [left_net_fix, right_net_fix] - vms = [] - vms = self.config_vms(end_vn_list) - svc_template = self.config_svc_template(stack_name='svc_template') - st_fq_name = ':'.join(svc_template.st_fq_name) - st_obj = svc_template.st_obj - svc_instance1, si_hs_obj1 = self.config_svc_instance( - 'svc_instance1', st_fq_name, st_obj, vn_list1) - svc_instance2, si_hs_obj2 = self.config_svc_instance( - 'svc_instance2', st_fq_name, st_obj, vn_list2) - si1_fq_name = (':').join(svc_instance1.si_fq_name) - si2_fq_name = (':').join(svc_instance2.si_fq_name) - svc_chain1 = self.config_svc_chain( - si1_fq_name, vn_list1, 'svc_chain1') - svc_chain2 = self.config_svc_chain( - si2_fq_name, vn_list2, 'svc_chain2') - assert vms[0].ping_with_certainty(vms[1].vm_ip, expectation=True) - self.logger.info( - 'Changing the VN %s to non-transitive' % transit_net_fix.vn_name) - self.update_stack( - t_hs_obj, stack_name='transit_net', change_set=['allow_transit', 'False']) - assert vms[0].ping_with_certainty(vms[1].vm_ip, expectation=False) - # end test_transit_vn_with_svc def transit_vn_with_left_right_svc(self, left_svcs, right_svcs): ''' Validate Transit VN with multi transparent service chain using heat ''' vn_list = [] + mgmt_net_fix, m_hs_obj = self.config_vn(stack_name='svc_mgmt_net') right_net_fix, r_hs_obj = self.config_vn(stack_name='right_net') - transit_net_fix, t_hs_obj = self.config_vn(stack_name='transit_net') + transit_net_fix, t_hs_obj = self.config_vn(stack_name='transit_net', transit=True) left_net_fix, l_hs_obj = self.config_vn(stack_name='left_net') - vn_list1 = [left_net_fix, transit_net_fix] - vn_list2 = [transit_net_fix, right_net_fix] + vn_list1 = [mgmt_net_fix, left_net_fix, transit_net_fix] + vn_list2 = [mgmt_net_fix, transit_net_fix, right_net_fix] end_vn_list = [left_net_fix, right_net_fix] vms = [] vms = self.config_vms(end_vn_list) svc_tmpls = {} - for mode in set(left_svcs + 
right_svcs): - tmpl = self.config_svc_template(stack_name='st_%s' % mode, + for i, mode in enumerate(set(left_svcs + right_svcs)): + tmpl = self.config_svc_template(stack_name='st_%d' % i, mode=mode) svc_tmpls[mode] = {} svc_tmpls[mode]['tmpl'] = tmpl @@ -130,20 +77,27 @@ def transit_vn_with_left_right_svc(self, left_svcs, right_svcs): left_sis = [] for i, svc in enumerate(left_svcs): - left_sis.append(self.config_svc_instance( - 'svc_left_%d' % i, svc_tmpls[svc]['fq_name'], - svc_tmpls[svc]['obj'], vn_list1, svc_mode=svc)) + si = self.config_svc_instance('sil_%d' % i, svc_tmpls[svc]['tmpl'], vn_list1) + left_sis.append(si) + if svc == 'in-network': + self.add_route_in_svm(si[0], [right_net_fix, 'eth2']) right_sis = [] for i, svc in enumerate(right_svcs): - right_sis.append(self.config_svc_instance( - 'svc_right_%d' % i, svc_tmpls[svc]['fq_name'], - svc_tmpls[svc]['obj'], vn_list2, svc_mode=svc)) - left_si_names = ','.join([(':').join(si[0].si_fq_name) for si in left_sis]) - right_si_names = ','.join([(':').join(si[0].si_fq_name) for si in right_sis]) - left_chain = self.config_svc_chain( - left_si_names, vn_list1, 'left_chain') - right_chain = self.config_svc_chain( - right_si_names, vn_list2, 'right_chain') + si = self.config_svc_instance('sir_%d' % i, svc_tmpls[svc]['tmpl'], vn_list2) + right_sis.append(si) + if svc == 'in-network': + self.add_route_in_svm(si[0], [left_net_fix, 'eth1']) + left_si_names = [(':').join(si[0].si_fq_name) for si in left_sis] + right_si_names = [(':').join(si[0].si_fq_name) for si in right_sis] + left_rules = [] + left_rules.append(self.config_svc_rule(si_fq_names=left_si_names, src_vns=[left_net_fix], dst_vns=[transit_net_fix])) + right_rules = [] + right_rules.append(self.config_svc_rule(si_fq_names=right_si_names, src_vns=[transit_net_fix], dst_vns=[right_net_fix])) + if self.inputs.get_af() == 'v6': + left_rules.append(self.config_svc_rule(proto='icmp6', si_fq_names=left_si_names, src_vns=[left_net_fix], 
dst_vns=[transit_net_fix])) + right_rules.append(self.config_svc_rule(proto='icmp6', si_fq_names=right_si_names, src_vns=[transit_net_fix], dst_vns=[right_net_fix])) + left_chain = self.config_svc_chain(left_rules, vn_list1, [l_hs_obj, t_hs_obj], 'left_chain') + right_chain = self.config_svc_chain(right_rules, vn_list2, [t_hs_obj, r_hs_obj], 'right_chain') assert vms[0].ping_with_certainty(vms[1].vm_ip, expectation=True) # end transit_vn_with_left_right_svc @@ -153,6 +107,7 @@ def test_transit_vn_sym_1_innetnat(self): self.transit_vn_with_left_right_svc(svcs, svcs) return True + @test.attr(type=['sanity']) @preposttest_wrapper def test_transit_vn_sym_1_innet(self): svcs= ['in-network'] @@ -217,49 +172,51 @@ def test_transit_vn_sym_trans_innet(self): return True @preposttest_wrapper + @skip_because(pt_based_svc=True) def test_max_inst_change_in_ecmp_svc(self): ''' Validate creation of a in-network-nat service chain with 3 Service VMs using heat ''' vn_list = [] + mgmt_net_fix, m_hs_obj = self.config_vn(stack_name='mgmt_net') right_net_fix, r_hs_obj = self.config_vn(stack_name='right_net') left_net_fix, l_h_obj = self.config_vn(stack_name='left_net') - vn_list = [left_net_fix, right_net_fix] + vn_list = [mgmt_net_fix, left_net_fix, right_net_fix] vms = [] - vms = self.config_vms(vn_list) + vms = self.config_vms([left_net_fix, right_net_fix]) svc_template = self.config_svc_template( - stack_name='svc_template', scaling=True, mode='in-network-nat') - st_fq_name = ':'.join(svc_template.st_fq_name) - st_obj = svc_template.st_obj + stack_name='st', scaling=True, mode='in-network-nat') svc_instance, si_hs_obj = self.config_svc_instance( - 'svc_inst', st_fq_name, st_obj, vn_list, max_inst='3', svc_mode='in-network-nat') + 'si', svc_template, vn_list, max_inst=3) si_fq_name = (':').join(svc_instance.si_fq_name) - svc_chain = self.config_svc_chain(si_fq_name, vn_list) + svc_rules = [] + svc_rules.append(self.config_svc_rule(proto='any', si_fq_names=[si_fq_name], 
src_vns=[left_net_fix], dst_vns=[right_net_fix])) + svc_chain = self.config_svc_chain(svc_rules, vn_list, [l_h_obj, r_hs_obj]) assert vms[0].ping_with_certainty(vms[1].vm_ip, expectation=True) dst_vm_list = [vms[1]] self.verify_traffic_flow( vms[0], dst_vm_list, svc_instance, left_net_fix) self.logger.info( - '***** Will increase the SVMs in the SI to 4 *****') + '%%%%% Will increase the SVMs in the SI to 4 %%%%%') self.update_stack( - si_hs_obj, stack_name='svc_inst', change_set=['max_instances', '4']) - time.sleep(10) - svc_instance.verify_on_setup() - self.verify_svm_count(si_hs_obj, 'svc_inst', '4') + si_hs_obj, change_sets=[('max_instances', '4')]) + svc_instance.max_inst = 4 + assert svc_instance.verify_on_setup(), 'SI verification failed after change of max_inst to 4' + assert self.verify_svm_count(si_hs_obj, 'si', '4'), 'SVM count doesnt match after incr to 4' assert vms[0].ping_with_certainty(vms[1].vm_ip, expectation=True) self.verify_traffic_flow( vms[0], dst_vm_list, svc_instance, left_net_fix) self.logger.info( - '***** Will decrease the SVMs in the SI to 2 *****') + '%%%%% Will decrease the SVMs in the SI to 2 %%%%%') self.update_stack( - si_hs_obj, stack_name='svc_inst', change_set=['max_instances', '2']) - time.sleep(10) - svc_instance.verify_on_setup() - self.verify_svm_count(si_hs_obj, 'svc_inst', '2') + si_hs_obj, change_sets=[('max_instances', '2')]) + svc_instance.max_inst = 2 + assert svc_instance.verify_on_setup(), 'SI verification failed after change of max_inst to 2' + assert self.verify_svm_count(si_hs_obj, 'si', '2'), 'SVM count doesnt match after decr to 2' assert vms[0].ping_with_certainty(vms[1].vm_ip, expectation=True) self.verify_traffic_flow( vms[0], dst_vm_list, svc_instance, left_net_fix) - # end test_max_inst_change_in_ecmp_svc + # end test_max_inst_change_in_ecmp_svc @preposttest_wrapper def test_ecmp_svc_creation_with_heat(self): @@ -267,26 +224,199 @@ def test_ecmp_svc_creation_with_heat(self): Validate creation of a 
in-network-nat service chain with 3 Service VMs using heat ''' vn_list = [] + mgmt_net_fix, m_hs_obj = self.config_vn(stack_name='mgmt_net') right_net_fix, r_hs_obj = self.config_vn(stack_name='right_net') left_net_fix, l_h_obj = self.config_vn(stack_name='left_net') - vn_list = [left_net_fix, right_net_fix] + vn_list = [mgmt_net_fix, left_net_fix, right_net_fix] + end_vn_list = [left_net_fix, right_net_fix] vms = [] - vms = self.config_vms(vn_list) + vms = self.config_vms(end_vn_list) svc_template = self.config_svc_template( - stack_name='svc_template', scaling=True, mode='in-network-nat') - st_fq_name = ':'.join(svc_template.st_fq_name) - st_obj = svc_template.st_obj + stack_name='st', scaling=True, mode='in-network-nat') svc_instance, si_hs_obj = self.config_svc_instance( - 'svc_instance', st_fq_name, st_obj, vn_list, max_inst='3', svc_mode='in-network-nat') + 'si', svc_template, vn_list, max_inst=3) si_fq_name = (':').join(svc_instance.si_fq_name) - svc_chain = self.config_svc_chain(si_fq_name, vn_list) + svc_rules = [] + svc_rules.append(self.config_svc_rule(proto='any', si_fq_names=[si_fq_name], src_vns=[left_net_fix], dst_vns=[right_net_fix])) + svc_chain = self.config_svc_chain(svc_rules, vn_list, [l_h_obj, r_hs_obj]) assert vms[0].ping_with_certainty(vms[1].vm_ip, expectation=True) dst_vm_list = [vms[1]] self.verify_traffic_flow( vms[0], dst_vm_list, svc_instance, left_net_fix) # end test_ecmp_svc_creation_with_heat + def multi_svc_chain(self, policys, svcs): + ''' + Validate multi service chain using heat + ''' + mgmt_net_fix, m_hs_obj = self.config_vn(stack_name='mgmt_net') + right_net_fix, r_hs_obj = self.config_vn(stack_name='right_net') + left_net_fix, l_hs_obj = self.config_vn(stack_name='left_net') + vn_list = [mgmt_net_fix, left_net_fix, right_net_fix] + end_vn_list = [left_net_fix, right_net_fix] + vms = [] + vms = self.config_vms(end_vn_list) + svc_tmpls = {} + for i, mode in enumerate(set(svcs.values())): + tmpl = 
self.config_svc_template(stack_name='st_%d' % i, + mode=mode) + svc_tmpls[mode] = {} + svc_tmpls[mode]['tmpl'] = tmpl + svc_tmpls[mode]['obj'] = tmpl.st_obj + svc_tmpls[mode]['fq_name'] = ':'.join(tmpl.st_fq_name) + sis = {} + i = 1 + for svc, mode in svcs.items(): + sis[svc] = self.config_svc_instance( + 'sil_%d' % i, svc_tmpls[mode]['tmpl'], vn_list) + i += 1 + rules = [] + test_ping = False + for policy in policys: + if (policy['proto'] == 'icmp') or (policy['proto'] == 'icmp6'): + test_ping = True + rules.append(self.config_svc_rule(direction=policy['direction'], + proto=policy['proto'], + src_ports=policy.get('src_ports',None), + dst_ports=policy.get('dst_ports',None), + src_vns=[left_net_fix], dst_vns=[right_net_fix], + si_fq_names=[(':').join(sis[policy['svc']][0].si_fq_name)])) + chain = self.config_svc_chain(rules, vn_list, [l_hs_obj, r_hs_obj], 'svc_chain') + if test_ping: + assert vms[0].ping_with_certainty(vms[1].vm_ip, expectation=True) + for policy in policys: + if policy['proto'] == 'icmp': + continue + proto = policy['proto'] if policy['proto'] != 'any' else 'udp' + sport = policy.get('src_ports', 8000) + dport = policy.get('dst_ports', 8000) + if type(sport) == type([]): + sport = sport[0][0] + dport = dport[0][0] + sent, recv = self.verify_traffic(vms[0], vms[1], proto, sport, dport) + assert sent == recv, "%s Traffic with src port %d, dst port %d failed" % (proto, sport, dport) + return True + # end multi_svc_chain + + @preposttest_wrapper + def test_proto_based_multi_sc(self): + svcs = {'svc1' : 'in-network', 'svc2' : 'in-network'} + policys = [{'direction':'<>', 'proto':'icmp', 'svc':'svc1'}, + {'direction':'<>', 'proto':'tcp', 'svc':'svc2'}] + if self.inputs.get_af() == 'v6': + policys.append({'direction': '<>', 'proto': 'icmp6', 'svc':'svc1'}) + return self.multi_svc_chain(policys, svcs) + + @preposttest_wrapper + def test_port_based_multi_sc(self): + svcs = {'svc1' : 'in-network', 'svc2' : 'in-network'} + policys = [{'direction':'<>', 
'proto':'tcp', 'svc':'svc1', 'src_ports':[(8000,8000)], 'dst_ports':[(8000,8000)]}, + {'direction':'<>', 'proto':'tcp', 'svc':'svc2', 'src_ports':[(8001,8001)], 'dst_ports':[(8001,8001)]}] + return self.multi_svc_chain(policys, svcs) + # end TestHeat + class TestHeatIPv6(TestHeat): + + @classmethod + def setUpClass(cls): + super(TestHeatIPv6, cls).setUpClass() + cls.inputs.set_af('v6') + + class TestHeatv2(TestHeat): + + @classmethod + def setUpClass(cls): + super(TestHeatv2, cls).setUpClass() + cls.heat_api_version = 2 + cls.pt_based_svc = True + + @test.attr(type=['sanity']) + @preposttest_wrapper + @skip_because(address_family='v6') + def test_public_access_thru_svc_w_fip(self): + ''' + Validate creation of a in-network-nat service chain using heat. + Create a end VN. + Associate FIPs to the end VM and the right intf of the SVM. + Create a static route entry to point 0/0 to the left intf of the SVM. + The end VM should be able to access internet. + ''' + if ('MX_GW_TEST' not in os.environ) or (('MX_GW_TEST' in os.environ) and (os.environ.get('MX_GW_TEST') != '1')): + self.logger.info( + "Skipping Test. Env variable MX_GW_TEST is not set. Skipping the test") + raise self.skipTest( + "Skipping Test. Env variable MX_GW_TEST is not set. 
Skipping the test") + return True + public_vn_fixture = self.public_vn_obj.public_vn_fixture + public_vn_subnet = self.public_vn_obj.public_vn_fixture.vn_subnets[ + 0]['cidr'] + # Since the ping is across projects, enabling allow_all in the SG + self.project.set_sec_group_for_allow_all( + self.inputs.project_name, 'default') + vn_list = [] + right_net_fix, r_hs_obj = self.config_vn(stack_name='right_net') + left_net_fix, l_h_obj = self.config_vn(stack_name='left_net') + mgmt_net_fix, m_h_obj = self.config_vn(stack_name='mgmt_net') + end_net_fix, end_h_obj = self.config_vn(stack_name='end_net') + vn_list = [left_net_fix, right_net_fix] + svc_vn_list = [mgmt_net_fix, left_net_fix, right_net_fix] + vms = [] + vms = self.config_vms(vn_list) + end_vm, end_vm_fix = self.config_vm(end_net_fix) + end_vm_vmi = self.get_stack_output(end_vm, 'port_id') + left_vn_fip_pool = self.config_fip_pool(left_net_fix) + left_vn_fip_pool_op = self.get_stack_output( + left_vn_fip_pool, 'fip_pool_name') + fip_pool_fqdn = (':').join(left_vn_fip_pool_op) + left_vn_fip = self.config_fip(fip_pool_fqdn, end_vm_vmi) + svc_template = self.config_svc_template( + stack_name='st', mode='in-network-nat') + pt_si_hs_obj = self.config_pt_si( + 'pt_si', svc_template, svc_vn_list) + pt_si_hs_obj_op = self.get_stack_output( + pt_si_hs_obj, 'service_instance_fq_name') + si_fqdn = (':').join(pt_si_hs_obj_op) + prefix = '8.8.8.8/32' + si_intf_type = 'left' + intf_route_table = self.config_intf_rt_table( + prefix, si_fqdn, si_intf_type) + intf_route_table_op = self.get_stack_output( + intf_route_table, 'intf_rt_tbl_name') + intf_rt_table_fqdn = (':').join(intf_route_table_op) + pt_svm, pt_svm_fix = self.config_pt_svm( + 'pt_svm', si_fqdn, svc_vn_list, intf_rt_table_fqdn) + svm_right_vmi_id = self.get_stack_output( + pt_svm, 'svm_right_vmi_id') + public_vn_fip = self.config_fip( + public_vn_fixture.vn_fq_name, svm_right_vmi_id) + assert end_vm_fix.ping_with_certainty('8.8.8.8', expectation=True) + # end 
test_public_access_thru_svc_w_fip + + @preposttest_wrapper + def test_ecmp_svc_creation_with_heat(self): + ''' + Validate creation of a in-network-nat ECMP service chain using port-tuple + ''' + stack_name = 'ecmp_pt' + self.config_v2_svc_chain(stack_name) + # end test_ecmp_v2_creation_with_heat + + @preposttest_wrapper + def test_pt_multi_inline_v2_svc_creation_with_heat(self): + ''' + Validate creation of a multi-inline SVC using port-tuple + ''' + stack_name = 'pt_multi_inline' + self.config_v2_svc_chain(stack_name) + # end test_pt_multi_inline_v2_svc_creation_with_heat + + class TestHeatv2IPv6(TestHeatv2): + + @classmethod + def setUpClass(cls): + super(TestHeatv2IPv6, cls).setUpClass() + cls.inputs.set_af('v6') + except ImportError: print 'Missing Heat Client. Will skip tests' diff --git a/scripts/intf_mirror/__init__.py b/scripts/intf_mirror/__init__.py new file mode 100644 index 000000000..4070e8f92 --- /dev/null +++ b/scripts/intf_mirror/__init__.py @@ -0,0 +1 @@ +"""Interface Mirroring tests.""" diff --git a/scripts/intf_mirror/base.py b/scripts/intf_mirror/base.py new file mode 100644 index 000000000..986022f72 --- /dev/null +++ b/scripts/intf_mirror/base.py @@ -0,0 +1,29 @@ +import test_v1 +from common.connections import ContrailConnections +from common import isolated_creds + +class BaseIntfMirrorTest(test_v1.BaseTestCase_v1): + + @classmethod + def setUpClass(cls): + super(BaseIntfMirrorTest, cls).setUpClass() + cls.orch = cls.connections.orch + cls.quantum_h= cls.connections.quantum_h + cls.nova_h = cls.connections.nova_h + cls.vnc_lib= cls.connections.vnc_lib + cls.agent_inspect= cls.connections.agent_inspect + cls.cn_inspect= cls.connections.cn_inspect + cls.analytics_obj=cls.connections.analytics_obj + #end setUpClass + + @classmethod + def tearDownClass(cls): + super(BaseIntfMirrorTest, cls).tearDownClass() + #end tearDownClass + + def remove_from_cleanups(self, fix): + for cleanup in self._cleanups: + if fix.cleanUp in cleanup: + 
self._cleanups.remove(cleanup) + break + #end remove_from_cleanups diff --git a/scripts/intf_mirror/test_intf_mirror.py b/scripts/intf_mirror/test_intf_mirror.py new file mode 100644 index 000000000..e7dc95161 --- /dev/null +++ b/scripts/intf_mirror/test_intf_mirror.py @@ -0,0 +1,61 @@ +"""Intf mirroring Regression tests.""" +import os +import unittest +import fixtures +import testtools +import test + +from common.connections import ContrailConnections +from common.contrail_test_init import ContrailTestInit +from tcutils.wrappers import preposttest_wrapper +from common.intf_mirroring.verify import VerifyIntfMirror +from base import BaseIntfMirrorTest + +class TestIntfMirror(BaseIntfMirrorTest, VerifyIntfMirror): + + @classmethod + def setUpClass(cls): + super(TestIntfMirror, cls).setUpClass() + + def runTest(self): + pass + # end runTest + + @test.attr(type=['ci_sanity_WIP', 'sanity', 'quick_sanity']) + @preposttest_wrapper + def test_intf_mirroring_1(self): + """Validate the intf mirroring + src vm, dst vm and analyzer vm on different CNs + """ + return self.verify_intf_mirroring_1() + + @preposttest_wrapper + def test_intf_mirroring_2(self): + """Validate the intf mirroring + src vm, dst vm and analyzer vm on same CN + """ + return self.verify_intf_mirroring_2() + + @preposttest_wrapper + def test_intf_mirroring_3(self): + """Validate the intf mirroring + src vm, dst vm on same CN and analyzer vm on different CN + """ + return self.verify_intf_mirroring_3() + + @preposttest_wrapper + def test_intf_mirroring_4(self): + """Validate the intf mirroring + src vm, analyzer on same CN and dst vm on different CN + """ + return self.verify_intf_mirroring_4() + + @preposttest_wrapper + def test_intf_mirroring_5(self): + """Validate the intf mirroring + dst vm, analyzer vm on same CN and src vm on different CN + """ + return self.verify_intf_mirroring_5() + +if __name__ == '__main__': + unittest.main() diff --git a/scripts/log_conf.ini b/scripts/log_conf.ini deleted file 
mode 100755 index 5beb32fd0..000000000 --- a/scripts/log_conf.ini +++ /dev/null @@ -1,50 +0,0 @@ -[log_screen] -# set if log redirection to console needed -log_to_console= yes - -[loggers] -keys=root,log01 - -[logger_root] -handlers=screen -#qualname=(root) -level=ERROR - -[logger_log01] -handlers=file -qualname=log01 -level=DEBUG -propagate=0 - - -[formatters] -keys=std - -[formatter_std] -format=%(asctime)s %(levelname)s %(message)s -datefmt=%m-%d-%Y -#format=%(asctime)s [ %(levelname)5s ] %(message)s - - -[handlers] -keys=file,screen -#keys=file - -[handler_file] -#class= handlers.MemoryHandler -class= handlers.MemoryHandler -formatter=std -level=DEBUG -target= -args=(1000, ERROR) -#args=( 'test_details.log.2014-04-20-23:31:35','a') -#args is of the form : ( log-file-name , write-mode) - -[handler_screen] -#class=handlers.StreamHandler -class=StreamHandler -formatter=std -level=INFO -stream=sys.stdout -args=(sys.stdout,) - diff --git a/scripts/multi_tenancy/base.py b/scripts/multi_tenancy/base.py index 839e4a4d3..79f91ab86 100644 --- a/scripts/multi_tenancy/base.py +++ b/scripts/multi_tenancy/base.py @@ -1,4 +1,4 @@ -import test +import test_v1 import os from common.connections import ContrailConnections from vm_test import VMFixture @@ -7,15 +7,11 @@ from vnc_api.vnc_api import * from keystone_tests import KeystoneCommands -class BaseMultitenancyTest(test.BaseTestCase): +class BaseMultitenancyTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseMultitenancyTest, cls).setUpClass() - cls.connections = ContrailConnections(cls.inputs, project_name = cls.inputs.project_name, - username = cls.inputs.stack_user, - password = cls.inputs.stack_password, - logger = cls.logger) cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib @@ -25,7 +21,10 @@ def setUpClass(cls): 'http://' + cls.inputs.openstack_ip + ':5000/v2.0' insecure = bool(os.getenv('OS_INSECURE',True)) cls.key_stone_clients = 
KeystoneCommands( - username=cls.inputs.stack_user, password = cls.inputs.stack_password, tenant = cls.inputs.project_name, auth_url=auth_url, + username=cls.inputs.admin_user, + password = cls.inputs.admin_password, + tenant = cls.inputs.admin_tenant, + auth_url=auth_url, insecure=insecure) #end setUpClass diff --git a/scripts/multi_tenancy/test_perms.py b/scripts/multi_tenancy/test_perms.py index f7f5b3daf..c5592a96f 100644 --- a/scripts/multi_tenancy/test_perms.py +++ b/scripts/multi_tenancy/test_perms.py @@ -96,7 +96,7 @@ def test_all( time.sleep(4) self.addCleanup(self.key_stone_clients.delete_tenant_list, [proj_name]) project = self.vnc_lib.project_read(id=project_uuid_vnc_api_format) - project_fq_name = project.get_fq_name() + project_name = project.get_fq_name()[1] self.logger.info('Created Project %s ' % (str(project.get_fq_name()))) @@ -380,7 +380,7 @@ def test_permissions_on_projects(self): time.sleep(4) self.addCleanup(self.key_stone_clients.delete_tenant_list, [proj_name]) project = self.vnc_lib.project_read(id=project_uuid_vnc_api_format) - project_fq_name = project.get_fq_name() + project_name = project.get_fq_name()[1] # Create user test/test123 and add as admin in projectF self.logger.info("Creating user test/test123 in projF as Admin") @@ -459,7 +459,7 @@ def test_permissions_on_projects(self): try: test_proj_inputs1 = ContrailTestInit( self.ini_file, stack_user=user, stack_password=password, - project_fq_name=project_fq_name , logger = self.logger) + stack_tenant=project_name , logger = self.logger) test_proj_connections1 = ContrailConnections(test_proj_inputs1 , logger = self.logger) vn1_fixture = self.useFixture( VNFixture( @@ -475,7 +475,7 @@ def test_permissions_on_projects(self): try: test1_proj_inputs1 = ContrailTestInit( self.ini_file, stack_user=user1, stack_password=password1, - project_fq_name=project_fq_name,logger = self.logger) + stack_tenant=project_name,logger = self.logger) test1_proj_connections1 = 
ContrailConnections(test1_proj_inputs1,logger = self.logger) vn2_fixture = self.useFixture( VNFixture( @@ -662,7 +662,7 @@ def test_permissions_on_policy_objects(self): time.sleep(4) self.addCleanup(self.key_stone_clients.delete_tenant_list, [proj_name]) project = self.vnc_lib.project_read(id=project_uuid_vnc_api_format) - project_fq_name = project.get_fq_name() + project_name = project.get_fq_name()[1] self.logger.info("Creating user test/test123 in projF as Admin") user = util.get_random_name('test') password = 'test123' @@ -725,13 +725,13 @@ def test_permissions_on_policy_objects(self): try: test_proj_inputs1 = ContrailTestInit( self.ini_file, stack_user=user, stack_password=password, - project_fq_name=project_fq_name , logger = self.logger) + stack_tenant=project_name , logger = self.logger) test_proj_connections1 = ContrailConnections(test_proj_inputs1 , logger = self.logger) vn_obj = self.useFixture( VNFixture( project_name=proj_name, connections=test_proj_connections1, - vn_name='vn211', option='api', inputs=test_proj_inputs1, subnets=['200.100.100.0/24', '200.100.101.0/24'])) + vn_name='vn211', option='contrail', inputs=test_proj_inputs1, subnets=['200.100.100.0/24', '200.100.101.0/24'])) except Exception as e: self.logger.exception('Got exception as %s' % (e)) testfail += 1 @@ -886,7 +886,7 @@ def test_permissions_on_vn_objects(self): time.sleep(4) self.addCleanup(self.key_stone_clients.delete_tenant_list, [proj_name]) project = self.vnc_lib.project_read(id=project_uuid_vnc_api_format) - project_fq_name = project.get_fq_name() + project_name = project.get_fq_name()[1] self.logger.info("Creating user test/test123 in projF as Admin") user = 'test' password = 'test123' @@ -929,13 +929,13 @@ def test_permissions_on_vn_objects(self): try: test_proj_inputs1 = ContrailTestInit( self.ini_file, stack_user=user, stack_password=password, - project_fq_name=project_fq_name , logger = self.logger) + stack_tenant=project_name , logger = self.logger) 
test_proj_connections1 = ContrailConnections(test_proj_inputs1 , logger = self.logger) vn_obj = self.useFixture( VNFixture( project_name=proj_name, connections=test_proj_connections1, - vn_name='vn211', option='api', inputs=test_proj_inputs1, subnets=['200.100.100.0/24', '200.100.101.0/24'])) + vn_name='vn211', option='contrail', inputs=test_proj_inputs1, subnets=['200.100.100.0/24', '200.100.101.0/24'])) except Exception as e: self.logger.exception('Got exception as %s' % (e)) testfail += 1 diff --git a/scripts/neutron/lbaas/test_lbaas.py b/scripts/neutron/lbaas/test_lbaas.py index 57449442e..c4978477c 100644 --- a/scripts/neutron/lbaas/test_lbaas.py +++ b/scripts/neutron/lbaas/test_lbaas.py @@ -597,323 +597,3 @@ def test_healthmonitor_delete(self): assert False, errmsg # end test_healthmonitor_delete - - @test.attr(type=['sanity']) - @preposttest_wrapper - def test_lbmethod_round_robin(self): - '''Creates Lbaas pool with lb-method ROUND ROBIN, 3 members and vip - Verify: lb-method ROUND ROBIN works as expected, fail otherwise - ''' - - vn_pool = get_random_name('vn_pool') - vn_vip = get_random_name('vn_vip') - vn_pool_subnets = ['10.1.1.0/24'] - vn_vip_subnets = ['20.1.1.0/24'] - pool_vm1 = get_random_name('server1') - pool_vm2 = get_random_name('server2') - pool_vm3 = get_random_name('server3') - client_vm1 = get_random_name('client1') - - vn_pool_fixture = self.create_vn(vn_pool, vn_pool_subnets) - assert vn_pool_fixture.verify_on_setup() - vn_vip_fixture = self.create_vn(vn_vip, vn_vip_subnets) - assert vn_vip_fixture.verify_on_setup() - pool_vm1_fixture = self.create_vm(vn_pool_fixture,vm_name=pool_vm1, - flavor='contrail_flavor_small', image_name='ubuntu') - pool_vm2_fixture = self.create_vm(vn_pool_fixture,vm_name=pool_vm2, - flavor='contrail_flavor_small', image_name='ubuntu') - pool_vm3_fixture = self.create_vm(vn_pool_fixture,vm_name=pool_vm3, - flavor='contrail_flavor_small', image_name='ubuntu') - client_vm1_fixture = 
self.create_vm(vn_vip_fixture,vm_name=client_vm1, - flavor='contrail_flavor_small', image_name='ubuntu') - - lb_pool_servers = [pool_vm1_fixture, pool_vm2_fixture, pool_vm3_fixture] - - assert pool_vm1_fixture.wait_till_vm_is_up() - assert pool_vm2_fixture.wait_till_vm_is_up() - assert pool_vm3_fixture.wait_till_vm_is_up() - assert client_vm1_fixture.wait_till_vm_is_up() - - pool_name = 'mypool' - lb_method = 'ROUND_ROBIN' - protocol = 'HTTP' - protocol_port = 80 - vip_name = 'myvip' - - #create lb pool - self.logger.info("creating lb pool:%s" % (pool_name)) - lb_pool = self.create_lb_pool(pool_name, lb_method, protocol, vn_pool_fixture.vn_subnet_objs[0]['id']) - assert lb_pool, "lb pool create failed" - #api server verification - assert self.verify_lb_pool_in_api_server(lb_pool['id']), \ - "API server verification failed for pool with pool id %s" % (lb_pool['id']) - - #create lb member - self.logger.info("creating lb member") - lb_member1 = self.create_lb_member(pool_vm1_fixture.vm_ip, protocol_port, lb_pool['id']) - assert lb_member1, "lb member create failed" - assert self.verify_member_in_api_server(lb_member1['id']), \ - "API server verification failed for member with id %s" % (lb_member1['id']) - lb_member2 = self.create_lb_member(pool_vm2_fixture.vm_ip, protocol_port, lb_pool['id']) - assert lb_member2, "lb member create failed" - assert self.verify_member_in_api_server(lb_member2['id']), \ - "API server verification failed for member with id %s" % (lb_member2['id']) - lb_member3 = self.create_lb_member(pool_vm3_fixture.vm_ip, protocol_port, lb_pool['id']) - assert lb_member3, "lb member create failed" - assert self.verify_member_in_api_server(lb_member3['id']), \ - "API server verification failed for member with id %s" % (lb_member3['id']) - - #create vip - self.logger.info("creating lb vip:%s" % (vip_name)) - lb_vip = self.create_vip(vip_name, protocol, protocol_port, vn_vip_fixture.vn_subnet_objs[0]['id'], lb_pool['id']) - assert lb_vip, "lb vip create 
failed" - vip_ip = lb_vip['address'] - #api server verification - assert self.verify_vip_in_api_server(lb_vip['id']), \ - "API server verification failed for vip with vip id %s" % (lb_vip['id']) - #TODO : agent verification - - #Start SimpleHTTPServer on port 80 on all lb pool servers - output = '' - self.start_simpleHTTPserver(lb_pool_servers) - - #Do wget on the VIP ip from the client, Lets do it 3 times - lb_response1 = [] - result = '' - for i in range (0,3): - result,output = self.run_wget(client_vm1_fixture,vip_ip) - if result: - lb_response1.append(output.strip('\r')) - else: - errmsg = "connection to vip %s failed" % (vip_ip) - assert result, errmsg - - # To check lb-method ROUND ROBIN lets do wget again 3 times - lb_response2 = [] - for i in range (0,3): - result,output = self.run_wget(client_vm1_fixture,vip_ip) - if result: - lb_response2.append(output.strip('\r')) - else: - errmsg = "connection to vip %s failed" % (vip_ip) - assert result, errmsg - - errmsg = ("lb-method ROUND ROBIN doesnt work as expcted, First time requests went to servers %s" - " subsequent requests went to servers %s" %(lb_response1, lb_response2)) - if not lb_response1 == lb_response2: - self.logger.error(errmsg) - assert False, errmsg - self.logger.info("lb-method ROUND ROBIN works as expected,First time requests went to servers %s" - " subsequent requests went to servers %s" % (lb_response1, lb_response2)) - - # end test_lbmethod_round_robin - - @preposttest_wrapper - def test_healthmonitor(self): - '''Creates Lbaas pool with lb-method ROUND ROBIN, 3 members and vip - create the healthmonitor of type HTTP associate with the pool. - bringdown one of the backend server and verify requests are not - sent to that server and loadbalcing happens between the remaining backend servers - which are active. 
- ''' - - vn_pool = get_random_name('vn_pool') - vn_vip = get_random_name('vn_vip') - vn_pool_subnets = ['10.1.1.0/24'] - vn_vip_subnets = ['20.1.1.0/24'] - pool_vm1 = get_random_name('server1') - pool_vm2 = get_random_name('server2') - pool_vm3 = get_random_name('server3') - client_vm1 = get_random_name('client1') - - vn_pool_fixture = self.create_vn(vn_pool, vn_pool_subnets) - assert vn_pool_fixture.verify_on_setup() - vn_vip_fixture = self.create_vn(vn_vip, vn_vip_subnets) - assert vn_vip_fixture.verify_on_setup() - pool_vm1_fixture = self.create_vm(vn_pool_fixture,vm_name=pool_vm1, - flavor='contrail_flavor_small', image_name='ubuntu') - pool_vm2_fixture = self.create_vm(vn_pool_fixture,vm_name=pool_vm2, - flavor='contrail_flavor_small', image_name='ubuntu') - pool_vm3_fixture = self.create_vm(vn_pool_fixture,vm_name=pool_vm3, - flavor='contrail_flavor_small', image_name='ubuntu') - client_vm1_fixture = self.create_vm(vn_vip_fixture,vm_name=client_vm1, - flavor='contrail_flavor_small', image_name='ubuntu') - - lb_pool_servers = [pool_vm1_fixture, pool_vm2_fixture, pool_vm3_fixture] - - assert pool_vm1_fixture.wait_till_vm_is_up() - assert pool_vm2_fixture.wait_till_vm_is_up() - assert pool_vm3_fixture.wait_till_vm_is_up() - assert client_vm1_fixture.wait_till_vm_is_up() - - pool_name = 'mypool' - lb_method = 'ROUND_ROBIN' - protocol = 'HTTP' - protocol_port = 80 - vip_name = 'myvip' - hm_delay = 10 - hm_max_retries = 3 - hm_probe_type = 'HTTP' - hm_timeout = 5 - - #create lb pool - self.logger.info("creating lb pool:%s" % (pool_name)) - lb_pool = self.create_lb_pool(pool_name, lb_method, protocol, vn_pool_fixture.vn_subnet_objs[0]['id']) - assert lb_pool, "lb pool create failed" - #api server verification - assert self.verify_lb_pool_in_api_server(lb_pool['id']), \ - "API server verification failed for pool with pool id %s" % (lb_pool['id']) - - #create lb member - self.logger.info("creating lb member") - lb_member1 = 
self.create_lb_member(pool_vm1_fixture.vm_ip, protocol_port, lb_pool['id']) - assert lb_member1, "lb member create failed" - assert self.verify_member_in_api_server(lb_member1['id']), \ - "API server verification failed for member with id %s" % (lb_member1['id']) - lb_member2 = self.create_lb_member(pool_vm2_fixture.vm_ip, protocol_port, lb_pool['id']) - assert lb_member2, "lb member create failed" - assert self.verify_member_in_api_server(lb_member2['id']), \ - "API server verification failed for member with id %s" % (lb_member2['id']) - lb_member3 = self.create_lb_member(pool_vm3_fixture.vm_ip, protocol_port, lb_pool['id']) - assert lb_member3, "lb member create failed" - assert self.verify_member_in_api_server(lb_member3['id']), \ - "API server verification failed for member with id %s" % (lb_member3['id']) - - #create vip - self.logger.info("creating lb vip:%s" % (vip_name)) - lb_vip = self.create_vip(vip_name, protocol, protocol_port, vn_vip_fixture.vn_subnet_objs[0]['id'], lb_pool['id']) - assert lb_vip, "lb vip create failed" - vip_ip = lb_vip['address'] - #api server verification - assert self.verify_vip_in_api_server(lb_vip['id']), \ - "API server verification failed for vip with vip id %s" % (lb_vip['id']) - #TODO : agent verification - - #create helathmonitor of type HTTP - self.logger.info("creating healthmonitor") - healthmonitor = self.create_health_monitor(hm_delay, hm_max_retries, hm_probe_type, hm_timeout) - assert self.verify_healthmonitor_in_api_server(healthmonitor['id']), \ - "API server verification failed for healthmonitor with id %s" % (healthmonitor['id']) - - #Associate HM to pool - self.logger.info("associating healthmonitor to pool %s" % (lb_pool['name'])) - self.associate_health_monitor(lb_pool['id'], healthmonitor['id']) - - #Check if Health monitor is associated with the pool - pool = self.quantum_h.get_lb_pool(lb_pool['id']) - if pool['health_monitors'][0] == healthmonitor['id']: - self.logger.info("pool %s is associated with 
healthmonitor %s" % (lb_pool['name'], pool['health_monitors'])) - else: - assert False, "pool %s is not associated with healthmonitor %s" %(lb_pool['name'], healthmonitor['id']) - - #verify in API server whether HM is associated with pool - self.logger.info("Verify in API server whether pool is associaed with Healthmonitor") - result,msg = self.verify_healthmonitor_association_in_api_server(lb_pool['id'], healthmonitor['id']) - assert result, msg - - #Start SimpleHTTPServer on port 80 on all lb pool servers - self.start_simpleHTTPserver(lb_pool_servers) - - #Do wget on the VIP ip from the client, Lets do it 3 times - out = True - lb_response1 = [] - for i in range (0,3): - result,output = self.run_wget(client_vm1_fixture,vip_ip) - if result: - lb_response1.append(output.strip('\r')) - else: - assert False, ("Test pre condition failed, Error in response on connecting to vip," - " failing the test here, Helathmonitor functionality not verified.") - self.logger.info("requests went to servers: %s" % (lb_response1)) - - #check if server2 is in lb_response1 before bringing it down to check HM functionality - self.logger.info("Verififying if the client request gets forwarded to %s before bringing" - " it down to verify Healthmonitor functinality" % (pool_vm2_fixture.vm_name)) - if pool_vm2_fixture.vm_name in lb_response1: - self.logger.info("client requests are getting forwarded to backend server %s" % (pool_vm2_fixture.vm_name)) - else: - self.logger.info("client requests are not getting forwareded to server %s" % (pool_vm2_fixture.vm_name)) - - #Lets bring down backend server pool_vm2_fixture and requests from client should not - #get forwded to pool_vm2_fixture - pool_vm2_fixture.vm_obj.stop() - self.logger.info("Waiting for the VM to shutdown") - sleep(40) - #ping to the stopped VM to make sure it has stopped. 
- if pool_vm1_fixture.ping_with_certainty(pool_vm2_fixture.vm_ip, expectation=False): - self.logger.info("ping to vm %s failed.VM %s is in shutoff state" - " continuing the test" % (pool_vm2_fixture.vm_name, pool_vm2_fixture.vm_name)) - else: - assert False, ("vm %s stil in active state, HM functinality can not be verified.Stop the VM" - "and then continue the test. failing the test now" % (pool_vm2_fixture.vm_name)) - - #remove the stopped server from lb_pool_servers and start the simpleHTTPserver again - lb_pool_servers.remove(pool_vm2_fixture) - self.start_simpleHTTPserver(lb_pool_servers) - - lb_response1 = [] - for i in range (0,3): - result,output = self.run_wget(client_vm1_fixture,vip_ip) - if result: - lb_response1.append(output.strip('\r')) - else: - assert False, ("Error in response on connecting to vip,even with Healthmonitor associated" - " requests fron client tries to go to backend server which is down") - out = False - if out: - self.logger.info("client requests are not getting forwarded to backend server: %s" - " requests went to servers: %s. healthmonitor working as expected" - % (pool_vm2_fixture.vm_name, lb_response1)) - - #Bring up the server back again and healthmonitor should add the server back to pool and - #client requets should start getting forwarded to this server again. - pool_vm2_fixture.vm_obj.start() - #sleep for 10 sec for the VM to come back to Active state. - self.logger.info("waiting for the VM to come back to Active state") - sleep(10) - #ping to the VM to make sure it is in Active state. - if pool_vm1_fixture.ping_with_certainty(pool_vm2_fixture.vm_ip): - self.logger.info("ping to vm %s passed.VM %s is in Active state" - " continuing the test" % (pool_vm2_fixture.vm_name, pool_vm2_fixture.vm_name)) - else: - assert False, ("vm %s stil in shtuoff state, HM functinality can not be verified. start the VM" - "and then continue the test. 
failing the test now" % (pool_vm2_fixture.vm_name)) - - #add server from lb_pool_servers and start the simpleHTTPserver again - lb_pool_servers.append(pool_vm2_fixture) - self.start_simpleHTTPserver(lb_pool_servers) - - maxduration = 300 - start = datetime.datetime.now() - timedelta = datetime.timedelta(seconds=maxduration) - maxtime = start + timedelta - while maxtime >= datetime.datetime.now(): - lb_response1 = [] - #Do wget on the VIP ip from the client, Lets do it 3 times - for i in range (0,3): - result,output = self.run_wget(client_vm1_fixture,vip_ip) - if result: - lb_response1.append(output.strip('\r')) - else: - errmsg = "connection to vip %s failed" % (vip_ip) - assert result, errmsg - self.logger.info("requests went to servers: %s" % (lb_response1)) - - #check if server2 is in lb_response1 - if pool_vm2_fixture.vm_name in lb_response1: - self.logger.info("client requests are getting forwarded to backend server %s" - " HM functionality working as expected " % (pool_vm2_fixture.vm_name)) - out = True - break - else: - out = False - self.logger.warning("client requests are not getting forwareded to server %s" - " after server is up again, requests should have got forwarded to %s" - " retrying...." 
% (pool_vm2_fixture.vm_name, pool_vm2_fixture.vm_name)) - - if not out: - assert out, ("Reached Max wait, waited for (%s secs), still the client requests are not getting" - " forwareded to server %s, HM functinality not working as expected" - % (maxduration, pool_vm2_fixture.vm_name)) - - # end test_healthmonitor diff --git a/scripts/neutron/test_ports.py b/scripts/neutron/test_ports.py index 26dc4dd96..5cd623a31 100644 --- a/scripts/neutron/test_ports.py +++ b/scripts/neutron/test_ports.py @@ -9,9 +9,11 @@ import fixtures import testtools import time +import datetime from vn_test import * from vm_test import * +from port_fixture import PortFixture from common.connections import ContrailConnections from tcutils.wrappers import preposttest_wrapper @@ -19,6 +21,8 @@ import test from tcutils.util import * from netaddr import IPNetwork, IPAddress +from floating_ip import FloatingIPFixture +from tcutils import tcpdump_utils class TestPorts(BaseNeutronTest): @@ -51,9 +55,9 @@ def test_ports_attach_detach(self): image_name='ubuntu-traffic', port_ids=[port_obj['id']]) vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name, - image_name='cirros-0.3.0-x86_64-uec') - vm1_fixture.wait_till_vm_is_up() - vm2_fixture.wait_till_vm_is_up() + image_name='cirros') + assert vm1_fixture.wait_till_vm_is_up() + assert vm2_fixture.wait_till_vm_is_up() if not vm2_fixture.ping_with_certainty(vm1_fixture.vm_ip): self.logger.error('Ping to a attached port %s failed' % (vm1_fixture.vm_ip)) @@ -117,13 +121,13 @@ def test_ports_specific_subnet(self): port2_obj = self.create_port(net_id=vn1_fixture.vn_id, fixed_ips=[{'subnet_id': vn1_subnet2_id}]) vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port1_obj['id']]) vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port2_obj['id']]) - vm1_fixture.wait_till_vm_is_up() - 
vm2_fixture.wait_till_vm_is_up() + assert vm1_fixture.wait_till_vm_is_up() + assert vm2_fixture.wait_till_vm_is_up() self.assertEqual( vm1_fixture.vm_ip, port1_obj['fixed_ips'][0]['ip_address'], @@ -167,13 +171,13 @@ def test_ports_specific_subnet_ip(self): fixed_ips=[{'subnet_id': vn1_subnet2_id, 'ip_address': vn1_subnet2_ip}]) vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port1_obj['id']]) vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port2_obj['id']]) - vm1_fixture.wait_till_vm_is_up() - vm2_fixture.wait_till_vm_is_up() + assert vm1_fixture.wait_till_vm_is_up() + assert vm2_fixture.wait_till_vm_is_up() self.assertEqual(vm1_fixture.vm_ip, vn1_subnet1_ip, 'VM IP and Port IP Mismatch') self.assertEqual(vm2_fixture.vm_ip, @@ -211,9 +215,9 @@ def test_ports_multiple_specific_subnet_ips(self): image_name='ubuntu-traffic', port_ids=[port1_obj['id']]) test_vm_fixture = self.create_vm(vn1_fixture, test_vm_name, - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') vm1_fixture.verify_on_setup() - test_vm_fixture.wait_till_vm_is_up() + assert test_vm_fixture.wait_till_vm_is_up() subnet_list = [vn1_subnet1_ip, vn1_subnet2_ip] subnet_list2 = [vn1_subnet1_ip2, vn1_subnet2_ip2] assert set(vm1_fixture.vm_ips) == set( @@ -237,8 +241,9 @@ def test_ports_multiple_specific_subnet_ips(self): port1_obj = self.create_port(net_id=vn1_fixture.vn_id, fixed_ips=[{'subnet_id': vn1_subnet1_id, 'ip_address': vn1_subnet1_ip2}, {'subnet_id': vn1_subnet2_id, 'ip_address': vn1_subnet2_ip2}]) + vm1_fixture.clear_vmi_info() vm1_fixture.interface_attach(port_id=port1_obj['id']) - vm1_fixture.wait_till_vm_is_up() + assert vm1_fixture.wait_till_vm_is_up() # Create alias on the VM to respond to pings for subnet in subnet_list2: output = vm1_fixture.run_cmd_on_vm(['sudo ifconfig eth0:' + unicode( @@ -273,13 +278,13 @@ def 
test_ports_specific_mac(self): port2_obj = self.create_port(net_id=vn1_fixture.vn_id, mac_address=vm2_mac) vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port1_obj['id']]) vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port2_obj['id']]) - vm1_fixture.wait_till_vm_is_up() - vm2_fixture.wait_till_vm_is_up() + assert vm1_fixture.wait_till_vm_is_up() + assert vm2_fixture.wait_till_vm_is_up() vm1_tap_intf = vm1_fixture.tap_intf[vm1_fixture.vn_fq_names[0]] vm2_tap_intf = vm2_fixture.tap_intf[vm2_fixture.vn_fq_names[0]] assert vm1_tap_intf['mac_addr'] == vm1_mac, ''\ @@ -310,10 +315,10 @@ def test_ports_no_sg(self): no_security_group=True) port2_obj = self.create_port(net_id=vn1_fixture.vn_id) vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port1_obj['id']]) vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port2_obj['id']]) assert vm1_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' assert vm2_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' @@ -346,10 +351,10 @@ def test_ports_custom_sg(self): security_groups=[sg1['id']]) port2_obj = self.create_port(net_id=vn1_fixture.vn_id) vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port1_obj['id']]) vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port2_obj['id']]) assert vm1_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' assert vm2_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' @@ -466,12 +471,12 @@ def test_port_admin_state_up(self): vn1_fixture = self.create_vn(vn1_name, vn1_subnets) port_obj = 
self.create_port(net_id=vn1_fixture.vn_id) vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port_obj['id']]) vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name, - image_name='cirros-0.3.0-x86_64-uec') - vm1_fixture.wait_till_vm_is_up() - vm2_fixture.wait_till_vm_is_up() + image_name='cirros') + assert vm1_fixture.wait_till_vm_is_up() + assert vm2_fixture.wait_till_vm_is_up() assert vm2_fixture.ping_with_certainty(vm1_fixture.vm_ip) port_dict = {'admin_state_up': False} port_rsp = self.quantum_h.update_port(port_obj['id'], port_dict) @@ -507,10 +512,10 @@ def test_ports_update_sg(self): port1_obj = self.create_port(net_id=vn1_fixture.vn_id) port2_obj = self.create_port(net_id=vn1_fixture.vn_id) vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port1_obj['id']]) vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port2_obj['id']]) assert vm1_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' assert vm2_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' @@ -551,7 +556,7 @@ def test_ports_device_owner_and_id(self): port1_obj) vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port1_obj['id']]) assert vm1_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' @@ -565,6 +570,46 @@ def test_ports_device_owner_and_id(self): # end test_ports_device_owner_and_id + @preposttest_wrapper + def test_ports_secondary_ip_attach(self): + ''' + Validate when 2 different Instance IPs are associated with same VMI, + and "instance_ip_secondary" is set True for 2nd Instance IP, 1st Instance IP + should act as native IP of VM. This script verifies following bug: + https://bugs.launchpad.net/juniperopenstack/+bug/1645414 + + Create a VN. 
+ Create a VMI/Port and add set "instance_ip_secondary" = False. + This will result in 2 instances getting created and attached to same VMI. + 1 IIP Primary and 2nd IIP as Secondary. + Create a VM using that port and verify that IP from primary IIP gets assigned by DHCP + ''' + vn1_name = get_random_name('vn1') + vn1_subnet = get_random_cidr() + vn1_vm1_name = get_random_name('vn1-vm1') + vn1_fixture = self.create_vn(vn1_name, [vn1_subnet]) + fixed_ips = [{'subnet_id' : vn1_fixture.vn_subnet_objs[0]['id'], + 'instance_ip_secondary' : False}, + {'subnet_id' : vn1_fixture.vn_subnet_objs[0]['id'], + 'instance_ip_secondary' : True}] + port_vm1_obj = self.useFixture(PortFixture(vn1_fixture.uuid, + api_type = "contrail", + fixed_ips = fixed_ips, + connections=self.connections)) + assert port_vm1_obj.verify_on_setup() + vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, + image_name='ubuntu-traffic', + port_ids=[port_vm1_obj.uuid]) + assert vm1_fixture.wait_till_vm_is_up() + if port_vm1_obj.iip_objs[0].instance_ip_address == vm1_fixture.vm_ip: + self.logger.debug("VM Ip is successfully set to Primary IIP") + elif port_vm1_obj.iip_objs[1].instance_ip_address == vm1_fixture.vm_ip: + self.logger.debug("VM Ip is set to secondary IIP") + assert False, "VM Ip is set to secondary IIP" + else: + assert False, "VM Ip is none among any IIP" + # end test_ports_secondary_ip_attach + @preposttest_wrapper def test_shutoff_vm_route_withdrawal(self): '''Test shutdown of VM using nova command and correponfing route withdrawal. 
@@ -613,8 +658,8 @@ def test_shutoff_vm_route_withdrawal(self): vn1_vm1_fixture = self.vn1_vm1_fixture vn1_vm2_fixture = self.vn1_vm2_fixture - vn1_vm1_fixture.wait_till_vm_is_up() - vn1_vm2_fixture.wait_till_vm_is_up() + assert vn1_vm1_fixture.wait_till_vm_is_up() + assert vn1_vm2_fixture.wait_till_vm_is_up() vm1_name = self.vn1_vm1_name vn1_name = self.vn1_name @@ -671,8 +716,9 @@ def test_aap_with_vrrp_admin_state_toggle(self): assert vm2_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' assert vm_test_fixture.wait_till_vm_is_up( ), 'VM does not seem to be up' - - self.config_aap(port1_obj, port2_obj, vIP) + port_list = [port1_obj, port2_obj] + for port in port_list: + self.config_aap(port, vIP, mac=port['mac_address']) self.config_vrrp(vm1_fixture, vIP, '20') self.config_vrrp(vm2_fixture, vIP, '10') time.sleep(10) @@ -701,38 +747,53 @@ def test_aap_with_vrrp_admin_state_toggle(self): @test.attr(type=['sanity']) @preposttest_wrapper - def test_aap_with_vMAC(self): - '''Create 2 VSRXs and enable VRRP between them, specifying a vIP. - Update the ports of the respective VMs to allow the vIP so configured. - Cause a VRRP Mastership switchover by changing the VRRP priority. - The vIP should still be accessible via the new VRRP master. + def test_aap_with_fip(self): + ''' + 1. Create 2 VSRXs and enable VRRP between them, specifying a vIP. + 2. Update the ports of the respective VMs to allow the vIP so configured. + 3. Associate the same FIP to both the ports using API. + 4. In the Floating IP object, add the vIP as the fixed_ip_address. + 5. Ping to the vIP and FIP should be answered by the AAP active port. + 6. Cause a VRRP Mastership switchover by changing the VRRP priority. + 7. The vIP and FIP should still be accessible via the new VRRP master. + ''' + if ('MX_GW_TEST' not in os.environ) or (('MX_GW_TEST' in os.environ) and (os.environ.get('MX_GW_TEST') != '1')): + self.logger.info( + "Skipping Test. Env variable MX_GW_TEST is not set. 
Skipping the test") + raise self.skipTest( + "Skipping Test. Env variable MX_GW_TEST is not set. Skipping the test") + return True + + public_vn_fixture = self.public_vn_obj.public_vn_fixture + public_vn_subnet = self.public_vn_obj.public_vn_fixture.vn_subnets[ + 0]['cidr'] + # Since the ping is across projects, enabling allow_all in the SG + self.project.set_sec_group_for_allow_all( + self.inputs.project_name, 'default') vn1_name = get_random_name('left-vn') - vn1_subnets = ['10.10.10.0/24'] + vn1_subnets = [get_random_cidr()] vn2_name = get_random_name('right-vn') - vn2_subnets = ['20.20.20.0/24'] - vn3_name = get_random_name('mgmt-vn') - vn3_subnets = ['30.30.30.0/24'] + vn2_subnets = [get_random_cidr()] vsrx1_name = get_random_name('vsrx1') vsrx2_name = get_random_name('vsrx2') vm_test_name = get_random_name('vm_test') - vIP = '10.10.10.10' + vIP = get_an_ip(vn1_subnets[0], offset=10) result = False vn1_fixture = self.create_vn(vn1_name, vn1_subnets) vn2_fixture = self.create_vn(vn2_name, vn2_subnets) - vn3_fixture = self.create_vn(vn3_name, vn3_subnets) - vn_objs = [vn3_fixture.obj, vn1_fixture.obj, vn2_fixture.obj] + vn_objs = [public_vn_fixture.obj, vn1_fixture.obj, vn2_fixture.obj] lvn_port_obj1 = self.create_port(net_id=vn1_fixture.vn_id) rvn_port_obj1 = self.create_port(net_id=vn2_fixture.vn_id) - mvn_port_obj1 = self.create_port(net_id=vn3_fixture.vn_id) + mvn_port_obj1 = self.create_port(net_id=public_vn_fixture.vn_id) lvn_port_obj2 = self.create_port(net_id=vn1_fixture.vn_id) rvn_port_obj2 = self.create_port(net_id=vn2_fixture.vn_id) - mvn_port_obj2 = self.create_port(net_id=vn3_fixture.vn_id) + mvn_port_obj2 = self.create_port(net_id=public_vn_fixture.vn_id) port_ids1 = [ mvn_port_obj1['id'], lvn_port_obj1['id'], rvn_port_obj1['id']] @@ -742,16 +803,48 @@ def test_aap_with_vMAC(self): vm1_fixture = self.useFixture( VMFixture( vn_objs=vn_objs, project_name=self.inputs.project_name, connections=self.connections, - flavor='m1.medium', image_name='vsrx', 
vm_name=vsrx1_name, + image_name='vsrx', vm_name=vsrx1_name, port_ids=port_ids1, zone='nova')) vm2_fixture = self.useFixture( VMFixture( vn_objs=vn_objs, project_name=self.inputs.project_name, connections=self.connections, - flavor='m1.medium', image_name='vsrx', vm_name=vsrx2_name, + image_name='vsrx', vm_name=vsrx2_name, port_ids=port_ids2, zone='nova')) vm_test_fixture = self.create_vm(vn1_fixture, vm_test_name, - image_name='ubuntu-traffic') - self.config_aap(lvn_port_obj1, lvn_port_obj2, vIP, vsrx=True) + image_name='cirros') + + self.logger.info('Create a FVN. Create a FIP-Pool and FIP') + fvn_name = get_random_name('fvn') + fvn_subnets = [get_random_cidr()] + fvn_vm_name = get_random_name('fvn-vm') + fvn_fixture = self.create_vn(fvn_name, fvn_subnets) + fvn_vm_fixture = self.create_vm(fvn_fixture, fvn_vm_name, + image_name='cirros') + assert fvn_vm_fixture.wait_till_vm_is_up( + ), 'VM does not seem to be up' + fip_pool_name = 'some-pool1' + my_fip_name = 'fip' + fvn_obj = fvn_fixture.obj + fvn_id = fvn_fixture.vn_id + fip_fixture = self.useFixture( + FloatingIPFixture( + project_name=self.inputs.project_name, inputs=self.inputs, + connections=self.connections, pool_name=fip_pool_name, vn_id=fvn_fixture.vn_id)) + assert fip_fixture.verify_on_setup() + fIP = self.create_fip(fip_fixture) + self.addCleanup(self.del_fip, fIP[1]) + self.logger.info('Use VNC API to associate the same fIP to two ports') + self.logger.info( + 'Add the vIP- %s as Fixed IP of the fIP- %s' % (vIP, fIP[0])) + vm1_l_vmi_id = vm1_fixture.get_vmi_ids()[vn1_fixture.vn_fq_name] + vm2_l_vmi_id = vm2_fixture.get_vmi_ids()[vn1_fixture.vn_fq_name] + self.assoc_fip(fIP[1], vm1_fixture.vm_id, vmi_id=vm1_l_vmi_id) + self.assoc_fip(fIP[1], vm2_fixture.vm_id, vmi_id=vm2_l_vmi_id) + self.assoc_fixed_ip_to_fip(fIP[1], vIP) + self.addCleanup(self.disassoc_fip, fIP[1]) + port_list = [lvn_port_obj1, lvn_port_obj2] + for port in port_list: + self.config_aap(port, vIP, mac='00:00:5e:00:01:01') 
vm1_fixture.wait_till_vm_is_up() vm2_fixture.wait_till_vm_is_up() self.logger.info('We will configure VRRP on the two vSRX') @@ -765,6 +858,8 @@ def test_aap_with_vMAC(self): assert self.vrrp_mas_chk(vm1_fixture, vn1_fixture, vIP, vsrx=True) assert self.verify_vrrp_action( vm_test_fixture, vm1_fixture, vIP, vsrx=True) + assert self.verify_vrrp_action( + fvn_vm_fixture, vm1_fixture, fIP[0], vsrx=True) self.logger.info( 'Will reduce the VRRP priority on %s, causing a VRRP mastership switch' % vm1_fixture.vm_name) @@ -776,8 +871,10 @@ def test_aap_with_vMAC(self): assert self.vrrp_mas_chk(vm2_fixture, vn1_fixture, vIP, vsrx=True) assert self.verify_vrrp_action( vm_test_fixture, vm2_fixture, vIP, vsrx=True) + assert self.verify_vrrp_action( + fvn_vm_fixture, vm2_fixture, fIP[0], vsrx=True) - # end test_aap_with_vMAC + # end test_aap_with_fip @preposttest_wrapper def test_aap_with_vrrp_priority_change(self): @@ -811,8 +908,9 @@ def test_aap_with_vrrp_priority_change(self): assert vm2_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' assert vm_test_fixture.wait_till_vm_is_up( ), 'VM does not seem to be up' - - self.config_aap(port1_obj, port2_obj, vIP) + port_list = [port1_obj, port2_obj] + for port in port_list: + self.config_aap(port, vIP, mac=port['mac_address']) self.config_vrrp(vm1_fixture, vIP, '20') self.config_vrrp(vm2_fixture, vIP, '10') assert self.vrrp_mas_chk(vm1_fixture, vn1_fixture, vIP) @@ -835,3 +933,294 @@ def test_aap_with_vrrp_priority_change(self): assert self.verify_vrrp_action(vm_test_fixture, vm2_fixture, vIP) # end test_aap_with_vrrp_priority_change + + @preposttest_wrapper + def test_zombie_tap_interface(self): + '''Test Zombie Tap-interface + create vn,vm and port + delete the port + check whether still tap-interface present or not''' + result = True + vn1_name = get_random_name('vn1') + vm1_name = get_random_name('vm1') + vn1_fixture = self.create_vn(vn1_name) + port1_obj = self.create_port(net_id=vn1_fixture.vn_id) + vm1_fixture = 
self.create_vm(vn1_fixture, vm1_name, + image_name='ubuntu-traffic', + port_ids=[port1_obj['id']]) + assert vm1_fixture.wait_till_vm_is_up() + self.logger.info('get tap_interface of vm %s' %vm1_fixture.vm_name) + vm_tap_intf=vm1_fixture.get_tap_intf_of_vm() + assert vm_tap_intf,'Tap interface not present for %s' %vm1_fixture.vm_name + self.delete_port(port1_obj['id']) + sleep(10) + vm_tap_intf = vm1_fixture.get_tap_intf_of_vm() + assert not( + vm_tap_intf), 'Tap interface still present for vm %s' % vm1_fixture.vm_name + self.logger.info( + "VM's tap interface got cleaned up on port delete. Test passed") + + # end test_zombie_tap_interface + + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_aap_active_active_mode(self): + ''' + Verify AAP in active-active mode + 1. Launch 2 vms on same virtual network. + 2. Configure AAP between the two ports in active-active mode. + 3. Launch a test VM in the same network. + 4. Create a alias on both the VMs for the vIP. + 5. cURL request to the vIP should be answered by either of the two VMs. 
+ + Maintainer: ganeshahv@juniper.net + ''' + + vn1_name = get_random_name('vn1') + vn1_subnets = [get_random_cidr()] + vm1_name = get_random_name('vm1') + vm2_name = get_random_name('vm2') + vm_test_name = get_random_name('vm_test') + result = False + vn1_fixture = self.create_vn(vn1_name, vn1_subnets) + vIP = get_an_ip(vn1_subnets[0], offset=10) + port1_obj = self.create_port(net_id=vn1_fixture.vn_id) + port2_obj = self.create_port(net_id=vn1_fixture.vn_id) + port_list = [port1_obj, port2_obj] + vm1_fixture = self.create_vm(vn1_fixture, vm1_name, + image_name='ubuntu-traffic', + port_ids=[port1_obj['id']]) + vm2_fixture = self.create_vm(vn1_fixture, vm2_name, + image_name='ubuntu-traffic', + port_ids=[port2_obj['id']]) + vm_test_fixture = self.create_vm(vn1_fixture, vm_test_name, + image_name='ubuntu') + assert vm1_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' + assert vm2_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' + assert vm_test_fixture.wait_till_vm_is_up( + ), 'VM does not seem to be up' + vm_list = [vm1_fixture, vm2_fixture] + for port in port_list: + self.config_aap( + port, vIP, mac=port['mac_address'], aap_mode='active-active', contrail_api=True) + self.logger.info( + 'Since no VRRP is run, both the ports should be seen as active') + for vm in vm_list: + vm.start_webserver() + output = vm.run_cmd_on_vm( + ['sudo ifconfig eth0:10 ' + vIP + ' netmask 255.255.255.0']) + self.check_master_in_agent(vm, vn1_fixture, vIP, ecmp=True) + self.logger.info('Curl requests to %s should be answered by either %s or %s' % ( + vIP, vm1_fixture.vm_name, vm2_fixture.vm_name)) + cmd = "curl --local-port 9001 -i " + vIP + ":8000" + result = vm_test_fixture.run_cmd_on_vm(cmds=[cmd]) + assert (vm1_fixture.vm_name or vm2_fixture.vm_name) and '200 OK' in result[ + cmd], 'Requests not being answered' + # end test_aap_active_active_mode + + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_aap_with_zero_mac(self): + ''' + Verify VIP 
reachability over L2 network when AAP MAC is configured with all zeo + 1. Launch 2 vms on same virtual network. + 2. Configure high availability between them with keepalived. + 3. Launch third VM in same VM. + 4. Check the reachability of VIP from 3rd VM. + 5. Shutdown keepalive in master VM to induce VIP switch over. + 6. Check the reachability of VIP from 3rd VM again. + 7. Bring back master VM which will cause switchover of VIP again. + 8. Check the reachability of VIP from 3rd VM again. + + Pass criteria: Step 4,6 and 8 should pass + Maintainer: chhandak@juniper.net + ''' + + vn1_name = get_random_name('vn1') + vn1_subnets = [get_random_cidr()] + vm1_name = get_random_name('vm1') + vm2_name = get_random_name('vm2') + vm_test_name = get_random_name('vm_test') + vID = '51' + result = False + + vn1_fixture = self.create_vn(vn1_name, vn1_subnets) + vIP = get_an_ip(vn1_subnets[0], offset=10) + port1_obj = self.create_port(net_id=vn1_fixture.vn_id) + port2_obj = self.create_port(net_id=vn1_fixture.vn_id) + port_list = [port1_obj, port2_obj] + vm1_fixture = self.create_vm(vn1_fixture, vm1_name, + image_name='ubuntu-keepalive', + port_ids=[port1_obj['id']]) + vm2_fixture = self.create_vm(vn1_fixture, vm2_name, + image_name='ubuntu-keepalive', + port_ids=[port2_obj['id']]) + vm_test_fixture = self.create_vm(vn1_fixture, vm_test_name, + image_name='ubuntu') + assert vm1_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' + assert vm2_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' + assert vm_test_fixture.wait_till_vm_is_up( + ), 'VM does not seem to be up' + for port in port_list: + self.config_aap( + port, vIP, mac='00:00:00:00:00:00', contrail_api=True) + self.config_keepalive(vm1_fixture, vIP, vID, '10') + self.config_keepalive(vm2_fixture, vIP, vID, '20') + + self.logger.info('Ping to the Virtual IP from the test VM (Same Network)') + assert vm_test_fixture.ping_with_certainty(vIP), ''\ + 'Ping to the Virtual IP %s from the test VM %s, failed' % 
(vIP, + vm_test_fixture.vm_ip) + + self.logger.info('Forcing VIP Switch by stopping keepalive on master') + self.service_keepalived(vm2_fixture, 'stop') + + self.logger.info('Ping to the Virtual IP after switch over \ + from the test VM (Same Network)') + assert vm_test_fixture.ping_with_certainty(vIP), ''\ + 'Ping to the Virtual IP %s from the test VM %s, failed' % (vIP, + vm_test_fixture.vm_ip) + + self.logger.info('Bringing keepalive master back') + self.service_keepalived(vm2_fixture, 'start') + + self.logger.info('Ping to the Virtual IP after switch over \ + from the test VM (Same Network)') + assert vm_test_fixture.ping_with_certainty(vIP), ''\ + 'Ping to the Virtual IP %s from the test VM %s, failed' % (vIP, + vm_test_fixture.vm_ip) + # end test_aap_with_zero_mac + + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_ports_bindings(self): + ''' + Verify that we are able to create a port with custom port bindings + Steps: + 1) Create Port with Port bindings profile set to {'foo': 'bar'} + 2) Retrieve and verify the same is set + ''' + bind_dict = {'foo': 'bar'} + vn = self.create_vn() + port = self.useFixture(PortFixture(vn.uuid, connections=self.connections, + binding_profile=bind_dict)) + assert port.verify_on_setup(), 'VMI %s verification has failed'%port.uuid + # end test_ports_bindings + + # Unused test, retained for reference + def aap_backoff(self): + ''' + Verify VIP reachability over L2 network when AAP MAC is configured with all zero + 1. Launch 2 vms on same virtual network. + 2. Configure high availability between them with keepalived. + 3. Launch third VM in same VN. + 4. Check the reachability of VIP from 3rd VM. + 5. Break VRRP link, VRRP state toggles + 6. vRouter query to VM should start to incrementally backoff + 7. Check Max backoff interval is 32 secs + 8. Stop VRRP toggle, VRRP state should stable & ping to VIP passes + 9. Break VRRP link again + 10. 
Check that backoff interval is reset + + Pass criteria: Step 4,6,7,8 and 10 should pass + Maintainer: chhandak@juniper.net + ''' + + vn1_name = get_random_name('vn1') + vn1_subnets = [get_random_cidr()] + vm1_name = get_random_name('vm1') + vm2_name = get_random_name('vm2') + vm_test_name = get_random_name('vm_test') + vID = '51' + result = False + + vn1_fixture = self.create_vn(vn1_name, vn1_subnets) + vIP = get_an_ip(vn1_subnets[0], offset=10) + port1_obj = self.create_port(net_id=vn1_fixture.vn_id) + port2_obj = self.create_port(net_id=vn1_fixture.vn_id) + port_list = [port1_obj, port2_obj] + vm1_fixture = self.create_vm(vn1_fixture, vm1_name, + image_name='ubuntu-keepalive', + flavor='contrail_flavor_large', + port_ids=[port1_obj['id']]) + vm2_fixture = self.create_vm(vn1_fixture, vm2_name, + image_name='ubuntu-keepalive', + flavor='contrail_flavor_large', + port_ids=[port2_obj['id']]) + vm_test_fixture = self.create_vm(vn1_fixture, vm_test_name, + image_name='ubuntu') + assert vm1_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' + assert vm2_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' + assert vm_test_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' + for port in port_list: + self.config_aap(port, vIP, mac=port['mac_address']) + self.config_keepalive(vm1_fixture, vIP, vID, '10') + self.config_keepalive(vm2_fixture, vIP, vID, '20') + + self.logger.info('Ping to the Virtual IP from the test VM (Same Network)') + assert vm_test_fixture.ping_with_certainty(vIP), ''\ + 'Ping to the Virtual IP %s from the test VM %s, failed' % (vIP, + vm_test_fixture.vm_ip) + + self.logger.info('Break VRRP link, by setting mismatching server Id') + self.service_keepalived(vm2_fixture, 'stop') + filter_cmd = 'arp host %s and ether src 00:00:5e:00:01:00' % vIP + session, pcap = tcpdump_utils.start_tcpdump_for_vm_intf(self.connections, vm2_fixture, + vn1_fixture.vn_fq_name, filter_cmd) + self.config_keepalive(vm2_fixture, vIP, '50', '20') + 
time.sleep(120) + tcpdump_utils.stop_tcpdump_for_vm_intf(self.connections, session, pcap) + self.verify_arp_backoff(vIP, tcpdump_utils.read_tcpdump(self.connections, session, pcap)) + tcpdump_utils.delete_pcap(session, pcap) + + self.logger.info('Restore VRRP link') + self.service_keepalived(vm2_fixture, 'stop') + self.config_keepalive(vm2_fixture, vIP, vID, '20') + time.sleep(30) + + self.logger.info('Break VRRP link, by setting mismatching server Id') + self.service_keepalived(vm2_fixture, 'stop') + session, pcap = tcpdump_utils.start_tcpdump_for_vm_intf(self.connections, vm2_fixture, + vn1_fixture.vn_fq_name, filter_cmd) + self.config_keepalive(vm2_fixture, vIP, '50', '20') + time.sleep(120) + tcpdump_utils.stop_tcpdump_for_vm_intf(self.connections, session, pcap) + self.verify_arp_backoff(vIP, tcpdump_utils.read_tcpdump(self.connections, session, pcap)) + tcpdump_utils.delete_pcap(session, pcap) + + self.logger.info('Restore VRRP link') + self.service_keepalived(vm2_fixture, 'stop') + self.config_keepalive(vm2_fixture, vIP, vID, '20') + time.sleep(2) + self.logger.info('Ping to the Virtual IP after stablizing VRRP link') + assert vm_test_fixture.ping_with_certainty(vIP), \ + 'Ping to the Virtual IP %s from the test VM %s, failed' % (vIP, + vm_test_fixture.vm_ip) + + def verify_arp_backoff (self, vIP, msgs): + msgs = msgs.split('\n') + prev_ts = None + cur_ts = None + expected = [8, 16, 32] + cur_idx = 0 + ret = True + start_backoff = False + for i in range(len(msgs)): + if not msgs[i]: + continue + x = time.strptime(msgs[i][0:8], '%H:%M:%S') + cur_ts = datetime.timedelta(hours=x.tm_hour, minutes=x.tm_min, seconds=x.tm_sec).total_seconds() + if prev_ts: + intval = cur_ts - prev_ts + if intval < 1.0 and start_backoff == False: + start_backoff = True + if intval > 1.0 and start_backoff == True: + diff = intval - expected[cur_idx] + if diff >= 0 and diff <= 2.0: + cur_idx = min(cur_idx + 1, len(expected) - 1) + else: + ret = False + self.logger.warn('expected 
backoff %d, got %d\n%s\n%s' % (expected[cur_idx], + intval, msgs[i-1], msgs[i])) + prev_ts = cur_ts + assert ret, "AAP ARP backoff not as expected %s" % expected diff --git a/scripts/neutron/test_quota.py b/scripts/neutron/test_quota.py index b07d64382..d756faa48 100644 --- a/scripts/neutron/test_quota.py +++ b/scripts/neutron/test_quota.py @@ -32,11 +32,19 @@ def test_default_quota_for_admin_tenant(self): "Defalult quota set for admin tenant is : \n %s" % (quota_dict)) for neutron_obj in quota_dict['quota']: - if quota_dict['quota'][neutron_obj] != -1: - self.logger.error( - "Default Quota limit not followed for %s and is set to %s " % - (neutron_obj, quota_dict['quota'][neutron_obj])) - result = False + if neutron_obj != 'rbac_policy' and neutron_obj != 'loadbalancer': + if quota_dict['quota'][neutron_obj] != -1: + self.logger.error( + "Default Quota limit not followed for %s and is set to %s " % + (neutron_obj, quota_dict['quota'][neutron_obj])) + result = False + else: + if quota_dict['quota'][neutron_obj] != 10: + self.logger.error( + "Default Quota limit not followed for %s and is set to %s " % + (neutron_obj, quota_dict['quota'][neutron_obj])) + result = False + assert result, 'Default quota for admin tenant is not set' @preposttest_wrapper @@ -48,11 +56,18 @@ def test_default_quota_for_new_tenant(self): "Defalult quota set for tenant %s is : \n %s" % (self.inputs.project_name, quota_dict)) for neutron_obj in quota_dict['quota']: - if quota_dict['quota'][neutron_obj] != -1: - self.logger.error( - "Default Quota limit not followed for %s and is set to %s " % - (neutron_obj, quota_dict['quota'][neutron_obj])) - result = False + if neutron_obj != 'rbac_policy' and neutron_obj != 'loadbalancer': + if quota_dict['quota'][neutron_obj] != -1: + self.logger.error( + "Default Quota limit not followed for %s and is set to %s " % + (neutron_obj, quota_dict['quota'][neutron_obj])) + result = False + else: + if quota_dict['quota'][neutron_obj] != 10: + self.logger.error( 
+ "Default Quota limit not followed for %s and is set to %s " % + (neutron_obj, quota_dict['quota'][neutron_obj])) + result = False assert result, 'Default quota for custom tenant is not set' @preposttest_wrapper @@ -89,6 +104,7 @@ def test_update_quota_for_new_tenant(self): result = False assert result, 'Failed to update quota for admin tenant' + @preposttest_wrapper def test_quota_update_of_new_project_by_admin(self): '''Launch two custom tenants, quota update by admin tenant should be successful @@ -106,27 +122,27 @@ def test_quota_update_of_new_project_by_admin(self): project_name = get_random_name('project1') isolated_creds = IsolatedCreds( - project_name, self.admin_inputs, + project_name, ini_file=self.ini_file, logger=self.logger) - isolated_creds.setUp() - project_obj = isolated_creds.create_tenant() - isolated_creds.create_and_attach_user_to_tenant() - proj_inputs = isolated_creds.get_inputs() - proj_connection = isolated_creds.get_conections() + project_obj = self.admin_isolated_creds.create_tenant(isolated_creds.project_name) + self.admin_isolated_creds.create_and_attach_user_to_tenant(project_obj, + isolated_creds.username,isolated_creds.password) + proj_inputs = isolated_creds.get_inputs(project_obj) + proj_connection = project_obj.get_project_connections() project_name1 = get_random_name('project2') isolated_creds1 = IsolatedCreds( - project_name1, self.admin_inputs, + project_name1, ini_file=self.ini_file, logger=self.logger) - isolated_creds1.setUp() - project_obj1 = isolated_creds1.create_tenant() - isolated_creds1.create_and_attach_user_to_tenant() - proj_inputs1 = isolated_creds1.get_inputs() - proj_connection1 = isolated_creds1.get_conections() + project_obj1 = self.admin_isolated_creds.create_tenant(isolated_creds1.project_name) + self.admin_isolated_creds.create_and_attach_user_to_tenant(project_obj1, + isolated_creds1.username,isolated_creds1.password) + proj_inputs1 = isolated_creds1.get_inputs(project_obj1) + proj_connection1 = 
project_obj1.get_project_connections() self.logger.info( "Update quota for tenant %s to: \n %s by admin tenat " % @@ -162,6 +178,7 @@ def test_quota_update_of_new_project_by_admin(self): "Quota for tenant %s still set to : \n %s as expected " % (proj_inputs1.project_name, quota_show_dict)) + @preposttest_wrapper def test_quota_update_of_specific_tenant(self): '''Quota update of one tenant should not affect @@ -185,7 +202,6 @@ def test_quota_update_of_specific_tenant(self): username=self.inputs.stack_user, password=self.inputs.stack_password, project_name=project_name, - vnc_lib_h=self.vnc_lib, connections=self.connections)) user_fixture.add_user_to_tenant(project_name, 'test_usr', 'Member') assert project_fixture_obj.verify_on_setup() @@ -198,7 +214,6 @@ def test_quota_update_of_specific_tenant(self): username='test1', password='test1231', project_name=project_name1, - vnc_lib_h=self.vnc_lib, connections=self.connections)) user_fixture1.add_user_to_tenant(project_name1, 'test_usr1', 'Member') assert project_fixture_obj1.verify_on_setup() diff --git a/scripts/neutron/test_routers.py b/scripts/neutron/test_routers.py index 85868e0b8..bec78af0c 100644 --- a/scripts/neutron/test_routers.py +++ b/scripts/neutron/test_routers.py @@ -56,9 +56,9 @@ def test_basic_router_behavior(self): vn1_fixture = self.create_vn(vn1_name, vn1_subnets) vn2_fixture = self.create_vn(vn2_name, vn2_subnets) vn1_vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') vn2_vm1_fixture = self.create_vm(vn2_fixture, vn2_vm1_name, - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') assert vn1_vm1_fixture.wait_till_vm_is_up() assert vn2_vm1_fixture.wait_till_vm_is_up() assert vn1_vm1_fixture.ping_with_certainty(vn2_vm1_fixture.vm_ip, @@ -111,9 +111,9 @@ def test_router_admin_state_up(self): vn1_fixture = self.create_vn(vn1_name, vn1_subnets) vn2_fixture = self.create_vn(vn2_name, vn2_subnets) vn1_vm1_fixture = 
self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') vn2_vm1_fixture = self.create_vm(vn2_fixture, vn2_vm1_name, - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') assert vn1_vm1_fixture.wait_till_vm_is_up() assert vn2_vm1_fixture.wait_till_vm_is_up() assert vn1_vm1_fixture.ping_with_certainty(vn2_vm1_fixture.vm_ip, @@ -159,9 +159,9 @@ def test_router_with_existing_ports(self): vn1_fixture = self.create_vn(vn1_name, vn1_subnets) vn2_fixture = self.create_vn(vn2_name, vn2_subnets) vn1_vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') vn2_vm1_fixture = self.create_vm(vn2_fixture, vn2_vm1_name, - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') assert vn1_vm1_fixture.wait_till_vm_is_up() assert vn2_vm1_fixture.wait_till_vm_is_up() assert vn1_vm1_fixture.ping_with_certainty(vn2_vm1_fixture.vm_ip, @@ -234,42 +234,15 @@ def test_router_with_alloc_pool_and_gateway(self): 'Gateway IP(%s) is not the same as Router intf IP(%s)' % ( vn2_gateway_ip, router_port_ip) -class TestRouterSNAT(BaseNeutronTest): - - @classmethod - def setUpClass(cls): - super(TestRouterSNAT, cls).setUpClass() - - @classmethod - def tearDownClass(cls): - super(TestRouterSNAT, cls).tearDownClass() - - - def is_test_applicable(self): - if os.environ.get('MX_GW_TEST') != '1': - return (False, 'Skipping Test. 
Env variable MX_GW_TEST is not set') - return (True, None) - @test.attr(type=['ci_sanity']) + #@test.attr(type=['sanity', 'suite1']) @preposttest_wrapper - def test_basic_snat_behavior_without_external_connectivity(self): - '''Create an external network, a router - set router-gateway to external network - launch a private network and attach it to router - validate left vm pinging right vm through Snat - ''' + def test_bug_id_1599672(self): + '''Create a VN with router_external: True + Delete Subnet - vm1_name = get_random_name('vm_left') - vn1_name = get_random_name('vn_private') - vn1_subnets = [get_random_cidr()] - self.allow_default_sg_to_allow_all_on_project(self.inputs.project_name) - vn1_fixture = self.create_vn(vn1_name, vn1_subnets) - vn1_fixture.verify_on_setup() - vm1_fixture = self.create_vm(vn1_fixture, vm1_name, - image_name='ubuntu') - vm1_fixture.wait_till_vm_is_up() - - ext_vn_name = get_random_name('ext_vn') + ''' + ext_vn_name = "BugVn" ext_subnets = [get_random_cidr()] ext_vn_fixture = self.useFixture( @@ -282,20 +255,29 @@ def test_basic_snat_behavior_without_external_connectivity(self): router_external=True)) ext_vn_fixture.verify_on_setup() + self.quantum_h.create_floatingip(fip_pool_vn_id=ext_vn_fixture.vn_id) + sn_info=ext_vn_fixture.get_subnets() + sn_id= sn_info[0]['id'] + assert self.quantum_h.delete_sn(sn_id), 'Delete SN failed, Refer bug 1599672' + assert self.quantum_h.delete_vn(ext_vn_fixture.vn_id), 'Delete VN failed, Refer bug 1599672' + return True - vm2_name = get_random_name('vm_right') - vm2_fixture = self.create_vm(ext_vn_fixture, vm2_name, - image_name='ubuntu') - vm2_fixture.wait_till_vm_is_up() - router_name = get_random_name('router1') - router_dict = self.create_router(router_name) - router_rsp = self.quantum_h.router_gateway_set( - router_dict['id'], - ext_vn_fixture.vn_id) - self.add_vn_to_router(router_dict['id'], vn1_fixture) - assert vm1_fixture.ping_with_certainty( - vm2_fixture.vm_ip), 'Ping from vm_left to vm_right 
through snat failed' +class TestRouterSNAT(BaseNeutronTest): + + @classmethod + def setUpClass(cls): + super(TestRouterSNAT, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + super(TestRouterSNAT, cls).tearDownClass() + + + def is_test_applicable(self): + if os.environ.get('MX_GW_TEST') != '1': + return (False, 'Skipping Test. Env variable MX_GW_TEST is not set') + return (True, None) @test.attr(type=['sanity']) @preposttest_wrapper @@ -359,7 +341,6 @@ def test_basic_snat_behavior_with_diff_projects(self): username=self.inputs.stack_user, password=self.inputs.stack_password, project_name=project_name, - vnc_lib_h=self.vnc_lib, connections=self.connections)) user_fixture.add_user_to_tenant(project_name, 'test_usr', 'admin') assert project_fixture_obj.verify_on_setup() @@ -372,7 +353,6 @@ def test_basic_snat_behavior_with_diff_projects(self): username=self.inputs.stack_user, password=self.inputs.stack_password, project_name=project_name1, - vnc_lib_h=self.vnc_lib, connections=self.connections)) user_fixture1.add_user_to_tenant(project_name1, 'test_usr1', 'admin') assert project_fixture_obj1.verify_on_setup() @@ -448,7 +428,6 @@ def test_basic_snat_behavior_with_fip_and_diff_projects(self): username=self.inputs.stack_user, password=self.inputs.stack_password, project_name=project_name, - vnc_lib_h=self.vnc_lib, connections=self.connections)) user_fixture.add_user_to_tenant(project_name, 'test_usr', 'admin') assert project_fixture_obj.verify_on_setup() @@ -461,7 +440,6 @@ def test_basic_snat_behavior_with_fip_and_diff_projects(self): username=self.inputs.stack_user, password=self.inputs.stack_password, project_name=project_name1, - vnc_lib_h=self.vnc_lib, connections=self.connections)) user_fixture1.add_user_to_tenant(project_name1, 'test_usr1', 'admin') assert project_fixture_obj1.verify_on_setup() diff --git a/scripts/neutron/test_subnets.py b/scripts/neutron/test_subnets.py index d2709366d..c26e08e9d 100644 --- a/scripts/neutron/test_subnets.py +++ 
b/scripts/neutron/test_subnets.py @@ -43,8 +43,7 @@ def test_subnet_host_routes(self): vn1_gateway = get_an_ip(vn1_subnets[0], 1) dest_ip = '8.8.8.8' destination = dest_ip + '/32' - # nh IP does not matter, it will always be the default gw - nh = '30.1.1.10' + nh = get_an_ip(vn1_subnets[0], 10) vn1_subnets = [{'cidr': vn1_subnets[0], 'host_routes': [{'destination': destination, 'nexthop': nh}, @@ -140,7 +139,7 @@ def test_gateway(self): vn1_vm1_name = get_random_name('vn1-vm1') vn1_fixture = self.create_vn(vn1_name, vn1_subnets) vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') assert vm1_fixture.wait_till_vm_is_up() output = vm1_fixture.run_cmd_on_vm(['route -n']) route_output = output.values()[0] @@ -214,7 +213,7 @@ def test_allocation_pools(self): vn1_vm1_name = get_random_name('vn1-vm1') vn1_fixture = self.create_vn(vn1_name, vn1_subnets) vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') assert vm1_fixture.wait_till_vm_is_up(), 'VM is not up on reboot!' assert vm1_fixture.vm_ip == get_an_ip(vn1_subnet_cidr, 3),\ 'IP of VM %s should have been %s. It is %s' % ( @@ -222,7 +221,7 @@ def test_allocation_pools(self): vm1_fixture.vm_ip) vm2_fixture = self.create_vm(vn1_fixture, get_random_name('vn1-vm1'), - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') assert vm2_fixture.wait_till_vm_is_up(), 'VM is not up on reboot!' assert vm2_fixture.vm_ip == get_an_ip(vn1_subnet_cidr, 4),\ 'IP of VM %s should have been %s. It is %s' % ( @@ -230,7 +229,7 @@ def test_allocation_pools(self): vm2_fixture.vm_ip) vm3_fixture = self.create_vm(vn1_fixture, get_random_name('vn1-vm1'), - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') assert vm3_fixture.wait_till_vm_is_up(), 'VM is not up on reboot!' assert vm3_fixture.vm_ip == get_an_ip(vn1_subnet_cidr, 6),\ 'IP of VM %s should have been %s. 
It is %s' % ( @@ -238,7 +237,7 @@ def test_allocation_pools(self): vm3_fixture.vm_ip) vm4_fixture = self.create_vm(vn1_fixture, get_random_name('vn1-vm1'), - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') assert vm4_fixture.wait_till_vm_status('ERROR'), 'VM %s should '\ 'have failed since allocation pool is full' % (vm4_fixture.vm_name) # end test_allocation_pools @@ -260,16 +259,19 @@ def test_enable_dhcp(self): vn1_fixture = self.create_vn(vn1_name, vn1_subnets) assert vn1_fixture.vn_subnet_objs[0]['enable_dhcp'],\ 'DHCP is not enabled by default in the Subnet!' - + vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, + image_name='cirros') + assert vm1_fixture.wait_till_vm_up(),\ + 'VM not able to boot' + # Update subnet to disable dhcp vn1_subnet_dict = {'enable_dhcp': False} vn1_fixture.update_subnet(vn1_fixture.vn_subnet_objs[0]['id'], vn1_subnet_dict) - vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec') - assert vm1_fixture.wait_till_vm_up(),\ - 'Unable to detect if VM booted up using console log' - + vm1_fixture.reboot() + time.sleep(5) + assert vm1_fixture.wait_till_vm_is_active(), 'VM is not up on reboot!' + time.sleep(30) console_log = vm1_fixture.get_console_output() assert 'No lease, failing' in console_log,\ 'Failure while determining if VM got a DHCP IP. Log : %s' % ( @@ -283,13 +285,11 @@ def test_enable_dhcp(self): vm1_fixture.reboot() time.sleep(5) assert vm1_fixture.wait_till_vm_is_up(), 'VM is not up on reboot!' 
- result_output = vm1_fixture.run_cmd_on_vm(['ifconfig -a']) output = result_output.values()[0] assert vm1_fixture.vm_ip in output,\ 'VM did not get an IP %s after enabling DHCP' % (vm1_fixture.vm_ip) self.logger.info('VM got DHCP IP after subnet-dhcp is enabled..OK') - # end test_enable_dhcp @preposttest_wrapper @@ -312,15 +312,15 @@ def test_ip_allocation_order(self): self.logger.info('Create first VM in the VN') vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') assert vm1_fixture.wait_till_vm_status('ACTIVE'),\ 'VM %s is not active' % (vm1_fixture.vm_name) # Create a second VM in second subnet port_obj = self.create_port(net_id=vn1_fixture.vn_id, - subnet_id=vn1_fixture.vn_subnet_objs[1]['id']) + fixed_ips=[{'subnet_id': vn1_fixture.vn_subnet_objs[1]['id']}]) vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name, - image_name='cirros-0.3.0-x86_64-uec', + image_name='cirros', port_ids=[port_obj['id']]) assert vm2_fixture.wait_till_vm_status('ACTIVE'),\ 'VM %s is not active' % (vm2_fixture.vm_name) @@ -328,7 +328,7 @@ def test_ip_allocation_order(self): # Create third VM and check if it gets IP from first subnet self.logger.info('Create a third VM in the VN') vm3_fixture = self.create_vm(vn1_fixture, vn1_vm3_name, - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') assert vm3_fixture.wait_till_vm_is_up(),\ 'VM %s is not fully up' % (vm3_fixture.vm_name) assert IPAddress(vm3_fixture.vm_ip) in IPNetwork(vn1_subnet_list[0]),\ diff --git a/scripts/neutron/test_virtual_network.py b/scripts/neutron/test_virtual_network.py index 41a294ee0..337f6e752 100644 --- a/scripts/neutron/test_virtual_network.py +++ b/scripts/neutron/test_virtual_network.py @@ -59,9 +59,9 @@ def test_virtual_network_admin_state_up(self): vn1_vm2_name = get_random_name('vn1-vm2') vn1_fixture = self.create_vn(vn1_name, vn1_subnets) vn1_vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name, - 
image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') vn1_vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name, - image_name='cirros-0.3.0-x86_64-uec') + image_name='cirros') assert vn1_vm1_fixture.wait_till_vm_is_up() assert vn1_vm2_fixture.wait_till_vm_is_up() assert vn1_vm1_fixture.ping_with_certainty(vn1_vm2_fixture.vm_ip) diff --git a/scripts/policy/base.py b/scripts/policy/base.py deleted file mode 100644 index 06eff9d66..000000000 --- a/scripts/policy/base.py +++ /dev/null @@ -1,37 +0,0 @@ -import test -from common import isolated_creds - - -class BasePolicyTest(test.BaseTestCase): - - @classmethod - def setUpClass(cls): - super(BasePolicyTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - cls.orch = cls.connections.orch - cls.quantum_h= cls.connections.quantum_h - cls.nova_h = cls.connections.nova_h - cls.vnc_lib= cls.connections.vnc_lib - cls.agent_inspect= cls.connections.agent_inspect - cls.cn_inspect= cls.connections.cn_inspect - cls.api_s_inspect = cls.connections.api_server_inspect - cls.analytics_obj=cls.connections.analytics_obj - # end setUpClass - - @classmethod - def tearDownClass(cls): - cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() - super(BasePolicyTest, cls).tearDownClass() - # end tearDownClass -#end BasePolicyTest class - diff --git a/scripts/policy/sdn_basic_topology.py b/scripts/policy/sdn_basic_topology.py deleted file mode 100644 index 5c6307143..000000000 --- a/scripts/policy/sdn_basic_topology.py +++ /dev/null @@ -1,94 +0,0 @@ -'''*******AUTO-GENERATED TOPOLOGY*********''' - - -class sdn_basic_config (): - - def __init__(self, domain='default-domain', 
project='admin', username=None, password=None): - # - # Domain and project defaults: Do not change until support for - # non-default is tested! - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet0', 'vnet1', 'vnet2', 'vnet3'] - # - # Define network info for each VN: - if self.project == 'vCenter': - # For vcenter, only one subnet per VN is supported - self.vn_nets = { - 'vnet0': ['10.1.1.0/24'], - 'vnet1': ['12.1.1.0/24'], - 'vnet2': ['14.1.1.0/24'], - 'vnet3': ['16.1.1.0/24'] - } - else: - self.vn_nets = { - 'vnet0': ['10.1.1.0/24', '11.1.1.0/24'], - 'vnet1': ['12.1.1.0/24', '13.1.1.0/24'], - 'vnet2': ['14.1.1.0/24', '15.1.1.0/24'], - 'vnet3': ['16.1.1.0/24', '17.1.1.0/24'] - } - # - # Define network policies - self.policy_list = ['policy0', 'policy1', 'policy2', - 'policy3', 'policy4', 'policy5', 'policy6', 'policy7'] - self.vn_policy = {'vnet0': ['policy0', 'policy1'], 'vnet1': [ - 'policy2', 'policy3'], 'vnet2': ['policy4', 'policy5'], 'vnet3': ['policy6', 'policy7']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = {'vmc0': 'vnet0', 'vmc1': 'vnet2'} - # - # Define network policy rules - self.rules = {} - - self.rules['policy0'] = [{'direction': '>', 'protocol': 'any', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'any', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'any', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'any', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - 
self.rules['policy1'] = [{'direction': '>', 'protocol': 'any', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'any', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'any', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'any', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - self.rules['policy2'] = [{'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet1', 'source_network': 'vnet1', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet1', 'source_network': 'vnet1', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet1', 'source_network': 'vnet1', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet1', 'source_network': 'vnet1', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - self.rules['policy3'] = [{'direction': '>', 'protocol': 'icmp', 'dest_network': 'vnet1', 'source_network': 'vnet1', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'icmp', 'dest_network': 'vnet1', 'source_network': 'vnet1', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'icmp', 'dest_network': 'vnet1', 'source_network': 'vnet1', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'icmp', 'dest_network': 'vnet1', 'source_network': 'vnet1', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - 
self.rules['policy4'] = [{'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet2', 'source_network': 'vnet2', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet2', 'source_network': 'vnet2', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet2', 'source_network': 'vnet2', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet2', 'source_network': 'vnet2', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - self.rules['policy5'] = [{'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet2', 'source_network': 'vnet2', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet2', 'source_network': 'vnet2', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet2', 'source_network': 'vnet2', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet2', 'source_network': 'vnet2', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - self.rules['policy6'] = [{'direction': '>', 'protocol': 'icmp', 'dest_network': 'vnet3', 'source_network': 'vnet3', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'icmp', 'dest_network': 'vnet3', 'source_network': 'vnet3', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'icmp', 'dest_network': 'vnet3', 'source_network': 'vnet3', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'icmp', 'dest_network': 'vnet3', 'source_network': 'vnet3', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - 
self.rules['policy7'] = [{'direction': '>', 'protocol': 'any', 'dest_network': 'vnet3', 'source_network': 'vnet3', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'any', 'dest_network': 'vnet3', 'source_network': 'vnet3', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'any', 'dest_network': 'vnet3', 'source_network': 'vnet3', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'any', 'dest_network': 'vnet3', 'source_network': 'vnet3', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - # end __init__ - -if __name__ == '__main__': - print "Currently topology limited to one domain/project.." - print "Based on need, can be extended to cover config for multiple domain/projects" - print "Running unit test for this module ..." - my_topo = sdn_basic_config(domain='default-domain', project='admin') - x = my_topo.__dict__ - print "\nprinting keys of topology dict:" - for key, value in x.iteritems(): - print key - print - # print "keys & values:" - # for key, value in x.iteritems(): print key, "-->", value - # Use topology_helper to extend/derive data from user-defined topology to help verifications. - # ex. 
get list of all vm's from topology; get list of vn's associated to a - # policy - import topo_helper - topo_h = topo_helper.topology_helper(my_topo) - #vmc_list= topo_h.get_vmc_list() - policy_vn = topo_h.get_policy_vn() - print "printing derived topo data - vn's associated to a policy: \n", policy_vn -# diff --git a/scripts/policy/sdn_single_vm_multiple_policy_topology.py b/scripts/policy/sdn_single_vm_multiple_policy_topology.py deleted file mode 100644 index 3f69ea8e3..000000000 --- a/scripts/policy/sdn_single_vm_multiple_policy_topology.py +++ /dev/null @@ -1,87 +0,0 @@ -'''*******AUTO-GENERATED TOPOLOGY*********''' - - -class sdn_single_vm_multiple_policy_config (): - - def __init__(self, domain='default-domain', project='admin', username=None, password=None): - # - # Domain and project defaults: Do not change until support for - # non-default is tested! - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet0'] - # - # Define network info for each VN: - if self.project == 'vCenter': - # For vcenter, only one subnet per VN is supported - self.vn_nets = {'vnet0': ['10.1.1.0/24']} - else: - self.vn_nets = {'vnet0': ['10.1.1.0/24', '11.1.1.0/24']} - # - # Define network policies - self.policy_list = ['policy0', 'policy1', 'policy2', 'policy3', - 'policy4', 'policy5', 'policy6', 'policy7', 'policy8', 'policy9'] - self.vn_policy = { - 'vnet0': ['policy0', 'policy1', 'policy2', 'policy3', - 'policy4', 'policy5', 'policy6', 'policy7', 'policy8', 'policy9']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = {'vmc0': 'vnet0'} - # - # Define network policy rules - self.rules = {} - - self.rules['policy0'] = [{'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, 
{'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - self.rules['policy1'] = [{'direction': '>', 'protocol': 'icmp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'icmp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'icmp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'icmp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - self.rules['policy2'] = [{'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - self.rules['policy3'] = [{'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': 
'>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - self.rules['policy4'] = [{'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - self.rules['policy5'] = [{'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - self.rules['policy6'] = [{'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 
'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - self.rules['policy7'] = [{'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - self.rules['policy8'] = [{'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - - self.rules['policy9'] = [{'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'tcp', 
'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [ - 1, 1]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}] - # end __init__ - -if __name__ == '__main__': - print "Currently topology limited to one domain/project.." - print "Based on need, can be extended to cover config for multiple domain/projects" - print - my_topo = sdn_single_vm_multiple_policy_config( - domain='default-domain', project='admin') - x = my_topo.__dict__ - # print "keys only:" - # for key, value in x.iteritems(): print key - # print - # print "keys & values:" - # for key, value in x.iteritems(): print key, "-->", value - import topo_helper - topo_h = topo_helper.topology_helper(my_topo) - #vmc_list= topo_h.get_vmc_list() - policy_vn = topo_h.get_policy_vn() -# diff --git a/scripts/policy/sdn_single_vm_policy_topology.py b/scripts/policy/sdn_single_vm_policy_topology.py deleted file mode 100644 index e7e0c07d4..000000000 --- a/scripts/policy/sdn_single_vm_policy_topology.py +++ /dev/null @@ -1,68 +0,0 @@ -'''*******AUTO-GENERATED TOPOLOGY*********''' - -import sys - - -class sdn_single_vm_policy_config (): - - def __init__(self, domain='default-domain', project='admin', username=None, password=None): - # - # Domain and project defaults: Do not change until support for - # non-default is tested! 
- self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet0'] - # - # Define network info for each VN: - self.vn_nets = {'vnet0': ['10.1.1.0/24']} - # - # Define network policies - self.policy_list = ['policy0', 'policy1', 'policy2'] - self.vn_policy = {'vnet0': ['policy0', 'policy1']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = {'vmc0': 'vnet0'} - # - # Define network policy rules - self.rules = {} - - self.rules[ - 'policy0'] = [{'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, - {'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [1, 1]}, - {'direction': '>', 'protocol': 'tcp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}] - - self.rules[ - 'policy1'] = [{'direction': '>', 'protocol': 'icmp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': 'any'}] - - self.rules[ - 'policy2'] = [{'direction': '>', 'protocol': 'udp', 'dest_network': 'vnet0', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [10, 10]}] - # end __init__ - -if __name__ == '__main__': - print "Currently topology limited to one domain/project.." 
- print "Based on need, can be extended to cover config for multiple domain/projects" - print - my_topo = sdn_single_vm_policy_config( - domain='default-domain', project='admin') - x = my_topo.__dict__ - # print "keys only:" - # for key, value in x.iteritems(): print key - # print - # print "keys & values:" - # for key, value in x.iteritems(): print key, "-->", value - import topo_helper - topo_h = topo_helper.topology_helper(my_topo) - #vmc_list= topo_h.get_vmc_list() - policy_vn = topo_h.get_policy_vn() - vmc_list = topo_h.get_vmc_list() - policy_vn = topo_h.get_policy_vn() - # To unit test topology: - if len(sys.argv) > 1 and sys.argv[1] == 'test': - topo_h.test_module() -# diff --git a/scripts/policy/test_policy.py b/scripts/policy/test_policy.py index 4b104ddc0..a5df3b2da 100644 --- a/scripts/policy/test_policy.py +++ b/scripts/policy/test_policy.py @@ -11,6 +11,9 @@ from tcutils.test_lib.test_utils import assertEqual import sdn_basic_topology import os +import test_policy_basic + +af_test = 'dual' class TestBasicPolicyConfig(BasePolicyTest): @@ -51,99 +54,6 @@ def create_vm( flavor=flavor, node_name=node_name)) - @test.attr(type=['sanity','ci_sanity','quick_sanity', 'vcenter']) - @preposttest_wrapper - def test_policy(self): - """ Configure policies based on topology and run policy related verifications. 
- """ - result = True - # - # Get config for test from topology - topology_class_name = sdn_basic_topology.sdn_basic_config - self.logger.info( - "Scenario for the test used is: %s" % - (topology_class_name)) - # set project name - try: - # provided by wrapper module if run in parallel test env - topo = topology_class_name( - project=self.project.project_name, - username=self.project.username, - password=self.project.password) - except NameError: - topo = topology_class_name() - # - # Test setup: Configure policy, VN, & VM - # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]} - # Returned topo is of following format: - # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture} - setup_obj = self.useFixture( - sdnTopoSetupFixture(self.connections, topo)) - out = setup_obj.topo_setup() - assertEqual(out['result'], True, out['msg']) - if out['result']: - topo, config_topo = out['data'] - # - # Verify [and assert on fail] after setup - # Calling system policy verification, pick any policy fixture to - # access fixture verification - policy_name = topo.policy_list[0] - system_vna_verify_policy( - self, - config_topo['policy'][policy_name], - topo, - 'setup') - return True - # end test_policy - - @test.attr(type=['sanity','ci_sanity','quick_sanity', 'vcenter']) - @preposttest_wrapper - def test_policy_to_deny(self): - ''' Test to validate that with policy having rule to disable icmp within the VN, ping between VMs should fail - 1. Pick 2 VN from resource pool which have one VM in each - 2. Create policy with icmp deny rule - 3. Associate policy to both VN - 4. Ping from one VM to another. 
Ping should fail - Pass criteria: Step 2,3 and 4 should pass - ''' - vn1_name = get_random_name('vn1') - vn1_subnets = ['192.168.10.0/24'] - policy_name = get_random_name('policy1') - rules = [ - { - 'direction': '<>', 'simple_action': 'deny', - 'protocol': 'icmp', - 'source_network': vn1_name, - 'dest_network': vn1_name, - }, - ] - policy_fixture = self.useFixture( - PolicyFixture( - policy_name=policy_name, rules_list=rules, inputs=self.inputs, - connections=self.connections)) - vn1_fixture = self.create_vn(vn1_name, vn1_subnets) - vn1_fixture.bind_policies( - [policy_fixture.policy_fq_name], vn1_fixture.vn_id) - self.addCleanup(vn1_fixture.unbind_policies, - vn1_fixture.vn_id, [policy_fixture.policy_fq_name]) - assert vn1_fixture.verify_on_setup() - - vn1_vm1_name = get_random_name('vn1_vm1') - vn1_vm2_name = get_random_name('vn1_vm2') - vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name) - vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name) - vm1_fixture.wait_till_vm_is_up() - vm2_fixture.wait_till_vm_is_up() - if vm1_fixture.ping_to_ip(vm2_fixture.vm_ip): - self.logger.error( - 'Ping from %s to %s passed,expected it to fail' % - (vm1_fixture.vm_name, vm2_fixture.vm_name)) - self.logger.info('Doing verifications on the fixtures now..') - assert vm1_fixture.verify_on_setup() - assert vm2_fixture.verify_on_setup() - return True - # end test_policy_to_deny - @preposttest_wrapper def test_policy_with_multi_vn_in_vm(self): ''' Test to validate policy action in VM with vnic's in multiple VN's with different policies. 
@@ -247,8 +157,16 @@ def test_policy_with_multi_vn_in_vm(self): self.nova_h.wait_till_vm_is_up(vm2_fixture.vm_obj) # For multi-vn vm, configure ip address for 2nd interface multivn_vm_ip_list = vm1_fixture.vm_ips - intf_conf_cmd = "ifconfig eth1 %s netmask 255.255.255.0" % multivn_vm_ip_list[ - 1] + interfaces = vm1_fixture.get_vm_interface_list() + interface1 = vm1_fixture.get_vm_interface_list(ip=multivn_vm_ip_list[0])[0] + interfaces.remove(interface1) + interface2 = interfaces[0] + if 'dual' == self.inputs.get_af(): + intf_conf_cmd = "ifconfig %s inet6 add %s" % (interface2, + multivn_vm_ip_list[3]) + else: + intf_conf_cmd = "ifconfig %s %s netmask 255.255.255.0" % (interface2, + multivn_vm_ip_list[1]) vm_cmds = (intf_conf_cmd, 'ifconfig -a') for cmd in vm_cmds: cmd_to_output = [cmd] @@ -265,25 +183,29 @@ def test_policy_with_multi_vn_in_vm(self): self.logger.info( "Ping from multi-vn vm to vm2, with no allow rule in the VN where default gw is part of, traffic should fail") result = vm1_fixture.ping_with_certainty( - vm2_fixture.vm_ip, - expectation=False) + expectation=False,dst_vm_fixture=vm2_fixture) assertEqual(result, True, "ping passed which is not expected") # Configure VM to reroute traffic to interface belonging to different # VN self.logger.info( "Direct traffic to gw which is part of VN with allow policy to destination VN, traffic should pass now") - i = ' route add -net %s netmask 255.255.255.0 gw %s dev eth1' % ( - vn3_subnets[0].split('/')[0], multivn_vm_ip_list[1]) - cmd_to_output = [i] + cmd_to_output = [] + if 'dual' == self.inputs.get_af(): + cmd = ' route add -net %s netmask 255.255.255.0 gw %s dev %s' % ( + vn3_subnets[0].split('/')[0], multivn_vm_ip_list[2], interface2) + cmd_to_output.append(' ip -6 route add %s dev %s' % (vn3_subnets[1], interface2)) + else: + cmd = ' route add -net %s netmask 255.255.255.0 gw %s dev %s' % ( + vn3_subnets[0].split('/')[0], multivn_vm_ip_list[1], interface2) + cmd_to_output.append(cmd) 
vm1_fixture.run_cmd_on_vm(cmds=cmd_to_output, as_sudo=True) - output = vm1_fixture.return_output_cmd_dict[i] + output = vm1_fixture.return_output_cmd_dict[cmd] # Ping test from multi-vn vm to peer vn, result will be based on action # defined in policy attached to VN which has the default gw for VM self.logger.info( "Ping from multi-vn vm to vm2, with allow rule in the VN where network gw is part of, traffic should pass") result = vm1_fixture.ping_with_certainty( - vm2_fixture.vm_ip, - expectation=True) + expectation=True,dst_vm_fixture=vm2_fixture) assertEqual(result, True, "ping failed which is not expected") return True # end test_policy_with_multi_vn_in_vm @@ -391,84 +313,6 @@ def test_policy_protocol_summary(self): # end of class TestBasicPolicyConfig - -class TestBasicPolicyNegative(BasePolicyTest): - - '''Negative tests''' - - _interface = 'json' - - @classmethod - def setUpClass(cls): - super(TestBasicPolicyNegative, cls).setUpClass() - - def runTest(self): - pass - - @test.attr(type=['sanity','ci_sanity', 'vcenter']) - @preposttest_wrapper - def test_remove_policy_with_ref(self): - ''' This tests the following scenarios. - 1. Test to validate that policy removal will fail when it referenced with VN. - 2. validate vn_policy data in api-s against quantum-vn data, when created and unbind policy from VN thru quantum APIs. - 3. validate policy data in api-s against quantum-policy data, when created and deleted thru quantum APIs. 
- ''' - vn1_name = 'vn4' - vn1_subnets = ['10.1.1.0/24'] - policy_name = 'policy1' - rules = [ - { - 'direction': '<>', 'simple_action': 'pass', - 'protocol': 'icmp', - 'source_network': vn1_name, - 'dest_network': vn1_name, - }, - ] - policy_fixture = self.useFixture( - PolicyFixture( - policy_name=policy_name, - rules_list=rules, - inputs=self.inputs, - connections=self.connections)) - vn1_fixture = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_name=vn1_name, - inputs=self.inputs, - subnets=vn1_subnets, - policy_objs=[ - policy_fixture.policy_obj])) - assert vn1_fixture.verify_on_setup() - ret = policy_fixture.verify_on_setup() - if ret['result'] == False: - self.logger.error( - "Policy %s verification failed after setup" % policy_name) - assert ret['result'], ret['msg'] - - self.logger.info( - "Done with setup and verification, moving onto test ..") - # try to remove policy which was referenced with VN. - policy_removal = True - pol_id = None - if self.quantum_h: - policy_removal = self.quantum_h.delete_policy(policy_fixture.get_id()) - else: - try: - self.vnc_lib.network_policy_delete(id=policy_fixture.get_id()) - except Exception as e: - policy_removal = False - self.assertFalse( - policy_removal, - 'Policy removal succeed as not expected since policy is referenced with VN') - #assert vn1_fixture.verify_on_setup() - # policy_fixture.verify_policy_in_api_server() - return True - # end test_remove_policy_with_ref - -# end of class TestBasicPolicyNegative - - class TestBasicPolicyRouting(BasePolicyTest): ''' Check route import/exports based on policy config''' @@ -687,138 +531,78 @@ def test_policy_RT_import_export(self): # end of class TestBasicPolicyRouting +class TestBasicPolicyIpv4v6(test_policy_basic.TestBasicPolicy): + @classmethod + def setUpClass(cls): + super(TestBasicPolicyIpv4v6, cls).setUpClass() + cls.inputs.set_af(af_test) -class TestBasicPolicyModify(BasePolicyTest): + def 
is_test_applicable(self): + if self.inputs.orchestrator == 'vcenter' and not self.orch.is_feature_supported('ipv6'): + return(False, 'Skipping IPv6 Test on vcenter setup') + return (True, None) - '''Policy modification related tests''' + @test.attr(type=['sanity', 'quick_sanity']) + @preposttest_wrapper + def test_policy(self): + super(TestBasicPolicyIpv4v6, self).test_policy() - _interface = 'json' + @test.attr(type=['sanity', 'quick_sanity']) + @preposttest_wrapper + def test_policy_to_deny(self): + super(TestBasicPolicyIpv4v6, self).test_policy_to_deny() +class TestBasicPolicyNegativeIpv4v6(test_policy_basic.TestBasicPolicyNegative): @classmethod def setUpClass(cls): - super(TestBasicPolicyModify, cls).setUpClass() + super(TestBasicPolicyNegativeIpv4v6, cls).setUpClass() + cls.inputs.set_af(af_test) - def runTest(self): - pass + def is_test_applicable(self): + if self.inputs.orchestrator == 'vcenter' and not self.orch.is_feature_supported('ipv6'): + return(False, 'Skipping IPv6 Test on vcenter setup') + return (True, None) - @test.attr(type=['sanity', 'ci_sanity', 'vcenter']) + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_remove_policy_with_ref(self): + super(TestBasicPolicyNegativeIpv4v6, self).test_remove_policy_with_ref() + +class TestBasicPolicyModifyIpv4v6(test_policy_basic.TestBasicPolicyModify): + @classmethod + def setUpClass(cls): + super(TestBasicPolicyModifyIpv4v6, cls).setUpClass() + cls.inputs.set_af(af_test) + + def is_test_applicable(self): + if self.inputs.orchestrator == 'vcenter' and not self.orch.is_feature_supported('ipv6'): + return(False, 'Skipping IPv6 Test on vcenter setup') + return (True, None) + + @test.attr(type=['sanity']) @preposttest_wrapper def test_policy_modify_vn_policy(self): - """ Configure policies based on topology; - """ - ### - # Get config for test from topology - # very simple topo will do, one vn, one vm, one policy, 3 rules - from . 
import sdn_single_vm_policy_topology - topology_class_name = sdn_single_vm_policy_topology.sdn_single_vm_policy_config + super(TestBasicPolicyModifyIpv4v6, self).test_policy_modify_vn_policy() - self.logger.info( - "Scenario for the test used is: %s" % - (topology_class_name)) - # set project name - try: - # provided by wrapper module if run in parallel test env - topo = topology_class_name( - project=self.project.project_name, - username=self.project.username, - password=self.project.password) - except NameError: - topo = topology_class_name() - ### - # Test setup: Configure policy, VN, & VM - # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]} - # Returned topo is of following format: - # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture} - setup_obj = self.useFixture( - sdnTopoSetupFixture( - self.connections, - topo)) - out = setup_obj.topo_setup() - assertEqual(out['result'], True, out['msg']) - if out['result']: - topo, config_topo = out['data'] - ### - # Verify [and assert on fail] after setup - # Calling system policy verification, pick any policy fixture to - # access fixture verification - policy_name = topo.policy_list[0] - system_vna_verify_policy( - self, - config_topo['policy'][policy_name], - topo, - 'setup') - ### - # Test procedure: - # Test adding new policy to VN's exiting policy list - state = "add policy: " - test_vm = topo.vmc_list[0] - test_vn = topo.vn_of_vm[test_vm] - # Init test data, take backup of current topology - initial_vn_policy_list = copy.copy(topo.vn_policy[test_vn]) - new_policy_to_add = policy_test_utils.get_policy_not_in_vn( - initial_vn_policy_list, - topo.policy_list) - if not new_policy_to_add: - result = 'False' - msg = "test %s cannot be run as required config not available in topology; aborting test" - self.logger.info(msg) - assertEqual(result, True, msg) - initial_policy_vn_list = copy.copy(topo.policy_vn[new_policy_to_add]) - new_vn_policy_list = 
copy.copy(initial_vn_policy_list) - new_policy_vn_list = copy.copy(initial_policy_vn_list) - new_vn_policy_list.append(new_policy_to_add) - new_policy_vn_list.append(test_vn) - test_vn_fix = config_topo['vn'][test_vn] - test_vn_id = test_vn_fix.vn_id - # configure new policy - config_topo['policy'][new_policy_to_add] = self.useFixture( - PolicyFixture( - policy_name=new_policy_to_add, - rules_list=topo.rules[new_policy_to_add], - inputs=self.inputs, - connections=self.connections)) - # get new policy_set to be pushed for the vn - test_policy_fq_names = [] - for policy in new_vn_policy_list: - name = config_topo['policy'][policy].policy_fq_name - test_policy_fq_names.append(name) - self.logger.info( - "adding policy %s to vn %s" % - (new_policy_to_add, test_vn)) - test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id) - # wait for tables update before checking after making changes to system - time.sleep(5) - self.logger.info( - "new policy list of vn %s is %s" % - (test_vn, new_vn_policy_list)) - # update expected topology with this new info for verification - topo.vn_policy[test_vn] = new_vn_policy_list - topo.policy_vn[new_policy_to_add] = new_policy_vn_list - system_vna_verify_policy( - self, - config_topo['policy'][new_policy_to_add], - topo, - state) - # Test unbinding all policies from VN - state = "unbinding all policies" - test_vn_fix.unbind_policies(test_vn_id) - # wait for tables update before checking after making changes to system - time.sleep(5) - current_vn_policy_list = new_vn_policy_list - new_vn_policy_list = [] - self.logger.info( - "new policy list of vn %s is %s" % - (test_vn, new_vn_policy_list)) - # update expected topology with this new info for verification - topo.vn_policy[test_vn] = new_vn_policy_list - for policy in current_vn_policy_list: - topo.policy_vn[policy].remove(test_vn) - system_vna_verify_policy( - self, - config_topo['policy'][new_policy_to_add], - topo, - state) - return True - # end test_policy_modify +class 
TestBasicPolicyConfigIpv4v6(TestBasicPolicyConfig): + @classmethod + def setUpClass(cls): + super(TestBasicPolicyConfig, cls).setUpClass() + cls.inputs.set_af(af_test) + + def is_test_applicable(self): + if self.inputs.orchestrator == 'vcenter' and not self.orch.is_feature_supported('ipv6'): + return(False, 'Skipping IPv6 Test on vcenter setup') + return (True, None) + +class TestBasicPolicyRoutingIpv4v6(TestBasicPolicyRouting): + @classmethod + def setUpClass(cls): + super(TestBasicPolicyRoutingIpv4v6, cls).setUpClass() + cls.inputs.set_af(af_test) + + def is_test_applicable(self): + if self.inputs.orchestrator == 'vcenter' and not self.orch.is_feature_supported('ipv6'): + return(False, 'Skipping IPv6 Test on vcenter setup') + return (True, None) -# end of class TestBasicPolicyModify diff --git a/scripts/policy/test_policy_acl.py b/scripts/policy/test_policy_acl.py index 164aa9d44..bb3501062 100644 --- a/scripts/policy/test_policy_acl.py +++ b/scripts/policy/test_policy_acl.py @@ -14,6 +14,10 @@ from policy_test import PolicyFixture from vn_policy_test import VN_Policy_Fixture from test import attr +from netaddr import IPNetwork +from common.policy import policy_test_utils + +af_test = 'dual' class TestPolicyAcl(BasePolicyTest): @@ -187,8 +191,8 @@ def test_policy_inheritance_src_vn_dst_pol(self): # create VM self.setup_vm() - ret = self.VM11_fixture.ping_with_certainty(self.VM21_fixture.vm_ip, \ - expectation=True) + ret = self.VM11_fixture.ping_with_certainty(expectation=True, + dst_vm_fixture=self.VM21_fixture) if ret == True : self.logger.info("Test with src as VN and dst as policy PASSED") @@ -286,8 +290,8 @@ def test_policy_inheritance_src_pol_dst_vn(self): # create VM self.setup_vm() - ret = self.VM11_fixture.ping_with_certainty(self.VM21_fixture.vm_ip, \ - expectation=True) + ret = self.VM11_fixture.ping_with_certainty(expectation=True, + dst_vm_fixture=self.VM21_fixture) if ret == True : self.logger.info("Test with src as policy and dst as VN PASSED") @@ 
-385,8 +389,8 @@ def test_policy_inheritance_src_any_dst_pol(self): # create VM self.setup_vm() - ret = self.VM11_fixture.ping_with_certainty(self.VM21_fixture.vm_ip, \ - expectation=True) + ret = self.VM11_fixture.ping_with_certainty(expectation=True, + dst_vm_fixture=self.VM21_fixture) if ret == True : self.logger.info("Test with src as any and dst as policy PASSED") @@ -485,8 +489,8 @@ def test_policy_inheritance_src_pol_dst_any(self): # create VM self.setup_vm() - ret = self.VM11_fixture.ping_with_certainty(self.VM21_fixture.vm_ip, \ - expectation=True) + ret = self.VM11_fixture.ping_with_certainty(expectation=True, + dst_vm_fixture=self.VM21_fixture) if ret == True : self.logger.info("Test with src as policy and dst as any PASSED") @@ -503,17 +507,22 @@ def test_policy_cidr_src_policy_dst_cidr(self): """Test cases to test policy CIDR""" """Policy Rule :- source = Policy, destination = CIDR.""" result = True + af = self.inputs.get_af() # create Ipam and VN self.setup_ipam_vn() - VN2_subnet = self.VN2_fixture.get_cidrs()[0] + VN2_subnet_v4 = self.VN2_fixture.get_cidrs(af='v4')[0] + if 'v6' == af or 'dual' == af: + VN2_subnet_v6 = self.VN2_fixture.get_cidrs(af='v6')[0] + else: + VN2_subnet_v6 = None # create policy policy_name = 'policy12' rules = [] rules = [{'direction': '<>', 'protocol': 'icmp', - 'dest_subnet': VN2_subnet, + 'dest_subnet': VN2_subnet_v4, 'source_policy': 'policy13', 'dst_ports': 'any', 'simple_action': 'deny', @@ -527,6 +536,9 @@ def test_policy_cidr_src_policy_dst_cidr(self): 'simple_action': 'pass', 'src_ports': 'any'}] + rules = policy_test_utils.update_cidr_rules_with_ipv6(af, rules, + {VN2_subnet_v4:VN2_subnet_v6}) + policy12_fixture = self.useFixture( PolicyFixture( policy_name=policy_name, @@ -539,7 +551,7 @@ def test_policy_cidr_src_policy_dst_cidr(self): rules = [{'direction': '<>', 'protocol': 'icmp', 'dest_policy': 'policy13', - 'source_subnet': VN2_subnet, + 'source_subnet': VN2_subnet_v4, 'dst_ports': 'any', 'simple_action': 
'deny', 'src_ports': 'any' @@ -552,6 +564,9 @@ def test_policy_cidr_src_policy_dst_cidr(self): 'simple_action': 'pass', 'src_ports': 'any'}] + rules = policy_test_utils.update_cidr_rules_with_ipv6(af, rules, + {VN2_subnet_v4:VN2_subnet_v6}) + policy21_fixture = self.useFixture( PolicyFixture( policy_name=policy_name, @@ -600,8 +615,8 @@ def test_policy_cidr_src_policy_dst_cidr(self): # create VM self.setup_vm() - ret = self.VM11_fixture.ping_with_certainty(self.VM21_fixture.vm_ip, \ - expectation=False) + ret = self.VM11_fixture.ping_with_certainty(expectation=False, + dst_vm_fixture=self.VM21_fixture) if ret == True : cmd = "flow -l | grep %s -A1 | grep %s -A1 " % ( self.VM11_fixture.vm_ip, self.VM21_fixture.vm_ip) @@ -631,18 +646,25 @@ def test_policy_cidr_src_vn_dst_cidr(self): """Test cases to test policy CIDR""" """Policy Rule :- source = VN, destination = CIDR.""" result = True + af = self.inputs.get_af() # create Ipam and VN self.setup_ipam_vn() - VN1_subnet = self.VN1_fixture.get_cidrs()[0] - VN2_subnet = self.VN2_fixture.get_cidrs()[0] + VN1_subnet_v4 = self.VN1_fixture.get_cidrs(af='v4')[0] + VN2_subnet_v4 = self.VN2_fixture.get_cidrs(af='v4')[0] + if 'v6' == af or 'dual' == af: + VN1_subnet_v6 = self.VN1_fixture.get_cidrs(af='v6')[0] + VN2_subnet_v6 = self.VN2_fixture.get_cidrs(af='v6')[0] + else: + VN1_subnet_v6 = None + VN2_subnet_v6 = None # create policy policy_name = 'policy12' rules = [] rules = [{'direction': '<>', 'protocol': 'icmp', - 'dest_subnet': VN2_subnet, + 'dest_subnet': VN2_subnet_v4, 'source_network': 'VN1', 'dst_ports': 'any', 'simple_action': 'deny', @@ -656,6 +678,9 @@ def test_policy_cidr_src_vn_dst_cidr(self): 'simple_action': 'pass', 'src_ports': 'any'}] + rules = policy_test_utils.update_cidr_rules_with_ipv6(af, rules, + {VN2_subnet_v4:VN2_subnet_v6}) + policy12_fixture = self.useFixture( PolicyFixture( policy_name=policy_name, @@ -667,7 +692,7 @@ def test_policy_cidr_src_vn_dst_cidr(self): rules = [] rules = [{'direction': '<>', 
'protocol': 'icmp', - 'dest_subnet': VN1_subnet, + 'dest_subnet': VN1_subnet_v4, 'source_network': 'VN2', 'dst_ports': 'any', 'simple_action': 'deny', @@ -681,6 +706,9 @@ def test_policy_cidr_src_vn_dst_cidr(self): 'simple_action': 'pass', 'src_ports': 'any'}] + rules = policy_test_utils.update_cidr_rules_with_ipv6(af, rules, + {VN1_subnet_v4:VN1_subnet_v6}) + policy21_fixture = self.useFixture( PolicyFixture( policy_name=policy_name, @@ -710,8 +738,8 @@ def test_policy_cidr_src_vn_dst_cidr(self): # create VM self.setup_vm() - ret = self.VM11_fixture.ping_with_certainty(self.VM21_fixture.vm_ip, \ - expectation=False) + ret = self.VM11_fixture.ping_with_certainty(expectation=False, + dst_vm_fixture=self.VM21_fixture) if ret == True : cmd = "flow -l | grep %s -A1 | grep %s -A1 " % ( @@ -740,19 +768,28 @@ def test_policy_cidr_src_duplicate_vn_dst_cidr(self): """Policy Rule1 :- source = VN-A, destination = CIDR-A.""" """Policy Rule2 :- source = VN-A, destination = CIDR-B.""" result = True + af = self.inputs.get_af() # create Ipam and VN self.setup_ipam_vn() - VN1_subnet = self.VN1_fixture.get_cidrs()[0] - VN2_subnet = self.VN2_fixture.get_cidrs()[0] - VN3_subnet = self.VN3_fixture.get_cidrs()[0] + VN1_subnet_v4 = self.VN1_fixture.get_cidrs(af='v4')[0] + VN2_subnet_v4 = self.VN2_fixture.get_cidrs(af='v4')[0] + VN3_subnet_v4 = self.VN3_fixture.get_cidrs(af='v4')[0] + if 'v6' == af or 'dual' == af: + VN1_subnet_v6 = self.VN1_fixture.get_cidrs(af='v6')[0] + VN2_subnet_v6 = self.VN2_fixture.get_cidrs(af='v6')[0] + VN3_subnet_v6 = self.VN3_fixture.get_cidrs(af='v6')[0] + else: + VN1_subnet_v6 = None + VN2_subnet_v6 = None + VN3_subnet_v6 = None # create policy policy_name = 'policy123' rules = [] rules = [{'direction': '<>', 'protocol': 'icmp', - 'dest_subnet': VN2_subnet, + 'dest_subnet': VN2_subnet_v4, 'source_network': 'VN1', 'dst_ports': 'any', 'simple_action': 'deny', @@ -760,7 +797,7 @@ def test_policy_cidr_src_duplicate_vn_dst_cidr(self): }, {'direction': '<>', 
'protocol': 'icmp', - 'dest_subnet': VN3_subnet, + 'dest_subnet': VN3_subnet_v4, 'source_network': 'VN1', 'dst_ports': 'any', 'simple_action': 'deny', @@ -782,6 +819,10 @@ def test_policy_cidr_src_duplicate_vn_dst_cidr(self): 'simple_action': 'pass', 'src_ports': 'any'}] + rules = policy_test_utils.update_cidr_rules_with_ipv6(af, rules, + {VN2_subnet_v4:VN2_subnet_v6, + VN3_subnet_v4:VN3_subnet_v6}) + policy123_fixture = self.useFixture( PolicyFixture( policy_name=policy_name, @@ -793,7 +834,7 @@ def test_policy_cidr_src_duplicate_vn_dst_cidr(self): rules = [] rules = [{'direction': '<>', 'protocol': 'icmp', - 'dest_subnet': VN1_subnet, + 'dest_subnet': VN1_subnet_v4, 'source_network': 'VN2', 'dst_ports': 'any', 'simple_action': 'deny', @@ -807,6 +848,9 @@ def test_policy_cidr_src_duplicate_vn_dst_cidr(self): 'simple_action': 'pass', 'src_ports': 'any'}] + rules = policy_test_utils.update_cidr_rules_with_ipv6(af, rules, + {VN1_subnet_v4:VN1_subnet_v6}) + policy21_fixture = self.useFixture( PolicyFixture( policy_name=policy_name, @@ -818,7 +862,7 @@ def test_policy_cidr_src_duplicate_vn_dst_cidr(self): rules = [] rules = [{'direction': '<>', 'protocol': 'icmp', - 'dest_subnet': VN1_subnet, + 'dest_subnet': VN1_subnet_v4, 'source_network': 'VN3', 'dst_ports': 'any', 'simple_action': 'deny', @@ -832,6 +876,9 @@ def test_policy_cidr_src_duplicate_vn_dst_cidr(self): 'simple_action': 'pass', 'src_ports': 'any'}] + rules = policy_test_utils.update_cidr_rules_with_ipv6(af, rules, + {VN1_subnet_v4:VN1_subnet_v6}) + policy31_fixture = self.useFixture( PolicyFixture( policy_name=policy_name, @@ -870,8 +917,8 @@ def test_policy_cidr_src_duplicate_vn_dst_cidr(self): # create VM self.setup_vm() - ret = self.VM11_fixture.ping_with_certainty(self.VM21_fixture.vm_ip, \ - expectation=False) + ret = self.VM11_fixture.ping_with_certainty(expectation=False, + dst_vm_fixture=self.VM21_fixture) if ret == True : cmd = "flow -l | grep %s -A1 | grep %s -A1 " % ( @@ -892,8 +939,8 @@ def 
test_policy_cidr_src_duplicate_vn_dst_cidr(self): ret = False flow_record = 0 - ret = self.VM11_fixture.ping_with_certainty(self.VM31_fixture.vm_ip, \ - expectation=False) + ret = self.VM11_fixture.ping_with_certainty(expectation=False, + dst_vm_fixture=self.VM31_fixture) if ret == True : cmd = "flow -l | grep %s -A1 | grep %s -A1 " % ( @@ -920,11 +967,18 @@ def test_policy_cidr_src_cidr_dst_any(self): """Policy Rule :- source = CIDR, destination = ANY.""" """Policy Rule :- source = ANY, destination = CIDR.""" result = True + af = self.inputs.get_af() # create Ipam and VN self.setup_ipam_vn() - VN1_subnet = self.VN1_fixture.get_cidrs()[0] - VN2_subnet = self.VN2_fixture.get_cidrs()[0] + VN1_subnet_v4 = self.VN1_fixture.get_cidrs(af='v4')[0] + VN2_subnet_v4 = self.VN2_fixture.get_cidrs(af='v4')[0] + if 'v6' == af or 'dual' == af: + VN1_subnet_v6 = self.VN1_fixture.get_cidrs(af='v6')[0] + VN2_subnet_v6 = self.VN2_fixture.get_cidrs(af='v6')[0] + else: + VN1_subnet_v6 = None + VN2_subnet_v6 = None # create policy policy_name = 'policy12' @@ -932,7 +986,7 @@ def test_policy_cidr_src_cidr_dst_any(self): rules = [{'direction': '<>', 'protocol': 'icmp', 'dest_network': 'any', - 'source_subnet': VN1_subnet, + 'source_subnet': VN1_subnet_v4, 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': 'any' @@ -945,6 +999,9 @@ def test_policy_cidr_src_cidr_dst_any(self): 'simple_action': 'pass', 'src_ports': 'any'}] + rules = policy_test_utils.update_cidr_rules_with_ipv6(af, rules, + {VN1_subnet_v4:VN1_subnet_v6}) + policy12_fixture = self.useFixture( PolicyFixture( policy_name=policy_name, @@ -956,7 +1013,7 @@ def test_policy_cidr_src_cidr_dst_any(self): rules = [] rules = [{'direction': '<>', 'protocol': 'icmp', - 'dest_subnet': VN1_subnet, + 'dest_subnet': VN1_subnet_v4, 'source_network': 'any', 'dst_ports': 'any', 'simple_action': 'deny', @@ -970,6 +1027,9 @@ def test_policy_cidr_src_cidr_dst_any(self): 'simple_action': 'pass', 'src_ports': 'any'}] + rules = 
policy_test_utils.update_cidr_rules_with_ipv6(af, rules, + {VN1_subnet_v4:VN1_subnet_v6}) + policy21_fixture = self.useFixture( PolicyFixture( policy_name=policy_name, @@ -999,11 +1059,11 @@ def test_policy_cidr_src_cidr_dst_any(self): # create VM self.setup_vm() - ret1 = self.VM11_fixture.ping_with_certainty(self.VM21_fixture.vm_ip, \ - expectation=False) + ret1 = self.VM11_fixture.ping_with_certainty(expectation=False, + dst_vm_fixture=self.VM21_fixture) - ret2 = self.VM21_fixture.ping_with_certainty(self.VM11_fixture.vm_ip, \ - expectation=False) + ret2 = self.VM21_fixture.ping_with_certainty(expectation=False, + dst_vm_fixture=self.VM11_fixture) if ((ret1 == True) and (ret2 == True)): cmd = "flow -l | grep %s -A1 | grep %s -A1 " % ( @@ -1033,12 +1093,10 @@ def test_policy_cidr_src_cidr_dst_cidr(self): """Policy1 Rule :- source = CIDR-VM11, destination = CIDR-VM12.""" """Policy2 Rule :- source = CIDR-VM11, destination = CIDR-VM21.""" result = True + af = self.inputs.get_af() # create Ipam and VN self.setup_ipam_vn() - VN1_subnet = self.VN1_fixture.get_cidrs()[0] - VN2_subnet = self.VN2_fixture.get_cidrs()[0] - VN3_subnet = self.VN3_fixture.get_cidrs()[0] # create VM self.setup_vm() @@ -1051,8 +1109,8 @@ def test_policy_cidr_src_cidr_dst_cidr(self): self.VM12_fixture.wait_till_vm_is_up() #Check initial connectivity without policies in place. 
- ret = self.VM11_fixture.ping_with_certainty(self.VM12_fixture.vm_ip, \ - expectation=True) + ret = self.VM11_fixture.ping_with_certainty(expectation=True, + dst_vm_fixture=self.VM12_fixture) if ret == True : self.logger.info("ICMP traffic is allowed between VMs in same VN") else: @@ -1060,8 +1118,8 @@ def test_policy_cidr_src_cidr_dst_cidr(self): self.logger.error( "ICMP traffic is not allowed between VMs in same VN, which is wrong") - ret = self.VM11_fixture.ping_with_certainty(self.VM21_fixture.vm_ip, \ - expectation=False) + ret = self.VM11_fixture.ping_with_certainty(expectation=False, + dst_vm_fixture=self.VM21_fixture) if ret == True : self.logger.info("ICMP traffic is not allowed between VMs accross VNs") else: @@ -1071,10 +1129,18 @@ def test_policy_cidr_src_cidr_dst_cidr(self): if result == False: return result - #get the VM IP Addresses with 32 bit mask in cidr format. - vm11_ip = self.VM11_fixture.vm_ip + '/32' - vm12_ip = self.VM12_fixture.vm_ip + '/32' - vm21_ip = self.VM21_fixture.vm_ip + '/32' + #get the VM IP Addresses in cidr format. 
+ vm11_ip = str(IPNetwork(self.VM11_fixture.get_vm_ips(af='v4')[0])) + vm12_ip = str(IPNetwork(self.VM12_fixture.get_vm_ips(af='v4')[0])) + vm21_ip = str(IPNetwork(self.VM21_fixture.get_vm_ips(af='v4')[0])) + if 'v6' == af or 'dual' == af: + vm11_ipv6 = str(IPNetwork(self.VM11_fixture.get_vm_ips(af='v6')[0])) + vm12_ipv6 = str(IPNetwork(self.VM12_fixture.get_vm_ips(af='v6')[0])) + vm21_ipv6 = str(IPNetwork(self.VM21_fixture.get_vm_ips(af='v6')[0])) + else: + vm11_ipv6 = None + vm12_ipv6 = None + vm21_ipv6 = None # create policy policy_name = 'policy1112' @@ -1095,6 +1161,10 @@ def test_policy_cidr_src_cidr_dst_cidr(self): 'simple_action': 'pass', 'src_ports': 'any'}] + rules = policy_test_utils.update_cidr_rules_with_ipv6(af, rules, + {vm11_ip:vm11_ipv6, + vm12_ip:vm12_ipv6}) + policy1112_fixture = self.useFixture( PolicyFixture( policy_name=policy_name, @@ -1120,6 +1190,10 @@ def test_policy_cidr_src_cidr_dst_cidr(self): 'simple_action': 'pass', 'src_ports': 'any'}] + rules = policy_test_utils.update_cidr_rules_with_ipv6(af, rules, + {vm11_ip:vm11_ipv6, + vm12_ip:vm12_ipv6}) + policy1211_fixture = self.useFixture( PolicyFixture( policy_name=policy_name, @@ -1145,6 +1219,10 @@ def test_policy_cidr_src_cidr_dst_cidr(self): 'simple_action': 'pass', 'src_ports': 'any'}] + rules = policy_test_utils.update_cidr_rules_with_ipv6(af, rules, + {vm11_ip:vm11_ipv6, + vm21_ip:vm21_ipv6}) + policy1121_fixture = self.useFixture( PolicyFixture( policy_name=policy_name, @@ -1170,6 +1248,10 @@ def test_policy_cidr_src_cidr_dst_cidr(self): 'simple_action': 'pass', 'src_ports': 'any'}] + rules = policy_test_utils.update_cidr_rules_with_ipv6(af, rules, + {vm11_ip:vm11_ipv6, + vm21_ip:vm21_ipv6}) + policy2111_fixture = self.useFixture( PolicyFixture( policy_name=policy_name, @@ -1202,8 +1284,8 @@ def test_policy_cidr_src_cidr_dst_cidr(self): #Test traffic with the policies having cidr as src and dst, #attached to the respective networks. 
- ret = self.VM11_fixture.ping_with_certainty(self.VM12_fixture.vm_ip, \ - expectation=False) + ret = self.VM11_fixture.ping_with_certainty(expectation=False, + dst_vm_fixture=self.VM12_fixture) if ret == True : cmd = "flow -l | grep %s -A1 | grep %s -A1 " % ( self.VM11_fixture.vm_ip, self.VM12_fixture.vm_ip) @@ -1229,8 +1311,8 @@ def test_policy_cidr_src_cidr_dst_cidr(self): ret = False flow_record = 0 - ret = self.VM11_fixture.ping_with_certainty(self.VM21_fixture.vm_ip, \ - expectation=True) + ret = self.VM11_fixture.ping_with_certainty(expectation=True, + dst_vm_fixture=self.VM21_fixture) if ret == True : cmd = "flow -l | grep %s -A1 | grep %s -A1 " % ( self.VM11_fixture.vm_ip, self.VM21_fixture.vm_ip) @@ -1259,5 +1341,88 @@ def test_policy_cidr_src_cidr_dst_cidr(self): return result # end test_policy_cidr_src_cidr_dst_cidr + @tcutils.wrappers.preposttest_wrapper + def test_route_leaking_pass_protocol_src_cidr_dst_cidr(self): + """Test case to test route leaking with specific protocol""" + """Policy Rule :- source = CIDR, destination = CIDR.""" + result = True + # create Ipam and VN + self.setup_ipam_vn() + VN1_subnet = self.VN1_fixture.get_cidrs()[0] + VN2_subnet = self.VN2_fixture.get_cidrs()[0] + # create policy + policy_name = 'policy12' + rules = [] + rules = [{'direction': '<>', + 'protocol': 'icmp', + 'dest_subnet': VN2_subnet, + 'source_subnet': VN1_subnet, + 'dst_ports': 'any', + 'simple_action': 'deny', + 'src_ports': 'any' + }, + {'direction': '<>', + 'protocol': 'tcp', + 'dest_network': 'VN2', + 'source_network': 'VN1', + 'dst_ports': 'any', + 'simple_action': 'pass', + 'src_ports': 'any'}] + + policy12_fixture = self.useFixture( + PolicyFixture( + policy_name=policy_name, + rules_list=rules, + inputs=self.inputs, + connections=self.connections)) + + # attach policy to VN + VN1_policy_fixture = self.useFixture( + VN_Policy_Fixture( + connections=self.connections, + vn_name=self.VN1_fixture.vn_name, + policy_obj={self.VN1_fixture.vn_name : 
[policy12_fixture.policy_obj]}, + vn_obj={self.VN1_fixture.vn_name : self.VN1_fixture}, + vn_policys=['policy12'], + project_name=self.project.project_name)) + + VN2_policy_fixture = self.useFixture( + VN_Policy_Fixture( + connections=self.connections, + vn_name=self.VN2_fixture.vn_name, + policy_obj={self.VN2_fixture.vn_name : [policy12_fixture.policy_obj]}, + vn_obj={self.VN2_fixture.vn_name : self.VN2_fixture}, + vn_policys=['policy12'], + project_name=self.project.project_name)) + # create VM + self.setup_vm() + agent_inspect_h = self.agent_inspect[self.VM11_fixture.vm_node_ip] + vrf_id = agent_inspect_h.get_vna_vrf_id(self.VN1_fixture.vn_fq_name) + route = agent_inspect_h.get_vna_route(vrf_id= vrf_id[0], ip=self.VM21_fixture.vm_ip) + self.logger.debug("Route value : %s" % route) + if route: + self.logger.info("Route of VN2 found in VN1 database. Route leaking successful") + else: + self.logger.error("Route of VN2 not found in VN1 database. Route leaking failed") + result = False + assert result, "Route leaking between VN1 and VN2 failed" + assert self.VM11_fixture.ping_with_certainty(self.VM21_fixture.vm_ip, \ + expectation=False),"ICMP deny rule working unexpectedly and allowing ICMP" + assert self.VM11_fixture.check_file_transfer(self.VM21_fixture, mode='scp',\ + size='100', expectation=True),"TCP Allow rule working unexpectedly and denying TCP as well" + # end test_route_leaking_pass_protocol_src_cidr_dst_cidr # end PolicyAclTests + +class TestPolicyAclIpv4v6(TestPolicyAcl): + + @classmethod + def setUpClass(cls): + super(TestPolicyAclIpv4v6, cls).setUpClass() + cls.inputs.set_af(af_test) + + def is_test_applicable(self): + if self.inputs.orchestrator == 'vcenter' and not self.orch.is_feature_supported('ipv6'): + return(False, 'Skipping IPv6 Test on vcenter setup') + return (True, None) + diff --git a/scripts/policy/test_policy_api.py b/scripts/policy/test_policy_api.py index 9942fd70f..5ca54d781 100644 --- a/scripts/policy/test_policy_api.py +++ 
b/scripts/policy/test_policy_api.py @@ -484,6 +484,8 @@ def test_policy_rules_scaling_with_ping_api(self): id=str(vn_fixture[vn_of_vm[vm_name]]._obj.uuid)) vn_quantum_obj = self.quantum_h.get_vn_obj_if_present( vn_read.name) + assert vn_read, "VN %s not found in API" % (vn_of_vm[vm_name]) + assert vn_quantum_obj, "VN %s not found in neutron" % (vn_of_vm[vm_name]) # Launch VM with 'ubuntu-traffic' image which has scapy pkg # remember to call install_pkg after VM bringup # Bring up with 2G RAM to support multiple traffic streams.. @@ -495,26 +497,15 @@ def test_policy_rules_scaling_with_ping_api(self): flavor='contrail_flavor_small', image_name='ubuntu-traffic', vm_name=vm_name)) - vm_fixture[vm_name].verify_vm_launched() - vm_node_ip = self.inputs.host_data[ - self.nova_h.get_nova_host_of_vm( - vm_fixture[vm_name].vm_obj)]['host_ip'] + for vm_name in vm_names: self.logger.info("Calling VM verifications... ") - time.sleep(5) # wait for 5secs after launching VM's - vm_verify_out = None - vm_verify_out = vm_fixture[vm_name].verify_on_setup() - if not vm_verify_out: - m = "%s - vm verify in agent after launch failed" % vm_node_ip - self.logger.error(m) - return vm_verify_out + assert vm_fixture[vm_name].verify_on_setup() for vm_name in vm_names: out = self.nova_h.wait_till_vm_is_up( vm_fixture[vm_name].vm_obj) if not out: self.logger.error("VM failed to come up") return out - else: - vm_fixture[vm_name].install_pkg("Traffic") # Test ping with scaled policy and rules dst_vm = vm_names[len(vm_names) - 1] # 'vm2' dst_vm_fixture = vm_fixture[dst_vm] diff --git a/scripts/policy/test_policy_detailed.py b/scripts/policy/test_policy_detailed.py index 226c2089a..e6f6ca2f7 100644 --- a/scripts/policy/test_policy_detailed.py +++ b/scripts/policy/test_policy_detailed.py @@ -12,92 +12,9 @@ from tcutils.test_lib.test_utils import assertEqual import sdn_single_vm_multiple_policy_topology import sdn_policy_traffic_test_topo +import test_policy_basic - -class 
TestDetailedPolicy0(BasePolicyTest): - _interface = 'json' - - @classmethod - def setUpClass(cls): - super(TestDetailedPolicy0, cls).setUpClass() - - @test.attr(type=['sanity', 'ci_sanity', 'vcenter']) - @preposttest_wrapper - def test_repeated_policy_modify(self): - """ Configure policies based on topology; Replace VN's existing policy [same policy name but with different rule set] multiple times and verify. - """ - ### - # Get config for test from topology - # very simple topo will do, one vn, one vm, multiple policies with n - # rules - topology_class_name = sdn_single_vm_multiple_policy_topology.sdn_single_vm_multiple_policy_config - self.logger.info( - "Scenario for the test used is: %s" % - (topology_class_name)) - # set project name - try: - # provided by wrapper module if run in parallel test env - topo = topology_class_name( - project=self.project.project_name, - username=self.project.username, - password=self.project.password) - except NameError: - topo = topology_class_name() - ### - # Test setup: Configure policy, VN, & VM - # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]} - # Returned topo is of following format: - # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture} - setup_obj = self.useFixture( - sdnTopoSetupFixture( - self.connections, - topo)) - out = setup_obj.topo_setup() - assertEqual(out['result'], True, out['msg']) - if out['result']: - topo, config_topo = out['data'] - ### - # Verify [and assert on fail] after setup - # Calling system policy verification, pick any policy fixture to - # access fixture verification - policy_name = topo.policy_list[0] - system_vna_verify_policy( - self, - config_topo['policy'][policy_name], - topo, - 'setup') - ### - # Test procedure: - # Test repeated update of a policy attached to a VM - test_vm = topo.vmc_list[0] - test_vn = topo.vn_of_vm[test_vm] - test_vn_fix = config_topo['vn'][test_vn] - test_vn_id = test_vn_fix.vn_id - for policy in topo.policy_list: - # 
set new policy for test_vn to policy - test_policy_fq_names = [] - name = config_topo['policy'][policy].policy_fq_name - test_policy_fq_names.append(name) - state = "policy for %s updated to %s" % (test_vn, policy) - test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id) - # wait for tables update before checking after making changes to - # system - time.sleep(5) - self.logger.info( - "new policy list of vn %s is %s" % - (test_vn, policy)) - # update expected topology with this new info for verification - updated_topo = policy_test_utils.update_topo(topo, test_vn, policy) - system_vna_verify_policy( - self, - config_topo['policy'][policy], - updated_topo, - state) - return True - # end test_repeated_policy_modify - -# end of class TestDetailedPolicy0 - +af_test = 'dual' class TestDetailedPolicy1(BasePolicyTest): _interface = 'json' @@ -208,7 +125,8 @@ def repeated_policy_update_test_with_ping(self, topo): expectedResult = True if matching_rule_action[ test_proto] == 'pass' else False ret = test_vm1_fixture.ping_with_certainty( - test_vm2_fixture.vm_ip, expectation=expectedResult) + test_vm2_fixture.vm_ip, expectation=expectedResult, + dst_vm_fixture=test_vm2_fixture) result_msg = "vm ping test result after %s is: %s" % (state, ret) self.logger.info(result_msg) if not ret: @@ -309,7 +227,8 @@ def test_policy_rules_scaling_with_ping(self): vm2_fixture.wait_till_vm_is_up() self.logger.info("Verify ping to vm %s" % (vn1_vm2_name)) ret = vm1_fixture.ping_with_certainty( - vm2_fixture.vm_ip, expectation=True) + vm2_fixture.vm_ip, expectation=True, + dst_vm_fixture=vm2_fixture) result_msg = "vm ping test result to vm %s is: %s" % ( vn1_vm2_name, ret) self.logger.info(result_msg) @@ -333,7 +252,7 @@ def setUpClass(cls): @preposttest_wrapper def test_scale_policy_with_ping(self): - """ Test focus is on the scale of VM/VN created..have polict attached to all VN's and ping from one VM to all. 
+ """ Test focus is on the scale of VM/VN created.have policy attached to all VN's and ping from one VM to all. """ topology_class_name = sdn_policy_traffic_test_topo.sdn_20vn_20vm_config self.logger.info( @@ -400,7 +319,8 @@ def policy_scale_test_with_ping(self, topo): expectedResult = True if matching_rule_action[ test_proto] == 'pass' else False ret = test_vm1_fixture.ping_with_certainty( - test_vm2_fixture.vm_ip, expectation=expectedResult) + test_vm2_fixture.vm_ip, expectation=expectedResult, + dst_vm_fixture=test_vm2_fixture) result_msg = "vm ping test result to vm %s is: %s" % (vmi, ret) self.logger.info(result_msg) if not ret: @@ -409,3 +329,53 @@ def policy_scale_test_with_ping(self, topo): self.assertEqual(result, True, msg) return result # end test_policy_with_ping + +class TestDetailedPolicy0Ipv4v6(test_policy_basic.TestDetailedPolicy0): + @classmethod + def setUpClass(cls): + super(TestDetailedPolicy0Ipv4v6, cls).setUpClass() + cls.inputs.set_af(af_test) + + def is_test_applicable(self): + if self.inputs.orchestrator == 'vcenter' and not self.orch.is_feature_supported('ipv6'): + return(False, 'Skipping IPv6 Test on vcenter setup') + return (True, None) + + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_repeated_policy_modify(self): + super(TestDetailedPolicy0Ipv4v6, self).test_repeated_policy_modify() + +class TestDetailedPolicy1Ipv4v6(TestDetailedPolicy1): + @classmethod + def setUpClass(cls): + super(TestDetailedPolicy1Ipv4v6, cls).setUpClass() + cls.inputs.set_af(af_test) + + def is_test_applicable(self): + if self.inputs.orchestrator == 'vcenter' and not self.orch.is_feature_supported('ipv6'): + return(False, 'Skipping IPv6 Test on vcenter setup') + return (True, None) + +class TestDetailedPolicy2Ipv4v6(TestDetailedPolicy2): + @classmethod + def setUpClass(cls): + super(TestDetailedPolicy2Ipv4v6, cls).setUpClass() + cls.inputs.set_af(af_test) + + def is_test_applicable(self): + if self.inputs.orchestrator == 'vcenter' and not 
self.orch.is_feature_supported('ipv6'): + return(False, 'Skipping IPv6 Test on vcenter setup') + return (True, None) + +class TestDetailedPolicy3Ipv4v6(TestDetailedPolicy3): + @classmethod + def setUpClass(cls): + super(TestDetailedPolicy3Ipv4v6, cls).setUpClass() + cls.inputs.set_af(af_test) + + def is_test_applicable(self): + if self.inputs.orchestrator == 'vcenter' and not self.orch.is_feature_supported('ipv6'): + return(False, 'Skipping IPv6 Test on vcenter setup') + return (True, None) + diff --git a/scripts/project/__init__.py b/scripts/project/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/scripts/project/base.py b/scripts/project/base.py deleted file mode 100644 index af9140ca1..000000000 --- a/scripts/project/base.py +++ /dev/null @@ -1,31 +0,0 @@ -import test -from common.connections import ContrailConnections -from common import isolated_creds - -class BaseProjectTest(test.BaseTestCase): - - @classmethod - def setUpClass(cls): - super(BaseProjectTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - cls.quantum_h= cls.connections.quantum_h - cls.nova_h = cls.connections.nova_h - cls.vnc_lib= cls.connections.vnc_lib - cls.agent_inspect= cls.connections.agent_inspect - cls.cn_inspect= cls.connections.cn_inspect - cls.analytics_obj=cls.connections.analytics_obj - #end setUpClass - - @classmethod - def tearDownClass(cls): -# cls.isolated_creds.delete_tenant() - super(BaseProjectTest, cls).tearDownClass() - #end tearDownClass - diff --git a/scripts/project/test_projects.py b/scripts/project/test_projects.py deleted file mode 100644 index e6fe1f268..000000000 --- 
a/scripts/project/test_projects.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import fixtures -import testtools -import time - -from vn_test import * -from vm_test import * -from user_test import UserFixture -from common.connections import ContrailConnections -from tcutils.wrappers import preposttest_wrapper - -from project.base import BaseProjectTest -import test -from tcutils.util import get_random_name -from vnc_api.vnc_api import NoIdError - -class TestProject(BaseProjectTest): - - @classmethod - def setUpClass(cls): - super(TestProject, cls).setUpClass() - - @classmethod - def tearDownClass(cls): - super(TestProject, cls).tearDownClass() - - @test.attr(type=['sanity', 'ci_sanity']) - @preposttest_wrapper - def test_project_add_delete(self): - ''' Validate that a new project can be added and deleted - 1. Create new tenant using keystone and verify it and default SG - 2. Delete tenant and verify - Pass criteria: Step 1 and 2 should pass - ''' - result = True - project_name = get_random_name('project128') - user_fixture= self.useFixture(UserFixture( - connections=self.connections, username=self.inputs.stack_user, - password=self.inputs.stack_password)) - project_fixture_obj = self.useFixture(ProjectFixture( - username=self.inputs.stack_user, - password=self.inputs.stack_password, - project_name=project_name, - vnc_lib_h=self.vnc_lib, - connections=self.connections)) - user_fixture.add_user_to_tenant(project_name, self.inputs.stack_user, 'admin') - assert project_fixture_obj.verify_on_setup() - - # Check if the default SG is present in it - try: - secgroup = self.vnc_lib.security_group_read( - fq_name=[u'default-domain', project_name, 'default']) - self.logger.info('Default SG is present in the new project') - except NoIdError: - assert False, "Default SG is not created in project %s" % (project_name) - return result - # end test_project_add_delete - diff --git a/scripts/rt_filter/base.py b/scripts/rt_filter/base.py index 141db57fc..0b59896ed 100644 --- 
a/scripts/rt_filter/base.py +++ b/scripts/rt_filter/base.py @@ -1,4 +1,4 @@ -import test +import test_v1 from common.connections import ContrailConnections from common import isolated_creds from vm_test import VMFixture @@ -6,24 +6,14 @@ from tcutils.util import retry -class BaseRtFilterTest(test.BaseTestCase): +class BaseRtFilterTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseRtFilterTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, - cls.inputs, ini_file=cls.ini_file, - logger=cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - #cls.connections= ContrailConnections(cls.inputs) cls.quantum_h = cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib = cls.connections.vnc_lib -# cls.logger= cls.inputs.logger cls.agent_inspect = cls.connections.agent_inspect cls.cn_inspect = cls.connections.cn_inspect cls.analytics_obj = cls.connections.analytics_obj @@ -32,8 +22,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - # cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(BaseRtFilterTest, cls).tearDownClass() # end tearDownClass @@ -152,7 +140,7 @@ def verify_rt_entry_removal(self, control_node, route_target): else: self.logger.warn( 'RT %s is still seen in the bgp.rtarget.0 table of the control nodes' % route_target) - if rt_group_entry is None: + if not rt_group_entry: self.logger.info( 'RT %s removed from the RTGroup Table of the control nodes' % route_target) else: diff --git a/scripts/securitygroup/__init__.py b/scripts/securitygroup/__init__.py deleted file mode 100644 index a82e9e238..000000000 --- a/scripts/securitygroup/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Security group tests.""" diff --git a/scripts/securitygroup/base.py 
b/scripts/securitygroup/base.py deleted file mode 100644 index e931f2757..000000000 --- a/scripts/securitygroup/base.py +++ /dev/null @@ -1,260 +0,0 @@ -import test -from vn_test import MultipleVNFixture -from vm_test import MultipleVMFixture -from fabric.api import run, hide, settings -from vn_test import VNFixture -from vm_test import VMFixture -from policy_test import PolicyFixture -from common.policy.config import ConfigPolicy -from security_group import SecurityGroupFixture, get_secgrp_id_from_name -from common import isolated_creds -from tcutils.util import get_random_name, copy_file_to_server, fab_put_file_to_vm -import os -from tcutils.topo.sdn_topo_setup import * - -class BaseSGTest(test.BaseTestCase): - - @classmethod - def setUpClass(cls): - super(BaseSGTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - cls.orch = cls.connections.orch - cls.quantum_h= cls.connections.quantum_h - cls.nova_h = cls.connections.nova_h - cls.vnc_lib= cls.connections.vnc_lib - cls.agent_inspect= cls.connections.agent_inspect - cls.cn_inspect= cls.connections.cn_inspect - cls.analytics_obj=cls.connections.analytics_obj - cls.api_s_inspect = cls.connections.api_server_inspect - - #end setUpClass - - @classmethod - def tearDownClass(cls): - cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() - super(BaseSGTest, cls).tearDownClass() - #end tearDownClass - - def setUp(self): - super(BaseSGTest, self).setUp() - - def tearDown(self): - super(BaseSGTest, self).tearDown() - - def create_sg_test_resources(self): - """Config common resources.""" - self.logger.info("Configuring setup for security group tests.") - - vn_s = {'vn1': 
'20.1.1.0/24', 'vn2': ['10.1.1.0/24']} - self.multi_vn_fixture = self.useFixture(MultipleVNFixture( - connections=self.connections, inputs=self.inputs, subnet_count=2, - vn_name_net=vn_s, project_name=self.inputs.project_name)) - vns = self.multi_vn_fixture.get_all_fixture_obj() - (self.vn1_name, self.vn1_fix) = self.multi_vn_fixture._vn_fixtures[0] - (self.vn2_name, self.vn2_fix) = self.multi_vn_fixture._vn_fixtures[1] - - self.logger.info("Configure security groups required for test.") - self.config_sec_groups() - - self.multi_vm_fixture = self.useFixture(MultipleVMFixture( - project_name=self.inputs.project_name, connections=self.connections, - vm_count_per_vn=3, vn_objs=vns, image_name='ubuntu-traffic', - flavor='contrail_flavor_small')) - vms = self.multi_vm_fixture.get_all_fixture() - (self.vm1_name, self.vm1_fix) = vms[0] - (self.vm2_name, self.vm2_fix) = vms[1] - (self.vm3_name, self.vm3_fix) = vms[2] - (self.vm4_name, self.vm4_fix) = vms[3] - (self.vm5_name, self.vm5_fix) = vms[4] - (self.vm6_name, self.vm6_fix) = vms[5] - - self.logger.info("Adding the sec groups to the VM's") - self.vm1_fix.add_security_group(secgrp=self.sg1_name) - self.vm1_fix.add_security_group(secgrp=self.sg2_name) - self.vm2_fix.add_security_group(secgrp=self.sg2_name) - self.vm4_fix.add_security_group(secgrp=self.sg1_name) - self.vm4_fix.add_security_group(secgrp=self.sg2_name) - self.vm5_fix.add_security_group(secgrp=self.sg1_name) - - self.logger.info("Remove the default sec group form the VM's") - default_secgrp_id = get_secgrp_id_from_name( - self.connections, - ':'.join([self.inputs.domain_name, - self.inputs.project_name, - 'default'])) - self.vm1_fix.remove_security_group(secgrp=default_secgrp_id) - self.vm2_fix.remove_security_group(secgrp=default_secgrp_id) - self.vm4_fix.remove_security_group(secgrp=default_secgrp_id) - self.vm5_fix.remove_security_group(secgrp=default_secgrp_id) - - self.logger.info("Verifying setup of security group tests.") - 
self.verify_sg_test_resources() - - self.logger.info( - "Finished configuring setup for security group tests.") - - - def config_sec_groups(self): - self.sg1_name = 'test_tcp_sec_group' + '_' + get_random_name() - rule = [{'direction': '<>', - 'protocol': 'tcp', - 'dst_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}}, - {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'src_addresses': [{'security_group': 'local'}], - }, - {'direction': '<>', - 'protocol': 'tcp', - 'src_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}}, - {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_addresses': [{'security_group': 'local'}], - }] - - self.sg1_fix = self.config_sec_group(name=self.sg1_name, entries=rule) - - self.sg2_name = 'test_udp_sec_group' + '_' + get_random_name() - rule = [{'direction': '<>', - 'protocol': 'udp', - 'dst_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}}, - {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'src_addresses': [{'security_group': 'local'}], - }, - {'direction': '<>', - 'protocol': 'udp', - 'src_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}}, - {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_addresses': [{'security_group': 'local'}], - }] - self.sg2_fix = self.config_sec_group(name=self.sg2_name, entries=rule) - - def verify_sg_test_resources(self): - """verfiy common resources.""" - self.logger.debug("Verify the configured VN's.") - assert self.multi_vn_fixture.verify_on_setup() - - 
self.logger.debug("Verify the configured VM's.") - assert self.multi_vm_fixture.verify_on_setup() - - self.logger.debug("Verify the configured security groups.") - result, msg = self.sg1_fix.verify_on_setup() - assert result, msg - result, msg = self.sg2_fix.verify_on_setup() - assert result, msg - - self.logger.debug("Verify the attached security groups in the VM.") - result, msg = self.vm1_fix.verify_security_group(self.sg1_name) - assert result, msg - result, msg = self.vm1_fix.verify_security_group(self.sg2_name) - assert result, msg - result, msg = self.vm2_fix.verify_security_group(self.sg2_name) - assert result, msg - result, msg = self.vm4_fix.verify_security_group(self.sg1_name) - assert result, msg - result, msg = self.vm4_fix.verify_security_group(self.sg2_name) - assert result, msg - result, msg = self.vm5_fix.verify_security_group(self.sg1_name) - assert result, msg - - assert self.multi_vm_fixture.wait_for_ssh_on_vm() - - - def config_sec_group(self, name, secgrpid=None, entries=None): - option = self.option - if self.option == 'openstack': - option = 'neutron' - secgrp_fixture = self.useFixture(SecurityGroupFixture(self.inputs, - self.connections, self.inputs.domain_name, self.inputs.project_name, - secgrp_name=name, uuid=secgrpid, secgrp_entries=entries,option=option)) - result, msg = secgrp_fixture.verify_on_setup() - assert result, msg - return secgrp_fixture - - def delete_sec_group(self, secgrp_fix): - secgrp_fix.cleanUp() - self.remove_from_cleanups(secgrp_fix) - - def remove_from_cleanups(self, fix): - for cleanup in self._cleanups: - if fix.cleanUp in cleanup: - self._cleanups.remove(cleanup) - break - - def config_policy_and_attach_to_vn(self, rules): - randomname = get_random_name() - policy_name = "sec_grp_policy_" + randomname - policy_fix = self.config_policy(policy_name, rules) - assert policy_fix.verify_on_setup() - policy_vn1_attach_fix = self.attach_policy_to_vn( - policy_fix, self.vn1_fix) - policy_vn2_attach_fix = 
self.attach_policy_to_vn( - policy_fix, self.vn2_fix) - - def create_sec_group_allow_all(self): - ''' create security group which allows all traffic ''' - - self.sg_allow_all = 'sec_group_allow_all' + '_' + get_random_name() - rule = [{'direction': '<>', - 'protocol': 'any', - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'src_addresses': [{'security_group': 'local'}], - }, - {'direction': '<>', - 'protocol': 'any', - 'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_addresses': [{'security_group': 'local'}], - }] - self.sg_allow_all_fix = self.config_sec_group(name=self.sg_allow_all, entries=rule) - - return self.sg_allow_all_fix.secgrp_id - - def create_topo_setup(self, - topology_class_name, - topo_method): - - topo = topology_class_name() - try: - eval("topo." + topo_method + "(" + - "project='" + self.project.project_name + - "',username='" + self.project.username + - "',password='" + self.project.password + - "',compute_node_list=" + str(self.inputs.compute_ips) + - ",config_option='" + self.option + - "')") - except (NameError, AttributeError): - eval("topo." 
+ topo_method + "(" + - "compute_node_list='" + self.inputs.compute_ips + - "',config_option='" + self.option + - "')") - - setup_obj = self.useFixture( - sdnTopoSetupFixture(self.connections, topo)) - out = setup_obj.topo_setup(VmToNodeMapping=topo.vm_node_map, - config_option=self.option) - self.logger.info("Setup completed with result %s" % (out['result'])) - self.assertEqual(out['result'], True, out['msg']) - if out['result']: - topo_obj, config_topo = out['data'] - - return (topo_obj, config_topo) - -#end class BaseSGTest - diff --git a/scripts/securitygroup/config.py b/scripts/securitygroup/config.py deleted file mode 100644 index 8a2e3c368..000000000 --- a/scripts/securitygroup/config.py +++ /dev/null @@ -1,33 +0,0 @@ -import time - -import paramiko -import fixtures -from fabric.api import run, hide, settings - -from vn_test import VNFixture -from vm_test import VMFixture -from policy_test import PolicyFixture -from common.policy.config import ConfigPolicy -from common.connections import ContrailConnections -from security_group import SecurityGroupFixture - - -class ConfigSecGroup(ConfigPolicy): - - def config_sec_group(self, name, secgrpid=None, entries=None): - secgrp_fixture = self.useFixture(SecurityGroupFixture(self.inputs, - self.connections, self.inputs.domain_name, self.inputs.project_name, - secgrp_name=name, uuid=secgrpid, secgrp_entries=entries)) - result, msg = secgrp_fixture.verify_on_setup() - assert result, msg - return secgrp_fixture - - def delete_sec_group(self, secgrp_fix): - secgrp_fix.cleanUp() - self.remove_from_cleanups(secgrp_fix) - - def remove_from_cleanups(self, fix): - for cleanup in self._cleanups: - if fix.cleanUp in cleanup: - self._cleanups.remove(cleanup) - break diff --git a/scripts/securitygroup/sdn_sg_test_topo.py b/scripts/securitygroup/sdn_sg_test_topo.py deleted file mode 100644 index 85f3507b5..000000000 --- a/scripts/securitygroup/sdn_sg_test_topo.py +++ /dev/null @@ -1,1032 +0,0 @@ -from vnc_api.vnc_api import * 
-from tcutils.util import get_random_name - -################################################################################ -class sdn_4vn_xvm_config (): - def __init__(self, domain= 'default-domain', project= 'admin', compute_node_list= None, username= None, password= None,config_option='openstack'): - print "building dynamic topo" - ## - # Domain and project defaults: Do not change until support for non-default is tested! - self.domain= domain; self.project= project; self.username= username; self.password= password - ## - # Define VN's in the project: - self.vnet_list= ['vnet1','vnet2', 'vnet3', 'vnet4'] - ## - # Define network info for each VN: - if config_option == 'openstack': - self.vn_nets= {'vnet1': ['10.1.1.0/24', '11.1.1.0/24'], 'vnet2': ['10.1.2.0/24', '11.1.2.0/24'], 'vnet3': ['10.1.3.0/24', '11.1.3.0/24'], 'vnet4': ['10.1.4.0/24', '11.1.4.0/24']} - else: - self.vn_nets = { - 'vnet1': [(NetworkIpam(), VnSubnetsType([IpamSubnetType(subnet=SubnetType('10.1.1.0', 24)), IpamSubnetType(subnet=SubnetType('11.1.1.0', 24))]))], - 'vnet2': [(NetworkIpam(), VnSubnetsType([IpamSubnetType(subnet=SubnetType('10.1.2.0', 24)), IpamSubnetType(subnet=SubnetType('11.1.2.0', 24))]))], - 'vnet3': [(NetworkIpam(), VnSubnetsType([IpamSubnetType(subnet=SubnetType('10.1.3.0', 24)), IpamSubnetType(subnet=SubnetType('11.1.3.0', 24))]))], - 'vnet4': [(NetworkIpam(), VnSubnetsType([IpamSubnetType(subnet=SubnetType('10.1.4.0', 24)), IpamSubnetType(subnet=SubnetType('11.1.4.0', 24))]))] - } - - ## - # Define network policies - self.policy_list= ['policy0', 'policy1', 'policy100'] - self.vn_policy= {'vnet1': ['policy0'], 'vnet2': ['policy0'],'vnet3':['policy0'],'vnet4':['policy0']} - - self.vn_of_vm= {'vm1': 'vnet1', 'vm2': 'vnet1', 'vm3': 'vnet1', 'vm4': 'vnet2', 'vm5': 'vnet2', - 'vm6': 'vnet3', 'vm7': 'vnet3', 'vm8': 'vnet3', 'vm9': 'vnet4', 'vm10': 'vnet4','vm11':'vnet4','vm12':'vnet3'} - - #Define the vm to compute node mapping to pin a vm to a particular - #compute node or 
else leave empty. - self.vm_node_map = {} - if compute_node_list is not None: - if len(compute_node_list) == 2: - self.vm_node_map = {'vm1':'CN0', 'vm2':'CN0', 'vm3':'CN1', 'vm4':'CN0', 'vm5':'CN1', - 'vm6':'CN0', 'vm7':'CN0', 'vm8':'CN1', 'vm9':'CN0', 'vm10':'CN1','vm11':'CN0','vm12':'CN1'} - elif len(compute_node_list) > 2: - self.vm_node_map = {'vm1':'CN0', 'vm2':'CN0', 'vm3':'CN2', 'vm4':'CN0', 'vm5':'CN1', 'vm6':'CN0', - 'vm7':'CN0', 'vm8':'CN2', 'vm9':'CN0', 'vm10':'CN1', 'vm11':'CN0','vm12':'CN1'} - - #Logic to create a vm to Compute node mapping. - if self.vm_node_map: - CN = [] - for cn in self.vm_node_map.keys(): - if self.vm_node_map[cn] not in CN: - CN.append(self.vm_node_map[cn]) - my_node_dict = {} - if compute_node_list is not None: - if len(compute_node_list) >= len(CN): - my_node_dict = dict(zip(CN, compute_node_list)) - - if my_node_dict: - for key in my_node_dict: - for key1 in self.vm_node_map: - if self.vm_node_map[key1] == key: - self.vm_node_map[key1] = my_node_dict[key] - - ## - # Define network policy rules - self.rules= {} - # Multiple policies are defined with different action for the test traffic streams.. 
- self.policy_test_order= ['policy0', 'policy1', 'policy0'] - if config_option == 'openstack': - self.rules['policy0']= [ - {'direction': '<>', 'protocol': 'any', 'dest_network': 'any', 'source_network': 'any', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - self.rules['policy1']= [ - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'vnet1', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}, - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'vnet2', 'source_network': 'vnet0', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - self.rules['policy100']= [ - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'any', 'source_network': 'any', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - else: - self.rules['policy0'] = [ - PolicyRuleType(direction='<>', protocol='any', dst_addresses=[AddressType(virtual_network='any')], src_addresses=[AddressType( - virtual_network='any')], dst_ports=[PortType(-1, -1)], action_list=ActionListType(simple_action='pass'), src_ports=[PortType(-1, -1)]) - ] - self.rules['policy1'] = [ - PolicyRuleType(direction='<>', protocol='udp', dst_addresses=[AddressType(virtual_network='vnet1')], src_addresses=[AddressType( - virtual_network='vnet0')], dst_ports=[PortType(-1, -1)], action_list=ActionListType(simple_action='pass'), src_ports=[PortType(-1, -1)]), - PolicyRuleType(direction='<>', protocol='udp', dst_addresses=[AddressType(virtual_network='vnet2')], src_addresses=[AddressType( - virtual_network='vnet0')], dst_ports=[PortType(-1, -1)], action_list=ActionListType(simple_action='pass'), src_ports=[PortType(-1, -1)]) - ] - self.rules['policy100'] = [ - PolicyRuleType(direction='<>', protocol='udp', dst_addresses=[AddressType(virtual_network='any')], src_addresses=[AddressType( - virtual_network='any')], dst_ports=[PortType(-1, -1)], action_list=ActionListType(simple_action='pass'), src_ports=[PortType(-1, -1)]) - ] - - #Define 
the security_group and its rules - # Define security_group name - self.sg_list=['sg_allow_all', 'sg_allow_tcp', 'sg_allow_udp', 'sg_allow_icmp', 'sg_allow_udp_sg'] - self.sg_names = self.sg_list[:] - ## - #Define security_group with vm - self.sg_of_vm = {} - for key in self.vn_of_vm: - self.sg_of_vm[key] = [] - self.sg_of_vm['vm6'] = [self.sg_list[4]]; self.sg_of_vm['vm9'] = [self.sg_list[4]]; self.sg_of_vm['vm10'] = [self.sg_list[4]]; - self.sg_of_vm['vm11'] = [self.sg_list[4]]; self.sg_of_vm['vm12'] = [self.sg_list[4]]; - ##Define the security group rules - import uuid - uuid_1= uuid.uuid1().urn.split(':')[2] - uuid_2= uuid.uuid1().urn.split(':')[2] - self.sg_rules={} - for sg in self.sg_list: - self.sg_rules[sg] = [] - self.sg_rules[self.sg_list[2]]=[ - {'direction' : '>', - 'protocol' : 'udp', - 'dst_addresses': [{'security_group': 'local', 'subnet' : None}], - 'dst_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_addresses': [{'subnet' : {'ip_prefix' : '0.0.0.0', 'ip_prefix_len' : 0}}], - 'rule_uuid': uuid_1 - },{'direction' : '>', - 'protocol' : 'any', - 'src_addresses': [{'security_group': 'local', 'subnet' : None}], - 'dst_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'dst_addresses': [{'subnet' : {'ip_prefix' : '0.0.0.0', 'ip_prefix_len' : 0}}],'rule_uuid': uuid_2}] - - self.sg_rules[self.sg_list[4]]=[ - {'direction' : '>', - 'protocol' : 'udp', - 'dst_addresses': [{'security_group': 'local', 'subnet' : None}], - 'dst_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_addresses': [{'security_group': self.domain + ':'+ self.project+ ':'+ self.sg_list[4]}], - 'rule_uuid': uuid_1 - },{'direction' : '>', - 'protocol' : 'any', - 'src_addresses': [{'security_group': 'local', 'subnet' : None}], - 'dst_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_ports': 
[{'start_port' : 0, 'end_port' : 65535}], - 'dst_addresses': [{'subnet' : {'ip_prefix' : '0.0.0.0', 'ip_prefix_len' : 0}}],'rule_uuid': uuid_2}] - - ## - # Define traffic profile. - self.traffic_profile= [{'src_vm':'vm1', 'dst_vm':'vm2', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'pass'},# intra VN, intra compute, same default SG - {'src_vm':'vm1', 'dst_vm':'vm3', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'pass'},# intra VN, inter compute, same default SG - {'src_vm':'vm1', 'dst_vm':'vm5', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'pass'},# inter VN, inter compute, same default SG - {'src_vm':'vm1', 'dst_vm':'vm4', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'pass'},# inter VN, intra compute, same default SG - {'src_vm':'vm6', 'dst_vm':'vm7', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'},# intra VN, intra compute, diff. SG - {'src_vm':'vm6', 'dst_vm':'vm8', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'},# intra VN, inter compute, diff. SG - {'src_vm':'vm6', 'dst_vm':'vm5', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'},# inter VN, inter compute, diff. SG - {'src_vm':'vm6', 'dst_vm':'vm4', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'},# inter VN, intra compute, diff. 
SG - {'src_vm':'vm9', 'dst_vm':'vm11','proto':'udp','sport':8000,'dport':9000,'exp':'pass'},# intra VN, intra compute, same non-default SG - {'src_vm':'vm9', 'dst_vm':'vm10','proto':'udp','sport':8000,'dport':9000,'exp':'pass'},# intra VN, inter compute, same non-default SG - {'src_vm':'vm9', 'dst_vm':'vm12','proto':'udp','sport':8000,'dport':9000,'exp':'pass'},# inter VN, inter compute, same non-default SG - {'src_vm':'vm9', 'dst_vm':'vm6', 'proto':'udp','sport':8000,'dport':9000,'exp':'pass'}]# inter VN, intra compute, same non-default SG - - # end __init__ -# end class sdn_4vn_xvm_config - -################################################################################ -class sdn_topo_config (): - #2 VN and 4 VM - def build_topo_sg_stateful(self, domain= 'default-domain', project= 'admin', compute_node_list= None, username= None, password= None,config_option='openstack'): - print "building dynamic topo" - ## - # Domain and project defaults: Do not change until support for non-default is tested! 
- self.domain= domain; self.project= project; self.username= username; self.password= password - ## - # Define VN's in the project: - self.vnet_list= ['vnet1','vnet2'] - ## - # Define network info for each VN: - if config_option == 'openstack': - self.vn_nets= {'vnet1': ['10.1.1.0/24', '11.1.1.0/24'], 'vnet2': ['10.1.2.0/24', '11.1.2.0/24']} - else: - self.vn_nets = { - 'vnet1': [(NetworkIpam(), VnSubnetsType([IpamSubnetType(subnet=SubnetType('10.1.1.0', 24)), IpamSubnetType(subnet=SubnetType('11.1.1.0', 24))]))], - 'vnet2': [(NetworkIpam(), VnSubnetsType([IpamSubnetType(subnet=SubnetType('10.1.2.0', 24)), IpamSubnetType(subnet=SubnetType('11.1.2.0', 24))]))] - } - - ## - # Define network policies - self.policy_list= ['policy0'] - self.vn_policy= {'vnet1': ['policy0'], 'vnet2': ['policy0']} - - self.vn_of_vm= {'vm1': 'vnet1', 'vm2': 'vnet1', 'vm3': 'vnet2', 'vm4': 'vnet2'} - - #Define the vm to compute node mapping to pin a vm to a particular - #compute node or else leave empty. - self.vm_node_map = {} - - ## - # Define network policy rules - self.rules= {} - # Multiple policies are defined with different action for the test traffic streams.. 
- self.policy_test_order= ['policy0'] - if config_option == 'openstack': - self.rules['policy0']= [ - {'direction': '<>', 'protocol': 'any', 'dest_network': 'any', 'source_network': 'any', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - else: - self.rules['policy0'] = [ - PolicyRuleType(direction='<>', protocol='any', dst_addresses=[AddressType(virtual_network='any')], src_addresses=[AddressType( - virtual_network='any')], dst_ports=[PortType(-1, -1)], action_list=ActionListType(simple_action='pass'), src_ports=[PortType(-1, -1)]) - ] - #Define the security_group and its rules - # Define security_group name - self.sg_list=['sg1_ingress', 'sg2_ingress', 'sg1_egress', 'sg2_egress'] - self.sg_names = self.sg_list[:] - ## - #Define security_group with vm - self.sg_of_vm = {} - for key in self.vn_of_vm: - self.sg_of_vm[key] = [] - self.sg_of_vm['vm1'] = [self.sg_list[0]]; self.sg_of_vm['vm2'] = [self.sg_list[2]]; self.sg_of_vm['vm3'] = [self.sg_list[1]]; - self.sg_of_vm['vm4'] = [self.sg_list[3]]; - ##Define the security group rules - '''import uuid - uuid_1= uuid.uuid1().urn.split(':')[2] - uuid_2= uuid.uuid1().urn.split(':')[2]''' - self.sg_rules={} - for sg in self.sg_list: - self.sg_rules[sg] = [] - self.sg_rules[self.sg_list[0]]=[ - {'direction' : '>', - 'protocol' : 'udp', - 'dst_addresses': [{'security_group': 'local', 'subnet' : None}], - 'dst_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_addresses': [{'subnet' : {'ip_prefix' : '0.0.0.0', 'ip_prefix_len' : 0}}]}] - - self.sg_rules[self.sg_list[1]]=[ - {'direction' : '>', - 'protocol' : 'udp', - 'dst_addresses': [{'security_group': 'local', 'subnet' : None}], - 'dst_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_addresses': [{'subnet' : {'ip_prefix' : '0.0.0.0', 'ip_prefix_len' : 0}}]}] - - self.sg_rules[self.sg_list[2]]=[ - {'direction' : '>', - 'protocol' : 
'udp', - 'src_addresses': [{'security_group': 'local', 'subnet' : None}], - 'dst_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'dst_addresses': [{'subnet' : {'ip_prefix' : '0.0.0.0', 'ip_prefix_len' : 0}}]}] - - self.sg_rules[self.sg_list[3]]=[ - {'direction' : '>', - 'protocol' : 'udp', - 'src_addresses': [{'security_group': 'local', 'subnet' : None}], - 'dst_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'dst_addresses': [{'subnet' : {'ip_prefix' : '0.0.0.0', 'ip_prefix_len' : 0}}]}] - - - ## - # Define traffic profile. - self.traffic_profile= [{'src_vm':'vm1', 'dst_vm':'vm2', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'}, - {'src_vm':'vm2', 'dst_vm':'vm1', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'pass'}, - {'src_vm':'vm1', 'dst_vm':'vm3', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'}, - {'src_vm':'vm3', 'dst_vm':'vm1', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'}, - {'src_vm':'vm1', 'dst_vm':'vm4', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'}, - {'src_vm':'vm4', 'dst_vm':'vm1', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'pass'}, - {'src_vm':'vm2', 'dst_vm':'vm4', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'}, - {'src_vm':'vm4', 'dst_vm':'vm2', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'} - ] - - # end build_topo_sg_stateful -# end class sdn_topo_config -################################################################################ - -class sdn_topo_config_multiproject(): - - def __init__(self, domain= 'default-domain', project= 'admin', username= None, password= None): - print "building dynamic topo" - project1 = 'project1' - project2 = 'admin' - self.project_list = [project1, project2] - self.topo_of_project = {self.project_list[0]:'build_topo1', self.project_list[1]:'build_topo1'} - self.user_of_project = {self.project_list[0]:'user1', 
self.project_list[1]:'user2'} - self.pass_of_project = {self.project_list[0]:'user123', self.project_list[1]:'user223'} - - ## - # Define traffic profile. - self.traffic_profile= [{'src_vm':[project1,'vm1'], 'dst_vm':[project2,'vm1'], 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'pass'}, - {'src_vm':[project1,'vm2'], 'dst_vm':[project2,'vm2'], 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'}, - {'src_vm':[project1,'vm1'], 'dst_vm':[project2,'vm2'], 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'}, - {'src_vm':[project1,'vm2'], 'dst_vm':[project2,'vm1'], 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'pass'}, - {'src_vm':[project2,'vm1'], 'dst_vm':[project1,'vm1'], 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'pass'}, - {'src_vm':[project2,'vm2'], 'dst_vm':[project1,'vm2'], 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'}, - {'src_vm':[project2,'vm2'], 'dst_vm':[project1,'vm1'], 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'pass'}, - {'src_vm':[project2,'vm1'], 'dst_vm':[project1,'vm2'], 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'} - ] - - - def build_topo1(self, domain= 'default-domain', project= 'admin', username= None, password= None,config_option='openstack'): - ## - # Domain and project defaults: Do not change until support for non-default is tested! 
- self.domain= domain; self.project= project; self.username= username; self.password= password - ## - # Define VN's in the project: - self.vnet_list= ['vnet1'] - ## - # Define network info for each VN: - if config_option == 'openstack': - if self.project == self.project_list[1]: - self.vn_nets= {'vnet1': ['11.1.1.0/24', '12.1.1.0/24']} - else: - self.vn_nets= {'vnet1': ['11.2.1.0/24', '12.2.1.0/24']} - else: - if self.project == self.project_list[1]: - self.vn_nets = { - 'vnet1': [(NetworkIpam(), VnSubnetsType([IpamSubnetType(subnet=SubnetType('11.1.1.0', 24)), IpamSubnetType(subnet=SubnetType('12.1.1.0', 24))]))] - } - else: - self.vn_nets = { - 'vnet1': [(NetworkIpam(), VnSubnetsType([IpamSubnetType(subnet=SubnetType('11.2.1.0', 24)), IpamSubnetType(subnet=SubnetType('12.2.1.0', 24))]))] - } - - ## - # Define network policies - self.policy_list= ['policy0'] - self.vn_policy= {'vnet1': ['policy0']} - - self.vn_of_vm= {'vm1': 'vnet1', 'vm2': 'vnet1'} - - #Define the vm to compute node mapping to pin a vm to a particular - #compute node or else leave empty. - self.vm_node_map = {} - - ## - # Define network policy rules - self.rules= {} - # Multiple policies are defined with different action for the test traffic streams.. 
- self.policy_test_order= ['policy0'] - if config_option == 'openstack': - self.rules['policy0']= [ - {'direction': '<>', 'protocol': 'any', 'dest_network': ':'.join([self.domain,self.project_list[0],self.vnet_list[0]]), 'source_network': ':'.join([self.domain,self.project_list[1],self.vnet_list[0]]), 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - else: - self.rules['policy0'] = [ - PolicyRuleType(direction='<>', protocol='any', - dst_addresses=[AddressType(virtual_network=':'.join([self.domain,self.project_list[0],self.vnet_list[0]]))], - src_addresses=[AddressType(virtual_network=':'.join([self.domain,self.project_list[1],self.vnet_list[0]]))], - dst_ports=[PortType(-1, -1)], action_list=ActionListType(simple_action='pass'), src_ports=[PortType(-1, -1)]) - ] - - #Define the security_group and its rules - # Define security_group name - self.sg_list=['sg1'] - self.sg_names = self.sg_list[:] - ## - #Define security_group with vm - self.sg_of_vm = {} - for key in self.vn_of_vm: - self.sg_of_vm[key] = [] - self.sg_of_vm['vm1'] = [self.sg_list[0]] - ##Define the security group rules - self.sg_rules={} - for sg in self.sg_list: - self.sg_rules[sg] = [] - self.sg_rules[self.sg_list[0]]=[ - {'direction' : '>', - 'protocol' : 'udp', - 'dst_addresses': [{'security_group': 'local', 'subnet' : None}], - 'dst_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_addresses': [{'subnet' : {'ip_prefix' : '0.0.0.0', 'ip_prefix_len' : 0}}]}, - {'direction' : '>', - 'protocol' : 'udp', - 'src_addresses': [{'security_group': 'local', 'subnet' : None}], - 'dst_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'src_ports': [{'start_port' : 0, 'end_port' : 65535}], - 'dst_addresses': [{'subnet' : {'ip_prefix' : '0.0.0.0', 'ip_prefix_len' : 0}}]}] - - return self - # end build_topo1 -# end class sdn_topo_config_multiproject -################################################################################ - 
-class sdn_topo_1vn_2vm_config (): - def build_topo(self, domain= 'default-domain', project= 'admin', username= None, password= None,config_option='openstack'): - ## - # Domain and project defaults: Do not change until support for non-default is tested! - self.domain= domain; self.project= project; self.username= username; self.password= password - ## - # Define VN's in the project: - self.vnet_list= ['vnet1'] - ## - # Define network info for each VN: - if config_option == 'openstack': - self.vn_nets= {'vnet1': ['10.1.1.0/24', '11.1.1.0/24']} - else: - self.vn_nets = { - 'vnet1': [(NetworkIpam(), VnSubnetsType([IpamSubnetType(subnet=SubnetType('10.1.1.0', 24)), IpamSubnetType(subnet=SubnetType('11.1.1.0', 24))]))] - } - - ## - # Define network policies - self.policy_list= ['policy0'] - self.vn_policy= {'vnet1': ['policy0']} - - self.vn_of_vm= {'vm1': 'vnet1', 'vm2': 'vnet1'} - - #Define the vm to compute node mapping to pin a vm to a particular - #compute node or else leave empty. - self.vm_node_map = {} - - ## - # Define network policy rules - self.rules= {} - self.policy_test_order= ['policy0'] - if config_option == 'openstack': - self.rules['policy0']= [ - {'direction': '<>', 'protocol': 'any', 'dest_network': 'any', 'source_network': 'any', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - else: - self.rules['policy0'] = [ - PolicyRuleType(direction='<>', protocol='any', dst_addresses=[AddressType(virtual_network='any')], src_addresses=[AddressType( - virtual_network='any')], dst_ports=[PortType(-1, -1)], action_list=ActionListType(simple_action='pass'), src_ports=[PortType(-1, -1)]) - ] - - #Define the security_group and its rules - # Define security_group name - self.sg_list=['sg1'] - self.sg_names = self.sg_list[:] - ## - #Define security_group with vm - self.sg_of_vm = {} - for key in self.vn_of_vm: - self.sg_of_vm[key] = [] - self.sg_of_vm['vm1'] = [self.sg_list[0]]; self.sg_of_vm['vm2'] = [self.sg_list[0]]; - ##Define the security group 
rules - self.sg_rules={} - for sg in self.sg_list: - self.sg_rules[sg] = [] - self.sg_rules[self.sg_list[0]]=[] - - ## - # Define traffic profile. - self.traffic_profile= [{'src_vm':'vm1', 'dst_vm':'vm2', 'proto':'udp', 'sport':8000, 'dport':9000, 'exp':'fail'}, - {'src_vm':'vm1', 'dst_vm':'vm2', 'proto':'tcp', 'sport':8000, 'dport':9000, 'exp':'fail'}, - {'src_vm':'vm1', 'dst_vm':'vm2', 'proto':'icmp', 'sport':8000, 'dport':9000, 'exp':'fail'} - ] - - return self - # end build_topo1 - - -################################################################################ -class sdn_topo_icmp_error_handling(): - #2 VN and 3 VM - def build_topo(self, domain= 'default-domain', project= 'admin', compute_node_list= None, username= None, password= None,config_option='openstack'): - print "building dynamic topo" - ## - # Domain and project defaults: Do not change until support for non-default is tested! - self.domain= domain; self.project= project; self.username= username; self.password= password - ## - # Define VN's in the project: - self.vnet_list= ['vnet1','vnet2'] - ## - # Define network info for each VN: - if config_option == 'openstack': - self.vn_nets= {'vnet1': ['10.1.1.0/24'], 'vnet2': ['11.1.1.0/24']} - else: - self.vn_nets = { - 'vnet1': [(NetworkIpam(), VnSubnetsType([IpamSubnetType(subnet=SubnetType('10.1.1.0', 24))]))], - 'vnet2': [(NetworkIpam(), VnSubnetsType([IpamSubnetType(subnet=SubnetType('11.1.1.0', 24))]))] - } - - ## - # Define network policies - self.policy_list= ['policy0'] - self.vn_policy= {'vnet1': ['policy0'], 'vnet2': ['policy0']} - - self.vn_of_vm= {'vm1': 'vnet1', 'vm2': 'vnet1', 'vm3': 'vnet2'} - - #Define the vm to compute node mapping to pin a vm to a particular - #compute node or else leave empty. - self.vm_node_map = {} - - ## - # Define network policy rules - self.rules= {} - # Multiple policies are defined with different action for the test traffic streams.. 
- self.policy_test_order= ['policy0'] - if config_option == 'openstack': - self.rules['policy0']= [ - {'direction': '<>', 'protocol': 'any', 'dest_network': 'any', 'source_network': 'any', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - else: - self.rules['policy0'] = [ - PolicyRuleType(direction='<>', protocol='any', dst_addresses=[AddressType(virtual_network='any')], src_addresses=[AddressType( - virtual_network='any')], dst_ports=[PortType(-1, -1)], action_list=ActionListType(simple_action='pass'), src_ports=[PortType(-1, -1)]) - ] - - #Define the security_group and its rules - # Define security_group name - self.sg_list=['sg1'] - self.sg_names = self.sg_list[:] - ## - #Define security_group with vm - self.sg_of_vm = {} - for key in self.vn_of_vm: - self.sg_of_vm[key] = [] - self.sg_of_vm['vm1'] = [self.sg_list[0]]; self.sg_of_vm['vm2'] = [self.sg_list[0]]; self.sg_of_vm['vm3'] = [self.sg_list[0]]; - ##Define the security group rules - self.sg_rules={} - for sg in self.sg_list: - self.sg_rules[sg] = [] - self.sg_rules[self.sg_list[0]] = [ - {'direction': '>', - 'protocol': 'any', - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'src_addresses': [{'security_group': 'local'}], - }, - {'direction': '>', - 'protocol': 'udp', - 'src_addresses':[{'security_group': self.domain + ':'+ self.project+ ':'+ self.sg_list[0]}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_addresses': [{'security_group': 'local'}],}] - - return self - # end build_topo - - #1VN 2 VM - def build_topo2(self, domain= 'default-domain', project= 'admin', compute_node_list= None, username= None, password= None,config_option='openstack'): - print "building dynamic topo" - ## - # Domain and project defaults: Do not change until support for non-default is tested! 
- self.domain= domain; self.project= project; self.username= username; self.password= password - ## - # Define VN's in the project: - self.vnet_list= ['vnet1'] - ## - # Define network info for each VN: - if config_option == 'openstack': - self.vn_nets= {'vnet1': ['10.1.1.0/24']} - else: - self.vn_nets = { - 'vnet1': [(NetworkIpam(), VnSubnetsType([IpamSubnetType(subnet=SubnetType('10.1.1.0', 24))]))] - } - - ## - # Define network policies - self.policy_list= ['policy0'] - self.vn_policy= {'vnet1': ['policy0']} - - self.vn_of_vm= {'vm1': 'vnet1', 'vm2': 'vnet1'} - - #Define the vm to compute node mapping to pin a vm to a particular - #compute node or else leave empty. - self.vm_node_map = {} - if compute_node_list is not None: - if len(compute_node_list) == 2: - self.vm_node_map = {'vm1':'CN0', 'vm2':'CN1'} - elif len(compute_node_list) > 2: - self.vm_node_map = {'vm1':'CN0', 'vm2':'CN1'} - - #Logic to create a vm to Compute node mapping. - if self.vm_node_map: - CN = [] - for cn in self.vm_node_map.keys(): - if self.vm_node_map[cn] not in CN: - CN.append(self.vm_node_map[cn]) - my_node_dict = {} - if compute_node_list is not None: - if len(compute_node_list) >= len(CN): - my_node_dict = dict(zip(CN, compute_node_list)) - - if my_node_dict: - for key in my_node_dict: - for key1 in self.vm_node_map: - if self.vm_node_map[key1] == key: - self.vm_node_map[key1] = my_node_dict[key] - - ## - # Define network policy rules - self.rules= {} - # Multiple policies are defined with different action for the test traffic streams.. 
- self.policy_test_order= ['policy0'] - if config_option == 'openstack': - self.rules['policy0']= [ - {'direction': '<>', 'protocol': 'any', 'dest_network': 'any', 'source_network': 'any', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - else: - self.rules['policy0'] = [ - PolicyRuleType(direction='<>', protocol='any', dst_addresses=[AddressType(virtual_network='any')], src_addresses=[AddressType( - virtual_network='any')], dst_ports=[PortType(-1, -1)], action_list=ActionListType(simple_action='pass'), src_ports=[PortType(-1, -1)]) - ] - - #Define the security_group and its rules - # Define security_group name - self.sg_list=['sg1', 'sg-ingress'] - self.sg_names = self.sg_list[:] - ## - #Define security_group with vm - self.sg_of_vm = {} - for key in self.vn_of_vm: - self.sg_of_vm[key] = [] - self.sg_of_vm['vm1'] = [self.sg_list[0]] - self.sg_of_vm['vm2'] = [self.sg_list[1]] - ##Define the security group rules - self.sg_rules={} - for sg in self.sg_list: - self.sg_rules[sg] = [] - self.sg_rules[self.sg_list[0]] = [ - {'direction': '>', - 'protocol': 'udp', - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'src_addresses': [{'security_group': 'local'}], - }] - - self.sg_rules[self.sg_list[1]] = [ - {'direction': '>', - 'protocol': 'udp', - 'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_addresses': [{'security_group': 'local'}], - }] - - return self - # end build_topo2 - -# end class sdn_topo_icmp_error_handling - -class sdn_topo_mx_with_si(): - def build_topo(self, domain= 'default-domain', project= 'admin', - compute_node_list= None, username= None, - password= None, public_vn_info=None,config_option='openstack'): - print "building dynamic topo" - ## - # Domain and project 
defaults: Do not change until support for non-default is tested! - self.domain= domain; self.project= project; self.username= username; self.password= password - ## - # Define VN's in the project: - self.vnet_list= ['vnet1','public'] - ## - # Define network info for each VN: - if config_option == 'openstack': - self.vn_nets= {'vnet1': ['9.9.9.0/24'], 'public': public_vn_info['subnet']} - else: - self.vn_nets = { - 'vnet1': [(NetworkIpam(), - VnSubnetsType([ - IpamSubnetType( - subnet=SubnetType( - '9.9.9.0', - 24)) - ]) - )], - 'public': [(NetworkIpam(), - VnSubnetsType([ - IpamSubnetType( - subnet=SubnetType( - public_vn_info['subnet'][0].split('/')[0], - int(public_vn_info['subnet'][0].split('/')[1]) - ) - )]) - )] - } - - - #Define diff. VN params - self.vn_params = {self.vnet_list[0]:{'router_asn':public_vn_info['router_asn'], - 'rt_number':public_vn_info['rt_number'] - } - } - - # define service templates - self.st_list = ['st_trans_firewall'] - self.st_params = {self.st_list[0]: {'svc_img_name': 'vsrx-bridge', 'svc_type': 'firewall', 'if_list': - [['management', False, False], ['left', False, False], - ['right', False, False]], 'svc_mode': 'transparent', - 'svc_scaling': False, 'flavor': 'm1.medium', - 'ordered_interfaces': True - }} - - # define service instance - self.si_list = ['si_trans_firewall'] - self.si_params = { - self.si_list[0]: {'if_list': [['management', False, False], ['left', False, False], - ['right', False, False]], 'svc_template': self.st_list[0], - 'left_vn': None, 'right_vn': None - }} - - # - # Define network policies - self.policy_list= ['policy0', 'pol-si'] - self.vn_policy= {self.vnet_list[0]: ['policy0'], self.vnet_list[1]: ['policy0']} - - self.vn_of_vm= {'vm1': 'vnet1', 'vm2': 'public'} - - #Define the vm to compute node mapping to pin a vm to a particular - #compute node or else leave empty. 
- self.vm_node_map = {} - - ## - # Define network policy rules - self.rules= {} - self.policy_test_order= ['policy0'] - if config_option == 'openstack': - self.rules['pol-si']= [{'direction': '<>', 'protocol': 'any', 'dest_network': self.vnet_list[0], - 'source_network': self.vnet_list[1], 'dst_ports': 'any', - 'simple_action': 'pass', 'src_ports': 'any', - 'action_list': {'simple_action':'pass', 'apply_service': [':'.join([self.domain, - self.project, - self.si_list[0]]) - ]} - }] - - self.rules['policy0']= [{'direction': '<>', 'protocol': 'any', 'dest_network': self.vnet_list[0], - 'source_network': self.vnet_list[1], 'dst_ports': 'any', - 'simple_action': 'pass', 'src_ports': 'any' - }] - else: - self.rules['pol-si'] = [ - PolicyRuleType(direction='<>', protocol='any', - dst_addresses=[AddressType(virtual_network=':'.join([self.domain,self.project,self.vnet_list[0]]))], - src_addresses=[AddressType(virtual_network=':'.join([self.domain,self.project,self.vnet_list[1]]))], - dst_ports=[PortType(-1, -1)], action_list=ActionListType(simple_action='pass', - apply_service=[':'.join([self.domain, self.project, self.si_list[0]])]), - src_ports=[PortType(-1, -1)]) - ] - self.rules['policy0'] = [ - PolicyRuleType(direction='<>', protocol='any', - dst_addresses=[AddressType(virtual_network=':'.join([self.domain,self.project,self.vnet_list[0]]))], - src_addresses=[AddressType(virtual_network=':'.join([self.domain,self.project,self.vnet_list[1]]))], - dst_ports=[PortType(-1, -1)], action_list=ActionListType(simple_action='pass'), - src_ports=[PortType(-1, -1)]) - ] - - - #Define the security_group and its rules - # Define security_group name - self.sg_list=['sg1'] - self.sg_names = self.sg_list[:] - - ## - #Define security_group with vm - self.sg_of_vm = {} - for key in self.vn_of_vm: - self.sg_of_vm[key] = [] - self.sg_of_vm['vm1'] = [self.sg_list[0]]; self.sg_of_vm['vm2'] = [self.sg_list[0]] - ##Define the security group rules - self.sg_rules={} - for sg in self.sg_list: - 
self.sg_rules[sg] = [] - self.sg_rules[self.sg_list[0]] = [ - {'direction': '>', - 'protocol': 'udp', - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'src_addresses': [{'security_group': 'local'}], - }, - {'direction': '>', - 'protocol': 'udp', - 'src_addresses':[{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_addresses': [{'security_group': 'local'}],}] - - return self - # end build_topo - -################################################################################ -class sdn_topo_flow_to_sg_rule_mapping(): - #2 VN and 2 VM - def build_topo(self, domain= 'default-domain', project= 'admin', - compute_node_list= None, username= None, - password= None,no_of_vm=2, - config_option='openstack'): - #no_of_vm must be 2 or 3 - print "building dynamic topo" - ## - # Domain and project defaults: Do not change until support for non-default is tested! 
- self.domain= domain; self.project= project; self.username= username; self.password= password - ## - # Define VN's in the project: - self.vnet_list= ['vnet1','vnet2'] - ## - # Define network info for each VN: - if config_option == 'openstack': - self.vn_nets= {'vnet1': ['10.1.1.0/24'], 'vnet2': ['11.1.1.0/24']} - else: - self.vn_nets = { - 'vnet1': [(NetworkIpam(), VnSubnetsType( - [IpamSubnetType( - subnet=SubnetType( - '10.1.1.0', - 24))]))], - 'vnet2': [(NetworkIpam(), VnSubnetsType( - [IpamSubnetType( - subnet=SubnetType( - '11.1.1.0', - 24))]))] - } - - ## - # Define network policies - self.policy_list= ['policy0'] - self.vn_policy= {'vnet1': ['policy0'], 'vnet2': ['policy0']} - - if no_of_vm == 3: - self.vn_of_vm= {'vm1': 'vnet1', 'vm2': 'vnet1', 'vm3': 'vnet2'} - if no_of_vm == 2: - self.vn_of_vm= {'vm1': 'vnet1', 'vm2': 'vnet2'} - - #Define the vm to compute node mapping to pin a vm to a particular - #compute node or else leave empty. - self.vm_node_map = {} - if compute_node_list is not None: - if len(compute_node_list) == 2: - self.vm_node_map = {'vm1':'CN0', 'vm2':'CN1'} - elif len(compute_node_list) > 2: - self.vm_node_map = {'vm1':'CN0', 'vm2':'CN1'} - if no_of_vm == 3:self.vm_node_map['vm3'] = 'CN0' - - #Logic to create a vm to Compute node mapping. - if self.vm_node_map: - CN = [] - for cn in self.vm_node_map.keys(): - if self.vm_node_map[cn] not in CN: - CN.append(self.vm_node_map[cn]) - my_node_dict = {} - if compute_node_list is not None: - if len(compute_node_list) >= len(CN): - my_node_dict = dict(zip(CN, compute_node_list)) - - if my_node_dict: - for key in my_node_dict: - for key1 in self.vm_node_map: - if self.vm_node_map[key1] == key: - self.vm_node_map[key1] = my_node_dict[key] - - ## - # Define network policy rules - self.rules= {} - # Multiple policies are defined with different action for the test traffic streams.. 
- self.policy_test_order= ['policy0'] - if config_option == 'openstack': - self.rules['policy0']= [ - {'direction': '<>', 'protocol': 'any', 'dest_network': 'any', - 'source_network': 'any', 'dst_ports': 'any', - 'simple_action': 'pass', 'src_ports': 'any'}] - else: - self.rules['policy0'] = [ - PolicyRuleType(direction='<>', protocol='any', - dst_addresses=[AddressType(virtual_network='any')], - src_addresses=[AddressType(virtual_network='any')], - dst_ports=[PortType(-1, -1)], - action_list=ActionListType(simple_action='pass'), - src_ports=[PortType(-1, -1)]) - ] - - #Define the security_group and its rules - # Define security_group name - self.sg_list=['sg1'] - self.sg_names = self.sg_list[:] - ## - #Define security_group with vm - self.sg_of_vm = {} - for key in self.vn_of_vm: - self.sg_of_vm[key] = [] - ##Define the security group rules - self.sg_rules={} - for sg in self.sg_list: - self.sg_rules[sg] = [] - self.sg_rules[self.sg_list[0]] = [ - {'direction': '>', - 'protocol': 'udp', - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'src_addresses': [{'security_group': 'local'}], - }, - {'direction': '>', - 'protocol': 'udp', - 'src_addresses':[{'security_group': self.domain + ':'+ self.project+ ':'+ self.sg_list[0]}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_addresses': [{'security_group': 'local'}],}] - - return self - # end build_topo - - def build_topo2(self, domain= 'default-domain', project= 'admin', - compute_node_list= None, username= None, - password= None,no_of_vm=2, - config_option='openstack'): - #no_of_vm must be 2 or 3 - print "building dynamic topo" - ## - # Domain and project defaults: Do not change until support for non-default is tested! 
- self.domain= domain; self.project= project; self.username= username; self.password= password - ## - # Define VN's in the project: - self.vnet_list= ['vnet1','vnet2'] - ## - # Define network info for each VN: - if config_option == 'openstack': - self.vn_nets= {'vnet1': ['10.1.1.0/24'], 'vnet2': ['11.1.1.0/24']} - else: - self.vn_nets = { - 'vnet1': [(NetworkIpam(), VnSubnetsType( - [IpamSubnetType( - subnet=SubnetType( - '10.1.1.0', - 24))]))], - 'vnet2': [(NetworkIpam(), VnSubnetsType( - [IpamSubnetType( - subnet=SubnetType( - '11.1.1.0', - 24))]))] - } - - ## - # Define network policies - self.policy_list= ['policy0'] - self.vn_policy= {'vnet1': ['policy0'], 'vnet2': ['policy0']} - - if no_of_vm == 3: - self.vn_of_vm= {'vm1': 'vnet1', 'vm2': 'vnet1', 'vm3': 'vnet2'} - if no_of_vm == 2: - self.vn_of_vm= {'vm1': 'vnet1', 'vm2': 'vnet2'} - - #Define the vm to compute node mapping to pin a vm to a particular - #compute node or else leave empty. - self.vm_node_map = {} - if compute_node_list is not None: - if len(compute_node_list) == 2: - self.vm_node_map = {'vm1':'CN0', 'vm2':'CN1'} - elif len(compute_node_list) > 2: - self.vm_node_map = {'vm1':'CN0', 'vm2':'CN1'} - if no_of_vm == 3:self.vm_node_map['vm3'] = 'CN0' - #Logic to create a vm to Compute node mapping. - if self.vm_node_map: - CN = [] - for cn in self.vm_node_map.keys(): - if self.vm_node_map[cn] not in CN: - CN.append(self.vm_node_map[cn]) - my_node_dict = {} - if compute_node_list is not None: - if len(compute_node_list) >= len(CN): - my_node_dict = dict(zip(CN, compute_node_list)) - - if my_node_dict: - for key in my_node_dict: - for key1 in self.vm_node_map: - if self.vm_node_map[key1] == key: - self.vm_node_map[key1] = my_node_dict[key] - - ## - # Define network policy rules - self.rules= {} - # Multiple policies are defined with different action for the test traffic streams.. 
- self.policy_test_order= ['policy0'] - if config_option == 'openstack': - self.rules['policy0']= [ - {'direction': '<>', 'protocol': 'any', 'dest_network': 'any', - 'source_network': 'any', 'dst_ports': 'any', - 'simple_action': 'pass', 'src_ports': 'any'}] - else: - self.rules['policy0'] = [ - PolicyRuleType(direction='<>', protocol='any', - dst_addresses=[AddressType(virtual_network='any')], - src_addresses=[AddressType(virtual_network='any')], - dst_ports=[PortType(-1, -1)], - action_list=ActionListType(simple_action='pass'), - src_ports=[PortType(-1, -1)]) - ] - - #Define the security_group and its rules - # Define security_group name - self.sg_list=['sg1', 'sg2'] - self.sg_names = self.sg_list[:] - ## - #Define security_group with vm - self.sg_of_vm = {} - for key in self.vn_of_vm: - self.sg_of_vm[key] = [] - self.sg_of_vm['vm1'] = [self.sg_list[0], self.sg_list[1]]; self.sg_of_vm['vm2'] = [self.sg_list[0], self.sg_list[1]] - ##Define the security group rules - self.sg_rules={} - for sg in self.sg_list: - self.sg_rules[sg] = [] - self.sg_rules[self.sg_list[0]] = [ - {'direction': '>', - 'protocol': 'udp', - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'src_addresses': [{'security_group': 'local'}], - }, - {'direction': '>', - 'protocol': 'udp', - 'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_addresses': [{'security_group': 'local'}],}, - {'direction': '>', - 'protocol': 'icmp', - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'src_addresses': [{'security_group': 'local'}], - }, - {'direction': '>', - 'protocol': 'icmp', - 'src_addresses': [{'subnet': 
{'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_addresses': [{'security_group': 'local'}],}] - - self.sg_rules[self.sg_list[1]] = [ - {'direction': '>', - 'protocol': 'tcp', - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'src_addresses': [{'security_group': 'local'}], - }, - {'direction': '>', - 'protocol': 'tcp', - 'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_addresses': [{'security_group': 'local'}],}] - - return self - # end build_topo2 - diff --git a/scripts/securitygroup/setup.py b/scripts/securitygroup/setup.py deleted file mode 100644 index 019ad9b82..000000000 --- a/scripts/securitygroup/setup.py +++ /dev/null @@ -1,179 +0,0 @@ -import os - -import fixtures -from testresources import TestResource - -from policy_test import PolicyFixture -from vn_test import MultipleVNFixture -from vm_test import MultipleVMFixture -from common.connections import ContrailConnections -from securitygroup.config import ConfigSecGroup -from common.contrail_test_init import ContrailTestInit - - -class SecurityGroupSetup(fixtures.Fixture, ConfigSecGroup): - - """Common resources required for the security group regression test suite. 
- """ - - def __init__(self, common_resource): - super(SecurityGroupSetup, self).__init__() - self.common_resource = common_resource - - def setUp(self): - super(SecurityGroupSetup, self).setUp() - if 'PARAMS_FILE' in os.environ: - self.ini_file = os.environ.get('PARAMS_FILE') - else: - self.ini_file = 'params.ini' - self.inputs = ContrailTestInit(self.ini_file) - self.connections = ContrailConnections(self.inputs) - self.quantum_h = self.connections.quantum_h - self.nova_h = self.connections.nova_h - self.vnc_lib = self.connections.vnc_lib - self.logger = self.inputs.logger - - self.logger.info("Configuring setup for security group tests.") - self.setup() - self.logger.info("Verifying setup of security group tests.") - self.verify() - self.logger.info( - "Finished configuring setup for security group tests.") - return self - - def setup(self): - """Config common resources.""" - vn_s = {'vn1': '20.1.1.0/24', 'vn2': ['10.1.1.0/24']} - self.multi_vn_fixture = self.useFixture(MultipleVNFixture( - connections=self.connections, inputs=self.inputs, subnet_count=2, - vn_name_net=vn_s, project_name=self.inputs.project_name)) - vns = self.multi_vn_fixture.get_all_fixture_obj() - (self.vn1_name, self.vn1_fix) = self.multi_vn_fixture._vn_fixtures[0] - (self.vn2_name, self.vn2_fix) = self.multi_vn_fixture._vn_fixtures[1] - - self.logger.info("Configure security groups required for test.") - self.config_sec_groups() - - self.multi_vm_fixture = self.useFixture(MultipleVMFixture( - project_name=self.inputs.project_name, connections=self.connections, - vm_count_per_vn=3, vn_objs=vns, image_name='ubuntu-traffic', - flavor='contrail_flavor_small')) - vms = self.multi_vm_fixture.get_all_fixture() - (self.vm1_name, self.vm1_fix) = vms[0] - (self.vm2_name, self.vm2_fix) = vms[1] - (self.vm3_name, self.vm3_fix) = vms[2] - (self.vm4_name, self.vm4_fix) = vms[3] - (self.vm5_name, self.vm5_fix) = vms[4] - (self.vm6_name, self.vm6_fix) = vms[5] - - self.logger.info("Adding the sec groups to 
the VM's") - self.vm1_fix.add_security_group(secgrp=self.sg1_name) - self.vm1_fix.add_security_group(secgrp=self.sg2_name) - self.vm2_fix.add_security_group(secgrp=self.sg2_name) - self.vm4_fix.add_security_group(secgrp=self.sg1_name) - self.vm4_fix.add_security_group(secgrp=self.sg2_name) - self.vm5_fix.add_security_group(secgrp=self.sg1_name) - - self.logger.info("Remove the default sec group form the VM's") - self.vm1_fix.remove_security_group(secgrp='default') - self.vm2_fix.remove_security_group(secgrp='default') - self.vm4_fix.remove_security_group(secgrp='default') - self.vm5_fix.remove_security_group(secgrp='default') - - def config_sec_groups(self): - self.sg1_name = 'test_tcp_sec_group' - rule = [{'direction': '<>', - 'protocol': 'tcp', - 'dst_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}}, - {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'src_addresses': [{'security_group': 'local'}], - }, - {'direction': '<>', - 'protocol': 'tcp', - 'src_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}}, - {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_addresses': [{'security_group': 'local'}], - }] - - self.sg1_fix = self.config_sec_group(name=self.sg1_name, entries=rule) - - self.sg2_name = 'test_udp_sec_group' - rule = [{'direction': '<>', - 'protocol': 'udp', - 'dst_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}}, - {'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'src_addresses': [{'security_group': 'local'}], - }, - {'direction': '<>', - 'protocol': 'udp', - 'src_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}}, - 
{'subnet': {'ip_prefix': '20.1.1.0', 'ip_prefix_len': 24}}], - 'src_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_ports': [{'start_port': 0, 'end_port': -1}], - 'dst_addresses': [{'security_group': 'local'}], - }] - self.sg2_fix = self.config_sec_group(name=self.sg2_name, entries=rule) - - def verify(self): - """verfiy common resources.""" - self.logger.debug("Verify the configured VN's.") - assert self.multi_vn_fixture.verify_on_setup() - - self.logger.debug("Verify the configured VM's.") - assert self.multi_vm_fixture.verify_on_setup() - - self.logger.info("Installing traffic package in VM.") - self.vm1_fix.install_pkg("Traffic") - self.vm2_fix.install_pkg("Traffic") - self.vm3_fix.install_pkg("Traffic") - self.vm4_fix.install_pkg("Traffic") - self.vm5_fix.install_pkg("Traffic") - self.vm6_fix.install_pkg("Traffic") - - self.logger.debug("Verify the configured security groups.") - result, msg = self.sg1_fix.verify_on_setup() - assert result, msg - result, msg = self.sg2_fix.verify_on_setup() - assert result, msg - - self.logger.debug("Verify the attached security groups in the VM.") - result, msg = self.vm1_fix.verify_security_group(self.sg1_name) - assert result, msg - result, msg = self.vm1_fix.verify_security_group(self.sg2_name) - assert result, msg - result, msg = self.vm2_fix.verify_security_group(self.sg2_name) - assert result, msg - result, msg = self.vm4_fix.verify_security_group(self.sg1_name) - assert result, msg - result, msg = self.vm4_fix.verify_security_group(self.sg2_name) - assert result, msg - result, msg = self.vm5_fix.verify_security_group(self.sg2_name) - assert result, msg - - def tearDown(self): - self.logger.info("Tearing down resources of security group tests") - super(SecurityGroupSetup, self).cleanUp() - - def dirtied(self): - self.common_resource.dirtied(self) - - -class _SecurityGroupSetupResource(TestResource): - - def make(self, dependencyresource): - base_setup = SecurityGroupSetup(self) - base_setup.setUp() - return 
base_setup - - def clean(self, base_setup): - base_setup.logger.info( - "Cleaning up security group test resources here") - base_setup.tearDown() - -SecurityGroupSetupResource = _SecurityGroupSetupResource() diff --git a/scripts/securitygroup/test_regression.py b/scripts/securitygroup/test_regression.py index 0e51591dd..574e5de1f 100644 --- a/scripts/securitygroup/test_regression.py +++ b/scripts/securitygroup/test_regression.py @@ -22,116 +22,7 @@ from tcutils.util import get_random_name from base_traffic import * from tcutils.util import skip_because - -class SecurityGroupRegressionTests1(BaseSGTest, VerifySecGroup, ConfigPolicy): - - @classmethod - def setUpClass(cls): - super(SecurityGroupRegressionTests1, cls).setUpClass() - cls.option = 'openstack' - - def runTest(self): - pass - - @test.attr(type=['sanity','ci_sanity']) - @preposttest_wrapper - def test_sec_group_add_delete(self): - """ - Description: Verify security group add delete - Steps: - 1. Create custom security group with rule in it - 2. Delete custom security group - Pass criteria: Step 1 and 2 should pass - """ - rule = [{'direction': '>', - 'protocol': 'tcp', - 'dst_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}}], - 'dst_ports': [{'start_port': 8000, 'end_port': 8000}], - 'src_ports': [{'start_port': 9000, 'end_port': 9000}], - 'src_addresses': [{'security_group': 'local'}], - }] - secgrp_fix = self.config_sec_group(name='test_sec_group', entries=rule) - self.delete_sec_group(secgrp_fix) - return True - - @test.attr(type=['sanity','ci_sanity','vcenter']) - @preposttest_wrapper - def test_vm_with_sec_group(self): - """ - Description: Verify attach dettach security group in VM - Steps: - 1. Create VN with subnet - 2. Create security group with custom rules - 3. Launch VM in custom created security group and verify - 4. Remove secuity group association with VM - 5. Add back custom security group to VM and verify - 6. Try to delete security group with association to VM. 
It should fail. - Pass criteria: Step 2,3,4,5 and 6 should pass - """ - vn_name = "test_sec_vn" - vn_net = ['11.1.1.0/24'] - vn = self.useFixture(VNFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_name=vn_name, inputs=self.inputs, subnets=vn_net)) - assert vn.verify_on_setup() - - secgrp_name = 'test_sec_group' + '_' + get_random_name() - rule = [{'direction': '>', - 'protocol': 'tcp', - 'dst_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}}], - 'dst_ports': [{'start_port': 8000, 'end_port': 8000}], - 'src_ports': [{'start_port': 9000, 'end_port': 9000}], - 'src_addresses': [{'security_group': 'local'}], - }] - secgrp = self.config_sec_group(name=secgrp_name, entries=rule) - secgrp_id = secgrp.secgrp_id - vm_name = "test_sec_vm" - img_name = os.environ['ci_image'] if os.environ.has_key('ci_image') else 'ubuntu-traffic' - vm = self.useFixture(VMFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_obj=vn.obj, vm_name=vm_name, image_name=img_name, flavor='contrail_flavor_small', - sg_ids=[secgrp_id])) - assert vm.verify_on_setup() - assert vm.wait_till_vm_is_up() - result, msg = vm.verify_security_group(secgrp_name) - assert result, msg - - self.logger.info("Remove security group %s from VM %s", - secgrp_name, vm_name) - vm.remove_security_group(secgrp=secgrp_id) - result, msg = vm.verify_security_group(secgrp_name) - if result: - assert False, "Security group %s is not removed from VM %s" % (secgrp_name, - vm_name) - - import time - time.sleep(4) - vm.add_security_group(secgrp=secgrp_name) - result, msg = vm.verify_security_group(secgrp_name) - assert result, msg - - self.logger.info( - "Try deleting the security group %s with back ref.", secgrp_name) - try: - if secgrp.option == 'openstack': - secgrp.quantum_h.delete_security_group(secgrp.secgrp_id) - else: - secgrp.secgrp_fix.cleanUp() - except Exception, msg: - self.logger.info(msg) - self.logger.info( - "Not able to delete 
the security group with back ref as expected") - else: - try: - secgroup = self.vnc_lib.security_group_read( - fq_name=secgrp.secgrp_fq_name) - self.logger.info( - "Not able to delete the security group with back ref as expected") - except NoIdError: - errmsg = "Security group deleted, when it is attached to a VM." - self.logger.error(errmsg) - assert False, errmsg - return True +import test_regression_basic class SecurityGroupRegressionTests2(BaseSGTest, VerifySecGroup, ConfigPolicy): @@ -1099,6 +990,7 @@ def test_icmp_error_handling2(self): vm2_name = 'dest_vm' #vm1_fixture = self.config_vm(vn1_fixture, vm1_name) #vm2_fixture = self.config_vm(vn2_fixture, vm2_name) + self.inputs.set_af('dual') vm1_fixture = self.useFixture(VMFixture( project_name=self.inputs.project_name, connections=self.connections, vn_obj=vn1_fixture.obj, vm_name=vm1_name, node_name=None, @@ -1140,10 +1032,8 @@ def test_icmp_error_handling2(self): cmd_ping = ('ping -M want -s 2500 -c 10 %s | grep \"Frag needed and DF set\"' % (dst_vm_fix.vm_ip)) # cmd_tcpdump = 'tcpdump -vvv -c 5 -ni eth0 -v icmp > /tmp/op1.log' - gw = src_vm_fix.vm_ip - gw = gw.split('.') - gw[-1] = '1' - gw = '.'.join(gw) + output = src_vm_fix.run_cmd_on_vm(cmds=['''netstat -anr |grep ^0.0.0.0 | awk '{ print $2 }' '''], as_sudo=True) + gw = output.values()[0].split('\r\n')[-1] filters = 'icmp' session, pcap = start_tcpdump_for_vm_intf(self, src_vm_fix, src_vn_fq_name, filters = filters) cmds = ['ifconfig eth0 mtu 3000', cmd_ping, @@ -1166,13 +1056,7 @@ def test_icmp_error_handling2(self): self.logger.info("increasing MTU on src VM and ping6 with bigger size and reverting MTU") cmd_ping = 'ping6 -s 2500 -c 10 %s | grep \"Packet too big\"' % (vm2_fixture.vm_ip) - if self.option == 'openstack': - src_vn_fq_name = vn1_fixture.vn_fq_name -# dst_vn_fq_name = vn2_fixture.vn_fq_name - else: - src_vn_fq_name = ':'.join(vn1_fixture._obj.get_fq_name()) -# dst_vn_fq_name = ':'.join(vn2_fixture._obj.get_fq_name()) -# cmd_tcpdump = 
'tcpdump -vvv -c 5 -ni eth0 -v icmp6 > /tmp/op.log' + src_vn_fq_name = vn1_fixture.vn_fq_name gw = vm1_fixture.vm_ip gw = gw.split(':') gw[-1] = '1' @@ -2565,10 +2449,10 @@ def test_syn_ack_create_flow(self): #creating new classes to run all tests with contrail apis -class SecurityGroupRegressionTests1_contrail(SecurityGroupRegressionTests1): +class SecurityGroupBasicRegressionTests1_contrail(test_regression_basic.SecurityGroupBasicRegressionTests1): @classmethod def setUpClass(cls): - super(SecurityGroupRegressionTests1, cls).setUpClass() + super(SecurityGroupBasicRegressionTests1_contrail, cls).setUpClass() cls.option = 'contrail' class SecurityGroupRegressionTests2_contrail(SecurityGroupRegressionTests2): @classmethod diff --git a/scripts/securitygroup/verify.py b/scripts/securitygroup/verify.py deleted file mode 100644 index 47a3f2f96..000000000 --- a/scripts/securitygroup/verify.py +++ /dev/null @@ -1,636 +0,0 @@ -import os -import sys -from time import sleep -from tcutils.util import retry -sys.path.append(os.path.realpath('tcutils/pkgs/Traffic')) -from traffic.core.stream import Stream -from traffic.core.helpers import Host, Sender, Receiver -from traffic.core.profile import StandardProfile,\ - ContinuousProfile -from tcutils.util import get_random_name -sys.path.append(os.path.realpath('tcutils/traffic_utils')) -from base_traffic import * -from security_group import list_sg_rules -from tcutils.tcpdump_utils import * - -class VerifySecGroup(): - - def verify_traffic(self, sender_vm, receiver_vm, proto, sport, dport, count=None, fip=None): - - traffic_obj = BaseTraffic.factory(proto=proto) - assert traffic_obj - assert traffic_obj.start(sender_vm, receiver_vm, - proto, sport, dport, pkt_count=count) - sleep(1) - sent, recv = traffic_obj.stop() - - return (sent, recv) - - def assert_traffic(self, sender, receiver, proto, sport, dport, - expectation='pass'): - self.logger.info("Sending %s traffic from %s with %s to %s with %s" % - (proto, sender[0].vm_name, 
sender[1], receiver[0].vm_name, receiver[1])) - sent, recv = self.verify_traffic(sender[0], receiver[0], - proto, sport, dport) - if expectation == 'pass': - msg = "%s traffic from %s with %s to %s with %s passed " % (proto, - sender[0].vm_name, sender[1], receiver[0].vm_name, receiver[1]) - errmsg = "%s traffic from %s with %s to %s with %s Failed " % (proto, - sender[0].vm_name, sender[1], receiver[0].vm_name, receiver[1]) - if (sent and (recv == sent or recv > sent)): - self.logger.info(msg) - return (True, msg) - else: - self.logger.error(errmsg) - if self.inputs.stop_on_fail: - self.logger.info( - "Sub test failed; Stopping test for debugging.") - import pdb - pdb.set_trace() - return (False, errmsg) - - elif expectation == 'fail': - msg = "%s traffic from %s with %s to %s with %s "\ - "failed as expected" % (proto, sender[0].vm_name, sender[1], - receiver[0].vm_name, receiver[1]) - errmsg = "%s traffic from %s port %s with %s to %s port %s with %s "\ - "passed; Expcted to fail " % (proto, sender[0].vm_name,sport, sender[1], - receiver[0].vm_name,dport, receiver[1]) - if (recv == 0): - self.logger.info(msg) - return (True, msg) - else: - self.logger.error(errmsg) - if self.inputs.stop_on_fail: - self.logger.info( - "Sub test failed; Stopping test for debugging.") - import pdb - pdb.set_trace() - return (False, errmsg) - - def start_traffic_scapy(self, sender_vm, receiver_vm, proto, - sport, dport, count=None, fip=None, - payload=None, icmp_type=None, icmp_code=None, - recvr=True): - # Create stream and profile - if fip: - stream = Stream( - protocol="ip", sport=sport, dport=dport, proto=proto, src=sender_vm.vm_ip, - dst=fip,type=icmp_type,code=icmp_code) - else: - stream = Stream( - protocol="ip", sport=sport, dport=dport, proto=proto, src=sender_vm.vm_ip, - dst=receiver_vm.vm_ip,type=icmp_type,code=icmp_code) - profile_kwargs = {'stream': stream} - if fip: - profile_kwargs.update({'listener': receiver_vm.vm_ip}) - if payload: - 
profile_kwargs.update({'payload': payload}) - if count: - profile_kwargs.update({'count': count}) - profile = StandardProfile(**profile_kwargs) - else: - profile = ContinuousProfile(**profile_kwargs) - - # Set VM credentials - send_node = Host(sender_vm.vm_node_ip, - self.inputs.username, self.inputs.password) - recv_node = Host(receiver_vm.vm_node_ip, - self.inputs.username, self.inputs.password) - send_host = Host(sender_vm.local_ip, - sender_vm.vm_username, sender_vm.vm_password) - recv_host = Host(receiver_vm.local_ip, - receiver_vm.vm_username, receiver_vm.vm_password) - - # Create send, receive helpers - sender = Sender("send%s" % - proto, profile, send_node, send_host, self.inputs.logger) - receiver = Receiver("recv%s" % - proto, profile, recv_node, recv_host, self.inputs.logger) - - # start traffic - if recvr: - receiver.start() - sender.start() - - return (sender, receiver) - - def stop_traffic_scapy(self, sender, receiver,recvr=True): - - # stop traffic - sender.stop() - if recvr: - receiver.stop() - self.logger.info("Sent: %s; Received: %s", sender.sent, receiver.recv) - return (sender.sent, receiver.recv) - - - def verify_sec_group_port_proto(self, port_test=False, double_rule=False): - results = [] - self.logger.info("Verifcations with UDP traffic") - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm2_fix, self.sg2_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'pass')) - if port_test: - results.append( - self.assert_traffic(sender, receiver, 'udp', 8010, 9010, 'fail')) - - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm3_fix, 'default') - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'fail')) - if port_test: - results.append( - self.assert_traffic(sender, receiver, 'udp', 8010, 9010, 'fail')) - - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm4_fix, self.sg2_fix.secgrp_name) - results.append( - 
self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'pass')) - if port_test: - results.append( - self.assert_traffic(sender, receiver, 'udp', 8010, 9010, 'fail')) - - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm5_fix, self.sg1_fix.secgrp_name) - if double_rule: - exp = 'pass' - else: - exp = 'fail' - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, exp)) - if port_test: - results.append( - self.assert_traffic(sender, receiver, 'udp', 8010, 9010, 'fail')) - - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm6_fix, 'default') - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'fail')) - if port_test: - results.append( - self.assert_traffic(sender, receiver, 'udp', 8010, 9010, 'fail')) - - self.logger.info("Verifcations with TCP traffic") - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm2_fix, self.sg2_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'fail')) - if port_test: - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8010, 9010, 'fail')) - - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm3_fix, 'default') - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'fail')) - if port_test: - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8010, 9010, 'fail')) - - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm4_fix, self.sg1_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'pass')) - if port_test: - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8010, 9010, 'fail')) - - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm5_fix, self.sg1_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'pass')) - if port_test: - results.append( - self.assert_traffic(sender, receiver, 'tcp', 
8010, 9010, 'fail')) - - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm6_fix, 'default') - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'fail')) - if port_test: - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8010, 9010, 'fail')) - - errmsg = '' - for (rc, msg) in results: - if rc: - self.logger.debug(msg) - else: - errmsg += msg + '\n' - if errmsg: - assert False, errmsg - - def verify_sec_group_with_udp_and_policy_with_tcp(self): - results = [] - self.logger.info("Verifcations with TCP traffic") - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm2_fix, self.sg2_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'fail')) - - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm3_fix, 'default') - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'fail')) - - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm4_fix, self.sg1_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'pass')) - - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm5_fix, self.sg1_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'pass')) - - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm6_fix, 'default') - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'fail')) - - self.logger.info("Verifcations with UDP traffic") - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm2_fix, self.sg2_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'pass')) - - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm3_fix, 'default') - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'fail')) - - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - 
receiver = (self.vm4_fix, self.sg2_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'fail')) - - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm5_fix, self.sg1_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'fail')) - - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm6_fix, 'default') - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'fail')) - - errmsg = '' - for (rc, msg) in results: - if rc: - self.logger.debug(msg) - else: - errmsg += msg + '\n' - if errmsg: - assert False, errmsg - - def verify_sec_group_with_udp_and_policy_with_tcp_port(self): - results = [] - self.logger.info("Verifcations with TCP traffic") - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm2_fix, self.sg2_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'fail')) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8010, 9000, 'fail')) - - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm3_fix, 'default') - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'fail')) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8010, 9010, 'fail')) - - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm4_fix, self.sg1_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'pass')) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9010, 'fail')) - - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm5_fix, self.sg1_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'pass')) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8010, 9000, 'fail')) - sender = (self.vm1_fix, self.sg1_fix.secgrp_name) - receiver = (self.vm6_fix, 'default') - 
results.append( - self.assert_traffic(sender, receiver, 'tcp', 8000, 9000, 'fail')) - results.append( - self.assert_traffic(sender, receiver, 'tcp', 8010, 9000, 'fail')) - - self.logger.info("Verifcations with UDP traffic") - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm2_fix, self.sg2_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'pass')) - results.append( - self.assert_traffic(sender, receiver, 'udp', 8010, 9000, 'pass')) - - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm3_fix, 'default') - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'fail')) - results.append( - self.assert_traffic(sender, receiver, 'udp', 8010, 9000, 'fail')) - - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm4_fix, self.sg2_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'fail')) - results.append( - self.assert_traffic(sender, receiver, 'udp', 8010, 9000, 'fail')) - - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm5_fix, self.sg1_fix.secgrp_name) - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'fail')) - results.append( - self.assert_traffic(sender, receiver, 'udp', 8010, 9000, 'fail')) - - sender = (self.vm1_fix, self.sg2_fix.secgrp_name) - receiver = (self.vm6_fix, 'default') - results.append( - self.assert_traffic(sender, receiver, 'udp', 8000, 9000, 'fail')) - results.append( - self.assert_traffic(sender, receiver, 'udp', 8010, 9000, 'fail')) - - errmsg = '' - for (rc, msg) in results: - if rc: - self.logger.debug(msg) - else: - errmsg += msg + '\n' - if errmsg: - assert False, errmsg - - def start_traffic_and_verify(self, topo, config_topo, prto=None, sprt=None, dprt=None, expt=None, start=0, end=None, traffic_reverse=True): - results = [] - if not end: - end = len(topo.traffic_profile) - 1 - for i in range(start, end+1): - sender = 
(config_topo['vm'][topo.traffic_profile[i]['src_vm']], topo.sg_of_vm[topo.traffic_profile[i]['src_vm']]) - receiver = (config_topo['vm'][topo.traffic_profile[i]['dst_vm']], topo.sg_of_vm[topo.traffic_profile[i]['dst_vm']]) - if not sprt: - sport = topo.traffic_profile[i]['sport'] - else: - sport = sprt - if not dprt: - dport = topo.traffic_profile[i]['dport'] - else: - dport = dprt - if not prto: - proto = topo.traffic_profile[i]['proto'] - else: - proto = prto - if not expt: - exp = topo.traffic_profile[i]['exp'] - else: - exp = expt - results.append(self.assert_traffic(sender, receiver, proto, sport, dport, exp)) - if traffic_reverse: - results.append(self.assert_traffic(receiver, sender, proto, sport, dport, exp)) - - errmsg = '' - for (rc, msg) in results: - if rc: - self.logger.debug(msg) - else: - errmsg += msg + '\n' - if errmsg: - assert False, errmsg - - - def start_traffic_and_verify_multiproject(self, topo_objs, config_topo, prto=None, sprt=None, dprt=None, expt=None, start=0, end=None, traffic_reverse=True): - results = [] - topo = topo_objs[self.topo.project_list[0]] - if not end: - end = len(topo.traffic_profile) - 1 - for i in range(start, end+1): - sender = (config_topo[topo.traffic_profile[i]['src_vm'][0]]['vm'][topo.traffic_profile[i]['src_vm'][1]], - topo_objs[topo.traffic_profile[i]['src_vm'][0]].sg_of_vm[topo.traffic_profile[i]['src_vm'][1]]) - receiver = (config_topo[topo.traffic_profile[i]['dst_vm'][0]]['vm'][topo.traffic_profile[i]['dst_vm'][1]], - topo_objs[topo.traffic_profile[i]['dst_vm'][0]].sg_of_vm[topo.traffic_profile[i]['dst_vm'][1]]) - - if not sprt: - sport = topo.traffic_profile[i]['sport'] - else: - sport = sprt - if not dprt: - dport = topo.traffic_profile[i]['dport'] - else: - dport = dprt - if not prto: - proto = topo.traffic_profile[i]['proto'] - else: - proto = prto - if not expt: - exp = topo.traffic_profile[i]['exp'] - else: - exp = expt - results.append(self.assert_traffic(sender, receiver, proto, sport, dport, exp)) - if 
traffic_reverse: - results.append(self.assert_traffic(receiver, sender, proto, sport, dport, exp)) - - errmsg = '' - for (rc, msg) in results: - if rc: - self.logger.debug(msg) - else: - errmsg += msg + '\n' - if errmsg: - assert False, errmsg - - - def start_traffic_and_verify_negative_cases(self, topo, config_topo): - self.start_traffic_and_verify(topo, config_topo) - self.start_traffic_and_verify(topo, config_topo, prto='tcp',expt='fail',start=4) - self.start_traffic_and_verify(topo, config_topo, prto='icmp',expt='fail',start=4) - - def fetch_flow_verify_sg_uuid( - self, - nh, - src_vm_fix, - dst_vm_fix, - sport, - dport, - proto, - uuid_exp, - comp_node_ip): - # get the forward flow on compute node - inspect_h1 = self.agent_inspect[comp_node_ip] - flow_rec1 = None - count = 1 - test_result = False - while (flow_rec1 is None and count < 10): - flow_rec1 = inspect_h1.get_vna_fetchflowrecord( - nh=nh, - sip=src_vm_fix.vm_ip, - dip=dst_vm_fix.vm_ip, - sport=unicode(sport), - dport=unicode(dport), - protocol=proto) - count += 1 - sleep(0.5) - - key = 'sg_rule_uuid' - if flow_rec1 is None: - self.logger.error( - "no flow in agent introspect for the proto:%s traffic" % - (proto)) - test_result = False - else: - self.logger.info("Flow found in agent %s for nh %s is: %s" % (comp_node_ip, nh, flow_rec1)) - for item in flow_rec1: - if key in item: - # compare uuid here - if item[key] == uuid_exp: - self.logger.info( - "security group rule uuid matches with flow secgrp uuid %s" % - (uuid_exp)) - test_result = True - else: - self.logger.error( - "security group rule uuid %s doesn't matches with flow secgrp uuid %s" % - (uuid_exp, item[key])) - test_result = False - - return test_result - - def verify_flow_to_sg_rule_mapping( - self, - src_vm_fix, - dst_vm_fix, - src_vn_fix, - dst_vn_fix, - secgrp_id, - proto, - port): - ''' this method verifies flow to security group mapping for both forward and reverse flow - for the given sec grp id''' - - if self.option == 'openstack': - 
src_vn_fq_name = src_vn_fix.vn_fq_name - dst_vn_fq_name = dst_vn_fix.vn_fq_name - else: - src_vn_fq_name = ':'.join(src_vn_fix._obj.get_fq_name()) - dst_vn_fq_name = ':'.join(dst_vn_fix._obj.get_fq_name()) - - nh_src = src_vm_fix.tap_intf[src_vn_fq_name]['flow_key_idx'] - nh_dst = dst_vm_fix.tap_intf[dst_vn_fq_name]['flow_key_idx'] - proto_num = {'udp': '17', 'tcp': '6', 'icmp': '1'} - - test_result = True - rule_uuid = None - # get the egress rule uuid - rules = list_sg_rules(self.connections, secgrp_id) - for rule in rules: - if rule['direction'] == 'egress' and (rule['ethertype'] == 'IPv4' or \ - rule['remote_ip_prefix'] == '0.0.0.0/0') and \ - (rule['protocol'] == 'any' or rule['protocol'] == proto): - rule_uuid = rule['id'] - break - assert rule_uuid, "Egress rule id could not be found" - - # verify forward flow on src compute node - if not self.fetch_flow_verify_sg_uuid( - nh_src, - src_vm_fix, - dst_vm_fix, - port, - port, - proto_num[proto], - rule_uuid, - src_vm_fix.vm_node_ip): - test_result = False - # verify reverse flow on src compute node - if src_vm_fix.vm_node_ip == dst_vm_fix.vm_node_ip: - nh = nh_dst - else: - nh = nh_src - if not self.fetch_flow_verify_sg_uuid( - nh, - dst_vm_fix, - src_vm_fix, - port, - port, - proto_num[proto], - rule_uuid, - src_vm_fix.vm_node_ip): - test_result = False - - if src_vm_fix.vm_node_ip != dst_vm_fix.vm_node_ip: - self.logger.info("verify on destination compute too, \ - as source and destination computes are different") - # get the ingress rule uuid - rule_uuid = None - rules = list_sg_rules(self.connections, secgrp_id) - for rule in rules: - if rule['direction'] == 'ingress' and \ - (rule['ethertype'] == 'IPv4' or \ - rule['remote_group_id'] == secgrp_id or \ - rule['remote_ip_prefix'] == '0.0.0.0/0') and \ - (rule['protocol'] == 'any' or rule['protocol'] == proto): - rule_uuid = rule['id'] - break - assert rule_uuid, "Ingress rule id could not be found" - # verify forward flow on dst compute node - if not 
self.fetch_flow_verify_sg_uuid( - nh_dst, - src_vm_fix, - dst_vm_fix, - port, - port, - proto_num[proto], - rule_uuid, - dst_vm_fix.vm_node_ip): - test_result = False - # verify reverse flow on dst compute node - if not self.fetch_flow_verify_sg_uuid( - nh_dst, - dst_vm_fix, - src_vm_fix, - port, - port, - proto_num[proto], - rule_uuid, - dst_vm_fix.vm_node_ip): - test_result = False - - return test_result - - def verify_traffic_on_vms(self, - src_vm_fix, src_vn_fix, - dst_vm_fix, dst_vn_fix, - src_filters, dst_filters, - src_exp_count=None, dst_exp_count=None - ): - - result = False - if self.option == 'openstack': - src_vn_fq_name = src_vn_fix.vn_fq_name - dst_vn_fq_name = dst_vn_fix.vn_fq_name - else: - src_vn_fq_name = ':'.join(src_vn_fix._obj.get_fq_name()) - dst_vn_fq_name = ':'.join(dst_vn_fix._obj.get_fq_name()) - - # start tcpdump on dst VM - session1, pcap1 = start_tcpdump_for_vm_intf(self, - dst_vm_fix, dst_vn_fq_name, - filters = dst_filters) - # start tcpdump on src VM - session2, pcap2 = start_tcpdump_for_vm_intf(self, - src_vm_fix, src_vn_fq_name, - filters = src_filters) - - #verify packet count and stop tcpdump on dst VM - if not verify_tcpdump_count(self, session1, pcap1, exp_count=dst_exp_count): - return result - #verify packet count and stop tcpdump on src VM - if not verify_tcpdump_count(self, session2, pcap2, exp_count=src_exp_count): - return result - - return True diff --git a/scripts/serial_scripts/__init__.py b/scripts/serial_scripts/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/scripts/smgr/base.py b/scripts/smgr/base.py index daeb83ec3..03e9d0701 100644 --- a/scripts/smgr/base.py +++ b/scripts/smgr/base.py @@ -1,4 +1,4 @@ -import test +import test_v1 import fixtures import sys import os @@ -6,7 +6,7 @@ from smgr_common import SmgrFixture -class ServerManagerTest(test.BaseTestCase): +class ServerManagerTest(test_v1.BaseTestCase_v1): @classmethod diff --git a/scripts/smgr/smgr_inventory_monitoring_tests.py 
b/scripts/smgr/smgr_inventory_monitoring_tests.py new file mode 100644 index 000000000..1aca443c2 --- /dev/null +++ b/scripts/smgr/smgr_inventory_monitoring_tests.py @@ -0,0 +1,343 @@ +from fabric.api import local +from fabric.api import settings, run +from tcutils.test_lib.test_utils import assertEqual +import os +import time +from base import ServerManagerTest +import test +import fixtures +from smgr_common import SmgrFixture +import json +import pdb + + +def inventory_show_tests(self): + cluster_id=self.smgr_fixture.get_cluster_id() + cmd="server-manager show server --cluster_id " + cluster_id + " --select 'id' | grep 'id' | head -n 1 | cut -d ':' -f 2 | cut -d '" + cmd=cmd + '"' + "' -f 2" + server_id=local(cmd,capture=True) + + #Show and check if server inventory has the desired fields. + cmd="server-manager show inventory --server_id " + server_id + server_inventory=local(cmd,capture=True) + if (('"name": "'+server_id+'"' in server_inventory) and + ('"cluster_id": "'+cluster_id+'"' in server_inventory) and + ('interface_infos' in server_inventory) and + ('fru_infos' in server_inventory) and + ('cpu_info_state' in server_inventory) and + ('mem_state' in server_inventory) and + ('kernel_version' in server_inventory)): + self.logger.info("Verification of show inventory for server with server_id Passed.") + else: + self.logger.error("Verification of show inventory for server with server_id Failed.") + return False + + #Show and check if server inventory has the desired fields when displayed with cluster_id. 
+ cmd="server-manager show inventory --cluster_id " + cluster_id + server_inventory=local(cmd,capture=True) + if (('"name": "'+server_id+'"' in server_inventory) and + ('"cluster_id": "'+cluster_id+'"' in server_inventory) and + ('interface_infos' in server_inventory) and + ('fru_infos' in server_inventory) and + ('cpu_info_state' in server_inventory) and + ('mem_state' in server_inventory) and + ('kernel_version' in server_inventory)): + self.logger.info("Verification of show inventory for server with cluster_id Passed.") + else: + self.logger.error("Verification of show inventory for server with cluster_id Failed.") + return False + + #Show and check if server inventory has the desired fields when displayed with tags. + server_ip=self.smgr_fixture.get_ip_using_server_id(server_id) + self.smgr_fixture.add_tag_to_server(server_ip,"datacenter","inventory_tag") + cmd='server-manager show inventory --tag "datacenter=inventory_tag"' + server_inventory=local(cmd,capture=True) + if (('"name": "'+server_id+'"' in server_inventory) and + ('"cluster_id": "'+cluster_id+'"' in server_inventory) and + ('interface_infos' in server_inventory) and + ('fru_infos' in server_inventory) and + ('cpu_info_state' in server_inventory) and + ('mem_state' in server_inventory) and + ('kernel_version' in server_inventory)): + self.logger.info("Verification of show inventory with tags Passed.") + else: + self.logger.error("Verification of show inventory with tags Failed.") + return False + return True +#end inventory_show_tests + +def inventory_tests(self, node_name=None): + if node_name is None: + self.logger.error("ERROR :: Target node name has to be specified to test inventory information.") + return False + self.logger.info("------------INVENTORY TEST FOR NODE %s------------" % node_name) + local("server-manager-client display inventory --server_id %s > working_db.txt" % node_name) + fd=open('working_db.txt','r') + lines=fd.readlines() + fd.close() + fd=open('working_db.json','w') + for i 
in range(1,len(lines)-1): + fd.write(lines[i]) + fd.close() + fd=open('working_db.json','r') + inventory_data=json.load(fd) + fd.close() + + node_ip=self.smgr_fixture.get_ip_using_server_id(node_name) + node_pswd=self.smgr_fixture.get_pswd_using_server_id(node_name) + + #Check for cpu details in inventory. + with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True): + cpu_cores=run('cat /proc/cpuinfo | grep "cpu cores" | head -n 1 |cut -d ":" -f2') + clock_speed=run('cat /proc/cpuinfo | grep "cpu MHz" | head -n 1 |cut -d ":" -f2') + model=run('cat /proc/cpuinfo | grep "model name" | head -n 1 |cut -d ":" -f2') + + assertEqual(int(cpu_cores), inventory_data['ServerInventoryInfo']['cpu_cores_count'], + "cpu_cores_count mismatch for node %s = inventory_data - %s, proc-cpuinfo data - %s" % (node_name,inventory_data['ServerInventoryInfo']['cpu_cores_count'],cpu_cores)) + assertEqual(float(clock_speed), float(inventory_data['ServerInventoryInfo']['cpu_info_state']['clock_speed_MHz']), + "clock_speed mismatch for node %s = inventory_data - %s, proc-cpuinfo data - %s" + % (node_name,float(inventory_data['ServerInventoryInfo']['cpu_info_state']['clock_speed_MHz']),float(clock_speed))) + assertEqual(model, inventory_data['ServerInventoryInfo']['cpu_info_state']['model'], + "model mismatch for node %s = inventory_data - %s, proc-cpuinfo data - %s" + % (node_name,inventory_data['ServerInventoryInfo']['cpu_info_state']['model'],model)) + + #Check for interface details in inventory both physical and virtual intrerfaces should be listed. 
+ with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True): + intf_names=run("ifconfig -a | grep 'Link encap' | awk '{print $1}'") + intf_list=intf_names.split('\r\n') + + track_intf=list(intf_list) + for i in range(0,len(track_intf)): + if '-' in track_intf[i]: + del track_intf[i] + + for intf_data in inventory_data['ServerInventoryInfo']['interface_infos']: + if '_' in intf_data['interface_name']: + continue + if intf_data['interface_name'] in track_intf: + if (intf_data['ip_addr'] and intf_data['ip_addr'] != 'N/A'): + with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True): + ip_addr=run("ifconfig " + intf_data['interface_name'] + " | grep inet | awk '{print $2}' | cut -d ':' -f 2") + assertEqual(ip_addr, intf_data['ip_addr'], "ip address mis-match for interface %s on node %s. inventory data - %s, ifconfig data %s" + % (intf_data['interface_name'],node_name,intf_data['ip_addr'],ip_addr)) + + if (intf_data['macaddress'] and intf_data['macaddress'] != 'N/A'): + with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True): + mac_addr=run("cat /sys/class/net/" + intf_data['interface_name'] + "/address") + assertEqual(mac_addr.lower(), intf_data['macaddress'].lower(), "mac address mis-match for interface %s on node %s. inventory data - %s, ifconfig data %s" + % (intf_data['interface_name'],node_name,intf_data['macaddress'].lower(),mac_addr.lower())) + + if (intf_data['netmask'] and intf_data['netmask'] != 'N/A'): + with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True): + mask=run("ifconfig " + intf_data['interface_name'] + " | grep Mask | awk '{print $4}' | cut -d ':' -f 2") + assertEqual(mask, intf_data['netmask'], "netmask mis-match for interface %s on node %s. 
inventory data - %s, ifconfig data %s" + % (intf_data['interface_name'],node_name,intf_data['netmask'],mask)) + + else: + self.logger.error("ERROR :: Interface not found in inventory but there as part of the system info") + self.logger.error("ERROR :: Inventory interface information %s" % intf_data) + self.logger.error("ERROR :: System interface information %s" % track_intf) + return False + + #Check for memory state and number of disks. + with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True): + dimm_size_mb=run("dmidecode -t 17 | grep Size | head -n 1 | awk '{print $2}'") + mem_speed_MHz=run("dmidecode -t 17 | grep Speed | head -n 1 | awk '{print $2}'") + mem_type=run("dmidecode -t 17 | grep Type | head -n 1 | awk '{print $2}'") + num_of_dimms=run("dmidecode -t 17 | grep 'Memory Device' | wc -l") + swap_size_mb=run("vmstat -s -S M | grep 'total swap' | awk '{print $1}'") + total_mem_mb=run("vmstat -s -S M | grep 'total memory' | awk '{print $1}'") + + assertEqual(int(dimm_size_mb), inventory_data['ServerInventoryInfo']['mem_state']['dimm_size_mb'], + "dimm_size_mb mismatch for node %s = inventory_data - %s, dmidecode data - %s" % (node_name,inventory_data['ServerInventoryInfo']['mem_state']['dimm_size_mb'],int(dimm_size_mb))) + assertEqual(int(mem_speed_MHz), inventory_data['ServerInventoryInfo']['mem_state']['mem_speed_MHz'], + "mem_speed_MHz mismatch for node %s = inventory_data - %s, dmidecode data - %s" % (node_name,inventory_data['ServerInventoryInfo']['mem_state']['mem_speed_MHz'],int(mem_speed_MHz))) + assertEqual(mem_type, inventory_data['ServerInventoryInfo']['mem_state']['mem_type'], + "mem_type mismatch for node %s = inventory_data - %s, dmidecode data - %s" % (node_name,inventory_data['ServerInventoryInfo']['mem_state']['mem_type'],mem_type)) + assertEqual(int(num_of_dimms), inventory_data['ServerInventoryInfo']['mem_state']['num_of_dimms'], + "num_of_dimms mismatch for node %s = inventory_data - %s, dmidecode data - %s" % 
(node_name,inventory_data['ServerInventoryInfo']['mem_state']['num_of_dimms'],int(num_of_dimms))) + + if (float(swap_size_mb)*0.98 <= float(inventory_data['ServerInventoryInfo']['mem_state']['swap_size_mb']) <= float(swap_size_mb)*1.02): + self.logger.info("swap_size_mb matched inventory data.") + else: + self.logger.error("swap_size_mb for node %s = inventory_data - %s, vmstat data - %s --- Not in range 98% to 102%" + % (node_name,float(inventory_data['ServerInventoryInfo']['mem_state']['swap_size_mb']),float(swap_size_mb))) + return False + + if (float(total_mem_mb)*0.98 <= float(inventory_data['ServerInventoryInfo']['mem_state']['total_mem_mb']) <= float(total_mem_mb)*1.02): + self.logger.info("total_mem_mb matched inventory data.") + else: + self.logger.error("total_mem_mb for node %s = inventory_data - %s, vmstat data - %s --- Not in range 98% to 102%" + % (node_name,float(inventory_data['ServerInventoryInfo']['mem_state']['total_mem_mb']),float(total_mem_mb))) + return False + + #Check for system related inventory information. 
+ with settings(host_string='root@'+node_ip, password=node_pswd, warn_only=True): + board_manufacturer=run("dmidecode -t 3 | grep 'Manufacturer' | awk '{print $2}'") + kernel_version=run("uname -r | cut -d '-' -f 1") + name=run("uname -n") + hardware_model=run("uname -i") + node_os=run("uname -v | cut -d '-' -f 2 | awk '{print $1}'") + + assertEqual(board_manufacturer, inventory_data['ServerInventoryInfo']['fru_infos'][0]['board_manufacturer'], + "board_manufacturer mismatch for node %s = inventory_data - %s, dmidecode data - %s" + % (node_name,inventory_data['ServerInventoryInfo']['fru_infos'][0]['board_manufacturer'],board_manufacturer)) + assertEqual(kernel_version, inventory_data['ServerInventoryInfo']['kernel_version'], + "kernel_version mismatch for node %s = inventory_data - %s, uname data - %s" % (node_name,inventory_data['ServerInventoryInfo']['kernel_version'],kernel_version)) + assertEqual(name, inventory_data['ServerInventoryInfo']['name'], + "name mismatch for node %s = inventory_data - %s, uname data - %s" % (node_name,inventory_data['ServerInventoryInfo']['name'],name)) + assertEqual(hardware_model, inventory_data['ServerInventoryInfo']['hardware_model'], + "hardware_model mismatch for node %s = inventory_data - %s, uname data - %s" % (node_name,inventory_data['ServerInventoryInfo']['hardware_model'],hardware_model)) + assertEqual(node_os, inventory_data['ServerInventoryInfo']['os'], + "os mismatch for node %s = inventory_data - %s, uname data - %s" % (node_name,inventory_data['ServerInventoryInfo']['os'],node_os)) + + os.remove('working_db.txt') + self.logger.info("------------END OF INVENTORY TEST FOR NODE %s------------" % node_name) + return True +#end inventory_tests + +def monitoring_show_tests(self): + cluster_id=self.smgr_fixture.get_cluster_id() + cmd="server-manager-client display server --json --cluster_id " + cluster_id + " --select 'id' | grep 'id' | head -n 1 | cut -d ':' -f 2 | cut -d '" + cmd=cmd + '"' + "' -f 2" + 
server_id=local(cmd,capture=True) + + #Show and check if server monitoring has the desired fields. + cmd="server-manager-client display monitoring --server_id " + server_id + server_monitoring=local(cmd,capture=True) + if (('"name": "'+server_id+'"' in server_monitoring) and + ('"cluster_id": "'+cluster_id+'"' in server_monitoring) and + ('chassis_state' in server_monitoring) and + ('disk_usage_stats' in server_monitoring) and + ('disk_usage_totals' in server_monitoring) and + ('file_system_view_stats' in server_monitoring) and + ('network_info_stats' in server_monitoring) and + ('network_info_totals' in server_monitoring) and + ('resource_info_stats' in server_monitoring) and + ('sensor_stats' in server_monitoring)): + self.logger.info("Verification of show monitoring for server with server_id Passed.") + else: + self.logger.error("Verification of show monitoring for server with server_id Failed.") + return False + + #Show and check if server monitoring has the desired fields when displayed with cluster_id. + cmd="server-manager-client display monitoring --cluster_id " + cluster_id + server_monitoring=local(cmd,capture=True) + if (('"name": "'+server_id+'"' in server_monitoring) and + ('"cluster_id": "'+cluster_id+'"' in server_monitoring) and + ('chassis_state' in server_monitoring) and + ('disk_usage_stats' in server_monitoring) and + ('disk_usage_totals' in server_monitoring) and + ('file_system_view_stats' in server_monitoring) and + ('network_info_stats' in server_monitoring) and + ('network_info_totals' in server_monitoring) and + ('resource_info_stats' in server_monitoring) and + ('sensor_stats' in server_monitoring)): + self.logger.info("Verification of show monitoring for server with cluster_id Passed.") + else: + self.logger.error("Verification of show monitoring for server with cluster_id Failed.") + return False + + #Show and check if server monitoring has the desired fields when displayed with tags. 
+ server_ip=self.smgr_fixture.get_ip_using_server_id(server_id) + self.smgr_fixture.add_tag_to_server(server_ip,"datacenter","monitoring_tag") + cmd='server-manager-client display monitoring --tag "datacenter=monitoring_tag"' + server_monitoring=local(cmd,capture=True) + if (('"name": "'+server_id+'"' in server_monitoring) and + ('"cluster_id": "'+cluster_id+'"' in server_monitoring) and + ('chassis_state' in server_monitoring) and + ('disk_usage_stats' in server_monitoring) and + ('disk_usage_totals' in server_monitoring) and + ('file_system_view_stats' in server_monitoring) and + ('network_info_stats' in server_monitoring) and + ('network_info_totals' in server_monitoring) and + ('resource_info_stats' in server_monitoring) and + ('sensor_stats' in server_monitoring)): + self.logger.info("Verification of show monitoring with tags Passed.") + else: + self.logger.error("Verification of show monitoring with tags Failed.") + return False + return True + +#end monitoring_show_tests + +def monitoring_functionality_tests(self): + cluster_id=self.smgr_fixture.get_cluster_id() + cmd="server-manager show server --cluster_id " + cluster_id + " --select 'id' | grep 'id' | head -n 1 | cut -d ':' -f 2 | cut -d '" + cmd=cmd + '"' + "' -f 2" + server_id=local(cmd,capture=True) + + #Check and verify if monitoring data is available. 
+ cmd='server-manager-client display monitoring' + server_monitoring=local(cmd,capture=True) + if (('"name": "'+server_id+'"' in server_monitoring) and + ('"cluster_id": "'+cluster_id+'"' in server_monitoring) and + ('chassis_state' in server_monitoring) and + ('disk_usage_stats' in server_monitoring) and + ('disk_usage_totals' in server_monitoring) and + ('file_system_view_stats' in server_monitoring) and + ('network_info_stats' in server_monitoring) and + ('network_info_totals' in server_monitoring) and + ('resource_info_stats' in server_monitoring) and + ('sensor_stats' in server_monitoring)): + self.logger.info("Verification of show monitoring Passed.") + + #Disable the monitoring plugin and restart SM to stop monitoring. + cmd='sed -i s/monitoring_plugin/#monitoring_plugin/ /opt/contrail/server_manager/sm-config.ini' + local(cmd) + cmd='service contrail-server-manager restart' + local(cmd) + time.sleep(10) + cmd='service contrail-server-manager status' + SM_status=local(cmd,capture=True) + if 'not running' in SM_status: + self.logger.error('ERROR :: Failed to restart Server Manager after disabling monitoring plugin.') + return False + + #Check that no monitoring data is available once the plugin is disabled. + #Check that right message is passed on to the user about enabling monitoring. + cmd='server-manager-client display monitoring' + server_monitoring=local(cmd,capture=True) + if (not('"return_code": 9' in server_monitoring) or + (not('Reset the configuration correctly and restart Server Manager.' in server_monitoring))): + self.logger.error('ERROR :: Failed to stop monitoring plugin by commenting it out in sm-config.ini file'+ + ' and restarting Server Manager process') + return False + + #Re-enable monitoring plugin and check the monitoring data. 
+ cmd='sed -i s/#monitoring_plugin/monitoring_plugin/ /opt/contrail/server_manager/sm-config.ini' + local(cmd) + cmd='service contrail-server-manager restart' + local(cmd) + time.sleep(10) + cmd='service contrail-server-manager status' + SM_status=local(cmd,capture=True) + if 'not running' in SM_status: + self.logger.error('ERROR :: Failed to restart Server Manager after enabling monitoring plugin.') + return False + + #Sleep for monitoring timer interval. + sleep_time=local("cat /opt/contrail/server_manager/sm-config.ini | grep monitoring_frequency | awk '{print $3}'", capture=True) + time.sleep(int(sleep_time)+5) + cmd='server-manager-client display monitoring' + server_monitoring=local(cmd,capture=True) + if (('"name": "'+server_id+'"' in server_monitoring) and + ('"cluster_id": "'+cluster_id+'"' in server_monitoring) and + ('chassis_state' in server_monitoring) and + ('disk_usage_stats' in server_monitoring) and + ('disk_usage_totals' in server_monitoring) and + ('file_system_view_stats' in server_monitoring) and + ('network_info_stats' in server_monitoring) and + ('network_info_totals' in server_monitoring) and + ('resource_info_stats' in server_monitoring) and + ('sensor_stats' in server_monitoring)): + self.logger.info("Verification of show monitoring after re-enabling the plugin Passed.") + else: + self.logger.error("Verification of show monitoring after re-enabling the plugin Failed.") + return False + else: + self.logger.error("Verification of show monitoring Failed.") + return False + return True + +#end monitoring_functionality_tests diff --git a/scripts/smgr/smgr_upgrade_tests.py b/scripts/smgr/smgr_upgrade_tests.py index f5ab87cc9..8a8eda6c8 100644 --- a/scripts/smgr/smgr_upgrade_tests.py +++ b/scripts/smgr/smgr_upgrade_tests.py @@ -9,6 +9,7 @@ import pdb import random import string +from fabric.api import settings, run, local # Build image id from the image name. 
def image_name_to_id(self, image_name=None): @@ -37,13 +38,109 @@ def check_if_SM_base_img_is_leq_R21(self, image_name=None): return False #end check_if_SM_base_img_is_leq_R21 +# Setup test environment in cfgm-0 of the target setup. +def setup_contrail_test(self): + cfgm0_host=self.smgr_fixture.testbed.env.roledefs['cfgm'][0] + cfgm0_password=self.smgr_fixture.testbed.env.passwords[cfgm0_host] + cmd1 = 'sshpass -p ' + cfgm0_password + ' scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' + cmd2 = cmd1 + '/root/sm_files/contrail_packages/setup.sh ' + cfgm0_host +':/opt/contrail/contrail_packages/' + # copy setup.sh to /opt/contrail/contrail_packages + ret = local(cmd2, capture=True) + + contrail_test_pkg = os.environ['AR_BASE_DEB'].rsplit('/',1)[0] + '/artifacts_extra/contrail-test-*.tgz' + contrail_fab_pkg = os.environ['AR_BASE_DEB'].rsplit('/',1)[0] + '/artifacts_extra/contrail-fabric-utils*.tgz' + cmd2 = cmd1 + contrail_test_pkg + ' ' + cfgm0_host + ':~/' + # copy contrail-test to cfgm0 + ret = local(cmd2, capture=True) + cmd2 = cmd1 + contrail_fab_pkg + ' ' + cfgm0_host + ':~/' + # copy contrail-fabric-utils to cfgm0 + ret = local(cmd2, capture=True) + + with settings(host_string=cfgm0_host, password=cfgm0_password, warn_only=True): + # run setup.sh + run('/opt/contrail/contrail_packages/setup.sh') + # untar contrail-test and contrail-fabric-utils + run('cd ~; tar xzf contrail-test-[[:digit:]].*.tgz; tar xzf contrail-fabric-utils*.tgz') + + # copy /root/contrail-test/testbed.py to cfgm0 /root/fabric-utils/fabfile/testbeds/ + cmd2 = cmd1 + '/root/sm_files/testbed.py ' + cfgm0_host + ':~/fabric-utils/fabfile/testbeds/' + ret = local(cmd2, capture=True) + + # replace env.test_repo_dir with /root/contrail-test in cfgm0 /root/fabric-utils/fabfile/testbeds/testbed.py + # run fab install_test_repo and setup_test_env + # set environment variables and check if running tests is possible. 
+ ret = '' + with settings(host_string=cfgm0_host, password=cfgm0_password, warn_only=True): + run("sed -i '/env.test_repo_dir/d' /root/fabric-utils/fabfile/testbeds/testbed.py") + cmd = 'echo "env.test_repo_dir=' + "'/root/contrail-test'" + cmd = cmd + '" >> /root/fabric-utils/fabfile/testbeds/testbed.py' + run(cmd) + run('cd /root/fabric-utils/; fab install_test_repo; fab setup_test_env') + cmd = 'cd /root/contrail-test/; export PYTHONPATH=$PATH:$PWD/scripts:$PWD/fixtures; ' + cmd = cmd + 'export TEST_CONFIG_FILE=`basename sanity_params.ini`; ' + cmd1 = cmd + 'python -m testtools.run discover -l serial_scripts.upgrade | grep before' + ret = run(cmd1) + if 'test_fiptraffic_before_upgrade' in ret: + self.logger.info("Successfully installed fabric and setup contrail-test env.") + self.logger.info("Ready to run tests from cfgm0.") + return True; + else: + self.logger.error("ERROR :: Failed to install fabric and setup contrail-test env properly.") + return False +#end setup_contrail_test + +# Create a topology of VM, VN etc on the target setup. 
+def create_topo_before_upgrade(self): + cfgm0_host=self.smgr_fixture.testbed.env.roledefs['cfgm'][0] + cfgm0_password=self.smgr_fixture.testbed.env.passwords[cfgm0_host] + ret1 = '' + with settings(host_string=cfgm0_host, password=cfgm0_password, warn_only=True): + cmd = "sed -i 's/fixtureCleanup=.*/fixtureCleanup=no/g' /root/contrail-test/sanity_params.ini" + ret = run(cmd) + cmd = 'cd /root/contrail-test/; export PYTHONPATH=$PATH:$PWD/scripts:$PWD/fixtures; ' + cmd = cmd + 'export TEST_CONFIG_FILE=`basename sanity_params.ini`; ' + cmd1 = cmd + 'python -m testtools.run discover -l serial_scripts.upgrade | grep before' + ret = run(cmd1) + cmd1 = cmd + 'python -m testtools.run ' + ret.rsplit('[')[0] + ret1 = run(cmd1, timeout=1200) + + if 'END TEST : test_fiptraffic_before_upgrade : PASSED' in ret1: + self.logger.info("Set up the topology before upgrade successfully.") + return True + else: + self.logger.error("ERROR :: Failures while running test. Need to check why setting up of topology failed") + self.logger.error("ERROR :: Not blocking the upgrade test because of this.") + return True +#end create_topo_before_upgrade + +def verify_topo_after_upgrade(self): + cfgm0_host=self.smgr_fixture.testbed.env.roledefs['cfgm'][0] + cfgm0_password=self.smgr_fixture.testbed.env.passwords[cfgm0_host] + ret1 = '' + with settings(host_string=cfgm0_host, password=cfgm0_password, warn_only=True): + cmd = "sed -i 's/fixtureCleanup=.*/fixtureCleanup=yes/g' /root/contrail-test/sanity_params.ini" + ret = run(cmd) + cmd = 'cd /root/contrail-test/; export PYTHONPATH=$PATH:$PWD/scripts:$PWD/fixtures; ' + cmd = cmd + 'export TEST_CONFIG_FILE=`basename sanity_params.ini`; ' + cmd1 = cmd + 'python -m testtools.run discover -l serial_scripts.upgrade | grep after' + ret = run(cmd1) + cmd1 = cmd + 'python -m testtools.run ' + ret.rsplit('[')[0] + ret1 = run(cmd1, timeout=1200) + + if 'FAIL' in ret1: + self.logger.error("ERROR :: Failures while running test. 
Need to check why verification of topology failed") + return False + else: + self.logger.info("Verified the topology after upgrade successfully.") + return True +#end verify_topo_after_upgrade + # Accross release contrail upgrade with server-manager upgrade. def AR_upgrade_test_with_SM_upgrade(self): result = True self.logger.info("Running AR_upgrade_test_with_SM_upgrade.....") self.smgr_fixture.uninstall_sm() self.smgr_fixture.install_sm(SM_installer_file_path=os.environ['SM_BASE_IMG']) - pkg_file=None try: pkg_file=self.smgr_fixture.params['pkg_file'] @@ -99,11 +196,21 @@ def AR_upgrade_test_with_SM_upgrade(self): self.smgr_fixture.add_server() #Reimage and Provision the servers with the base release for upgrade test to follow - assert self.smgr_fixture.setup_cluster() + assert self.smgr_fixture.setup_cluster(no_reimage_pkg=True) + + time.sleep(300) + if setup_contrail_test(self): + if create_topo_before_upgrade(self): + self.logger.info("Creation of topology successfull before running upgrade.") + else: + self.logger.error("FAILED to create topology before running upgrade.") + return False + else: + self.logger.error("FAILED to setup test env on the target cfgm node.") + return False self.smgr_fixture.uninstall_sm() self.smgr_fixture.install_sm(SM_installer_file_path=os.environ['SM_UPGD_IMG']) - with open(pkg_file, 'r') as pkgf: data = json.load(pkgf) pkgf.close() @@ -139,8 +246,14 @@ def AR_upgrade_test_with_SM_upgrade(self): #Provision to upgrade the servers with the target release for upgrade test to follow assert self.smgr_fixture.setup_cluster(provision_only=True) - return result + time.sleep(300) + if verify_topo_after_upgrade(self): + self.logger.info("Verification of topology successfull after upgrade.") + else: + self.logger.error("FAILED to verify topology after upgrade.") + return result +#end AR_upgrade_test_with_SM_upgrade # Accross release contrail upgrade without server-manager upgrade. 
def AR_upgrade_test_without_SM_upgrade(self): diff --git a/scripts/smgr/test_smgr_regression.py b/scripts/smgr/test_smgr_regression.py index 3ae2bf087..72e3d8fda 100644 --- a/scripts/smgr/test_smgr_regression.py +++ b/scripts/smgr/test_smgr_regression.py @@ -8,7 +8,7 @@ import testtools from common.contrail_test_init import ContrailTestInit from smgr_common import SmgrFixture -import smgr_upgrade_tests +import smgr_upgrade_tests, smgr_inventory_monitoring_tests from fabric.api import settings, run import time import pdb @@ -364,4 +364,52 @@ def test_reimage_servers_and_status_check_using_tag(self): return result #end test_restart_servers_using_tag + def test_inventory_information(self): + self.logger.info("Check for inventory information of the servers attached to the SM.") + self.logger.info("Verify few of the fields in the inventory information for the servers attached to the SM.") + nodes = self.smgr_fixture.testbed.env.roledefs['all'] + # Atleast 1 node is needed to run this test. + if len(nodes) < 1: + raise self.skipTest( + "Skipping Test. At least 1 target node required to run the test") + + #Run general inventory show test cases. + if not smgr_inventory_monitoring_tests.inventory_show_tests(self): + self.logger.error("Inventory Show Tests FAILED !!!") + return False + self.logger.info("Inventory Show Tests passed!!!") + + #Run field verification tests on each of the computes inventory output. + target_computes=self.smgr_fixture.get_compute_node_from_testbed_py() + for each_target_node in target_computes: + node_name=self.smgr_fixture.get_server_with_ip_from_db(each_target_node.split('@')[1])['server'][0]['id'] + if not smgr_inventory_monitoring_tests.inventory_tests(self, node_name): + self.logger.error("Inventory Tests for %s FAILED !!!" 
% node_name) + return False + self.logger.info("Inventory Tests for all the compute nodes passed!!!") + return True + #end test_inventory_information + + def test_monitoring_information(self): + self.logger.info("Check for monitoring information of the servers attached to the SM.") + self.logger.info("Verify few of the fields in the monitoring information for the servers attached to the SM.") + nodes = self.smgr_fixture.testbed.env.roledefs['all'] + # Atleast 1 node is needed to run this test. + if len(nodes) < 1: + raise self.skipTest( + "Skipping Test. At least 1 target node required to run the test") + + #Run general monitoring show test cases. + if not smgr_inventory_monitoring_tests.monitoring_show_tests(self): + self.logger.error("Monitoring Show Tests FAILED !!!") + return False + self.logger.info("Monitoring Show Tests passed!!!") + + #Run negative tests and restart cases on monitoring functionality. + if not smgr_inventory_monitoring_tests.monitoring_functionality_tests(self): + self.logger.error("Monitoring Functionality Tests FAILED !!!") + return False + self.logger.info("Monitoring Functionality Tests passed!!!") + return True + #end test_monitoring_information diff --git a/scripts/svc_firewall/base.py b/scripts/svc_firewall/base.py deleted file mode 100644 index 10f28c17a..000000000 --- a/scripts/svc_firewall/base.py +++ /dev/null @@ -1,51 +0,0 @@ -import test -from common.connections import ContrailConnections -from common import isolated_creds -from common import create_public_vn - -class BaseSvc_FwTest(test.BaseTestCase): - - @classmethod - def setUpClass(cls): - super(BaseSvc_FwTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = 
cls.isolated_creds.get_conections() - #cls.connections= ContrailConnections(cls.inputs) - cls.quantum_h= cls.connections.quantum_h - cls.nova_h = cls.connections.nova_h - cls.vnc_lib= cls.connections.vnc_lib -# cls.logger= cls.inputs.logger - cls.agent_inspect= cls.connections.agent_inspect - cls.cn_inspect= cls.connections.cn_inspect - cls.analytics_obj=cls.connections.analytics_obj - cls.public_vn_obj = create_public_vn.PublicVn( - cls.__name__, - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.public_vn_obj.configure_control_nodes() - #end setUpClass - - @classmethod - def tearDownClass(cls): - #cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() - super(BaseSvc_FwTest, cls).tearDownClass() - #end tearDownClass - - def remove_from_cleanups(self, fix): - for cleanup in self._cleanups: - if fix.cleanUp in cleanup: - self._cleanups.remove(cleanup) - break - #end remove_from_cleanups - - - diff --git a/scripts/svc_firewall/test_svc_fw.py b/scripts/svc_firewall/test_svc_fw.py index 0521a9883..d1bc69a20f 100644 --- a/scripts/svc_firewall/test_svc_fw.py +++ b/scripts/svc_firewall/test_svc_fw.py @@ -23,15 +23,25 @@ def runTest(self): pass # end runTest - @test.attr(type=['ci_sanity_WIP', 'sanity', 'quick_sanity']) + @test.attr(type=['sanity']) @preposttest_wrapper def test_svc_in_network_datapath(self): - return self.verify_svc_in_network_datapath(svc_img_name='tiny_nat_fw', ci=True) + return self.verify_svc_in_network_datapath(svc_mode='in-network', ci=True) + + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_svc_v2_in_network_datapath(self): + return self.verify_svc_in_network_datapath(svc_mode='in-network', st_version=2) + + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_svc_v2_transparent_datapath(self): + return self.verify_svc_transparent_datapath(svc_mode='transparent', st_version=2) @test.attr(type=['ci_sanity_WIP', 'sanity', 'quick_sanity']) @preposttest_wrapper def 
test_svc_monitor_datapath(self): - return self.verify_svc_transparent_datapath(svc_img_name='tiny_trans_fw', ci=True) + return self.verify_svc_transparent_datapath(svc_mode='transparent', ci=True) @test.attr(type=['sanity']) @preposttest_wrapper @@ -41,36 +51,37 @@ def test_svc_transparent_with_3_instance(self): @test.attr(type=['sanity']) @preposttest_wrapper def test_svc_in_network_nat_private_to_public(self): - if (('MX_GW_TEST' in os.environ) and (os.environ.get('MX_GW_TEST') == '1')): - public_vn_fixture = self.public_vn_obj.public_vn_fixture - public_vn_subnet = self.public_vn_obj.public_vn_fixture.vn_subnets[ - 0]['cidr'] - # Since the ping is across projects, enabling allow_all in the SG - self.project.set_sec_group_for_allow_all( - self.inputs.project_name, 'default') - self.verify_svc_in_network_datapath( - svc_mode='in-network-nat', vn2_fixture=public_vn_fixture, vn2_subnets=[public_vn_subnet]) - self.logger.info('Ping to outside world from left VM') - svms = self.get_svms_in_si( - self.si_fixtures[0], self.inputs.project_name) - svm_name = svms[0].name - host = self.get_svm_compute(svm_name) - tapintf = self.get_svm_tapintf_of_vn(svm_name, self.vn1_fixture) - self.start_tcpdump_on_intf(host, tapintf) - assert self.vm1_fixture.ping_with_certainty('8.8.8.8', count='10') - out = self.stop_tcpdump_on_intf(host, tapintf) - print out - if '8.8.8.8' in out: - self.logger.info( - 'Ping to 8.8.8.8 is going thru %s ' % svm_name) - else: - result = False - assert result, 'Ping to 8.8.8.8 not going thru the SI' - else: + if ('MX_GW_TEST' not in os.environ) or (('MX_GW_TEST' in os.environ) and (os.environ.get('MX_GW_TEST') != '1')): self.logger.info( - "MX_GW_TEST is not set") + "Skipping Test. Env variable MX_GW_TEST is not set. Skipping the test") raise self.skipTest( - "Env variable MX_GW_TEST not set. Skipping the test") + "Skipping Test. Env variable MX_GW_TEST is not set. 
Skipping the test") + return True + + public_vn_fixture = self.public_vn_obj.public_vn_fixture + public_vn_subnet = self.public_vn_obj.public_vn_fixture.vn_subnets[ + 0]['cidr'] + # Since the ping is across projects, enabling allow_all in the SG + self.project.set_sec_group_for_allow_all( + self.inputs.project_name, 'default') + self.verify_svc_in_network_datapath( + svc_mode='in-network-nat', vn2_fixture=public_vn_fixture, vn2_subnets=[public_vn_subnet]) + self.logger.info('Ping to outside world from left VM') + svms = self.get_svms_in_si( + self.si_fixtures[0], self.inputs.project_name) + svm_name = svms[0].name + host = self.get_svm_compute(svm_name) + tapintf = self.get_svm_tapintf_of_vn(svm_name, self.vn1_fixture) + self.start_tcpdump_on_intf(host, tapintf) + assert self.vm1_fixture.ping_with_certainty('8.8.8.8', count='10') + out = self.stop_tcpdump_on_intf(host, tapintf) + print out + if '8.8.8.8' in out: + self.logger.info( + 'Ping to 8.8.8.8 is going thru %s ' % svm_name) + else: + result = False + assert result, 'Ping to 8.8.8.8 not going thru the SI' return True diff --git a/scripts/svc_mirror/base.py b/scripts/svc_mirror/base.py index 343136fdf..20337049c 100644 --- a/scripts/svc_mirror/base.py +++ b/scripts/svc_mirror/base.py @@ -1,32 +1,23 @@ -import test +import test_v1 from common.connections import ContrailConnections from common import isolated_creds -class BaseMirrorTest(test.BaseTestCase): +class BaseMirrorTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseMirrorTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() cls.quantum_h= cls.connections.quantum_h cls.nova_h = 
cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib cls.agent_inspect= cls.connections.agent_inspect cls.cn_inspect= cls.connections.cn_inspect cls.analytics_obj=cls.connections.analytics_obj + cls.orch=cls.connections.orch #end setUpClass @classmethod def tearDownClass(cls): - #cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(BaseMirrorTest, cls).tearDownClass() #end tearDownClass diff --git a/scripts/upgrade/upgrade_only.py b/scripts/upgrade/upgrade_only.py index 9b62b9645..1efcb210c 100644 --- a/scripts/upgrade/upgrade_only.py +++ b/scripts/upgrade/upgrade_only.py @@ -21,7 +21,7 @@ def setUp(self): self.ini_file = os.environ.get('PARAMS_FILE') else: self.ini_file = 'params.ini' - self.inputs = self.useFixture(ContrailTestInit(self.ini_file)) + self.inputs = ContrailTestInit(self.ini_file) self.connections = ContrailConnections(self.inputs) self.agent_inspect = self.connections.agent_inspect self.quantum_h = self.connections.quantum_h diff --git a/scripts/vdns/__init__.py b/scripts/vdns/__init__.py deleted file mode 100644 index 85049705d..000000000 --- a/scripts/vdns/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""VDNS tests.""" diff --git a/scripts/vdns/base.py b/scripts/vdns/base.py index 064d03f4d..73dc0c504 100644 --- a/scripts/vdns/base.py +++ b/scripts/vdns/base.py @@ -1,4 +1,4 @@ -import test +import test_v1 from common.connections import ContrailConnections from common import isolated_creds from random import randint @@ -27,19 +27,11 @@ from vnc_api.gen.resource_test import * from tcutils.wrappers import preposttest_wrapper -class BasevDNSTest(test.BaseTestCase): +class BasevDNSTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BasevDNSTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - 
cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib @@ -53,8 +45,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - #cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(BasevDNSTest, cls).tearDownClass() #end tearDownClass @@ -140,7 +130,7 @@ def verify_dns_record_order(self, record_order, test_type='test_record_order', r 'Sleep for 180sec to sync vdns server with vdns record entry') sleep(180) # Verify NS look up works for some random records values - self.logger.info('****NSLook up verification****') + self.logger.info('%%%% NSLook up verification %%%%') import re for rec in verify_rec_name_list: cmd = 'nslookup ' + rec @@ -189,6 +179,7 @@ def verify_dns_record_order(self, record_order, test_type='test_record_order', r dns_record = m_obj.group(1).split(':') dns_record_ip = dns_record[1].lstrip() next_ip = self.next_ip_in_list(rec_ip_list, dns_record_ip) + round_robin_success_count = 0 for rec in rec_ip_list: vm_fixture.run_cmd_on_vm(cmds=[cmd]) result = vm_fixture.return_output_cmd_dict[cmd] @@ -201,10 +192,20 @@ def verify_dns_record_order(self, record_order, test_type='test_record_order', r dns_record = m_obj.group(1).split(':') dns_record_ip1 = dns_record[1].lstrip() if record_order == 'round-robin': - if next_ip != dns_record_ip1: - print "\n VDNS records are not sent in round-robin order" + if next_ip == dns_record_ip1: + round_robin_success_count += 1 + else: + round_robin_success_count = 0 + if round_robin_success_count == 3: + self.logger.debug("Consecutive 3 outputs are in round robin fashion") + self.logger.debug("This should be enough to confirm round robin behavior") + break + if rec == rec_ip_list[-1] and round_robin_success_count < 3: + print "\n VDNS records are not sent in \ + round-robin order" 
self.assertTrue( - False, 'VDNS records are not sent in round-robin order') + False, + 'VDNS records are not sent in round-robin order') next_ip = self.next_ip_in_list(rec_ip_list, dns_record_ip1) if record_order == 'random': if dns_record_ip1 not in rec_ip_list: @@ -281,9 +282,11 @@ def vdns_with_cn_dns_agent_restart(self, restart_process): # Frame the Expected DNS data for VM, one for 'A' record and # another 'PTR' record. rec_name = vm_name + "." + domain_name + agent_inspect_h = self.agent_inspect[vm_fixture[vm_name].vm_node_ip] + assigned_dns_ips = agent_inspect_h.get_vna_discovered_dns_server() vm_dns_exp_data[vm_name] = [{'rec_data': vm_ip, 'rec_type': 'A', 'rec_class': 'IN', 'rec_ttl': str( ttl), 'rec_name': rec_name, 'installed': 'yes', 'zone': domain_name}, {'rec_data': rec_name, 'rec_type': 'PTR', 'rec_class': 'IN', 'rec_ttl': str(ttl), 'rec_name': vm_rev_ip, 'installed': 'yes', 'zone': rev_zone}] - self.verify_vm_dns_data(vm_dns_exp_data[vm_name]) + self.verify_vm_dns_data(vm_dns_exp_data[vm_name], assigned_dns_ips[0]) # ping between two vms which are in same subnets by using name. 
self.assertTrue(vm_fixture['vm1-test'] .ping_with_certainty(ip=vm_list[1])) @@ -367,7 +370,9 @@ def vdns_with_cn_dns_agent_restart(self, restart_process): vm_name) self.assertTrue(vm_fixture[vm_name] .ping_with_certainty(ip=vm_name), msg) - self.verify_vm_dns_data(vm_dns_exp_data[vm_name]) + agent_inspect_h = self.agent_inspect[vm_fixture[vm_name].vm_node_ip] + assigned_dns_ips = agent_inspect_h.get_vna_discovered_dns_server() + self.verify_vm_dns_data(vm_dns_exp_data[vm_name], assigned_dns_ips[0]) return True # end test_vdns_controlnode_switchover @@ -395,10 +400,10 @@ def verify_ns_lookup_data(self, vm_fix, cmd, expectd_data): return False return True - def verify_vm_dns_data(self, vm_dns_exp_data): + def verify_vm_dns_data(self, vm_dns_exp_data, dns_server_ip): self.logger.info("Inside verify_vm_dns_data") result = True - dnsinspect_h = self.dnsagent_inspect[self.inputs.bgp_ips[0]] + dnsinspect_h = self.dnsagent_inspect[dns_server_ip] dns_data = dnsinspect_h.get_dnsa_config() vm_dns_act_data = [] msg = '' diff --git a/scripts/vdns/test_vdns.py b/scripts/vdns/test_vdns.py index 2a94ec668..83c8ec5fa 100755 --- a/scripts/vdns/test_vdns.py +++ b/scripts/vdns/test_vdns.py @@ -14,10 +14,6 @@ from policy_test import * from multiple_vn_vm_test import * from tcutils.wrappers import preposttest_wrapper -from tcutils.pkgs.Traffic.traffic.core.stream import Stream -from tcutils.pkgs.Traffic.traffic.core.profile import create, ContinuousProfile -from tcutils.pkgs.Traffic.traffic.core.helpers import Host -from tcutils.pkgs.Traffic.traffic.core.helpers import Sender, Receiver from tcutils.util import skip_because from base import BasevDNSTest from common import isolated_creds @@ -32,6 +28,14 @@ from ipam_test import IPAMFixture from vn_test import VNFixture import test +from tcutils.tcpdump_utils import * + +sys.path.append(os.path.realpath('tcutils/traffic_utils')) +from base_traffic import * +sys.path.append(os.path.realpath('tcutils/pkgs/Traffic')) +from traffic.core.stream 
import Stream +from traffic.core.helpers import Host, Sender, Receiver +from traffic.core.profile import StandardProfile,ContinuousProfile class TestvDNS0(BasevDNSTest): @@ -43,115 +47,6 @@ def runTest(self): pass #end runTest - # This Test test vdns functionality-- On VM launch agent should dynamically update dns records to dns agent. - # This test verifies the same functionality and should able to refer VM by - # a name. - @test.attr(type=['sanity', 'ci_sanity', 'vcenter']) - @preposttest_wrapper - def test_vdns_ping_same_vn(self): - ''' - Test:- Test vdns functionality. On VM launch agent should dynamically update dns records to dns agent - 1. Create vDNS server - 2. Create IPAM using above vDNS data - 3. Create VN using above IPAM and launch 2 VM's within it - 4. Ping between these 2 VM's using dns name - 5. Try to delete vDNS server which has IPAM back-reference[Negative case] - 6. Add CNAME VDNS record for vm1-test and verify we able to ping by alias name - Pass criteria: Step 4,5 and 6 should pass - - Maintainer: cf-test@juniper.net - ''' - vn1_ip = '10.10.10.0/24' - vm_list = ['vm1-test', 'vm2-test'] - vn_name = 'vn1-vdns' - dns_server_name = 'vdns1' - domain_name = 'juniper.net' - cname_rec = 'vm1-test-alias' - ttl = 100 - ipam_name = 'ipam1' - rev_zone = vn1_ip.split('.') - rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2])) - rev_zone = rev_zone + '.in-addr.arpa' - proj_fixt = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) - dns_data = VirtualDnsType( - domain_name=domain_name, dynamic_records_from_client=True, - default_ttl_seconds=ttl, record_order='random', reverse_resolution=True) - # Create VDNS server object. 
- vdns_fixt1 = self.useFixture(VdnsFixture( - self.inputs, self.connections, vdns_name=dns_server_name, dns_data=dns_data)) - result, msg = vdns_fixt1.verify_on_setup() - self.assertTrue(result, msg) - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fixt1.vdns_fq_name) - ipam_mgmt_obj = IpamType( - ipam_dns_method='virtual-dns-server', ipam_dns_server=dns_server) - # Associate VDNS with IPAM. - ipam_fixt1 = self.useFixture(IPAMFixture(ipam_name, vdns_obj= vdns_fixt1.obj, project_obj=proj_fixt, ipamtype=ipam_mgmt_obj)) - vn_fixt = self.useFixture( - VNFixture( - self.connections, self.inputs, - vn_name=vn_name, subnets=[vn1_ip], ipam_fq_name=ipam_fixt1.fq_name, option='contrail')) - vm_fixture = {} - # Launch VM with VN Created above. This test verifies on launch of VM agent should updated DNS 'A' and 'PTR' records - # The following code will verify the same. Also, we should be able ping - # with VM name. - for vm_name in vm_list: - vn_quantum_obj = self.orch.get_vn_obj_if_present( - vn_name=vn_fixt.vn_name, project_id=proj_fixt.uuid) - vm_fixture[vm_name] = self.useFixture( - VMFixture(project_name=self.inputs.project_name, connections=self.connections, vn_obj=vn_quantum_obj, vm_name=vm_name)) - vm_fixture[vm_name].verify_vm_launched() - vm_fixture[vm_name].verify_on_setup() - vm_fixture[vm_name].wait_till_vm_is_up() - vm_ip = vm_fixture[vm_name].get_vm_ip_from_vm( - vn_fq_name=vm_fixture[vm_name].vn_fq_name) - vm_rev_ip = vm_ip.split('.') - vm_rev_ip = '.'.join( - (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0])) - vm_rev_ip = vm_rev_ip + '.in-addr.arpa' - msg = "Ping by using name %s is failed. Dns server should resolve VM name to IP" % ( - vm_name) - self.assertTrue(vm_fixture[vm_name] - .ping_with_certainty(ip=vm_name), msg) - # Frame the Expected DNS data for VM, one for 'A' record and - # another 'PTR' record. - rec_name = vm_name + "." 
+ domain_name - vm_dns_exp_data = [{'rec_data': vm_ip, 'rec_type': 'A', 'rec_class': 'IN', 'rec_ttl': str( - ttl), 'rec_name': rec_name, 'installed': 'yes', 'zone': domain_name}, {'rec_data': rec_name, 'rec_type': 'PTR', 'rec_class': 'IN', 'rec_ttl': str(ttl), 'rec_name': vm_rev_ip, 'installed': 'yes', 'zone': rev_zone}] - self.verify_vm_dns_data(vm_dns_exp_data) - vm_dns_exp_data = [] - # ping between two vms which are in same subnets by using name. - self.assertTrue(vm_fixture['vm1-test'] - .ping_with_certainty(ip=vm_list[1])) - # delete VDNS with ipam as back refrence. - self.logger.info( - "Try deleting the VDNS entry %s with back ref of ipam.", dns_server_name) - try: - self.vnc_lib.virtual_DNS_delete( - fq_name=vdns_fixt1.obj.get_fq_name()) - errmsg = "VDNS entry deleted which is not expected, when it has back refrence of ipam." - self.logger.error(errmsg) - assert False, errmsg - except Exception, msg: - self.logger.info(msg) - self.logger.info( - "Deletion of the vdns entry failed with back ref of ipam as expected") - # Add VDNS record 'CNAME' and add it to VDNS and ping with alias for - # vm1-test - self.logger.info( - 'Add CNAME VDNS record for vm1-test and verify we able to ping by alias name') - vdns_rec_data = VirtualDnsRecordType( - cname_rec, 'CNAME', 'IN', 'vm1-test', ttl) - vdns_rec_fix = self.useFixture(VdnsRecordFixture( - self.inputs, self.connections, 'test-rec', vdns_fixt1.get_fq_name(), vdns_rec_data)) - result, msg = vdns_rec_fix.verify_on_setup() - self.assertTrue(result, msg) - self.assertTrue(vm_fixture['vm1-test'] - .ping_with_certainty(ip=cname_rec)) - return True - # end test_vdns_ping_same_vn - @preposttest_wrapper def test_vdns_ping_diff_vn(self): '''This Test tests vdns functionality-- test vms on different subnets and we should able to refer each by name. 
@@ -180,7 +75,8 @@ def test_vdns_ping_diff_vn(self): rev_zone = rev_zone + '.in-addr.arpa' policy_name = 'policy1' project_fixture = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) + project_name=self.inputs.project_name, connections=self.connections)) + project_connections = project_fixture.get_project_connections() dns_data = VirtualDnsType( domain_name=domain_name, dynamic_records_from_client=True, default_ttl_seconds=ttl, record_order='random', reverse_resolution=True) @@ -229,10 +125,12 @@ def test_vdns_ping_diff_vn(self): vm_rev_ip = vm_rev_ip + '.in-addr.arpa' # Frame the Expected DNS data for VM, one for 'A' record and # another 'PTR' record. + agent_inspect_h = self.agent_inspect[vm_fixture[vm_name].vm_node_ip] + assigned_dns_ips = agent_inspect_h.get_vna_discovered_dns_server() rec_name = vm_name + "." + domain_name vm_dns_exp_data = [{'rec_data': vm_ip, 'rec_type': 'A', 'rec_class': 'IN', 'rec_ttl': str( ttl), 'rec_name': rec_name, 'installed': 'yes', 'zone': domain_name}, {'rec_data': rec_name, 'rec_type': 'PTR', 'rec_class': 'IN', 'rec_ttl': str(ttl), 'rec_name': vm_rev_ip, 'installed': 'yes', 'zone': rev_zone}] - self.verify_vm_dns_data(vm_dns_exp_data) + self.verify_vm_dns_data(vm_dns_exp_data, assigned_dns_ips[0]) vm_dns_exp_data = [] # for test add = 'Address:.*' + vm_ip @@ -280,15 +178,25 @@ def test_vdns_ping_diff_vn(self): continue else: self.assertTrue(False, 'Failed to Modify TTL values') - vm_fixture['vm1-test'].run_cmd_on_vm(cmds=[cmd]) - result = vm_fixture['vm1-test'].return_output_cmd_dict[cmd] - result = result.replace("\t", " ") - #m_obj = re.search(r"rec1.juniper.net\.*\s*([0-9.]*)",result) - m_obj = re.search( - r"rec1.juniper.net\.*\s*([0-9.]*)\s*IN\s*A\s*([0-9.]*)", result) - if not m_obj: - self.assertTrue( - False, 'record search is failed,please check syntax of regular expression') + vm_updated = False + for i in range(0,4): + 
vm_fixture['vm1-test'].run_cmd_on_vm(cmds=[cmd]) + result = vm_fixture['vm1-test'].return_output_cmd_dict[cmd] + if result: + result = result.replace("\t", " ") + m_obj = re.search(r"rec1.juniper.net\.*\s*([0-9.]*)\s*IN\s*A\s*([0-9.]*)", + result) + if not m_obj: + self.assertTrue(False, + 'record search is failed,please check syntax of regular expression') + if int(m_obj.group(1)) != ttl_mod: + sleep(1) + else: + vm_updated = True + break + else: + sleep(1) + assert vm_updated, "Record not updated on VM " print ("\nTTL VALUE is %s ", m_obj.group(1)) print ("\nrecord ip address is %s", m_obj.group(2)) self.assertEqual(int(m_obj.group(1)), ttl_mod, @@ -350,7 +258,8 @@ def test_vdns_with_next_vdns(self): vm_list[1]: vm2_ping_list, vm_list[2]: vm3_ping_list} project_fixture = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) + project_name=self.inputs.project_name, connections=self.connections)) + project_connections = project_fixture.get_project_connections() dns_server_name_list = ['vdns1', 'vdns2', 'vdns3'] domain_name_list = {'vdns1': 'juniper.net', 'vdns2': 'bng.juniper.net', 'vdns3': 'eng.juniper.net'} @@ -542,7 +451,8 @@ def test_vdns_with_fip(self): ipam_name = 'ipam1' fip_pool_name1 = 'some-pool1' project_fixture = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) + project_name=self.inputs.project_name, connections=self.connections)) + project_connections = project_fixture.get_project_connections() # VDNS dns_data = VirtualDnsType( domain_name=domain_name, dynamic_records_from_client=True, @@ -647,16 +557,15 @@ def test_vdns_with_diff_projs(self): connections=self.connections, username=proj_user[proj], password=proj_pass[proj])) project_fixture = self.useFixture( ProjectFixture( - project_name=proj, vnc_lib_h=self.vnc_lib, username=proj_user[ - proj], - password=proj_pass[proj], connections=admin_con)) + 
project_name=proj, username=self.inputs.admin_username, + password=self.inputs.admin_password, connections=admin_con)) user_fixture.add_user_to_tenant(proj, proj_user[proj] , 'admin') + project_fixture.set_user_creds(proj_user[proj], proj_pass[proj]) project_inputs = ContrailTestInit( - self.ini_file, stack_user=project_fixture.username, - stack_password=project_fixture.password, project_fq_name=['default-domain', proj], logger=self.logger) + self.ini_file, stack_user=project_fixture.project_username, + stack_password=project_fixture.project_user_password, + stack_tenant=proj, logger=self.logger) project_connections = ContrailConnections(project_inputs, logger= self.logger) - proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=proj)) self.logger.info( 'Default SG to be edited for allow all on project: %s' % proj) project_fixture.set_sec_group_for_allow_all(proj, 'default') @@ -665,9 +574,8 @@ def test_vdns_with_diff_projs(self): proj], inputs=project_inputs, connections=project_connections, rules_list=rules[proj])) # Ipam creation ipam_fixt[proj] = self.useFixture( - NetworkIpamTestFixtureGen( - self.vnc_lib, virtual_DNS_refs=[vdns_fixt1.obj], - parent_fixt=proj_fixt, network_ipam_name=ipam_list[proj], network_ipam_mgmt=ipam_mgmt_obj)) + IPAMFixture(ipam_list[proj], vdns_obj=vdns_fixt1.obj, \ + project_obj=project_fixture, ipamtype=ipam_mgmt_obj)) # VN Creation vn_fixt[proj] = self.useFixture( VNFixture(project_name=proj, connections=project_connections, @@ -696,10 +604,12 @@ def test_vdns_with_diff_projs(self): rev_zone = rev_zone + '.in-addr.arpa' # Frame the Expected DNS data for VM, one for 'A' record and # another 'PTR' record. + agent_inspect_h = self.agent_inspect[vm_fix[proj].vm_node_ip] + assigned_dns_ips = agent_inspect_h.get_vna_discovered_dns_server() rec_name = vm_list[proj] + "." 
+ domain_name vm_dns_exp_data = [{'rec_data': vm_ip, 'rec_type': 'A', 'rec_class': 'IN', 'rec_ttl': str( ttl), 'rec_name': rec_name, 'installed': 'yes', 'zone': domain_name}, {'rec_data': rec_name, 'rec_type': 'PTR', 'rec_class': 'IN', 'rec_ttl': str(ttl), 'rec_name': vm_rev_ip, 'installed': 'yes', 'zone': rev_zone}] - self.verify_vm_dns_data(vm_dns_exp_data) + self.verify_vm_dns_data(vm_dns_exp_data, assigned_dns_ips[0]) vm_dns_exp_data = [] # ping between two vms which are in different subnets by using name. self.assertTrue(vm_fix['project1'].ping_with_certainty( @@ -731,7 +641,8 @@ def test_vdns_default_mode(self): fip_subnets = [self.inputs.fip_pool] project_fixture = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) + project_name=self.inputs.project_name, connections=self.connections)) + project_connections = project_fixture.get_project_connections() # VN Creation fvn_fixture = self.useFixture( VNFixture( @@ -800,6 +711,75 @@ def test_vdns_default_mode(self): print m_obj1.group(1) return True + @preposttest_wrapper + def test_agent_crash_dns_malformed_received(self): + '''Verify that Agent do not crash on sending a malformed DNS packet. + This Test case specifically test following Bug + Bug Id 1566067 : "Agent crash at BindUtil::DnsClass" + Steps: + 1. Create a VN with IPAM having Virtual DNS configured. + 2. Create a VM and send a DNS query from VM to DNS server. DNS server should + have the Qclass field as any value other than "01" + 3. 
Verify that no crash happens when this malformed DNS packet reaches the server + Pass criteria: Vrouter agent should not crash on receiving a malformed DNS packet + Maintainer: pulkitt@juniper.net''' + vm_list = ['vm1', 'vm2'] + vn_name = 'vn1' + vn_nets = {'vn1' : '10.10.10.0/24'} + dns_server_name = 'vdns1' + domain_name = 'juniper.net' + ttl = 100 + ipam_name = 'ipam1' + dns_data = VirtualDnsType( + domain_name=domain_name, dynamic_records_from_client=True, + default_ttl_seconds=ttl, record_order='random', reverse_resolution=True) + vdns_fixt1 = self.useFixture(VdnsFixture(self.inputs, self.connections, + vdns_name=dns_server_name, dns_data=dns_data)) + result, msg = vdns_fixt1.verify_on_setup() + self.assertTrue(result, msg) + dns_server = IpamDnsAddressType( + virtual_dns_server_name=vdns_fixt1.vdns_fq_name) + ipam_mgmt_obj = IpamType( + ipam_dns_method='virtual-dns-server', ipam_dns_server=dns_server) + # Associate IPAM with VDNS server Object + ipam_fixt1 = self.useFixture(IPAMFixture(ipam_name, vdns_obj=vdns_fixt1.obj, + project_obj=self.project, ipamtype=ipam_mgmt_obj)) + # Launch VM with VN Created above. 
+ vn_fixt = self.useFixture(VNFixture(self.connections, self.inputs, vn_name=vn_name, + subnets=[vn_nets['vn1']], ipam_fq_name=ipam_fixt1.fq_name, option='contrail')) + vm_fixture1 = self.useFixture(VMFixture(project_name=self.inputs.project_name, + connections=self.connections, vn_obj=vn_fixt.obj, vm_name=vm_list[0], + image_name = "ubuntu-traffic")) + assert vm_fixture1.verify_vm_launched() + assert vm_fixture1.verify_on_setup() + assert vm_fixture1.wait_till_vm_is_up() + # DNS payload with 1 query and qclass as "04" instead of "01" + filters = '\'(src host %s and dst host %s and port 1234)\'' \ + % (vm_fixture1.vm_ip,vn_fixt.get_dns_ip(ipam_fq_name = ipam_fixt1.fq_name)) + session, pcap = start_tcpdump_for_vm_intf(self, vm_fixture1, vn_fixt.vn_fq_name, filters = filters) + dnsPayload = '\x12\x34\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x04' + streamObj = Stream(protocol="ip", sport=1234, dport=53, proto='udp', src=vm_fixture1.vm_ip, + dst=vn_fixt.get_dns_ip(ipam_fq_name =ipam_fixt1.fq_name)) + profile_kwargs = {'stream': streamObj, 'count' : 10, 'payload': dnsPayload} + profileObj = StandardProfile(**profile_kwargs) + tx_vm_node_ip = vm_fixture1.vm_node_ip + send_node = Host( + tx_vm_node_ip, + self.inputs.host_data[tx_vm_node_ip]['username'], + self.inputs.host_data[tx_vm_node_ip]['password']) + send_host = Host(vm_fixture1.local_ip, + vm_fixture1.vm_username, vm_fixture1.vm_password) + sender = Sender("senddns", profileObj, send_node, send_host, self.inputs.logger) + sender.start() + sleep(1) + sender.poll() + if not sender.sent: + self.logger.error("Failed to Transmit packet") + assert False, "Failed to Transmit packet" + sender.stop() + stop_tcpdump_for_vm_intf(self, session, pcap) + assert verify_tcpdump_count(self, session, pcap) + class TestvDNS1(BasevDNSTest): @classmethod @@ -851,7 +831,8 @@ def test_vdns_tree_scaling(self): ttl = 1000 project_fixture = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, 
project_name=self.inputs.project_name, connections=self.connections)) + project_name=self.inputs.project_name, connections=self.connections)) + project_connections = project_fixture.get_project_connections() dns_server_name_list = [ 'vdns501', 'vdns502', 'vdns503', 'vdns504', 'vdns505', 'vdns506', 'vdns507', 'vdns508', 'vdns509', 'vdns510', 'vdns511', 'vdns512', 'vdns513', 'vdns514', 'vdns515', 'vdns516'] @@ -994,7 +975,8 @@ def test_vdns_server_scaling(self): # Number of records per server record_num = 1 project_fixture = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) + project_name=self.inputs.project_name, connections=self.connections)) + project_connections = project_fixture.get_project_connections() vdns_fixt = {} vdns_verify = [] i = 1 diff --git a/scripts/vdns/vdns_tests.py b/scripts/vdns/vdns_tests.py deleted file mode 100755 index 57f3db98f..000000000 --- a/scripts/vdns/vdns_tests.py +++ /dev/null @@ -1,2432 +0,0 @@ -# Need to import path to test/fixtures and test/scripts/ -# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/' -# -# To run tests, you can do 'python -m testtools.run vdns_tests'. -# To run specific tests, You can do 'python -m testtools.run -l vdns_tests' -# Set the env variable PARAMS_FILE to point to your ini file. 
-# Else it will try to pick params.ini in PWD -# -import os -import unittest -import fixtures -import testtools -import traceback - -from common.contrail_test_init import ContrailTestInit -from vn_test import * -from quantum_test import * -from vnc_api_test import * -from vm_test import * -from common.connections import ContrailConnections -from contrail_fixtures import * -from tcutils.wrappers import preposttest_wrapper -from vnc_api import vnc_api -from vnc_api.gen.resource_test import * -from vdns_fixture import * -from floating_ip import * -from policy_test import * -from control_node import * -from user_test import UserFixture - - -class TestVdnsFixture(testtools.TestCase, VdnsFixture): - - # @classmethod - - def setUp(self): - super(TestVdnsFixture, self).setUp() - if 'PARAMS_FILE' in os.environ: - self.ini_file = os.environ.get('PARAMS_FILE') - else: - self.ini_file = 'params.ini' - self.inputs = self.useFixture(ContrailTestInit(self.ini_file)) - self.connections = ContrailConnections(self.inputs) - self.quantum_h = self.connections.quantum_h - self.nova_h = self.connections.nova_h - self.vnc_lib = self.connections.vnc_lib - self.logger = self.inputs.logger - self.agent_inspect = self.connections.agent_inspect - self.dnsagent_inspect = self.connections.dnsagent_inspect - self.cn_inspect = self.connections.cn_inspect - self.project_fq_name = None - self.api_s_inspect = self.connections.api_server_inspect - self.analytics_obj = self.connections.analytics_obj - - # end setUpClass - - def cleanUp(self): - super(TestVdnsFixture, self).cleanUp() - # end cleanUp - - def runTest(self): - pass - # end runTest - - # This Test test vdns functionality-- On VM launch agent should dynamically - # update dns records to dns agent. - # This test verifies the same functionality and should able to refer VM by - # a name. - @preposttest_wrapper - def test_vdns_ping_same_vn(self): - ''' - Test:- Test vdns functionality. 
On VM launch agent should dynamically - update dns records to dns agent - 1. Create vDNS server - 2. Create IPAM using above vDNS data - 3. Create VN using above IPAM and launch 2 VM's within it - 4. Ping between these 2 VM's using dns name - 5. Try to delete vDNS server which has IPAM back-ref[Negative case] - 6. Add CNAME VDNS record for vm1-test and - verify we able to ping by alias name - Pass criteria: Step 4,5 and 6 should pass - - Maintainer: cf-test@juniper.net - ''' - vn1_ip = '10.10.10.1' - vm_list = ['vm1-test', 'vm2-test'] - vn_name = 'vn1-vdns' - dns_server_name = 'vdns1' - domain_name = 'juniper.net' - cname_rec = 'vm1-test-alias' - ttl = 100 - ipam_name = 'ipam1' - rev_zone = vn1_ip.split('.') - rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2])) - rev_zone = rev_zone + '.in-addr.arpa' - project_fixture = self.useFixture( - ProjectFixture( - vnc_lib_h=self.vnc_lib, - project_name=self.inputs.project_name, - connections=self.connections)) - proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=self.inputs.project_name)) - dns_data = VirtualDnsType( - domain_name=domain_name, dynamic_records_from_client=True, - default_ttl_seconds=ttl, record_order='random') - # Create VDNS server object. - vdns_fixt1 = self.useFixture( - VdnsFixture( - self.inputs, - self.connections, - vdns_name=dns_server_name, - dns_data=dns_data)) - result, msg = vdns_fixt1.verify_on_setup() - self.assertTrue(result, msg) - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fixt1.vdns_fq_name) - ipam_mgmt_obj = IpamType( - ipam_dns_method='virtual-dns-server', ipam_dns_server=dns_server) - # Associate VDNS with IPAM. 
- ipam_fixt1 = self.useFixture( - NetworkIpamTestFixtureGen( - self.vnc_lib, - virtual_DNS_refs=[ - vdns_fixt1.obj], - parent_fixt=proj_fixt, - network_ipam_name=ipam_name, - network_ipam_mgmt=ipam_mgmt_obj)) - vn_nets = { - 'vn1-vdns': [(ipam_fixt1.getObj(), VnSubnetsType( - [IpamSubnetType(subnet=SubnetType(vn1_ip, 24))]))], - } - # Launch VN with IPAM - vn_fixt = self.useFixture( - VirtualNetworkTestFixtureGen( - self.vnc_lib, - virtual_network_name=vn_name, - network_ipam_ref_infos=vn_nets[vn_name], - parent_fixt=proj_fixt, - id_perms=IdPermsType( - enable=True))) - vm_fixture = {} - # Launch VM with VN Created above. This test verifies on - # launch of VM agent should updated DNS 'A' and 'PTR' records - # The following code will verify the same. Also, we should be able ping - # with VM name. - for vm_name in vm_list: - vn_quantum_obj = self.quantum_h.get_vn_obj_if_present( - vn_fixt._name) - vm_fixture[vm_name] = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn_quantum_obj, - vm_name=vm_name)) - vm_fixture[vm_name].verify_vm_launched() - vm_fixture[vm_name].verify_on_setup() - vm_fixture[vm_name].wait_till_vm_is_up() - vm_ip = vm_fixture[vm_name].get_vm_ip_from_vm( - vn_fq_name=vm_fixture[vm_name].vn_fq_name) - vm_rev_ip = vm_ip.split('.') - vm_rev_ip = '.'.join( - (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0])) - vm_rev_ip = vm_rev_ip + '.in-addr.arpa' - msg = "Ping by using name %s is failed. Dns server \ - should resolve VM name to IP" % (vm_name) - self.assertTrue(vm_fixture[vm_name] - .ping_with_certainty(ip=vm_name), msg) - # Frame the Expected DNS data for VM, one for 'A' record and - # another 'PTR' record. - rec_name = vm_name + "." 
+ domain_name - vm_dns_exp_data = [{'rec_data': vm_ip, - 'rec_type': 'A', - 'rec_class': 'IN', - 'rec_ttl': str(ttl), - 'rec_name': rec_name, - 'installed': 'yes', - 'zone': domain_name}, - {'rec_data': rec_name, - 'rec_type': 'PTR', - 'rec_class': 'IN', - 'rec_ttl': str(ttl), - 'rec_name': vm_rev_ip, - 'installed': 'yes', - 'zone': rev_zone}] - self.verify_vm_dns_data(vm_dns_exp_data) - vm_dns_exp_data = [] - # ping between two vms which are in same subnets by using name. - self.assertTrue(vm_fixture['vm1-test'] - .ping_with_certainty(ip=vm_list[1])) - # delete VDNS with ipam as back refrence. - self.logger.info( - "Try deleting the VDNS entry %s with back ref of ipam.", - dns_server_name) - try: - self.vnc_lib.virtual_DNS_delete( - fq_name=vdns_fixt1.obj.get_fq_name()) - errmsg = 'VDNS entry deleted which is not expected, \ - when it has back refrence of ipam.' - self.logger.error(errmsg) - assert False, errmsg - except Exception as msg: - self.logger.info(msg) - self.logger.info('Deletion of the vdns entry failed ' - 'with back ref of ipam as expected') - # Add VDNS record 'CNAME' and add it to VDNS and ping with alias for - # vm1-test - self.logger.info('Add CNAME VDNS record for vm1-test and ' - 'verify we able to ping by alias name') - vdns_rec_data = VirtualDnsRecordType( - cname_rec, 'CNAME', 'IN', 'vm1-test', ttl) - vdns_rec_fix = self.useFixture( - VdnsRecordFixture( - self.inputs, - self.connections, - 'test-rec', - vdns_fixt1.vdns_fix, - vdns_rec_data)) - result, msg = vdns_rec_fix.verify_on_setup() - self.assertTrue(result, msg) - self.assertTrue(vm_fixture['vm1-test'] - .ping_with_certainty(ip=cname_rec)) - return True - # end test_vdns_ping_same_vn - - @preposttest_wrapper - def test_vdns_ping_diff_vn(self): - ''' This Test test vdns functionality-- test vms on different subnets - and we should able to refer each by name.We should be able to - ping each of vms by using name - ''' - vn1_ip = '10.10.10.0' - vn2_ip = '20.20.20.0' - vm_list = 
['vm1-test', 'vm2-test'] - vn_list = ['vn1', 'vn2'] - vm_vn_list = {'vm1-test': 'vn1', 'vm2-test': 'vn2'} - dns_server_name = 'vdns1' - domain_name = 'juniper.net' - ttl = 100 - ipam_name = 'ipam1' - rev_zone = vn1_ip.split('.') - rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2])) - rev_zone = rev_zone + '.in-addr.arpa' - policy_name = 'policy1' - project_fixture = self.useFixture( - ProjectFixture( - vnc_lib_h=self.vnc_lib, - project_name=self.inputs.project_name, - connections=self.connections)) - proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=self.inputs.project_name)) - dns_data = VirtualDnsType( - domain_name=domain_name, dynamic_records_from_client=True, - default_ttl_seconds=ttl, record_order='random') - vdns_fixt1 = self.useFixture( - VdnsFixture( - self.inputs, - self.connections, - vdns_name=dns_server_name, - dns_data=dns_data)) - result, msg = vdns_fixt1.verify_on_setup() - self.assertTrue(result, msg) - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fixt1.vdns_fq_name) - ipam_mgmt_obj = IpamType( - ipam_dns_method='virtual-dns-server', ipam_dns_server=dns_server) - # Associate IPAM with VDNS server Object - ipam_fixt1 = self.useFixture( - NetworkIpamTestFixtureGen( - self.vnc_lib, - virtual_DNS_refs=[ - vdns_fixt1.obj], - parent_fixt=proj_fixt, - network_ipam_name=ipam_name, - network_ipam_mgmt=ipam_mgmt_obj)) - vn_nets = { - 'vn1': [(ipam_fixt1.getObj(), - VnSubnetsType([ - IpamSubnetType(subnet=SubnetType(vn1_ip, 24))]))], - 'vn2': [(ipam_fixt1.getObj(), - VnSubnetsType([ - IpamSubnetType(subnet=SubnetType(vn2_ip, 24))]))], - } - # create policy - rules = {} - rules[policy_name] = [ - PolicyRuleType( - direction='<>', protocol='icmp', dst_addresses=[ - AddressType( - virtual_network='any')], src_addresses=[ - AddressType( - virtual_network='local')], action_list=ActionListType( - simple_action='pass'), src_ports=[ - PortType( - -1, -1)], dst_ports=[ - PortType( - -1, -1)])] - policy_fixt = 
self.useFixture( - NetworkPolicyTestFixtureGen( - self.vnc_lib, - network_policy_name=policy_name, - parent_fixt=proj_fixt, - network_policy_entries=PolicyEntriesType( - rules[policy_name]))) - policy_ref = [ - (policy_fixt.getObj(), - VirtualNetworkPolicyType( - sequence=SequenceType( - major=0, - minor=0)))] - - vn_fixt = {} - vm_fixture = {} - # Launch VM with VN Created above. This test verifies - # on launch of VM agent should updated DNS 'A' and 'PTR' records - # The following code will verify the same. Also, we should be able ping - # with VM name. - for vm_name in vm_list: - vn = vm_vn_list[vm_name] - vn_fixt[vm_name] = self.useFixture( - VirtualNetworkTestFixtureGen( - self.vnc_lib, - virtual_network_name=vm_vn_list[vm_name], - network_ipam_ref_infos=vn_nets[vn], - parent_fixt=proj_fixt, - id_perms=IdPermsType( - enable=True), - network_policy_ref_infos=policy_ref)) - vn_quantum_obj = self.quantum_h.get_vn_obj_if_present(vn) - vm_fixture[vm_name] = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn_quantum_obj, - vm_name=vm_name)) - vm_fixture[vm_name].verify_vm_launched() - vm_fixture[vm_name].verify_on_setup() - vm_fixture[vm_name].wait_till_vm_is_up() - msg = "Ping by using name %s is failed." % (vm_name) - msg += "Dns server should resolve VM name to IP" - self.assertTrue(vm_fixture[vm_name] - .ping_with_certainty(ip=vm_name), msg) - vm_ip = vm_fixture[vm_name].get_vm_ip_from_vm( - vn_fq_name=vm_fixture[vm_name].vn_fq_name) - vm_rev_ip = vm_ip.split('.') - vm_rev_ip = '.'.join( - (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0])) - vm_rev_ip = vm_rev_ip + '.in-addr.arpa' - # Frame the Expected DNS data for VM, one for 'A' record and - # another 'PTR' record. - rec_name = vm_name + "." 
+ domain_name - vm_dns_exp_data = [{'rec_data': vm_ip, - 'rec_type': 'A', - 'rec_class': 'IN', - 'rec_ttl': str(ttl), - 'rec_name': rec_name, - 'installed': 'yes', - 'zone': domain_name}, - {'rec_data': rec_name, - 'rec_type': 'PTR', - 'rec_class': 'IN', - 'rec_ttl': str(ttl), - 'rec_name': vm_rev_ip, - 'installed': 'yes', - 'zone': rev_zone}] - self.verify_vm_dns_data(vm_dns_exp_data) - vm_dns_exp_data = [] - # for test - add = 'Address:.*' + vm_ip - exp_data = vm_ip - cmd = 'nslookup ' + vm_name + '|' + ' grep ' + '\'' + add + '\'' - msg = 'nslookup failed for VM ' + vm_name - self.assertTrue( - self.verify_ns_lookup_data( - vm_fixture[vm_name], - cmd, - exp_data), - msg) - cmd = 'nslookup ' + vm_ip + '|' + ' grep ' + '\'' + vm_name + '\'' - exp_data = vm_name + '.' + domain_name - msg = 'reverse nslookup failed for VM ' + vm_name - self.assertTrue( - self.verify_ns_lookup_data( - vm_fixture[vm_name], - cmd, - exp_data), - msg) - # ping between two vms which are in different subnets by using name. - self.assertTrue(vm_fixture['vm1-test'] - .ping_with_certainty(ip=vm_list[1])) - # Add VDNS record and verify TTL value correctly - self.logger.info( - 'Add VDNS record and verify TTL value is set' - ' correctly using with dig command') - vdns_rec_data = VirtualDnsRecordType('rec1', 'A', 'IN', '1.1.1.1', ttl) - vdns_rec_fix = self.useFixture( - VdnsRecordFixture( - self.inputs, - self.connections, - 'test-rec', - vdns_fixt1.vdns_fix, - vdns_rec_data)) - result, msg = vdns_rec_fix.verify_on_setup() - self.assertTrue(result, msg) - cmd = 'dig +nocmd ' + 'rec1.' + domain_name + ' +noall +answer' - import re - vdns_record_obj = vdns_rec_fix.obj - ttl_list = [100, 2000, 0, 86400, 2147483647, -1, 2147483648] - i = 1 - # modify the record TTL and address values and verify - for ttl_mod in ttl_list: - ip_add = '1.1.1.' 
+ str(i) - # Already configured TTL as a 100, so not configuring TTL value for - # first time - if ttl_mod != 100: - vdns_rec_data = VirtualDnsRecordType( - 'rec1', 'A', 'IN', ip_add, ttl_mod) - vdns_record_obj.set_virtual_DNS_record_data(vdns_rec_data) - try: - self.vnc_lib.virtual_DNS_record_update(vdns_record_obj) - except Exception as e: - if (ttl_mod == -1 or ttl_mod == 2147483648): - self.logger.info( - 'Failed to configure invalid TTL as expected') - continue - else: - self.assertTrue(False, 'Failed to Modify TTL values') - vm_fixture['vm1-test'].run_cmd_on_vm(cmds=[cmd]) - result = vm_fixture['vm1-test'].return_output_cmd_dict[cmd] - result = result.replace("\t", " ") - m_obj = re.search( - r"rec1.juniper.net\.*\s*([0-9.]*)\s*IN\s*A\s*([0-9.]*)", - result) - if not m_obj: - self.assertTrue( - False, - 'record search is failed,please check ' - 'syntax of regular expression') - print ("\nTTL VALUE is %s ", m_obj.group(1)) - print ("\nrecord ip address is %s", m_obj.group(2)) - self.assertEqual(int(m_obj.group( - 1)), ttl_mod, 'TTL value is not matching for ' - 'static record after record modification') - self.assertEqual( - m_obj.group(2), - ip_add, - 'IP Address is not matching for static record ' - 'after record modification') - i = i + 1 - return True - # end of test_vdns_ping_diff_vn - - # This test creates 3 vnds servers vdns1,vdns2 and vdns3. - # For vdns2 and vdns3, vdns1 act a next vdns nerver. - # The VDNS server are configured as shown below. - # vdns1 (domain: juniper.net) - # ^ ^ - # / \ - # / \ - # (bng.juniper.net) vdns2 vdns3(eng.juniper.net) - # - # - @preposttest_wrapper - def test_vdns_with_next_vdns(self): - ''' This test creates 3 vnds servers vdns1,vdns2 and vdns3. - For vdns2 and vdns3, vdns1 act a next vdns nerver. 
- ''' - vn1_ip = '10.10.10.0' - vn2_ip = '20.20.20.0' - vn3_ip = '30.30.30.0' - vm_list = ['vm1-test', 'vm2-test', 'vm3-test'] - vm_vn_list = {'vm1-test': 'vn1', 'vm2-test': 'vn2', 'vm3-test': 'vn3'} - policy_name = 'policy1' - dns_server_name1 = 'vdns1' - dns_server_name2 = 'vdns2' - dns_server_name3 = 'vdns3' - domain_name1 = 'juniper.net' - domain_name2 = 'bng.juniper.net' - domain_name3 = 'eng.juniper.net' - ttl = 100 - vm1_ping_list = [vm_list[0] + "." + domain_name1, vm_list[1] - + "." + domain_name2, vm_list[2] + "." + domain_name3] - vm2_ping_list = [vm_list[1] + "." + domain_name2, - vm_list[0] + "." + domain_name1] - vm3_ping_list = [vm_list[2] + "." + domain_name3, - vm_list[0] + "." + domain_name1] - vm_domain_list = {vm_list[0]: vm1_ping_list, - vm_list[1]: vm2_ping_list, vm_list[2]: vm3_ping_list} - - project_fixture = self.useFixture( - ProjectFixture( - vnc_lib_h=self.vnc_lib, - project_name=self.inputs.project_name, - connections=self.connections)) - proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=self.inputs.project_name)) - dns_server_name_list = ['vdns1', 'vdns2', 'vdns3'] - domain_name_list = {'vdns1': 'juniper.net', 'vdns2': - 'bng.juniper.net', 'vdns3': 'eng.juniper.net'} - rec_names = {'vdns2': 'test-rec1', 'vdns3': 'test-rec2'} - ipam_dns_list = {'vdns1': 'ipam1', 'vdns2': 'ipam2', 'vdns3': 'ipam3'} - - vdns_fix = {} - vdns_data = {} - vdns_rec = {} - for dns_name in dns_server_name_list: - # VDNS1 - if dns_name == 'vdns1': - vdns_data[dns_name] = VirtualDnsType( - domain_name=domain_name_list[dns_name], - dynamic_records_from_client=True, - default_ttl_seconds=ttl, - record_order='random') - else: - # VDNS2 and VDNS3 need to point VDNS1 as next vdns server. 
- vdns_data[dns_name] = VirtualDnsType( - domain_name=domain_name_list[dns_name], - dynamic_records_from_client=True, - default_ttl_seconds=ttl, - record_order='random', - next_virtual_DNS=vdns_fix['vdns1'].vdns_fq_name) - vdns_fix[dns_name] = self.useFixture( - VdnsFixture( - self.inputs, - self.connections, - vdns_name=dns_name, - dns_data=vdns_data[dns_name])) - result, msg = vdns_fix[dns_name].verify_on_setup() - self.assertTrue(result, msg) - - # Try to delete vdns entry which was referenced in other vdns entry, - # deletion should fail. - self.logger.info( - "Try deleting the VDNS entry %s with back ref.", dns_server_name1) - try: - self.vnc_lib.virtual_DNS_delete( - fq_name=vdns_fix[dns_server_name1].obj.get_fq_name()) - errmsg = "VDNS entry deleted which is not expected, " - errmsg += "when it is attached to a other vdns servers." - self.logger.error(errmsg) - assert False, errmsg - except Exception as msg: - self.logger.info(msg) - self.logger.info( - "Not able to delete the vdns entry with back ref as expected") - # In VDNS1 need to be added 'NS' records to delegate a subdomain to - # VDNS2 and VDNS3. 
- for dns_name in dns_server_name_list: - if dns_name != 'vdns1': - vdns_rec_data = VirtualDnsRecordType( - domain_name_list[dns_name], - 'NS', - 'IN', - vdns_fix[dns_name].vdns_fq_name, - ttl) - vdns_rec[dns_name] = self.useFixture( - VdnsRecordFixture( - self.inputs, - self.connections, - rec_names[dns_name], - vdns_fix['vdns1'].vdns_fix, - vdns_rec_data)) - result, msg = vdns_rec[dns_name].verify_on_setup() - self.assertTrue(result, msg) - - ipam_fixt = {} - # Create IPAM entrys with VDNS servers - for ipam in ipam_dns_list: - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fix[ipam].vdns_fq_name) - ipam_mgmt_obj = IpamType( - ipam_dns_method='virtual-dns-server', - ipam_dns_server=dns_server) - ipam_fixt[ipam] = self.useFixture( - NetworkIpamTestFixtureGen( - self.vnc_lib, - virtual_DNS_refs=[ - vdns_fix[ipam].obj], - parent_fixt=proj_fixt, - network_ipam_name=ipam_dns_list[ipam], - network_ipam_mgmt=ipam_mgmt_obj)) - - rules = {} - rules[policy_name] = [ - PolicyRuleType( - direction='<>', protocol='icmp', dst_addresses=[ - AddressType( - virtual_network='any')], src_addresses=[ - AddressType( - virtual_network='any')], action_list=ActionListType( - simple_action='pass'), src_ports=[ - PortType( - -1, -1)], dst_ports=[ - PortType( - -1, -1)])] - policy_fixt = self.useFixture( - NetworkPolicyTestFixtureGen( - self.vnc_lib, - network_policy_name=policy_name, - parent_fixt=proj_fixt, - network_policy_entries=PolicyEntriesType( - rules[policy_name]))) - policy_ref = [ - (policy_fixt.getObj(), - VirtualNetworkPolicyType( - sequence=SequenceType( - major=0, - minor=0)))] - - vn_nets = { - 'vn1': [(ipam_fixt['vdns1'].getObj(), VnSubnetsType([ - IpamSubnetType(subnet=SubnetType(vn1_ip, 24))]))], - 'vn2': [(ipam_fixt['vdns2'].getObj(), VnSubnetsType([ - IpamSubnetType(subnet=SubnetType(vn2_ip, 24))]))], - 'vn3': [(ipam_fixt['vdns3'].getObj(), VnSubnetsType([ - IpamSubnetType(subnet=SubnetType(vn3_ip, 24))]))], - } - - vn_fixt = {} - vm_fixture = {} - # 
Launch VM with VN Created above. This test verifies - # on launch of VM agent should updated DNS 'A' and 'PTR' records - # The following code will verify the same. Also, we should be able ping - # with VM name. - for vm_name in vm_list: - vn = vm_vn_list[vm_name] - vn_fixt[vm_name] = self.useFixture( - VirtualNetworkTestFixtureGen( - self.vnc_lib, - virtual_network_name=vm_vn_list[vm_name], - network_ipam_ref_infos=vn_nets[vn], - parent_fixt=proj_fixt, - id_perms=IdPermsType( - enable=True), - network_policy_ref_infos=policy_ref)) - vn_quantum_obj = self.quantum_h.get_vn_obj_if_present(vn) - vm_fixture[vm_name] = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn_quantum_obj, - vm_name=vm_name)) - vm_fixture[vm_name].verify_vm_launched() - vm_fixture[vm_name].verify_on_setup() - vm_fixture[vm_name].wait_till_vm_is_up() - - # Verify DNS entries are resolved for sub domains. - for vm_name in vm_list: - vm_ping_list = vm_domain_list[vm_name] - for cmd in vm_ping_list: - self.assertTrue(vm_fixture[vm_name] - .ping_with_certainty(ip=cmd)) - - # Try to delete vdns entry which was referenced in other vdns entry, - # deletion should fail. - self.logger.info( - "Try deleting the VDNS entry %s with back ref of vdns records.", - dns_server_name1) - try: - self.vnc_lib.virtual_DNS_delete( - fq_name=vdns_fix[dns_server_name1].obj.get_fq_name()) - errmsg = "VDNS entry deleted which is not " - errmsg += "expected, when it had vdns records." 
- self.logger.error(errmsg) - assert False, errmsg - except Exception as msg: - self.logger.info(msg) - self.logger.info( - 'Not able to delete the vdns entry' - 'with back ref of vdns records') - return True - - @preposttest_wrapper - def test_vdns_controlnode_switchover(self): - ''' This test test control node switchover functionality''' - restart_process = 'ControlNodeRestart' - self.vdns_with_cn_dns_agent_restart(restart_process) - return True - - @preposttest_wrapper - def test_vdns_dns_restart(self): - ''' This test test dns process restart functionality''' - restart_process = 'DnsRestart' - self.vdns_with_cn_dns_agent_restart(restart_process) - return True - - @preposttest_wrapper - def test_vdns_agent_restart(self): - '''This test tests agent process restart functionality''' - restart_process = 'AgentRestart' - self.vdns_with_cn_dns_agent_restart(restart_process) - return True - - @preposttest_wrapper - def test_vdns_named_restart(self): - '''This test tests named process restart functionality''' - restart_process = 'NamedRestart' - self.vdns_with_cn_dns_agent_restart(restart_process) - return True - - @preposttest_wrapper - def test_vdns_scp(self): - '''This test tests scp with VDNS functionality''' - restart_process = 'scp' - self.vdns_with_cn_dns_agent_restart(restart_process) - return True - - # This Test test vdns functionality with control node restart - def vdns_with_cn_dns_agent_restart(self, restart_process): - ''' - This test test the functionality of controlnode/dns/agent - restart with vdns feature. - ''' - if restart_process == 'ControlNodeRestart': - if len(set(self.inputs.bgp_ips)) < 2: - raise self.skipTest( - "Skipping Test. 
At least 2 control nodes required" - " to run the control node switchover test") - vn1_ip = '10.10.10.1' - vm_list = ['vm1-test', 'vm2-test'] - vn_name = 'vn1' - dns_server_name = 'vdns1' - domain_name = 'juniper.net' - ttl = 100 - ipam_name = 'ipam1' - rev_zone = vn1_ip.split('.') - rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2])) - rev_zone = rev_zone + '.in-addr.arpa' - project_fixture = self.useFixture( - ProjectFixture( - vnc_lib_h=self.vnc_lib, - project_name=self.inputs.project_name, - connections=self.connections)) - proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=self.inputs.project_name)) - dns_data = VirtualDnsType( - domain_name=domain_name, dynamic_records_from_client=True, - default_ttl_seconds=ttl, record_order='random') - # Create VDNS server object. - vdns_fixt1 = self.useFixture( - VdnsFixture( - self.inputs, - self.connections, - vdns_name=dns_server_name, - dns_data=dns_data)) - result, msg = vdns_fixt1.verify_on_setup() - self.assertTrue(result, msg) - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fixt1.vdns_fq_name) - ipam_mgmt_obj = IpamType( - ipam_dns_method='virtual-dns-server', ipam_dns_server=dns_server) - # Associate VDNS with IPAM. - ipam_fixt1 = self.useFixture( - NetworkIpamTestFixtureGen( - self.vnc_lib, - virtual_DNS_refs=[ - vdns_fixt1.obj], - parent_fixt=proj_fixt, - network_ipam_name=ipam_name, - network_ipam_mgmt=ipam_mgmt_obj)) - vn_nets = {'vn1': [(ipam_fixt1.getObj(), VnSubnetsType( - [IpamSubnetType(subnet=SubnetType(vn1_ip, 24))]))], } - # Launch VN with IPAM - vn_fixt = self.useFixture( - VirtualNetworkTestFixtureGen( - self.vnc_lib, - virtual_network_name=vn_name, - network_ipam_ref_infos=vn_nets[vn_name], - parent_fixt=proj_fixt, - id_perms=IdPermsType( - enable=True))) - vm_fixture = {} - vm_dns_exp_data = {} - # Launch VM with VN Created above. 
This test verifies on - # launch of VM agent should updated DNS 'A' and 'PTR' records - # The following code will verify the same. Also, we should be able ping - # with VM name. - for vm_name in vm_list: - vn_quantum_obj = self.quantum_h.get_vn_obj_if_present( - vn_fixt._name) - vm_fixture[vm_name] = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn_quantum_obj, - vm_name=vm_name)) - vm_fixture[vm_name].verify_vm_launched() - vm_fixture[vm_name].verify_on_setup() - vm_fixture[vm_name].wait_till_vm_is_up() - vm_ip = vm_fixture[vm_name].get_vm_ip_from_vm( - vn_fq_name=vm_fixture[vm_name].vn_fq_name) - vm_rev_ip = vm_ip.split('.') - vm_rev_ip = '.'.join( - (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0])) - vm_rev_ip = vm_rev_ip + '.in-addr.arpa' - # Frame the Expected DNS data for VM, one for 'A' record and - # another 'PTR' record. - rec_name = vm_name + "." + domain_name - vm_dns_exp_data[vm_name] = [{'rec_data': vm_ip, - 'rec_type': 'A', - 'rec_class': 'IN', - 'rec_ttl': str(ttl), - 'rec_name': rec_name, - 'installed': 'yes', - 'zone': domain_name}, - {'rec_data': rec_name, - 'rec_type': 'PTR', - 'rec_class': 'IN', - 'rec_ttl': str(ttl), - 'rec_name': vm_rev_ip, - 'installed': 'yes', - 'zone': rev_zone}] - self.verify_vm_dns_data(vm_dns_exp_data[vm_name]) - # ping between two vms which are in same subnets by using name. - self.assertTrue(vm_fixture['vm1-test'] - .ping_with_certainty(ip=vm_list[1])) - active_controller = vm_fixture['vm1-test'].get_active_controller() - self.logger.info( - 'Active control node from the Agent %s is %s' % - (vm_fixture['vm1-test'].vm_node_ip, active_controller)) - # Control node restart/switchover. 
- if restart_process == 'ControlNodeRestart': - # restart the Active control node - self.logger.info('restarting active control node') - self.inputs.restart_service( - 'contrail-control', [active_controller]) - sleep(5) - # Check the control node shifted to other control node - new_active_controller = vm_fixture[ - 'vm1-test'].get_active_controller() - self.logger.info( - 'Active control node from the Agent %s is %s' % - (vm_fixture['vm1-test'].vm_node_ip, new_active_controller)) - if new_active_controller == active_controller: - self.logger.error( - 'Control node switchover fail. Old Active controlnode ' - 'was %s and new active control node is %s' % - (active_controller, new_active_controller)) - return False - self.inputs.restart_service( - 'contrail-control', [new_active_controller]) - if restart_process == 'DnsRestart': - # restart the dns process in the active control node - self.logger.info( - 'restart the dns process in the active control node') - self.inputs.restart_service('contrail-dns', [active_controller]) - if restart_process == 'NamedRestart': - # restart the named process in the active control node - self.logger.info( - 'restart the named process in the active control node') - self.inputs.restart_service('contrail-named', [active_controller]) - # restart the agent process in the compute node - if restart_process == 'AgentRestart': - self.logger.info('restart the agent process') - for compute_ip in self.inputs.compute_ips: - self.inputs.restart_service('contrail-vrouter-agent', [compute_ip]) - if restart_process == 'scp': - self.logger.info('scp using name of vm') - vm_fixture['vm1-test'].put_pub_key_to_vm() - vm_fixture['vm2-test'].put_pub_key_to_vm() - size = '1000' - file = 'testfile' - y = 'ls -lrt %s' % file - cmd_to_check_file = [y] - cmd_to_sync = ['sync'] - create_result = True - transfer_result = True - - self.logger.info("-" * 80) - self.logger.info("FILE SIZE = %sB" % size) - self.logger.info("-" * 80) - self.logger.info('Creating a file 
of the specified size on %s' % - vm_fixture['vm1-test'].vm_name) - - self.logger.info( - 'Transferring the file from %s to %s using scp' % - (vm_fixture['vm1-test'].vm_name, - vm_fixture['vm2-test'].vm_name)) - vm_fixture['vm1-test'].check_file_transfer( - dest_vm_fixture=vm_fixture['vm2-test'], - mode='scp', - size=size) - - self.logger.info('Checking if the file exists on %s' % - vm_fixture['vm2-test'].vm_name) - vm_fixture['vm2-test'].run_cmd_on_vm(cmds=cmd_to_check_file) - output = vm_fixture['vm2-test'].return_output_cmd_dict[y] - print output - if size in output: - self.logger.info( - 'File of size %sB transferred via scp properly' % size) - else: - transfer_result = False - self.logger.error( - 'File of size %sB not transferred via scp ' % size) - assert transfer_result, 'File not transferred via scp' - # Verify after controlnode/dns/agent/named process restart ping vm's by - # using name. - for vm_name in vm_list: - msg = "Ping by using name %s is failed after controlnode/dns/"\ - "agent/named process restart. Dns server should resolve "\ - "VM name to IP" % (vm_name) - self.assertTrue(vm_fixture[vm_name] - .ping_with_certainty(ip=vm_name), msg) - self.verify_vm_dns_data(vm_dns_exp_data[vm_name]) - return True - # end test_vdns_controlnode_switchover - - @preposttest_wrapper - def test_vdns_roundrobin_rec_order(self): - ''' This test tests vdns round-robin record order''' - record_order = 'round-robin' - self.verify_dns_record_order(record_order) - return True - - @preposttest_wrapper - def test_vdns_random_rec_order(self): - ''' This test tests vdns random record order''' - record_order = 'random' - self.verify_dns_record_order(record_order) - return True - - @preposttest_wrapper - def test_vdns_fixed_rec_order(self): - '''This test tests vdns fixed record order''' - record_order = 'fixed' - self.verify_dns_record_order(record_order) - return True - - # until Bug #1866 is resolved this test is going to run for 1000 records. 
- @preposttest_wrapper - def test_vdns_zrecord_scaling(self): - '''This test tests vdns fixed record order''' - record_order = 'random' - test_type = 'recordscaling' - record_num = 1000 - self.verify_dns_record_order(record_order, test_type, record_num) - return True - - def verify_dns_record_order( - self, - record_order, - test_type='test_record_order', - record_num=10): - ''' This test tests DNS record order. - Round-Robin/Fixed/Random - ''' - vn1_ip = '10.10.10.1' - vn_name = 'vn1' - dns_server_name = 'vdns1' - domain_name = 'juniper.net' - ttl = 100 - ipam_name = 'ipam1' - project_fixture = self.useFixture( - ProjectFixture( - vnc_lib_h=self.vnc_lib, - project_name=self.inputs.project_name, - connections=self.connections)) - proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=self.inputs.project_name)) - dns_data = VirtualDnsType( - domain_name=domain_name, dynamic_records_from_client=True, - default_ttl_seconds=ttl, record_order=record_order) - # Create VDNS server object. - vdns_fixt1 = self.useFixture( - VdnsFixture( - self.inputs, - self.connections, - vdns_name=dns_server_name, - dns_data=dns_data)) - result, msg = vdns_fixt1.verify_on_setup() - self.assertTrue(result, msg) - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fixt1.vdns_fq_name) - ipam_mgmt_obj = IpamType( - ipam_dns_method='virtual-dns-server', ipam_dns_server=dns_server) - # Associate VDNS with IPAM. 
- ipam_fixt1 = self.useFixture( - NetworkIpamTestFixtureGen( - self.vnc_lib, - virtual_DNS_refs=[ - vdns_fixt1.obj], - parent_fixt=proj_fixt, - network_ipam_name=ipam_name, - network_ipam_mgmt=ipam_mgmt_obj)) - vn_nets = {'vn1': [(ipam_fixt1.getObj(), VnSubnetsType( - [IpamSubnetType(subnet=SubnetType(vn1_ip, 24))]))], } - # Launch VN with IPAM - vn_fixt = self.useFixture( - VirtualNetworkTestFixtureGen( - self.vnc_lib, - virtual_network_name=vn_name, - network_ipam_ref_infos=vn_nets[vn_name], - parent_fixt=proj_fixt, - id_perms=IdPermsType( - enable=True))) - vn_quantum_obj = self.quantum_h.get_vn_obj_if_present( - vn_fixt._name) - vm_fixture = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn_quantum_obj, - vm_name='vm1-test')) - vm_fixture.verify_vm_launched() - vm_fixture.verify_on_setup() - vm_fixture.wait_till_vm_is_up() - - rec_ip_list = [] - i = 1 - j = 1 - k = 1 - l = 1 - verify_rec_name_list = [] - verify_rec_name_ip = {} - if test_type == 'recordscaling': - self.logger.info('Creating %s number of records', record_num) - for num in range(1, record_num): - rec = 'test-rec-' + str(j) + '-' + str(i) - self.logger.info('Creating record %s', rec) - recname = 'rec' + str(j) + '-' + str(i) - rec_ip = str(l) + '.' + str(k) + '.' + str(j) + '.' + str(i) - vdns_rec_data = VirtualDnsRecordType( - recname, 'A', 'IN', rec_ip, ttl) - vdns_rec_fix = self.useFixture( - VdnsRecordFixture( - self.inputs, - self.connections, - rec, - vdns_fixt1.vdns_fix, - vdns_rec_data)) - sleep(1) - i = i + 1 - if i > 253: - j = j + 1 - i = 1 - if j > 253: - k = k + 1 - j = 1 - i = 1 - # sleep for some time after configuring 10 records. 
- if num % 10 == 0: - sleep(0.5) - # pic some random records for nslookup verification - if num % 100 == 0: - verify_rec_name_list.append(recname) - verify_rec_name_ip[recname] = rec_ip - # Sleep for some time - DNS takes some time to sync with BIND - # server - self.logger.info( - 'Sleep for 180sec to sync vdns server with vdns record entry') - sleep(180) - # Verify NS look up works for some random records values - self.logger.info('****NSLook up verification****') - import re - for rec in verify_rec_name_list: - cmd = 'nslookup ' + rec - vm_fixture.run_cmd_on_vm(cmds=[cmd]) - result = vm_fixture.return_output_cmd_dict[cmd] - result = result.replace("\r", "") - result = result.replace("\t", "") - result = result.replace("\n", " ") - m_obj = re.search( - r"Address:[0-9.]*#[0-9]*\s*.*Name:(.*\.juniper\.net)\s*Address:\s*([0-9.]*)", - result) - if not m_obj: - self.assertTrue( - False, - 'record search is failed,please check syntax of the ' - 'regular expression/NSlookup is failed') - print ('vm_name is ---> %s \t ip-address is ---> %s' % - (m_obj.group(1), m_obj.group(2))) - else: - for num in range(1, record_num): - rec = 'test-rec-' + str(j) + '-' + str(i) - rec_ip = '1.' + '1.' + str(j) + '.' 
+ str(i) - vdns_rec_data = VirtualDnsRecordType( - 'test1', 'A', 'IN', rec_ip, ttl) - vdns_rec_fix = self.useFixture( - VdnsRecordFixture( - self.inputs, - self.connections, - rec, - vdns_fixt1.vdns_fix, - vdns_rec_data)) - result, msg = vdns_rec_fix.verify_on_setup() - i = i + 1 - if i > 253: - j = j + 1 - i = 1 - rec_ip_list.append(rec_ip) - sleep(2) - # Get the NS look up record Verify record order - cmd = 'nslookup test1' - vm_fixture.run_cmd_on_vm(cmds=[cmd]) - result = vm_fixture.return_output_cmd_dict[cmd] - result = result.replace("\r", "") - result = result.replace("\t", "") - result = result.replace("\n", " ") - import re - m_obj = re.search( - r"Address:[0-9.]*#[0-9]*\s*Name:test1.juniper.net\s*(Address:\s*[0-9.]*)", - result) - if not m_obj: - self.assertTrue( - False, - 'record search is failed,please check ' - 'syntax of regular expression') - print m_obj.group(1) - dns_record = m_obj.group(1).split(':') - dns_record_ip = dns_record[1].lstrip() - next_ip = self.next_ip_in_list(rec_ip_list, dns_record_ip) - for rec in rec_ip_list: - vm_fixture.run_cmd_on_vm(cmds=[cmd]) - result = vm_fixture.return_output_cmd_dict[cmd] - result = result.replace("\r", "") - result = result.replace("\t", "") - result = result.replace("\n", " ") - m_obj = re.search( - r"Address:[0-9.]*#[0-9]*\s*Name:test1.juniper.net\s*(Address:\s*[0-9.]*)", - result) - print m_obj.group(1) - dns_record = m_obj.group(1).split(':') - dns_record_ip1 = dns_record[1].lstrip() - if record_order == 'round-robin': - if next_ip != dns_record_ip1: - print "\n VDNS records are not sent in \ - round-robin order" - self.assertTrue( - False, - 'VDNS records are not sent in round-robin order') - next_ip = self.next_ip_in_list(rec_ip_list, dns_record_ip1) - if record_order == 'random': - if dns_record_ip1 not in rec_ip_list: - print "\n VDNS records are not sent in random order" - self.assertTrue( - False, 'VDNS records are not sent random order') - if record_order == 'fixed': - if dns_record_ip != 
dns_record_ip1: - print "\n VDNS records are not sent \ - fixed in fixed order" - self.assertTrue( - False, - 'VDNS records are not sent fixed in fixed order') - return True - # end test_dns_record_order - - @preposttest_wrapper - def test_vdns_with_fip(self): - ''' This Test test vdns functionality with floating ip. - ''' - vn_nets = {'vn1': ['10.10.10.0/24'], 'vn2': ['20.20.20.0/24']} - vm_list = ['vm1-test', 'vm2-test'] - vm_vn_list = {'vm1-test': 'vn1', 'vm2-test': 'vn2'} - dns_server_name = 'vdns1' - domain_name = 'juniper.net' - ttl = 100 - ipam_name = 'ipam1' - fip_pool_name1 = 'some-pool1' - project_fixture = self.useFixture( - ProjectFixture( - vnc_lib_h=self.vnc_lib, - project_name=self.inputs.project_name, - connections=self.connections)) - proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=self.inputs.project_name)) - # VDNS - dns_data = VirtualDnsType( - domain_name=domain_name, dynamic_records_from_client=True, - default_ttl_seconds=ttl, record_order='random') - vdns_fixt1 = self.useFixture( - VdnsFixture( - self.inputs, - self.connections, - vdns_name=dns_server_name, - dns_data=dns_data)) - result, msg = vdns_fixt1.verify_on_setup() - self.assertTrue(result, msg) - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fixt1.vdns_fq_name) - # IPAM - ipam_mgmt_obj = IpamType( - ipam_dns_method='virtual-dns-server', ipam_dns_server=dns_server) - # Associate IPAM with VDNS server Object - ipam_fixt1 = self.useFixture( - NetworkIpamTestFixtureGen( - self.vnc_lib, - virtual_DNS_refs=[ - vdns_fixt1.obj], - parent_fixt=proj_fixt, - network_ipam_name=ipam_name, - network_ipam_mgmt=ipam_mgmt_obj)) - - vn_fixt = {} - vm_fixture = {} - # Launch VM with VN Created above. This test verifies - # on launch of VM agent should updated DNS 'A' and 'PTR' records - # The following code will verify the same. Also, we should be able ping - # with VM name. 
- for vm_name in vm_list: - vn = vm_vn_list[vm_name] - vn_fixt[vm_name] = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_name=vm_vn_list[vm_name], - inputs=self.inputs, - subnets=vn_nets[vn], - ipam_fq_name=ipam_fixt1.getObj().get_fq_name())) - vm_fixture[vm_name] = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn_fixt[vm_name].obj, - vm_name=vm_name)) - vm_fixture[vm_name].verify_vm_launched() - vm_fixture[vm_name].verify_on_setup() - vm_fixture[vm_name].wait_till_vm_is_up() - - # FIP - fip_fixture1 = self.useFixture( - FloatingIPFixture( - project_name=self.inputs.project_name, - inputs=self.inputs, - connections=self.connections, - pool_name=fip_pool_name1, - vn_id=vn_fixt['vm2-test'].vn_id)) - assert fip_fixture1.verify_on_setup() - fip_id1 = fip_fixture1.create_and_assoc_fip( - vn_fixt['vm2-test'].vn_id, vm_fixture['vm1-test'].vm_id) - self.addCleanup(fip_fixture1.disassoc_and_delete_fip, fip_id1) - assert fip_fixture1.verify_fip( - fip_id1, vm_fixture['vm1-test'], vn_fixt['vm2-test']) - # ping between two vms which are in different subnets by using name. 
- self.assertTrue( - vm_fixture['vm1-test'].ping_with_certainty(ip=vm_list[1]), - "Ping with VM name failed for VDNS with floating ip") - return True - - @preposttest_wrapper - def test_vdns_with_diff_projs(self): - ''' Test vdns with different projects ''' - project_list = ['project1', 'project2'] - ipam_list = {'project1': 'ipam1', 'project2': 'ipam2'} - policy_list = {'project1': 'policy1', 'project2': 'policy2'} - vn_list = {'project1': 'vn1', 'project2': 'vn2'} - vn_nets = {'project1': ['10.10.10.0/24'], - 'project2': ['20.20.20.0/24']} - vn_nets_woutsub = {'project1': '10.10.10.0', 'project2': '20.20.20.0'} - vm_list = {'project1': 'vm1', 'project2': 'vm2'} - proj_user = {'project1': 'user1', 'project2': 'user2'} - proj_pass = {'project1': 'user123', 'project2': 'user134'} - dns_server_name = 'vdns1' - domain_name = 'juniper.net' - ttl = 100 - # VDNS creation - dns_data = VirtualDnsType( - domain_name=domain_name, dynamic_records_from_client=True, - default_ttl_seconds=ttl, record_order='random') - vdns_fixt1 = self.useFixture( - VdnsFixture( - self.inputs, - self.connections, - vdns_name=dns_server_name, - dns_data=dns_data)) - result, msg = vdns_fixt1.verify_on_setup() - self.assertTrue(result, msg) - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fixt1.vdns_fq_name) - ipam_mgmt_obj = IpamType( - ipam_dns_method='virtual-dns-server', ipam_dns_server=dns_server) - ipam_fixt = {} - vn_fixt = {} - vm_fix = {} - pol_fixt = {} - rules = {'project1': [{'direction': '<>', - 'protocol': 'any', - 'dest_network': 'default-domain:project2:vn2', - 'source_network': 'any', - 'dst_ports': 'any', - 'simple_action': 'pass', - 'src_ports': 'any'}], - 'project2': [{'direction': '<>', - 'protocol': 'any', - 'dest_network': 'default-domain:project1:vn1', - 'source_network': 'any', - 'dst_ports': 'any', - 'simple_action': 'pass', - 'src_ports': 'any'}]} - admin_ip = self.inputs - admin_con = self.connections - for proj in project_list: - # Project creation - 
user_fixture = self.useFixture( - UserFixture( - connections=self.connections, - username=proj_user[proj], - password=proj_pass[proj])) - project_fixture = self.useFixture( - ProjectFixture( - project_name=proj, - username=proj_user[proj], - password=proj_pass[proj], - vnc_lib_h=self.vnc_lib, - connections=admin_con)) - user_fixture.add_user_to_tenant(proj, proj_user[proj], 'admin') - project_inputs = self.useFixture( - ContrailTestInit( - self.ini_file, - stack_user=project_fixture.username, - stack_password=project_fixture.password, - project_fq_name=[ - 'default-domain', - proj])) - project_connections = ContrailConnections(project_inputs) - proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=proj)) - self.logger.info( - 'Default SG to be edited for allow all on project: %s' % proj) - project_fixture.set_sec_group_for_allow_all(proj, 'default') - # policy creation - pol_fixt[proj] = self.useFixture( - PolicyFixture( - policy_name=policy_list[proj], - inputs=project_inputs, - connections=project_connections, - rules_list=rules[proj])) - # Ipam creation - ipam_fixt[proj] = self.useFixture( - NetworkIpamTestFixtureGen( - self.vnc_lib, - virtual_DNS_refs=[ - vdns_fixt1.obj], - parent_fixt=proj_fixt, - network_ipam_name=ipam_list[proj], - network_ipam_mgmt=ipam_mgmt_obj)) - # VN Creation - vn_fixt[proj] = self.useFixture( - VNFixture( - project_name=proj, - connections=project_connections, - vn_name=vn_list[proj], - inputs=project_inputs, - subnets=vn_nets[proj], - ipam_fq_name=ipam_fixt[proj].getObj().get_fq_name(), - policy_objs=[ - pol_fixt[proj].policy_obj])) - # VM creation - vm_fix[proj] = self.useFixture( - VMFixture( - project_name=proj, - connections=project_connections, - vn_obj=vn_fixt[proj].obj, - vm_name=vm_list[proj])) - vm_fix[proj].verify_vm_launched() - vm_fix[proj].verify_on_setup() - vm_fix[proj].wait_till_vm_is_up() - msg = "Ping by using name %s is failed. 
Dns server should \ - resolve VM name to IP" % (vm_list[proj]) - self.assertTrue( - vm_fix[proj].ping_with_certainty(ip=vm_list[proj]), msg) - vm_ip = vm_fix[proj].get_vm_ip_from_vm( - vn_fq_name=vm_fix[proj].vn_fq_name) - vm_rev_ip = vm_ip.split('.') - vm_rev_ip = '.'.join( - (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0])) - vm_rev_ip = vm_rev_ip + '.in-addr.arpa' - rev_zone = vn_nets_woutsub[proj].split('.') - rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2])) - rev_zone = rev_zone + '.in-addr.arpa' - # Frame the Expected DNS data for VM, one for 'A' record and - # another 'PTR' record. - rec_name = vm_list[proj] + "." + domain_name - vm_dns_exp_data = [{'rec_data': vm_ip, - 'rec_type': 'A', - 'rec_class': 'IN', - 'rec_ttl': str(ttl), - 'rec_name': rec_name, - 'installed': 'yes', - 'zone': domain_name}, - {'rec_data': rec_name, - 'rec_type': 'PTR', - 'rec_class': 'IN', - 'rec_ttl': str(ttl), - 'rec_name': vm_rev_ip, - 'installed': 'yes', - 'zone': rev_zone}] - self.verify_vm_dns_data(vm_dns_exp_data) - vm_dns_exp_data = [] - # ping between two vms which are in different subnets by using name. 
- self.assertTrue( - vm_fix['project1'].ping_with_certainty( - ip=vm_list['project2']), - "Ping with VM name failed for VDNS across the projects") - return True - - @preposttest_wrapper - def test_vdns_default_mode(self): - ''' Test vdns with default and None DNS Methods''' - vn_nets = {'vn1': ['10.10.10.0/24']} - vm_name = 'vm1-test' - vn_name = 'vn1' - ipam_name = 'ipam1' - fip_pool_name = self.inputs.fip_pool_name - fvn_name = 'public100' - mx_rt = self.inputs.mx_rt - router_name = self.inputs.ext_routers[0][0] - router_ip = self.inputs.ext_routers[0][1] - fip_subnets = [self.inputs.fip_pool] - - project_fixture = self.useFixture( - ProjectFixture( - vnc_lib_h=self.vnc_lib, - project_name=self.inputs.project_name, - connections=self.connections)) - proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=self.inputs.project_name)) - # VN Creation - fvn_fixture = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_name=fvn_name, - inputs=self.inputs, - subnets=fip_subnets, - router_asn=self.inputs.router_asn, - rt_number=mx_rt)) - assert fvn_fixture.verify_on_setup() - # Default DNS server - ipam_mgmt_obj = IpamType(ipam_dns_method='default-dns-server') - # Associate VDNS with IPAM. 
- ipam_fixt1 = self.useFixture( - NetworkIpamTestFixtureGen( - self.vnc_lib, - parent_fixt=proj_fixt, - network_ipam_name=ipam_name, - network_ipam_mgmt=ipam_mgmt_obj)) - vn_fixt = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_name=vn_name, - inputs=self.inputs, - subnets=vn_nets[vn_name], - ipam_fq_name=ipam_fixt1.getObj().get_fq_name())) - vm_fix = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn_fixt.obj, - vm_name=vm_name)) - vm_fix.verify_vm_launched() - vm_fix.verify_on_setup() - vm_fix.wait_till_vm_is_up() - # FIP creation - fip_fixture = self.useFixture( - FloatingIPFixture( - project_name=self.inputs.project_name, - inputs=self.inputs, - connections=self.connections, - pool_name=fip_pool_name, - vn_id=fvn_fixture.vn_id)) - assert fip_fixture.verify_on_setup() - fip_id = fip_fixture.create_and_assoc_fip( - fvn_fixture.vn_id, vm_fix.vm_id) - self.addCleanup(fip_fixture.disassoc_and_delete_fip, fip_id) - assert fip_fixture.verify_fip(fip_id, vm_fix, fvn_fixture) - routing_instance = fvn_fixture.ri_name - # Configuring all control nodes here - for entry in self.inputs.bgp_ips: - hostname = self.inputs.host_data[entry]['name'] - cn_fixture1 = self.useFixture( - CNFixture( - connections=self.connections, - router_name=hostname, - router_ip=entry, - router_type='contrail', - inputs=self.inputs)) - cn_fixturemx = self.useFixture( - CNFixture( - connections=self.connections, - router_name=router_name, - router_ip=router_ip, - router_type='mx', - inputs=self.inputs)) - sleep(5) - assert cn_fixturemx.verify_on_setup() - # DNS methos configued is default, DNS should resolve for external DNS - # lookups. 
- cmd = 'nslookup juniper.net' - vm_fix.run_cmd_on_vm(cmds=[cmd]) - result = vm_fix.return_output_cmd_dict[cmd] - import re - m_obj = re.search(r"(juniper.net)", result) - if not m_obj: - self.assertTrue( - False, - 'record search is failed,please check ' - 'syntax of the regular expression/NSlookup is failed') - print m_obj.group(1) - # Ipam DNS mentod is set to default, so DNS resolution to external - # world needs to be resolved. - self.assertTrue( - vm_fix.ping_with_certainty( - ip='juniper.net'), - "DNS name resolution failed when vdns set to default DNS method") - # Modify Ipam with DNS Method to none. - ipam_mgmt_obj = IpamType(ipam_dns_method='none') - update_ipam = ipam_fixt1.getObj() - update_ipam.set_network_ipam_mgmt(ipam_mgmt_obj) - self.vnc_lib.network_ipam_update(update_ipam) - vm_fix.run_cmd_on_vm(cmds=[cmd]) - result1 = vm_fix.return_output_cmd_dict[cmd] - m_obj1 = re.search(r"(no\s*servers\s*could\s*be\s*reached)", result1) - if not m_obj1: - self.assertTrue( - False, - 'record search is failed,please check syntax of ' - 'the regular expression/NSlookup is failed') - print m_obj1.group(1) - return True - - # This test creates 16 levels of vdns servers vdns1,vdns2,vdns3...vdns16. - # The VDNS server are configured as shown below. - # vdns1 (domain: juniper.net) - # ^ - # / - # / - # vdns2(domain: one.juniper.net) - # ^ - # / - # / - # vdns3(domain: two.one.juniper.net) - # ... - # vdns16 - # - @preposttest_wrapper - def test_vdns_tree_scaling(self): - ''' This test creates 16 levels of vdns servers vdns1,vdns2,vdns3...vdns16. - The VDNS server are configured as shown below. - vdns1 (domain: juniper.net) - ^ - / - / - vdns2(domain: one.juniper.net) - ^ - / - / - vdns3(domain: two.one.juniper.net) - ... 
- vdns16 - ''' - ttl = 1000 - project_fixture = self.useFixture( - ProjectFixture( - vnc_lib_h=self.vnc_lib, - project_name=self.inputs.project_name, - connections=self.connections)) - proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=self.inputs.project_name)) - dns_server_name_list = [ - 'vdns1', - 'vdns2', - 'vdns3', - 'vdns4', - 'vdns5', - 'vdns6', - 'vdns7', - 'vdns8', - 'vdns9', - 'vdns10', - 'vdns11', - 'vdns12', - 'vdns13', - 'vdns14', - 'vdns15', - 'vdns16'] - domain_name_list = { - 'vdns1': 'juniper.net', - 'vdns2': 'two.juniper.net', - 'vdns3': 'three.two.juniper.net', - 'vdns4': 'four.three.two.juniper.net', - 'vdns5': 'five.four.three.two.juniper.net', - 'vdns6': 'six.five.four.three.two.juniper.net', - 'vdns7': 'seven.six.five.four.three.two.juniper.net', - 'vdns8': 'eight.seven.six.five.four.three.two.juniper.net', - 'vdns9': 'nine.eight.seven.six.five.four.three.two.juniper.net', - 'vdns10': 'ten.nine.eight.seven.six.five.four.three.two.juniper.net', - 'vdns11': '11.ten.nine.eight.seven.six.five.four.three.two.juniper.net', - 'vdns12': '12.11.ten.nine.eight.seven.six.five.four.three.two.juniper.net', - 'vdns13': '13.12.11.ten.nine.eight.seven.six.five.four.three.two.juniper.net', - 'vdns14': '14.13.12.11.ten.nine.eight.seven.six.five.four.three.two.juniper.net', - 'vdns15': '15.14.13.12.11.ten.nine.eight.seven.six.five.four.three.two.juniper.net', - 'vdns16': '16.15.14.13.12.11.ten.nine.eight.seven.six.five.four.three.two.juniper.net'} - next_vdns_list = { - 'vdns1': 'vdns2', - 'vdns2': 'vdns3', - 'vdns3': 'vdns4', - 'vdns4': 'vdns5', - 'vdns5': 'vdns6', - 'vdns6': 'vdns7', - 'vdns7': 'vdns8', - 'vdns8': 'vdns9', - 'vdns9': 'vdns10', - 'vdns10': 'vdns11', - 'vdns11': 'vdns12', - 'vdns12': 'vdns13', - 'vdns13': 'vdns14', - 'vdns14': 'vdns15', - 'vdns15': 'vdns16', - 'vdns16': 'none'} - rec_names = { - 'vdns1': 'test-rec1', - 'vdns2': 'test-rec2', - 'vdns3': 'test-rec3', - 'vdns4': 'test-rec4', - 'vdns5': 'test-rec5', - 
'vdns6': 'test-rec6', - 'vdns7': 'test-rec7', - 'vdns8': 'test-rec8', - 'vdns9': 'test-rec9', - 'vdns10': 'test-rec10', - 'vdns11': 'test-rec11', - 'vdns12': 'test-rec12', - 'vdns13': 'test-rec13', - 'vdns14': 'test-rec14', - 'vdns15': 'test-rec15', - 'vdns16': 'test-rec16'} - ipam_dns_list = { - 'vdns1': 'ipam1', - 'vdns2': 'ipam2', - 'vdns3': 'ipam3', - 'vdns4': 'ipam4', - 'vdns5': 'ipam5', - 'vdns6': 'ipam6', - 'vdns7': 'ipam7', - 'vdns8': 'ipam8', - 'vdns9': 'ipam9', - 'vdns10': 'ipam10', - 'vdns11': 'ipam11', - 'vdns12': 'ipam12', - 'vdns13': 'ipam13', - 'vdns14': 'ipam14', - 'vdns15': 'ipam15', - 'vdns16': 'ipam16'} - vn_dns_list = { - 'vdns1': [ - 'vn1', - ['10.10.1.0/24']], - 'vdns2': [ - 'vn2', - ['10.10.2.0/24']], - 'vdns3': [ - 'vn3', - ['10.10.3.0/24']], - 'vdns4': [ - 'vn4', - ['10.10.4.0/24']], - 'vdns5': [ - 'vn5', - ['10.10.5.0/24']], - 'vdns6': [ - 'vn6', - ['10.10.6.0/24']], - 'vdns7': [ - 'vn7', - ['10.10.7.0/24']], - 'vdns8': [ - 'vn8', - ['10.10.8.0/24']], - 'vdns9': [ - 'vn9', - ['10.10.9.0/24']], - 'vdns10': [ - 'vn10', - ['10.10.10.0/24']], - 'vdns11': [ - 'vn11', - ['10.10.11.0/24']], - 'vdns12': [ - 'vn12', - ['10.10.12.0/24']], - 'vdns13': [ - 'vn13', - ['10.10.13.0/24']], - 'vdns14': [ - 'vn14', - ['10.10.14.0/24']], - 'vdns15': [ - 'vn15', - ['10.10.15.0/24']], - 'vdns16': [ - 'vn16', - ['10.10.16.0/24']]} - vm_dns_list = { - 'vdns1': 'vm1', - 'vdns2': 'vm2', - 'vdns3': 'vm3', - 'vdns4': 'vm4', - 'vdns5': 'vm5', - 'vdns6': 'vm6', - 'vdns7': 'vm7', - 'vdns8': 'vm8', - 'vdns9': 'vm9', - 'vdns10': 'vm10', - 'vdns11': 'vm11', - 'vdns12': 'vm12', - 'vdns13': 'vm13', - 'vdns14': 'vm14', - 'vdns15': 'vm15', - 'vdns16': 'vm16'} - vm_ip_dns_list = {} - vdns_fix = {} - vdns_data = {} - vdns_rec = {} - next_dns = None - # DNS configuration - for dns_name in dns_server_name_list: - # VNDS1 is root, so Next VDNS entry is not required. 
- if dns_name == 'vdns1': - vdns_data[dns_name] = VirtualDnsType( - domain_name=domain_name_list[dns_name], - dynamic_records_from_client=True, - default_ttl_seconds=ttl, - record_order='random') - else: - # VDNS2,VDNS3...vdns16 needs to point next vdns server. - vdns_data[dns_name] = VirtualDnsType( - domain_name=domain_name_list[dns_name], - dynamic_records_from_client=True, - default_ttl_seconds=ttl, - record_order='random', - next_virtual_DNS=next_dns.vdns_fq_name) - vdns_fix[dns_name] = self.useFixture( - VdnsFixture( - self.inputs, - self.connections, - vdns_name=dns_name, - dns_data=vdns_data[dns_name])) - result, msg = vdns_fix[dns_name].verify_on_setup() - self.assertTrue(result, msg) - next_dns = vdns_fix[dns_name] - - # Configure NS records for Next DNS server - for dns_name in dns_server_name_list: - if next_vdns_list[dns_name] != 'none': - next_dns = next_vdns_list[dns_name] - vdns_rec_data = VirtualDnsRecordType( - domain_name_list[next_dns], - 'NS', - 'IN', - vdns_fix[next_dns].vdns_fq_name, - ttl) - vdns_rec[dns_name] = self.useFixture( - VdnsRecordFixture( - self.inputs, - self.connections, - rec_names[dns_name], - vdns_fix[dns_name].vdns_fix, - vdns_rec_data)) - result, msg = vdns_rec[dns_name].verify_on_setup() - self.assertTrue(result, msg) - vn_fixt = {} - vm_fixture = {} - ipam_fixt = {} - - for dns_name in dns_server_name_list: - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fix[dns_name].vdns_fq_name) - ipam_mgmt_obj = IpamType( - ipam_dns_method='virtual-dns-server', - ipam_dns_server=dns_server) - # Associate IPAM with VDNS server Object - ipam_fixt[dns_name] = self.useFixture( - NetworkIpamTestFixtureGen( - self.vnc_lib, - virtual_DNS_refs=[ - vdns_fix[dns_name].obj], - parent_fixt=proj_fixt, - network_ipam_name=ipam_dns_list[dns_name], - network_ipam_mgmt=ipam_mgmt_obj)) - # Launch VN - vn_fixt[dns_name] = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, - connections=self.connections, - 
vn_name=vn_dns_list[dns_name][0], - inputs=self.inputs, - subnets=vn_dns_list[dns_name][1], - ipam_fq_name=ipam_fixt[dns_name].getObj().get_fq_name())) - # Launch VM - vm_fixture[dns_name] = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn_fixt[dns_name].obj, - vm_name=vm_dns_list[dns_name])) - vm_fixture[dns_name].verify_vm_launched() - vm_fixture[dns_name].verify_on_setup() - vm_fixture[dns_name].wait_till_vm_is_up() - vm_ip_dns_list[dns_name] = vm_fixture[dns_name].vm_ip - # perform NS lookup for each level - import re - for dns in dns_server_name_list: - for dns_name in dns_server_name_list: - cmd = 'nslookup ' + \ - vm_dns_list[dns_name] + '.' + domain_name_list[dns_name] - self.logger.info( - 'VM Name is ---> %s\t cmd is---> %s', - vm_dns_list[dns], - cmd) - vm_fixture[dns].run_cmd_on_vm(cmds=[cmd]) - result = vm_fixture[dns].return_output_cmd_dict[cmd] - result = result.replace("\r", "") - result = result.replace("\t", "") - result = result.replace("\n", " ") - m_obj = re.search( - r"Address:[0-9.]*#[0-9]*\s*.*Name:(.*\.juniper\.net)\s*Address:\s*([0-9.]*)", - result) - if not m_obj: - self.assertTrue( - False, - 'record search is failed,please check syntax of ' - 'the regular expression/NSlookup is failed') - print ('vm_name is ---> %s \t ip-address is ---> %s' % - (m_obj.group(1), m_obj.group(2))) - vm_name_to_verify = vm_dns_list[dns_name] + \ - '.' 
+ domain_name_list[dns_name] - self.assertEqual( - m_obj.group(1), - vm_name_to_verify, - 'VM name is not matching with nslookup command output') - self.assertEqual( - m_obj.group(2), - vm_ip_dns_list[dns_name], - 'IP Address is not matching with nslookup command output') - return True - - @preposttest_wrapper - def test_vdns_server_scaling(self): - ''' This Test tests vdns server scaling ''' - ttl = 100 - # Number of VDNS servers - vdns_scale = 1000 - # Number of records per server - record_num = 1 - project_fixture = self.useFixture( - ProjectFixture( - vnc_lib_h=self.vnc_lib, - project_name=self.inputs.project_name, - connections=self.connections)) - proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=self.inputs.project_name)) - vdns_fixt = {} - vdns_verify = [] - i = 1 - j = 1 - for num in range(1, vdns_scale + 1): - self.logger.info('Creating %s vdns server', num) - domain_name = 'vdns' + str(num) + '.net' - vdnsName = 'vdns' + str(num) - dns_data = VirtualDnsType( - domain_name=domain_name, dynamic_records_from_client=True, - default_ttl_seconds=ttl, record_order='random') - vdns_fixt[vdnsName] = self.useFixture( - VdnsFixture( - self.inputs, - self.connections, - vdns_name=vdnsName, - dns_data=dns_data)) - for rec_num in range(1, record_num + 1): - self.logger.info( - 'Creating %s record for vdns server %s', rec_num, num) - rec = 'test-rec-' + str(j) + '-' + str(i) - rec_ip = '1.' + '1.' + str(j) + '.' 
+ str(i) - rec_name = 'rec' + str(j) + '-' + str(i) - vdns_rec_data = VirtualDnsRecordType( - rec_name, 'A', 'IN', rec_ip, ttl) - vdns_rec_fix = self.useFixture( - VdnsRecordFixture( - self.inputs, - self.connections, - rec, - vdns_fixt[vdnsName].vdns_fix, - vdns_rec_data)) - sleep(1) - i = i + 1 - if i > 253: - j = j + 1 - i = 1 - if num % 100 == 0: - vdns_verify.append(vdnsName) - - vm_fixture = {} - i = 1 - # Sleep for some time - DNS takes some time to sync with BIND server - self.logger.info( - 'Sleep for 180sec to sync vdns server with bind server') - sleep(180) - for vdns in vdns_verify: - ipam_name = 'ipam-' + str(i) - vn_name = 'vn-' + str(i) - subnet = '10.10.' + str(i) + '.0/24' - vm_name = 'vm' + str(i) - vm_domain_name = vm_name + '.' + vdns + '.net' - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fixt[vdns].vdns_fq_name) - ipam_mgmt_obj = IpamType( - ipam_dns_method='virtual-dns-server', - ipam_dns_server=dns_server) - # Associate IPAM with VDNS server Object - ipam_fixt = self.useFixture( - NetworkIpamTestFixtureGen( - self.vnc_lib, - virtual_DNS_refs=[ - vdns_fixt[vdns].obj], - parent_fixt=proj_fixt, - network_ipam_name=ipam_name, - network_ipam_mgmt=ipam_mgmt_obj)) - # Launch VN - vn_fixt = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_name=vn_name, - inputs=self.inputs, - subnets=[subnet], - ipam_fq_name=ipam_fixt.getObj().get_fq_name())) - # Launch VM - vm_fixture[vdns] = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn_fixt.obj, - vm_name=vm_name)) - vm_fixture[vdns].verify_vm_launched() - vm_fixture[vdns].verify_on_setup() - vm_fixture[vdns].wait_till_vm_is_up() - # get vm IP from nova - vm_ip = vm_fixture[vdns].vm_ip - i = i + 1 - cmd = 'nslookup ' + vm_name - self.logger.info( - 'VM Name is ---> %s\t cmd is---> %s', vm_name, cmd) - vm_fixture[vdns].run_cmd_on_vm(cmds=[cmd]) - result = 
vm_fixture[vdns].return_output_cmd_dict[cmd] - result = result.replace("\r", "") - result = result.replace("\t", "") - result = result.replace("\n", " ") - m_obj = re.search( - r"Address:[0-9.]*#[0-9]*\s*.*Name:(.*\.vdns[0-9]*\.net)\s*Address:\s*([0-9.]*)", - result) - if not m_obj: - self.assertTrue( - False, - 'record search is failed,please check syntax of ' - 'the regular expression/NSlookup is failed') - print ('vm_name is ---> %s \t ip-address is ---> %s' % - (m_obj.group(1), m_obj.group(2))) - self.assertEqual( - m_obj.group(1), - vm_domain_name, - 'VM name is not matching with nslookup command output') - self.assertEqual( - m_obj.group(2), - vm_ip, - 'IP Address is not matching with nslookup command output') - return True - #end test_vdns_server_scaling - - @preposttest_wrapper - def test_vdns_with_same_zone(self): - ''' Test vdns in same zone with multi projects/vdns-servers ''' - project_list = ['project1', - 'project2', - 'project3', - 'project4', - 'project5', - 'project6'] - ipam_list = {'project1': 'ipam1', - 'project2': 'ipam2', - 'project3': 'ipam3', - 'project4': 'ipam4', - 'project5': 'ipam5', - 'project6': 'ipam6'} - vn_list = {'project1': 'vn1', - 'project2': 'vn2', - 'project3': 'vn3', - 'project4': 'vn4', - 'project5': 'vn5', - 'project6': 'vn6'} - vn_nets = {'project1': ['10.10.10.0/24'], - 'project2': ['20.10.10.0/24'], - 'project3': ['30.10.10.0/24'], - 'project4': ['10.10.10.0/24'], - 'project5': ['20.10.10.0/24'], - 'project6': ['30.10.10.0/24']} - vm_list = {'project1': 'vm1', - 'project2': 'vm2', - 'project3': 'vm3', - 'project4': 'vm4', - 'project5': 'vm5', - 'project6': 'vm6'} - proj_user = {'project1': 'user1', - 'project2': 'user2', - 'project3': 'user3', - 'project4': 'user4', - 'project5': 'user5', - 'project6': 'user6'} - proj_pass = {'project1': 'user1', - 'project2': 'user2', - 'project3': 'user3', - 'project4': 'user4', - 'project5': 'user5', - 'project6': 'user6'} - proj_vdns = {'project1': 'vdns1', - 'project2': 'vdns2', - 
'project3': 'vdns3', - 'project4': 'vdns4', - 'project5': 'vdns5', - 'project6': 'vdns6'} - vdns_fixt1 = {} - ipam_mgmt_obj = {} - for project in project_list: - dns_server_name = proj_vdns[project] - self.logger.info( - 'Creating vdns server:%s in project:%s', - dns_server_name, - project) - domain_name = 'juniper.net' - ttl = 100 - # VDNS creation - dns_data = VirtualDnsType( - domain_name=domain_name, dynamic_records_from_client=True, - default_ttl_seconds=ttl, record_order='random') - vdns_fixt1[project] = self.useFixture( - VdnsFixture( - self.inputs, - self.connections, - vdns_name=dns_server_name, - dns_data=dns_data)) - result, msg = vdns_fixt1[project].verify_on_setup() - self.assertTrue(result, msg) - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fixt1[project].vdns_fq_name) - ipam_mgmt_obj[project] = IpamType( - ipam_dns_method='virtual-dns-server', - ipam_dns_server=dns_server) - ipam_fixt = {} - vn_fixt = {} - vm_fix = {} - pol_fixt = {} - admin_ip = self.inputs - admin_con = self.connections - for proj in project_list: - # User creation - user_fixture = self.useFixture( - UserFixture( - connections=self.connections, - username=proj_user[proj], - password=proj_pass[proj])) - # Project creation - project_fixture = self.useFixture( - ProjectFixture( - project_name=proj, - vnc_lib_h=self.vnc_lib, - username=proj_user[proj], - password=proj_pass[proj], - connections=admin_con)) - user_fixture.add_user_to_tenant(proj, proj_user[proj], 'admin') - project_inputs = self.useFixture( - ContrailTestInit( - self.ini_file, - stack_user=project_fixture.username, - stack_password=project_fixture.password, - project_fq_name=[ - 'default-domain', - proj])) - project_connections = ContrailConnections(project_inputs) - proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=proj)) - self.logger.info( - 'Default SG to be edited for allow all on project: %s' % proj) - project_fixture.set_sec_group_for_allow_all(proj, 'default') - # 
Ipam creation - ipam_fixt[proj] = self.useFixture( - NetworkIpamTestFixtureGen( - self.vnc_lib, - virtual_DNS_refs=[ - vdns_fixt1[proj].obj], - parent_fixt=proj_fixt, - network_ipam_name=ipam_list[proj], - network_ipam_mgmt=ipam_mgmt_obj[proj])) - # VN Creation - vn_fixt[proj] = self.useFixture( - VNFixture( - project_name=proj, - connections=project_connections, - vn_name=vn_list[proj], - inputs=project_inputs, - subnets=vn_nets[proj], - ipam_fq_name=ipam_fixt[proj].getObj().get_fq_name())) - # VM creation - vm_fix[proj] = self.useFixture( - VMFixture( - project_name=proj, - connections=project_connections, - vn_obj=vn_fixt[proj].obj, - vm_name=vm_list[proj])) - vm_fix[proj].verify_vm_launched() - vm_fix[proj].verify_on_setup() - vm_fix[proj].wait_till_vm_is_up() - msg = "Ping by using name %s is failed. Dns server \ - should resolve VM name to IP" % (vm_list[proj]) - self.assertTrue( - vm_fix[proj].ping_with_certainty(ip=vm_list[proj]), msg) - vm_ip = vm_fix[proj].get_vm_ip_from_vm( - vn_fq_name=vm_fix[proj].vn_fq_name) - vm_rev_ip = vm_ip.split('.') - vm_rev_ip = '.'.join( - (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0])) - vm_rev_ip = vm_rev_ip + '.in-addr.arpa' - rev_zone = vn_nets[proj][0].split('/')[0].split('.') - rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2])) - rev_zone = rev_zone + '.in-addr.arpa' - # Frame the Expected DNS data for VM, one for 'A' record and - # another 'PTR' record. - rec_name = vm_list[proj] + "." 
+ domain_name - vm_dns_exp_data = [{'rec_data': vm_ip, - 'rec_type': 'A', - 'rec_class': 'IN', - 'rec_ttl': str(ttl), - 'rec_name': rec_name, - 'installed': 'yes', - 'zone': domain_name}, - {'rec_data': rec_name, - 'rec_type': 'PTR', - 'rec_class': 'IN', - 'rec_ttl': str(ttl), - 'rec_name': vm_rev_ip, - 'installed': 'yes', - 'zone': rev_zone}] - self.verify_vm_dns_data(vm_dns_exp_data) - vm_dns_exp_data = [] - self.logger.info( - 'Restart supervisor-config & supervisor-control and test ping') - for bgp_ip in self.inputs.bgp_ips: - self.inputs.restart_service('supervisor-control', [bgp_ip]) - sleep(30) - for cfgm_ip in self.inputs.cfgm_ips: - self.inputs.restart_service('supervisor-config', [cfgm_ip]) - sleep(60) - for proj in project_list: - msg = "Ping by using name %s is failed. Dns server \ - should resolve VM name to IP" % (vm_list[proj]) - self.assertTrue( - vm_fix[proj].ping_with_certainty(ip=vm_list[proj]), msg) - return True - # end test_vdns_with_same_zone - - @preposttest_wrapper - def test_vdns_with_diff_zone(self): - ''' Test vdns in different zones with multi projects ''' - project_list = [ - 'project1', - 'project2', - 'project3', - 'project4', - 'project5', - 'project6'] - ipam_list = { - 'project1': 'ipam1', - 'project2': 'ipam2', - 'project3': 'ipam3', - 'project4': 'ipam4', - 'project5': 'ipam5', - 'project6': 'ipam6'} - vn_list = { - 'project1': 'vn1', - 'project2': 'vn2', - 'project3': 'vn3', - 'project4': 'vn4', - 'project5': 'vn5', - 'project6': 'vn6'} - vn_nets = {'project1': ['10.10.10.0/24'], - 'project2': ['20.10.10.0/24'], - 'project3': ['30.10.10.0/24'], - 'project4': ['10.10.10.0/24'], - 'project5': ['20.10.10.0/24'], - 'project6': ['30.10.10.0/24'], } - vm_list = { - 'project1': 'vm1', - 'project2': 'vm2', - 'project3': 'vm3', - 'project4': 'vm4', - 'project5': 'vm5', - 'project6': 'vm6'} - proj_user = { - 'project1': 'user1', - 'project2': 'user2', - 'project3': 'user3', - 'project4': 'user4', - 'project5': 'user5', - 'project6': 
'user6'} - proj_pass = { - 'project1': 'user1', - 'project2': 'user2', - 'project3': 'user3', - 'project4': 'user4', - 'project5': 'user5', - 'project6': 'user6'} - proj_vdns = { - 'project1': 'vdns1', - 'project2': 'vdns2', - 'project3': 'vdns3', - 'project4': 'vdns4', - 'project5': 'vdns5', - 'project6': 'vdns6'} - vdns_fixt1 = {} - ipam_mgmt_obj = {} - for project in project_list: - dns_server_name = proj_vdns[project] - self.logger.info( - 'Creating vdns server:%s in project:%s', - dns_server_name, - project) - domain_name = '%s.net' % (project) - ttl = 100 - # VDNS creation - dns_data = VirtualDnsType( - domain_name=domain_name, dynamic_records_from_client=True, - default_ttl_seconds=ttl, record_order='random') - vdns_fixt1[project] = self.useFixture( - VdnsFixture( - self.inputs, - self.connections, - vdns_name=dns_server_name, - dns_data=dns_data)) - result, msg = vdns_fixt1[project].verify_on_setup() - self.assertTrue(result, msg) - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fixt1[project].vdns_fq_name) - ipam_mgmt_obj[project] = IpamType( - ipam_dns_method='virtual-dns-server', - ipam_dns_server=dns_server) - ipam_fixt = {} - vn_fixt = {} - vm_fix = {} - pol_fixt = {} - admin_ip = self.inputs - admin_con = self.connections - for proj in project_list: - # User creation - user_fixture = self.useFixture( - UserFixture( - connections=self.connections, - username=proj_user[proj], - password=proj_pass[proj])) - # Project creation - project_fixture = self.useFixture( - ProjectFixture( - project_name=proj, - vnc_lib_h=self.vnc_lib, - username=proj_user[proj], - password=proj_pass[proj], - connections=admin_con)) - user_fixture.add_user_to_tenant(proj, proj_user[proj], 'admin') - project_inputs = self.useFixture( - ContrailTestInit( - self.ini_file, - stack_user=project_fixture.username, - stack_password=project_fixture.password, - project_fq_name=[ - 'default-domain', - proj])) - project_connections = ContrailConnections(project_inputs) - 
proj_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=proj)) - self.logger.info( - 'Default SG to be edited for allow all on project: %s' % proj) - project_fixture.set_sec_group_for_allow_all(proj, 'default') - # Ipam creation - ipam_fixt[proj] = self.useFixture( - NetworkIpamTestFixtureGen( - self.vnc_lib, - virtual_DNS_refs=[ - vdns_fixt1[proj].obj], - parent_fixt=proj_fixt, - network_ipam_name=ipam_list[proj], - network_ipam_mgmt=ipam_mgmt_obj[proj])) - # VN Creation - vn_fixt[proj] = self.useFixture( - VNFixture( - project_name=proj, - connections=project_connections, - vn_name=vn_list[proj], - inputs=project_inputs, - subnets=vn_nets[proj], - ipam_fq_name=ipam_fixt[proj].getObj().get_fq_name())) - # VM creation - vm_fix[proj] = self.useFixture( - VMFixture( - project_name=proj, - connections=project_connections, - vn_obj=vn_fixt[proj].obj, - vm_name=vm_list[proj])) - vm_fix[proj].verify_vm_launched() - vm_fix[proj].verify_on_setup() - vm_fix[proj].wait_till_vm_is_up() - msg = "Ping by using name %s is failed. Dns server \ - should resolve VM name to IP" % (vm_list[proj]) - self.assertTrue( - vm_fix[proj].ping_with_certainty(ip=vm_list[proj]), msg) - vm_ip = vm_fix[proj].get_vm_ip_from_vm( - vn_fq_name=vm_fix[proj].vn_fq_name) - vm_rev_ip = vm_ip.split('.') - vm_rev_ip = '.'.join( - (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0])) - vm_rev_ip = vm_rev_ip + '.in-addr.arpa' - rev_zone = vn_nets[proj][0].split('/')[0].split('.') - rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2])) - rev_zone = rev_zone + '.in-addr.arpa' - # Frame the Expected DNS data for VM, one for 'A' record and - # another 'PTR' record. - domain_name = '%s.net' % (proj) - rec_name = vm_list[proj] + "." 
+ domain_name - vm_dns_exp_data = [{'rec_data': vm_ip, - 'rec_type': 'A', - 'rec_class': 'IN', - 'rec_ttl': str(ttl), - 'rec_name': rec_name, - 'installed': 'yes', - 'zone': domain_name}, - {'rec_data': rec_name, - 'rec_type': 'PTR', - 'rec_class': 'IN', - 'rec_ttl': str(ttl), - 'rec_name': vm_rev_ip, - 'installed': 'yes', - 'zone': rev_zone}] - self.verify_vm_dns_data(vm_dns_exp_data) - vm_dns_exp_data = [] - self.logger.info( - 'Restart supervisor-config & supervisor-control and test ping') - for bgp_ip in self.inputs.bgp_ips: - self.inputs.restart_service('supervisor-control', [bgp_ip]) - sleep(30) - for cfgm_ip in self.inputs.cfgm_ips: - self.inputs.restart_service('supervisor-config', [cfgm_ip]) - sleep(60) - for proj in project_list: - msg = "Ping by using name %s is failed. Dns server \ - should resolve VM name to IP" % (vm_list[proj]) - self.assertTrue( - vm_fix[proj].ping_with_certainty(ip=vm_list[proj]), msg) - return True - # end test_vdns_with_diff_zone - - def next_ip_in_list(self, iplist, item): - item_index = iplist.index(item) - next_item = None - # if it not end of list, return next element in the list - if item_index != (len(iplist) - 1): - next_item = iplist[item_index + 1] - # if the item is on end of list, the next element will be first element - # in the list - else: - next_item = iplist[0] - return next_item - - def verify_ns_lookup_data(self, vm_fix, cmd, expectd_data): - self.logger.info("Inside verify_ns_lookup_data") - self.logger.info( - "cmd string is %s and expected data %s for searching" % - (cmd, expectd_data)) - vm_fix.run_cmd_on_vm(cmds=[cmd]) - result = vm_fix.return_output_cmd_dict[cmd] - print ('\n result %s' % result) - if (result.find(expectd_data) == -1): - return False - return True - - def verify_vm_dns_data(self, vm_dns_exp_data): - self.logger.info("Inside verify_vm_dns_data") - result = True - dnsinspect_h = self.dnsagent_inspect[self.inputs.bgp_ips[0]] - dns_data = dnsinspect_h.get_dnsa_config() - vm_dns_act_data = [] - 
msg = '' - - # Traverse over expected record data - found_rec = False - for expected in vm_dns_exp_data: - # Get te actual record data from introspect - for act in dns_data: - for rec in act['records']: - if (rec['rec_name'] in expected['rec_name']) and ( - rec['rec_data'] in expected['rec_data']): - vm_dns_act_data = rec - found_rec = True - break - if found_rec: - break - if not vm_dns_act_data: - self.logger.info("DNS record match not found in dns agent") - return False - found_rec = False - # Compare the DNS entries populated dynamically on VM Creation. - self.logger.info( - "actual record data %s ,\n expected record data %s" % - (vm_dns_act_data, expected)) - if(vm_dns_act_data['rec_name'] not in expected['rec_name']): - result = result and False - if (vm_dns_act_data['rec_data'] not in expected['rec_data']): - msg = 'DNS record data info is not matching\n' - result = result and False - if(vm_dns_act_data['rec_type'] != expected['rec_type']): - msg = msg + 'DNS record_type info is not matching\n' - result = result and False - if(vm_dns_act_data['rec_ttl'] != expected['rec_ttl']): - msg = msg + 'DNS record ttl info is not matching\n' - result = result and False - if(vm_dns_act_data['rec_class'] != expected['rec_class']): - msg = msg + 'DNS record calss info is not matching\n' - result = result and False - vm_dns_act_data = [] - self.assertTrue(result, msg) - self.logger.info("Out of verify_vm_dns_data") - return True - # end verify_vm_dns_data -if __name__ == '__main__': - unittest.main() -# end of TestVdnsFixture diff --git a/scripts/vm_regression/__init__.py b/scripts/vm_regression/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/scripts/vm_regression/base.py b/scripts/vm_regression/base.py deleted file mode 100644 index b588de8aa..000000000 --- a/scripts/vm_regression/base.py +++ /dev/null @@ -1,121 +0,0 @@ -import test -import re -from common.connections import ContrailConnections -from common import isolated_creds -from vm_test 
import VMFixture -from vn_test import VNFixture - -class BaseVnVmTest(test.BaseTestCase): - - @classmethod - def setUpClass(cls): - super(BaseVnVmTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.inputs.set_af('v4') - cls.connections = cls.isolated_creds.get_conections() - cls.orch = cls.connections.orch - cls.quantum_h= cls.connections.quantum_h - cls.nova_h = cls.connections.nova_h - cls.vnc_lib= cls.connections.vnc_lib - cls.agent_inspect= cls.connections.agent_inspect - cls.cn_inspect= cls.connections.cn_inspect - cls.analytics_obj=cls.connections.analytics_obj - cls.api_s_inspect = cls.connections.api_server_inspect - #end setUpClass - - @classmethod - def tearDownClass(cls): - #cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() - super(BaseVnVmTest, cls).tearDownClass() - #end tearDownClass - - def remove_from_cleanups(self, fix): - for cleanup in self._cleanups: - if fix.cleanUp in cleanup: - self._cleanups.remove(cleanup) - break - #end remove_from_cleanups - - def get_default_gateway_interface(self,vm_fixture): - cmd = "route"+ r" -" +"n" - output = vm_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=False) - output = output.values()[0].split('\r') - output = output[1:] - for elem in output: - elem = elem.rstrip() - if ('0.0.0.0' in elem.split()[0]): - return elem.split()[-1] - return None - - def get_all_vm_interfaces(self,vm_fixture): - intf_list = [] - cmd = "route"+ r" -" +"n" - output = vm_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=False) - output = output.values()[0].split('\r') - output = output[2:] - for elem in output: - elem = elem.rstrip() - try: - if (elem.split()[-1] not in intf_list): - intf_list.append(elem.split()[-1]) - except Exception as 
e: - pass - return intf_list - - - - def trim_command_output_from_vm(self, output): - output = output.replace("\r", "") - output = output.replace("\t", "") - output = output.replace("\n", " ") - return output - # end trim_command_output_from_vm - - def create_vn(self, *args, **kwargs): - return self.useFixture( - VNFixture(project_name=self.inputs.project_name, - connections=self.connections, - inputs=self.inputs, - *args, **kwargs - )) - - def create_vm(self, vn_fixture, image_name='ubuntu', *args, **kwargs): - return self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn_fixture.obj, - image_name=image_name, - *args, **kwargs - )) - - def bringup_interface_forcefully(self, vm_fixture, intf='eth1'): - cmd = 'ifconfig %s up'%(intf) - for i in range (5): - cmd_to_pass = [cmd] - vm_fixture.run_cmd_on_vm(cmds=cmd_to_pass, as_sudo=True, timeout=60) - vm_fixture.run_cmd_on_vm(cmds=['ifconfig'], as_sudo=True, timeout=60) - output = vm_fixture.return_output_cmd_dict['ifconfig'] - if output and 'eth1' in output: - break - else: - time.sleep(3) - - def verify_eth1_ip_from_vm(self, vm_fix): - i = 'ifconfig eth1' - cmd_to_pass5 = [i] - out = vm_fix.run_cmd_on_vm(cmds=cmd_to_pass5, as_sudo=True, timeout=60) - output = vm_fix.return_output_cmd_dict[i] - match = re.search('inet addr:(.+?) Bcast:', output) - if match: - return True - else: - return False - diff --git a/scripts/vm_regression/test_esx.py b/scripts/vm_regression/test_esx.py index a97ea72ed..377a9bd52 100644 --- a/scripts/vm_regression/test_esx.py +++ b/scripts/vm_regression/test_esx.py @@ -27,7 +27,6 @@ def is_test_applicable(self): return (False, 'Skipping Test. 
Either or both nova and esx zones are empty') return (True, None) - @test.attr(type=['quick_sanity']) @preposttest_wrapper def test_ping_within_vn(self): ''' @@ -56,7 +55,6 @@ def test_ping_within_vn(self): return True # end test_ping_within_vn - @test.attr(type=['sanity']) @preposttest_wrapper def test_ping_within_2_vn(self): ''' @@ -91,7 +89,6 @@ def test_ping_within_2_vn(self): return True # end test_ping_within_2_vn - @test.attr(type=['sanity','quick_sanity']) @preposttest_wrapper def test_ping_with_policy(self): ''' @@ -204,12 +201,10 @@ def file_trf_tests(self, mode): return transfer_result # end test_vm_file_trf_scp_tests - @test.attr(type=['sanity']) @preposttest_wrapper def test_vm_file_trf_scp_tests(self): return self.file_trf_tests('scp') - @test.attr(type=['sanity']) @preposttest_wrapper def test_vm_file_trf_tftp_tests(self): return self.file_trf_tests('tftp') diff --git a/scripts/vm_regression/test_vm.py b/scripts/vm_regression/test_vm.py index e9a8d8432..aefb287b9 100644 --- a/scripts/vm_regression/test_vm.py +++ b/scripts/vm_regression/test_vm.py @@ -19,6 +19,9 @@ from tcutils.util import get_subnet_broadcast from tcutils.util import skip_because import test +from tcutils.tcpdump_utils import * + +import test_vm_basic class TestBasicVMVN0(BaseVnVmTest): @@ -30,7 +33,6 @@ def setUpClass(cls): def tearDownClass(cls): super(TestBasicVMVN0, cls).tearDownClass() - @preposttest_wrapper @skip_because(orchestrator = 'vcenter',address_family = 'v6') def test_broadcast_udp_w_chksum(self): @@ -245,7 +247,6 @@ def test_disassociate_vn_from_vm(self): ''' vn_fixture = self.create_vn() assert vn_fixture.verify_on_setup() - vn_obj = vn_fixture.obj vm1_fixture = self.create_vm(vn_fixture=vn_fixture) assert vm1_fixture.verify_on_setup() try: @@ -253,7 +254,7 @@ def test_disassociate_vn_from_vm(self): #if (self.inputs.orchestrator == 'vcenter'): # self.vnc_lib.virtual_network_delete(id=vn_obj.uuid) #else: - self.vnc_lib.virtual_network_delete(id=vn_obj.vn_id) + 
self.vnc_lib.virtual_network_delete(id=vn_fixture.uuid) except RefsExistError as e: self.logger.info( 'RefsExistError:Check passed that the VN cannot be disassociated/deleted when the VM exists') @@ -355,38 +356,6 @@ def test_host_route_add_delete(self): return True # end test_host_route_add_delete - @test.attr(type=['sanity','ci_sanity','quick_sanity']) - @preposttest_wrapper - @skip_because(orchestrator = 'vcenter',address_family = 'v6') - def test_ipam_add_delete(self): - ''' - Description: Test to validate IPAM creation, association of a VN and creating VMs in the VN. Ping b/w the VMs should be successful. - Test steps: - 1. Create a IPAM. - 2. Use this IPAM to create a VN. - 3. Launch 2 VMs in the VN. - Pass criteria: Ping between the VMs should PASS. - Maintainer : ganeshahv@juniper.net - ''' - ipam_obj = self.useFixture( - IPAMFixture(project_obj=self.project, name=get_random_name('my-ipam'))) - assert ipam_obj.verify_on_setup() - vn_fixture = self.create_vn(ipam_fq_name=ipam_obj.fq_name) - assert vn_fixture.verify_on_setup() - - vm1_fixture = self.create_vm(vn_fixture= vn_fixture, vm_name=get_random_name('vm1')) - vm2_fixture = self.create_vm(vn_fixture= vn_fixture, vm_name=get_random_name('vm2')) - assert vm1_fixture.verify_on_setup() - assert vm2_fixture.verify_on_setup() - - vm1_fixture.wait_till_vm_is_up() - vm2_fixture.wait_till_vm_is_up() - assert vm1_fixture.ping_to_vn(dst_vm_fixture=vm2_fixture, - vn_fq_name=vn_fixture.get_vn_fq_name()) - - return True - # end test_ipam_add_delete - @preposttest_wrapper @skip_because(orchestrator = 'vcenter',address_family = 'v6') def test_multiple_metadata_service_scale(self): @@ -505,29 +474,29 @@ def test_policy_between_vns_diff_proj(self): username=user_list[0][0], password=user_list[0][1])) project_fixture1 = self.useFixture( ProjectFixture( - project_name=projects[ - 0], vnc_lib_h=self.vnc_lib, username=user_list[0][0], + project_name=projects[0], username=user_list[0][0], password=user_list[0][1], 
connections=self.connections)) + project_fixture1.set_user_creds(project_fixture1.username,project_fixture1.password) user1_fixture.add_user_to_tenant(projects[0], user_list[0][0] , user_list[0][2]) - project_inputs1 = self.useFixture( - ContrailTestInit( - self.ini_file, stack_user=project_fixture1.username, - stack_password=project_fixture1.password, project_fq_name=['default-domain', projects[0]],logger = self.logger)) + project_inputs1 = ContrailTestInit( + self.ini_file, stack_user=project_fixture1.project_username, + stack_password=project_fixture1.project_user_password, + stack_tenant=projects[0], logger = self.logger) project_connections1 = ContrailConnections(project_inputs1,self.logger) user2_fixture= self.useFixture(UserFixture(connections=self.connections, username=user_list[1][0], password=user_list[1][1])) project_fixture2 = self.useFixture( ProjectFixture( - project_name=projects[ - 1], vnc_lib_h=self.vnc_lib, username=user_list[1][0], + project_name=projects[1], username=user_list[1][0], password=user_list[1][1], connections=self.connections)) + project_fixture2.set_user_creds(project_fixture2.username,project_fixture2.password) user2_fixture.add_user_to_tenant(projects[1], user_list[1][0] , user_list[1][2]) - project_inputs2 = self.useFixture( - ContrailTestInit( - self.ini_file, stack_user=project_fixture2.username, - stack_password=project_fixture2.password, project_fq_name=['default-domain', projects[1]], logger = self.logger)) - project_connections2 = ContrailConnections(project_inputs2 , self.logger) + project_inputs2 = ContrailTestInit( + self.ini_file, stack_user=project_fixture2.project_username, + stack_password=project_fixture2.project_user_password, + stack_tenant=projects[1], logger = self.logger) + project_connections2 = ContrailConnections(project_inputs2, self.logger) project_inputs1.set_af(self.inputs.get_af()) project_inputs2.set_af(self.inputs.get_af()) @@ -632,14 +601,14 @@ def test_diff_proj_same_vn_vm_add_delete(self): 
connections=self.connections, username=user_list[0][0], password=user_list[0][1])) project_fixture1 = self.useFixture( ProjectFixture( - project_name=projects[ - 0], vnc_lib_h=self.vnc_lib, username=user_list[0][0], + project_name=projects[0], username=user_list[0][0], password=user_list[0][1], connections=self.connections)) - user1_fixture.add_user_to_tenant(projects[0], user_list[0][0] , user_list[0][2]) - project_inputs1 = self.useFixture( - ContrailTestInit( - self.ini_file, stack_user=project_fixture1.username, - stack_password=project_fixture1.password, project_fq_name=['default-domain', projects[0]] , logger = self.logger)) + project_fixture1.set_user_creds(project_fixture1.username,project_fixture1.password) + user1_fixture.add_user_to_tenant(projects[0], user_list[0][0] , user_list[0][2]) + project_inputs1 = ContrailTestInit( + self.ini_file, stack_user=project_fixture1.project_username, + stack_password=project_fixture1.project_user_password, + stack_tenant=projects[0], logger = self.logger) project_connections1 = ContrailConnections(project_inputs1 , self.logger) user2_fixture= self.useFixture( @@ -647,14 +616,14 @@ def test_diff_proj_same_vn_vm_add_delete(self): connections=self.connections, username=user_list[1][0], password=user_list[1][1])) project_fixture2 = self.useFixture( ProjectFixture( - project_name=projects[ - 1], vnc_lib_h=self.vnc_lib, username=user_list[1][0], + project_name=projects[1], username=user_list[1][0], password=user_list[1][1], connections=self.connections)) + project_fixture2.set_user_creds(project_fixture2.username,project_fixture2.password) user2_fixture.add_user_to_tenant(projects[1], user_list[1][0] , user_list[1][2]) - project_inputs2 = self.useFixture( - ContrailTestInit( - self.ini_file, stack_user=project_fixture2.username, - stack_password=project_fixture2.password, project_fq_name=['default-domain', projects[1]], logger = self.logger)) + project_inputs2 = ContrailTestInit( + self.ini_file, 
stack_user=project_fixture2.project_username, + stack_password=project_fixture2.project_user_password, + stack_tenant= projects[1], logger = self.logger) project_connections2 = ContrailConnections(project_inputs2 , self.logger) project_inputs1.set_af(self.inputs.get_af()) project_inputs2.set_af(self.inputs.get_af()) @@ -955,97 +924,6 @@ def test_ping_on_broadcast_multicast(self): return True # end subnet ping - @test.attr(type=['sanity', 'ci_sanity']) - @preposttest_wrapper - @skip_because(orchestrator = 'vcenter',address_family = 'v6') - def test_ping_within_vn_two_vms_two_different_subnets(self): - ''' - Description: Validate Ping between 2 VMs in the same VN, 2 VMs in different VNs. - Test steps: - 1. Create 2 VNs and launch 2 VMs in them. - 2. Ping between the VMs in the same VN should go thru fine. - 3. Ping to the subnet broadcast and all-broadcast address. - Pass criteria: VM in the same subnet will respond to both the pings, while the VM in a different VN should respond only to the - all-broadcast address. 
- Maintainer : ganeshahv@juniper.net - ''' - vn1_name = 'vn030' - vn1_subnets = ['31.1.1.0/29', '31.1.2.0/29'] - subnet1 = '31.1.1.0/29' - subnet2 = '31.1.2.0/29' - fixed_ip1 = '31.1.1.4' - fixed_ip2 = '31.1.2.4' - subnet_objects = [] - # vn1_subnets=['30.1.1.0/24'] - vn1_vm1_name = 'vm1' - vn1_vm2_name = 'vm2' - vn1_fixture = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_name=vn1_name, inputs=self.inputs, subnets=vn1_subnets)) - assert vn1_fixture.verify_on_setup() - - subnet_objects = vn1_fixture.get_subnets() - ports = {} - - for subnet in subnet_objects: - if subnet['cidr'] == subnet1: - ports['subnet1'] = vn1_fixture.create_port(vn1_fixture.vn_id, - subnet_id=subnet['id'], ip_address=fixed_ip1) - elif subnet['cidr'] == subnet2: - ports['subnet2'] = vn1_fixture.create_port(vn1_fixture.vn_id, - subnet_id=subnet['id'],ip_address=fixed_ip2) - - vm1_fixture = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_obj=vn1_fixture.obj, vm_name=vn1_vm1_name, port_ids = [ports['subnet1']['id']])) - vm2_fixture = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_obj=vn1_fixture.obj, vm_name=vn1_vm2_name,port_ids = [ports['subnet2']['id']])) - assert vm1_fixture.verify_on_setup() - assert vm2_fixture.verify_on_setup() - vm1_fixture.wait_till_vm_is_up() - vm2_fixture.wait_till_vm_is_up() - assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip) - assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip) - # Geting the VM ips - vm1_ip = vm1_fixture.vm_ip - vm2_ip = vm2_fixture.vm_ip - ip_list = [vm1_ip, vm2_ip] -# gettig broadcast ip for vm1_ip - ip_broadcast = get_subnet_broadcast('%s/%s'%(vm1_ip, '29')) - list_of_ip_to_ping = [ip_broadcast, '224.0.0.1', '255.255.255.255'] - # passing command to vms so that they respond to subnet broadcast - cmd_list_to_pass_vm = [ - 'echo 0 > 
/proc/sys/net/ipv4/icmp_echo_ignore_broadcasts'] - - vm1_fixture.run_cmd_on_vm(cmds=cmd_list_to_pass_vm, as_sudo=True) - vm2_fixture.run_cmd_on_vm(cmds=cmd_list_to_pass_vm, as_sudo=True) - - for dst_ip in list_of_ip_to_ping: - print 'pinging from %s to %s' % (vm1_ip, dst_ip) -# pinging from Vm1 to subnet broadcast - if os.environ.has_key('ci_image'): - ping_output = vm1_fixture.ping_to_ip( - dst_ip, return_output=True) - else: - ping_output = vm1_fixture.ping_to_ip( - dst_ip, return_output=True, other_opt='-b') - expected_result = ' 0% packet loss' - assert (expected_result in ping_output) -# getting count of ping response from each vm - string_count_dict = {} - string_count_dict = get_string_match_count(ip_list, ping_output) - print string_count_dict - if (dst_ip == ip_broadcast): - assert (string_count_dict[vm2_ip] == 0) - if (dst_ip == '224.0.0.1' or dst_ip == '255.255.255.255'): - assert (string_count_dict[vm2_ip] > 0) or ('DUP!' in ping_output) - return True - #test_ping_within_vn_two_vms_two_different_subnets - @preposttest_wrapper @skip_because(orchestrator = 'vcenter',address_family = 'v6') def test_release_ipam(self): @@ -1317,8 +1195,28 @@ def setUpClass(cls): def tearDownClass(cls): super(TestBasicVMVN4, cls).tearDownClass() + @test.attr(type=['sanity', 'ci_sanity', 'vcenter', 'suite1']) @preposttest_wrapper @skip_because(orchestrator = 'vcenter',address_family = 'v6') + def test_traffic_bw_vms_diff_pkt_size_w_chksum(self): + ''' + Description: Test to validate VM creation and deletion. + Test steps: + 1. Create VM in a VN. + Pass criteria: Creation and deletion of the VM should go thru fine. 
+ Maintainer : ganeshahv@juniper.net + ''' + vn_fixture = self.create_vn() + assert vn_fixture.verify_on_setup() + vn_obj = vn_fixture.obj + vm1_fixture = self.create_vm(vn_fixture=vn_fixture, + vm_name=get_random_name('vm_add_delete')) + assert vm1_fixture.verify_on_setup() + return True + # end test_traffic_bw_vms_diff_pkt_size_w_chksum + + + @preposttest_wrapper def test_traffic_bw_vms_diff_pkt_size_w_chksum(self): ''' Description: Test to validate TCP, ICMP, UDP traffic of different packet sizes b/w VMs created within a VN and validate UDP checksum. @@ -1565,7 +1463,110 @@ def test_vm_arp(self): assert result, "ARPing Failure" return True # end test_vm_arp - + + @preposttest_wrapper + def test_updating_vm_ip(self): + ''' + Description: Test to validate that updating the IP address of the VM fails. + This script verifies the fix of "Bug 526260 :Old IP address remained even instance-ip was updated" + Test steps: + 1. Create a VM in a VN. + 2. Try to update the IP of the VM. + Pass criteria: The fix to the bug is that modification of fixed IP will not be allowed. + Proper error should be observed. 
+ Maintainer : pulkitt@juniper.net + ''' + vm_name = 'VM1' + vn_name = 'VN1' + vn_subnets = ['10.10.10.0/24'] + fixed_ip = "10.10.10.7" + vn_fixture = self.useFixture( + VNFixture( + project_name=self.inputs.project_name, connections=self.connections, + vn_name=vn_name, inputs=self.inputs, subnets=vn_subnets)) + assert vn_fixture.verify_on_setup() + vn_obj = vn_fixture.obj + subnet_objects = vn_fixture.get_subnets() + ports = {} + for subnet in subnet_objects: + if subnet['cidr'] == vn_subnets[0]: + ports['subnet'] = vn_fixture.create_port(vn_fixture.vn_id, + subnet_id=subnet['id'], ip_address=fixed_ip) + vm_fixture = self.useFixture( VMFixture(project_name=self.inputs.project_name, + connections=self.connections, vn_obj=vn_obj, image_name='ubuntu-traffic', + vm_name=vm_name, port_ids = [ports['subnet']['id']])) + assert vm_fixture.verify_on_setup() + vm_fixture.wait_till_vm_is_up() + port_dict = {} + fixed_ips = [{'subnet_id': subnet['id'], 'ip_address': "10.10.10.5"}] + port_dict['fixed_ips'] = fixed_ips + try: + vn_fixture.update_port(ports['subnet']['id'], port_dict) + self.logger.error("Fixed IP have been modified. It was not expected to happen.\ + This is not supported.") + msg = "Fixed IP have been modified. It was not expected to happen.This is not supported." + result = False + assert result, msg + except Exception as e: + if "Fixed ip cannot be updated on a port" in str(e): + self.logger.info("Expected error raised. Error Logs: %s" % e) + else: + self.logger.error("Some unexpected error happened. Error Logs: %s" % e) + result = False + assert result, e + + @preposttest_wrapper + def test_gratuitous_arp(self): + ''' + Description: This Test case verifies Bug #1513793 + An ARP request/response packet with zero source IP address need to be + treated as Gratuitous ARP packet. The ARP request should reach VMs spanning + multiple compute nodes. + Test steps: + 1. Create 2 VMs in a VN. + 2. 
Start a arping from one of the VMs with source IP as 0.0.0.0 + Pass criteria: arping should reach the VM on other compute node. + Maintainer : pulkitt@juniper.net + Note: This test case is intended to test multiple compute node scenario but + will work for single node as well. + ''' + vm1_name = 'VM1' + vm2_name = 'VM2' + vn_name = 'VN' + vn_subnets = ['11.1.1.0/24'] + vn_fixture = self.useFixture( + VNFixture( + project_name=self.inputs.project_name, connections=self.connections, + vn_name=vn_name, inputs=self.inputs, subnets=vn_subnets)) + assert vn_fixture.verify_on_setup() + vn_obj = vn_fixture.obj + vm1_fixture = self.useFixture(VMFixture(connections=self.connections, + node_name= self.inputs.compute_names[0], vn_obj=vn_obj, + image_name= 'ubuntu-traffic', vm_name=vm1_name, + project_name=self.inputs.project_name)) + assert vm1_fixture.verify_on_setup() + if len(self.inputs.compute_ips) > 1: + vm2_fixture = self.useFixture(VMFixture(connections=self.connections, + node_name=self.inputs.compute_names[1], vn_obj=vn_obj, + image_name='ubuntu-traffic', vm_name=vm2_name, + project_name=self.inputs.project_name)) + else: + vm2_fixture = self.useFixture(VMFixture(connections=self.connections, + node_name=self.inputs.compute_names[0], vn_obj=vn_obj, + image_name='ubuntu-traffic', vm_name=vm2_name, + project_name=self.inputs.project_name)) + assert vm2_fixture.verify_on_setup() + vm1_fixture.wait_till_vm_is_up() + vm2_fixture.wait_till_vm_is_up() + filters = '\'(arp and src host 0.0.0.0)\'' + session, pcap = start_tcpdump_for_vm_intf(self, vm2_fixture, + vn_fq_name = vn_fixture.vn_fq_name, filters = filters) + i = 'arping -c 10 %s -S 0.0.0.0 -t ff:ff:ff:ff:ff:ff' % vm2_fixture.vm_ip + cmd_to_output = [i] + vm1_fixture.run_cmd_on_vm(cmds=cmd_to_output, as_sudo=True) + assert verify_tcpdump_count(self, session, pcap, exp_count=10) + return True + # end test_gratuitous_arp class TestBasicVMVN5(BaseVnVmTest): @@ -1579,7 +1580,7 @@ def tearDownClass(cls): def runTest(self): 
pass - #end runTes + #end runTes @preposttest_wrapper @skip_because(orchestrator = 'vcenter',address_family = 'v6') @@ -2093,7 +2094,6 @@ def runTest(self): #end runTes @preposttest_wrapper - @skip_because(orchestrator = 'vcenter',address_family = 'v6') def test_vn_subnet_types(self): ''' Description: Validate various type of subnets associated to VNs. @@ -2191,7 +2191,6 @@ def itest_vn_vm_no_ip_assign(self): vm_name=vm_name, project_name=self.inputs.project_name)) self.project_fixture = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) vm_obj = self.connections.orch.get_vm_if_present( @@ -2251,7 +2250,6 @@ def test_multiple_vn_vm(self): # end test_multiple_vn_vm @test.attr(type=['sanity']) - #@test.attr(type=['sanity', 'ci_sanity']) @preposttest_wrapper @skip_because(orchestrator = 'vcenter',address_family = 'v6') def test_ping_on_broadcast_multicast_with_frag(self): @@ -2455,211 +2453,6 @@ def test_agent_cleanup_with_control_node_stop(self): return True # end test_agent_cleanup_with_control_node_stop - @test.attr(type=['sanity', 'ci_sanity']) - @preposttest_wrapper - @skip_because(orchestrator = 'vcenter') - def test_metadata_service(self): - ''' - Description: Test to validate metadata service on VM creation. 
- - 1.Verify from global-vrouter-config if metadata configures or not - fails otherwise - 2.Create a shell script which writes 'hello world ' in a file in /tmp and save the script on the nova api node - 3.Create a vm with userdata pointing to that script - script should get executed during vm boot up - 4.Go to the vm and verify if the file with 'hello world ' written saved in /tmp of the vm - fails otherwise - Maintainer: sandipd@juniper.net - ''' - - gvrouter_cfg_obj = self.api_s_inspect.get_global_vrouter_config() - ln_svc = gvrouter_cfg_obj.get_link_local_service() - if ln_svc: - self.logger.info( - "Metadata configured in global_vrouter_config as %s" % - (str(ln_svc))) - else: - self.logger.warn( - "Metadata NOT configured in global_vrouter_config") - result = False - assert result - return True - - text = """#!/bin/sh -echo "Hello World. The time is now $(date -R)!" | tee /tmp/output.txt - """ - try: - with open("/tmp/metadata_script.txt", "w") as f: - f.write(text) - except Exception as e: - self.logger.exception( - "Got exception while creating /tmp/metadata_script.txt as %s" % (e)) - - if os.environ.has_key('ci_image'): - img_name = os.environ['ci_image'] - else: - img_name = 'ubuntu' - vn_name = get_random_name('vn2_metadata') - vm1_name = get_random_name('vm_in_vn2_metadata') - vn_fixture = self.create_vn(vn_name=vn_name, af='v4') - assert vn_fixture.verify_on_setup() - vm1_fixture = self.create_vm(vn_fixture=vn_fixture, vm_name=vm1_name, - image_name=img_name, - userdata='/tmp/metadata_script.txt', - flavor='m1.tiny') - assert vm1_fixture.verify_on_setup() - assert vm1_fixture.wait_till_vm_is_up() - - cmd = 'ls /tmp/' - result = False - for i in range(3): - self.logger.info("Retry %s" % (i)) - ret = vm1_fixture.run_cmd_on_vm(cmds=[cmd]) - self.logger.info("ret : %s" % (ret)) - for elem in ret.values(): - if 'output.txt' in elem: - result = True - break - if result: - break - time.sleep(2) - if not result: - self.logger.warn( - "metadata_script.txt did 
not get executed in the vm") - self.logger.info('%s' %vm1_fixture.get_console_output()) - else: - self.logger.info("Printing the output.txt :") - cmd = 'cat /tmp/output.txt' - ret = vm1_fixture.run_cmd_on_vm(cmds=[cmd]) - self.logger.info("%s" % (ret.values())) - for elem in ret.values(): - if 'Hello World' in elem: - result = True - else: - self.logger.warn( - "metadata_script.txt did not get executed in the vm...output.txt does not contain proper output") - result = False - assert result - return True - - @test.attr(type=['sanity', 'ci_sanity', 'vcenter']) - @preposttest_wrapper - @skip_because(orchestrator = 'vcenter',address_family = 'v6') - def test_generic_link_local_service(self): - ''' - Description: Test to validate generic linklocal service - running nova list from vm. - 1.Create generic link local service to be able to wget to jenkins - 2.Create a vm - 3.Try wget to jenkins - passes if successful else fails - - Maintainer: sandipd@juniper.net - ''' - - result = True - vn_name = get_random_name('vn2_metadata') - vm1_name = get_random_name('nova_client_vm') - vn_subnets = ['11.1.1.0/24'] - vn_fixture = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, connections=self.connections, - vn_name=vn_name, inputs=self.inputs, subnets=vn_subnets)) - #assert vn_fixture.verify_on_setup() - vn_obj = vn_fixture.obj - img_name = os.environ['ci_image'] if os.environ.has_key('ci_image') else 'ubuntu-traffic' - vm1_fixture = self.useFixture(VMFixture(connections=self.connections, - vn_obj=vn_obj, vm_name=vm1_name, project_name=self.inputs.project_name, - image_name=img_name)) - - time.sleep(90) - assert vm1_fixture.verify_on_setup() - vm1_fixture.wait_till_vm_is_up() - - cfgm_hostname = self.inputs.host_data[self.inputs.cfgm_ip]['name'] - compute_user = self.inputs.host_data[vm1_fixture.vm_node_ip]['username'] - compute_password = self.inputs.host_data[vm1_fixture.vm_node_ip]['password'] - cfgm_host_new_name = cfgm_hostname + '-test' - cfgm_control_ip 
= self.inputs.host_data[cfgm_hostname]['host_control_ip'] - cfgm_intro_port = '8084' - link_local_args = "--admin_user %s \ - --admin_password %s --linklocal_service_name cfgmintrospect\ - --linklocal_service_ip 169.254.1.2\ - --linklocal_service_port 80\ - --ipfabric_dns_service_name %s\ - --ipfabric_service_port %s\ - --admin_tenant_name %s\ - " %( self.inputs.stack_user, self.inputs.stack_password, - cfgm_host_new_name, cfgm_intro_port, - self.inputs.project_name) - if not self.inputs.devstack: - cmd = "python /opt/contrail/utils/provision_linklocal.py --oper add %s" % (link_local_args) - else: - cmd = "python /opt/stack/contrail/controller/src/config/utils/provision_linklocal.py --oper add %s" % ( - link_local_args) - - update_hosts_cmd = 'echo "%s %s" >> /etc/hosts' % (cfgm_control_ip, - cfgm_host_new_name) - self.inputs.run_cmd_on_server(vm1_fixture.vm_node_ip, - update_hosts_cmd, - compute_user, - compute_password) - - args = shlex.split(cmd.encode('UTF-8')) - process = Popen(args, stdout=PIPE, stderr=PIPE) - (stdout, stderr) = process.communicate() - if stderr: - self.logger.warn( - "Linklocal service could not be created, err : \n %s" % (stderr)) - else: - self.logger.info("%s" % (stdout)) - cmd = 'wget http://169.254.1.2:80' - - ret = None - for i in range(3): - try: - self.logger.info("Retry %s" % (i)) - ret = vm1_fixture.run_cmd_on_vm(cmds=[cmd]) - if not ret[cmd]: - raise Exception('wget of http://169.254.1.2:80 returned None') - except Exception as e: - time.sleep(5) - self.logger.exception("Got exception as %s" % (e)) - else: - break - if ret[cmd]: - if 'Connection timed out' in str(ret): - self.logger.warn("Generic metadata did NOT work") - result = False - if '200 OK' in str(ret) or '100%' in str(ret): - self.logger.info("Generic metadata worked") - result = True - else: - self.logger.error('Generic metadata check failed') - result = False - - if not self.inputs.devstack: - cmd = "python /opt/contrail/utils/provision_linklocal.py --oper delete %s" 
% (link_local_args) - else: - cmd = "python /opt/stack/contrail/controller/src/config/utils/provision_linklocal.py --oper delete %s" % ( - link_local_args) - - args = shlex.split(cmd.encode('UTF-8')) - self.logger.info('Deleting the link local service') - process = Popen(args, stdout=PIPE) - stdout, stderr = process.communicate() - if stderr: - self.logger.warn( - "Linklocal service could not be deleted, err : \n %s" % (stderr)) - result = result and False - else: - self.logger.info("%s" % (stdout)) - - # Remove the hosts entry which was added earlier - update_hosts_cmd = "sed -i '$ d' /etc/hosts" - self.inputs.run_cmd_on_server(vm1_fixture.vm_node_ip, - update_hosts_cmd, - compute_user, - compute_password) - assert result, "Generic Link local verification failed" - return True - # end test_generic_link_local_service - class TestBasicVMVN9(BaseVnVmTest): @classmethod @@ -2725,7 +2518,7 @@ def test_static_route_to_vm(self): with settings(host_string='%s@%s' % (self.inputs.username, self.inputs.cfgm_ips[0]), password=self.inputs.password, warn_only=True, abort_on_prompts=False, debug=True): - status = run('cd /opt/contrail/utils;' + add_static_route_cmd) + status = run('cd /usr/share/contrail-utils/;' + add_static_route_cmd) self.logger.debug("%s" % status) m = re.search(r'Creating Route table', status) assert m, 'Failed in Creating Route table' @@ -2751,10 +2544,10 @@ def test_static_route_to_vm(self): vm1_tapintf) execute_cmd(session, cmd, self.logger) - self.logger.info('***** Will start a ping from %s to 1.2.3.4 *****' % + self.logger.info('%%%%%%%%%% Will start a ping from %s to 1.2.3.4 %%%%%%%%%%' % vm2_fixture.vm_name) vm2_fixture.ping_with_certainty('1.2.3.4', expectation=False) - self.logger.info('***** Will check the result of tcpdump *****') + self.logger.info('%%%%%%%%%% Will check the result of tcpdump %%%%%%%%%%') output_cmd = 'cat /tmp/%s_out.log' % vm1_tapintf output, err = execute_cmd_out(session, output_cmd, self.logger) print output @@ -2782,7 
+2575,7 @@ def test_static_route_to_vm(self): with settings(host_string='%s@%s' % (self.inputs.username, self.inputs.cfgm_ips[0]), password=self.inputs.password, warn_only=True, abort_on_prompts=False, debug=True): - del_status = run('cd /opt/contrail/utils;' + del_static_route_cmd) + del_status = run('cd /usr/share/contrail-utils/;' + del_static_route_cmd) self.logger.debug("%s" % del_status) time.sleep(10) @@ -2855,7 +2648,7 @@ def test_dns_resolution_for_link_local_service(self): #check if we provided dns/IP try: socket.inet_aton(service_info[service][2]) - metadata_args = "--admin_user %s\ + metadata_args = "--api_server_ip %s --admin_user %s\ --admin_password %s\ --admin_tenant_name %s\ --linklocal_service_name %s\ @@ -2863,7 +2656,7 @@ def test_dns_resolution_for_link_local_service(self): --linklocal_service_port %s\ --ipfabric_service_ip %s\ --ipfabric_service_port %s\ - --oper add" % (ks_admin_user, + --oper add" % (cfgm_ip, ks_admin_user, ks_admin_password, ks_admin_tenant, service, @@ -2872,7 +2665,7 @@ def test_dns_resolution_for_link_local_service(self): service_info[service][2], service_info[service][1]) except socket.error: - metadata_args = "--admin_user %s\ + metadata_args = "--api_server_ip %s --admin_user %s\ --admin_password %s\ --admin_tenant_name %s\ --linklocal_service_name %s\ @@ -2880,7 +2673,7 @@ def test_dns_resolution_for_link_local_service(self): --linklocal_service_port %s\ --ipfabric_dns_service_name %s\ --ipfabric_service_port %s\ - --oper add" % (ks_admin_user, + --oper add" % (cfgm_ip, ks_admin_user, ks_admin_password, ks_admin_tenant, service, @@ -2892,7 +2685,7 @@ def test_dns_resolution_for_link_local_service(self): password=cfgm_pwd, warn_only=True, abort_on_prompts=False): status = run( - "python /opt/contrail/utils/provision_linklocal.py %s" % + "python /usr/share/contrail-utils/provision_linklocal.py %s" % (metadata_args)) self.logger.debug("%s" % status) sleep(2) @@ -2967,7 +2760,7 @@ def 
test_dns_resolution_for_link_local_service(self): self.logger.info('unconfigure link local service %s' % service) try: socket.inet_aton(service_info[service][2]) - metadata_args_delete = "--admin_user %s\ + metadata_args_delete = "--api_server_ip %s --admin_user %s\ --admin_password %s\ --admin_tenant_name %s\ --linklocal_service_name %s\ @@ -2975,7 +2768,7 @@ def test_dns_resolution_for_link_local_service(self): --linklocal_service_port %s\ --ipfabric_service_ip %s\ --ipfabric_service_port %s\ - --oper delete" % (ks_admin_user, + --oper delete" % (cfgm_ip, ks_admin_user, ks_admin_password, ks_admin_tenant, service, @@ -2984,7 +2777,7 @@ def test_dns_resolution_for_link_local_service(self): service_info[service][2], service_info[service][1]) except socket.error: - metadata_args_delete = "--admin_user %s\ + metadata_args_delete = "--api_server_ip %s --admin_user %s\ --admin_password %s\ --admin_tenant_name %s\ --linklocal_service_name %s\ @@ -2992,7 +2785,7 @@ def test_dns_resolution_for_link_local_service(self): --linklocal_service_port %s\ --ipfabric_dns_service_name %s\ --ipfabric_service_port %s\ - --oper delete" % (ks_admin_user, + --oper delete" % (cfgm_ip, ks_admin_user, ks_admin_password, ks_admin_tenant, service, @@ -3004,7 +2797,7 @@ def test_dns_resolution_for_link_local_service(self): password=cfgm_pwd, warn_only=True, abort_on_prompts=False): status = run( - "python /opt/contrail/utils/provision_linklocal.py %s" % + "python /usr/share/contrail-utils/provision_linklocal.py %s" % (metadata_args_delete)) self.logger.debug("%s" % status) return True @@ -3019,12 +2812,6 @@ def setUpClass(cls): super(TestBasicIPv6VMVN0, cls).setUpClass() cls.inputs.set_af('v6') - @test.attr(type=['sanity','quick_sanity']) - @preposttest_wrapper - @skip_because(orchestrator = 'vcenter',address_family = 'v6') - def test_ipam_add_delete(self): - super(TestBasicIPv6VMVN0, self).test_ipam_add_delete() - class TestBasicIPv6VMVN2(TestBasicVMVN2): @classmethod @@ -3032,12 +2819,6 @@ 
def setUpClass(cls): super(TestBasicIPv6VMVN2, cls).setUpClass() cls.inputs.set_af('v6') - @test.attr(type=['sanity']) - @preposttest_wrapper - @skip_because(orchestrator = 'vcenter',address_family = 'v6') - def test_ping_within_vn_two_vms_two_different_subnets(self): - super(TestBasicIPv6VMVN2, self).test_ping_within_vn_two_vms_two_different_subnets() - class TestBasicIPv6VMVN3(TestBasicVMVN3): @classmethod @@ -3066,18 +2847,6 @@ def setUpClass(cls): super(TestBasicIPv6VMVN6, cls).setUpClass() cls.inputs.set_af('v6') - @test.attr(type=['sanity']) - @preposttest_wrapper - @skip_because(orchestrator = 'vcenter',address_family = 'v6') - def test_generic_link_local_service(self): - super(TestBasicIPv6VMVN6, self).test_generic_link_local_service() - - @test.attr(type=['sanity']) - @preposttest_wrapper - @skip_because(orchestrator = 'vcenter') - def test_metadata_service(self): - super(TestBasicIPv6VMVN6, self).test_metadata_service() - class TestBasicIPv6VMVN9(TestBasicVMVN9): @classmethod @@ -3096,68 +2865,6 @@ def setUpClass(cls): def tearDownClass(cls): super(TestBasicVMVNx, cls).tearDownClass() - @test.attr(type=['sanity','ci_sanity', 'quick_sanity', 'vcenter']) - @preposttest_wrapper - def test_vn_add_delete(self): - ''' - Description: Test to validate VN creation and deletion. - Test steps: - 1. Create a VN. - Pass criteria: VN creation and deletion should go thru fine. - Maintainer : ganeshahv@juniper.net - ''' - vn_obj = self.create_vn() - assert vn_obj.verify_on_setup() - return True - #end test_vn_add_delete - - @test.attr(type=['sanity','ci_sanity','vcenter']) - @preposttest_wrapper - def test_vm_add_delete(self): - ''' - Description: Test to validate VM creation and deletion. - Test steps: - 1. Create VM in a VN. - Pass criteria: Creation and deletion of the VM should go thru fine. 
- Maintainer : ganeshahv@juniper.net - ''' - vn_fixture = self.create_vn() - assert vn_fixture.verify_on_setup() - vn_obj = vn_fixture.obj - vm1_fixture = self.create_vm(vn_fixture=vn_fixture, - vm_name=get_random_name('vm_add_delete')) - assert vm1_fixture.verify_on_setup() - return True - # end test_vm_add_delete - - @test.attr(type=['sanity','ci_sanity','quick_sanity', 'vcenter']) - @preposttest_wrapper - def test_ping_within_vn(self): - ''' - Description: Validate Ping between 2 VMs in the same VN. - Test steps: - 1. Create a VN and launch 2 VMs in it. - Pass criteria: Ping between the VMs should go thru fine. - Maintainer : ganeshahv@juniper.net - ''' - vn1_name = get_random_name('vn30') - vn1_vm1_name = get_random_name('vm1') - vn1_vm2_name = get_random_name('vm2') - vn1_fixture = self.create_vn(vn_name=vn1_name) - assert vn1_fixture.verify_on_setup() - vm1_fixture = self.create_vm(vn_fixture=vn1_fixture, vm_name=vn1_vm1_name) - vm2_fixture = self.create_vm(vn_fixture=vn1_fixture, vm_name=vn1_vm2_name) - assert vm1_fixture.verify_on_setup() - assert vm2_fixture.verify_on_setup() - vm1_fixture.wait_till_vm_is_up() - vm2_fixture.wait_till_vm_is_up() - assert vm1_fixture.ping_with_certainty(dst_vm_fixture=vm2_fixture),\ - "Ping from %s to %s failed" % (vn1_vm1_name, vn1_vm2_name) - assert vm2_fixture.ping_with_certainty(dst_vm_fixture=vm1_fixture),\ - "Ping from %s to %s failed" % (vn1_vm2_name, vn1_vm1_name) - return True - # end test_ping_within_vn - @test.attr(type=['sanity','quick_sanity','ci_sanity', 'vcenter']) @preposttest_wrapper def test_vm_file_trf_scp_tests(self): @@ -3194,8 +2901,6 @@ def test_vm_file_trf_scp_tests(self): assert vm1_fixture.wait_till_vm_is_up() assert vm2_fixture.wait_till_vm_is_up() - vm1_fixture.put_pub_key_to_vm() - vm2_fixture.put_pub_key_to_vm() for size in scp_test_file_sizes: self.logger.info("-" * 80) self.logger.info("FILE SIZE = %sB" % size) @@ -3252,7 +2957,7 @@ def test_vm_file_trf_tftp_tests(self): vn_fixture= 
self.create_vn(vn_name=vn_name) assert vn_fixture.verify_on_setup() img_name=os.environ['ci_image'] if os.environ.has_key('ci_image')\ - else 'ubuntu' + else 'ubuntu-traffic' flavor='m1.tiny' if os.environ.has_key('ci_image')\ else 'contrail_flavor_small' vm1_fixture = self.create_vm(vn_fixture= vn_fixture, vm_name=vm1_name, @@ -3287,6 +2992,186 @@ def test_vm_file_trf_tftp_tests(self): return transfer_result #end test_vm_file_trf_tftp_tests + @test.attr(type=['sanity']) + @skip_because(address_family = 'v6') + @preposttest_wrapper + def test_sctp_traffic_between_vm(self): + ''' + Description: Test to validate SCTP flow setup between + Test steps: + 1. Run SCTP traffic between 2 VM across VN connected through FIP + 2. Verify the Ingress and Egress flow. + Pass criteria: SCTP egress and ingress flow setup properly. + Maintainer : chhandak@juniper.net + ''' + result = True + fip_pool_name = get_random_name('some-pool') + vn1_vm1_name = get_random_name('vn1_vm1_name') + fvn_vm1_name = get_random_name('fvn_vm1_name') + + (vn1_name, vn1_subnets) = ( + get_random_name("vn1"), [get_random_cidr()]) + (fvn_name, fvn_subnets) = ( + get_random_name("fvn"), [get_random_cidr()]) + + # Get all computes + self.get_two_different_compute_hosts() + + fvn_fixture = self.useFixture( + VNFixture( + project_name=self.inputs.project_name, + connections=self.connections, + inputs=self.inputs, + vn_name=fvn_name, + subnets=fvn_subnets)) + + assert fvn_fixture.verify_on_setup() + vn1_fixture = self.useFixture( + VNFixture( + project_name=self.inputs.project_name, + connections=self.connections, + inputs=self.inputs, + vn_name=vn1_name, + subnets=vn1_subnets)) + + assert vn1_fixture.verify_on_setup() + + vn1_vm1_fixture = self.useFixture( + VMFixture( + project_name=self.inputs.project_name, + connections=self.connections, + vn_obj=vn1_fixture.obj, + vm_name=vn1_vm1_name, + image_name='ubuntu-sctp', + node_name=self.compute_1 + )) + + fvn_vm1_fixture = self.useFixture( + VMFixture( + 
project_name=self.inputs.project_name, + connections=self.connections, + vn_obj=fvn_fixture.obj, + vm_name=fvn_vm1_name, + image_name='ubuntu-sctp', + node_name=self.compute_2 + )) + + vn1_vm1_fixture.wait_till_vm_up() + fvn_vm1_fixture.wait_till_vm_up() + fip_fixture = self.useFixture( + FloatingIPFixture( + project_name=self.inputs.project_name, + inputs=self.inputs, + connections=self.connections, + pool_name=fip_pool_name, + vn_id=fvn_fixture.vn_id)) + assert fip_fixture.verify_on_setup() + fip_id = fip_fixture.create_and_assoc_fip( + fvn_fixture.vn_id, vn1_vm1_fixture.vm_id) + self.addCleanup(fip_fixture.disassoc_and_delete_fip, fip_id) + assert fip_fixture.verify_fip(fip_id, vn1_vm1_fixture, fvn_fixture) + if not vn1_vm1_fixture.ping_with_certainty(fvn_vm1_fixture.vm_ip): + result = result and False + fip_fixture.disassoc_and_delete_fip(fip_id) + + if not result: + self.logger.error('Test to ping between VMs %s and %s failed' % + (vn1_vm1_name, fvn_vm1_name)) + assert result + + # Setup SCTP flow on the vm + # Server + server_port=3700 + cmd_to_pass="sctp_test -H %s -P %s -l > /dev/null" %(fvn_vm1_fixture.vm_ip,server_port) + fvn_vm1_fixture.run_cmd_on_vm(cmds=[cmd_to_pass], as_sudo=True, + as_daemon=True) + + # Client + client_port=4700 + cmd_to_pass="sctp_test -H %s -P %s -h %s -p %s -s -x 100" %(vn1_vm1_fixture.vm_ip,client_port,fvn_vm1_fixture.vm_ip,server_port) + vn1_vm1_fixture.run_cmd_on_vm(cmds=[cmd_to_pass], as_sudo=True, timeout=60) + + # Verify Flow records here + inspect_h1 = self.agent_inspect[vn1_vm1_fixture.vm_node_ip] + inspect_h2 = self.agent_inspect[fvn_vm1_fixture.vm_node_ip] + flow_rec1 = None + src_port = unicode(client_port) + dst_port = unicode(server_port) + # Verify Ingress Traffic + self.logger.info('Verifying Ingress Flow Record') + vn_fq_name = vn1_vm1_fixture.vn_fq_name + flow_rec1 = inspect_h1.get_vna_fetchflowrecord( + nh=vn1_vm1_fixture.tap_intf[vn_fq_name]['flow_key_idx'], + sip=vn1_vm1_fixture.vm_ip, + 
dip=fvn_vm1_fixture.vm_ip, + sport=src_port, + dport=dst_port, + protocol='132') + + if flow_rec1 is not None: + self.logger.info('Verifying NAT in flow records') + match = inspect_h1.match_item_in_flowrecord( + flow_rec1, 'nat', 'enabled') + if match is False: + self.logger.error( + 'Test Failed. NAT is not enabled in given flow. Flow details %s' % + (flow_rec1)) + result = result and False + self.logger.info('Verifying traffic direction in flow records') + match = inspect_h1.match_item_in_flowrecord( + flow_rec1, 'direction', 'ingress') + if match is False: + self.logger.error( + 'Test Failed. Traffic direction is wrong should be ingress. Flow details %s' % + (flow_rec1)) + result = result and False + else: + self.logger.error( + 'Test Failed. Required ingress Traffic flow not found') + result = result and False + + # Verify Egress Traffic + # Check VMs are in same agent or not. Need to compute next hop + # accordingly + if self.compute_1 is self.compute_2: + vn_fq_name=fvn_fixture.get_vn_fq_name() + nh_id=fvn_vm1_fixture.tap_intf[vn_fq_name]['flow_key_idx'] + else: + nh_id=vn1_vm1_fixture.tap_intf[vn_fq_name]['flow_key_idx'] + self.logger.info('Verifying Egress Flow Records') + flow_rec2 = inspect_h1.get_vna_fetchflowrecord( + nh=nh_id, + sip=fvn_vm1_fixture.vm_ip, + dip=fip_fixture.fip[fip_id], + sport=dst_port, + dport=src_port, + protocol='132') + if flow_rec2 is not None: + self.logger.info('Verifying NAT in flow records') + match = inspect_h1.match_item_in_flowrecord( + flow_rec2, 'nat', 'enabled') + if match is False: + self.logger.error( + 'Test Failed. NAT is not enabled in given flow. Flow details %s' % + (flow_rec2)) + result = result and False + if self.compute_1 is not self.compute_2: + self.logger.info('Verifying traffic direction in flow records') + match = inspect_h1.match_item_in_flowrecord( + flow_rec2, 'direction', 'egress') + if match is False: + self.logger.error( + 'Test Failed. Traffic direction is wrong should be Egress. 
Flow details %s' % + (flow_rec1)) + result = result and False + else: + self.logger.error( + 'Test Failed. Required Egress Traffic flow not found') + result = result and False + + return result + # end test_sctp_traffic_between_vm + class TestBasicIPv6VMVNx(TestBasicVMVNx): @classmethod @@ -3296,26 +3181,54 @@ def setUpClass(cls): @test.attr(type=['sanity', 'quick_sanity']) @preposttest_wrapper - def test_vn_add_delete(self): - super(TestBasicIPv6VMVNx, self).test_vn_add_delete() + def test_vm_file_trf_scp_tests(self): + super(TestBasicIPv6VMVNx, self).test_vm_file_trf_scp_tests() + + @test.attr(type=['sanity', 'quick_sanity']) + @preposttest_wrapper + def test_vm_file_trf_tftp_tests(self): + super(TestBasicIPv6VMVNx, self).test_vm_file_trf_tftp_tests() + +class TestBasicIPv6VMVN(test_vm_basic.TestBasicVMVN): + @classmethod + def setUpClass(cls): + super(TestBasicIPv6VMVN, cls).setUpClass() + cls.inputs.set_af('v6') + + @preposttest_wrapper + @skip_because(orchestrator = 'vcenter') + def test_metadata_service(self): + super(TestBasicIPv6VMVN, self).test_metadata_service() @test.attr(type=['sanity','quick_sanity']) @preposttest_wrapper - def test_ping_within_vn(self): - super(TestBasicIPv6VMVNx, self).test_ping_within_vn() + @skip_because(orchestrator = 'vcenter',address_family = 'v6') + def test_ipam_add_delete(self): + super(TestBasicIPv6VMVN, self).test_ipam_add_delete() @test.attr(type=['sanity']) @preposttest_wrapper - def test_vm_add_delete(self): - super(TestBasicIPv6VMVNx, self).test_vm_add_delete() + @skip_because(orchestrator = 'vcenter',address_family = 'v6') + def test_ping_within_vn_two_vms_two_different_subnets(self): + super(TestBasicIPv6VMVN, self).test_ping_within_vn_two_vms_two_different_subnets() @test.attr(type=['sanity', 'quick_sanity']) @preposttest_wrapper - def test_vm_file_trf_scp_tests(self): - super(TestBasicIPv6VMVNx, self).test_vm_file_trf_scp_tests() + def test_vn_add_delete(self): + super(TestBasicIPv6VMVN, self).test_vn_add_delete() - 
@test.attr(type=['sanity', 'quick_sanity']) + @test.attr(type=['sanity']) @preposttest_wrapper - def test_vm_file_trf_tftp_tests(self): - super(TestBasicIPv6VMVNx, self).test_vm_file_trf_tftp_tests() + def test_vm_add_delete(self): + super(TestBasicIPv6VMVN, self).test_vm_add_delete() + + @test.attr(type=['sanity','quick_sanity']) + @preposttest_wrapper + def test_ping_within_vn(self): + super(TestBasicIPv6VMVN, self).test_ping_within_vn() + + @preposttest_wrapper + @skip_because(orchestrator = 'vcenter',address_family = 'v6') + def test_generic_link_local_service(self): + super(TestBasicIPv6VMVN, self).test_generic_link_local_service() diff --git a/scripts/vpc/base.py b/scripts/vpc/base.py index e8d00b955..fc13549b2 100644 --- a/scripts/vpc/base.py +++ b/scripts/vpc/base.py @@ -1,4 +1,4 @@ -import test +import test_v1 from common import isolated_creds from common import create_public_vn from vn_test import * @@ -9,40 +9,30 @@ from vpc_vm_fixture import VPCVMFixture -class VpcBaseTest(test.BaseTestCase): +class VpcBaseTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(VpcBaseTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - cls.admin_inputs = cls.isolated_creds.get_admin_inputs() - cls.admin_connections = cls.isolated_creds.get_admin_connections() cls.quantum_h = cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib = cls.connections.vnc_lib cls.agent_inspect = cls.connections.agent_inspect cls.cn_inspect = cls.connections.cn_inspect cls.analytics_obj = cls.connections.analytics_obj + if cls.inputs.admin_username: + public_creds = cls.admin_isolated_creds + else: + public_creds = 
cls.isolated_creds cls.public_vn_obj = create_public_vn.PublicVn( - cls.admin_connections.username, - cls.admin_connections.password, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) + public_creds, + cls.inputs, + ini_file=cls.ini_file, + logger=cls.logger) # end setUpClass @classmethod def tearDownClass(cls): - cls.isolated_creds.delete_tenant() super(VpcBaseTest, cls).tearDownClass() # end tearDownClass diff --git a/scripts/vpc/test_vpc.py b/scripts/vpc/test_vpc.py index 7cc770609..4dfd46185 100644 --- a/scripts/vpc/test_vpc.py +++ b/scripts/vpc/test_vpc.py @@ -32,7 +32,6 @@ class VpcSanityTests(base.VpcBaseTest): def setUpClass(cls): super(VpcSanityTests, cls).setUpClass() - @test.attr(type=['sanity']) @preposttest_wrapper def test_create_delete_vpc(self): """Validate create VPC """ @@ -159,7 +158,6 @@ class VpcSanityTests1(base.VpcBaseTest): def setUpClass(cls): super(VpcSanityTests1, cls).setUpClass() - @test.attr(type=['sanity']) @preposttest_wrapper def test_acl_with_association(self): """Create ACL, associate it with a subnet, add and replace rules """ @@ -277,7 +275,6 @@ def test_acl_with_association(self): return result # end test_acl_with_association - @test.attr(type=['sanity']) @preposttest_wrapper def test_security_group(self): """Create Security Groups, Add and Delete Rules """ @@ -564,7 +561,6 @@ def test_route_using_nat_instance(self): # Just Read the existing vpc as a fixture vpc1_contrail_fixture = self.useFixture( ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=vpc1_id, username=self.admin_inputs.stack_user, password=self.admin_inputs.stack_password, @@ -643,7 +639,6 @@ def setUpClass(cls): super(VpcSanityTests2, cls).setUpClass() - @test.attr(type=['sanity']) @preposttest_wrapper def test_ping_between_instances(self): """Test ping between instances in subnet """ @@ -691,7 +686,6 @@ def test_ping_between_instances(self): return True # end test_ping_between_instances - @test.attr(type=['sanity']) @preposttest_wrapper def 
test_subnet_create_delete(self): """Validate create subnet in vpc with valid CIDR """ @@ -849,7 +843,6 @@ class VpcSanityTests3(base.VpcBaseTest): def setUpClass(cls): super(VpcSanityTests3, cls).setUpClass() - @test.attr(type=['sanity']) @preposttest_wrapper def test_allocate_floating_ip(self): """Allocate a floating IP""" @@ -980,7 +973,6 @@ def test_route_using_gateway(self): # Just Read the existing vpc as a fixture vpc1_contrail_fixture = self.useFixture( ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=vpc1_id, username=self.admin_inputs.stack_user, password=self.admin_inputs.stack_password, diff --git a/common/__init__.py b/scripts/vrouter/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from common/__init__.py rename to scripts/vrouter/__init__.py diff --git a/scripts/vrouter/test_disable_policy.py b/scripts/vrouter/test_disable_policy.py new file mode 100644 index 000000000..e385258c7 --- /dev/null +++ b/scripts/vrouter/test_disable_policy.py @@ -0,0 +1,329 @@ +#Testcases for disabling policy on VMIs: +#PR https://bugs.launchpad.net/juniperopenstack/+bug/1558920 and PR https://bugs.launchpad.net/juniperopenstack/+bug/1566650 +from tcutils.wrappers import preposttest_wrapper +from common.vrouter.base import BaseVrouterTest +import test +from tcutils.util import get_random_cidr, get_random_name, is_v6 +import random +from security_group import get_secgrp_id_from_name +from common.servicechain.config import ConfigSvcChain +from common.servicechain.verify import VerifySvcChain +from netaddr import IPNetwork + +AF_TEST = 'v6' + +class DisablePolicy(BaseVrouterTest, ConfigSvcChain, VerifySvcChain): + + @classmethod + def setUpClass(cls): + super(DisablePolicy, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + super(DisablePolicy, cls).tearDownClass() + + @preposttest_wrapper + def test_disable_policy_with_aap(self): + """ + Description: Verify disabling policy with allowed address pair + Steps: + 1. 
launch 1 VN and launch 3 VMs in it.1 client VMs and 2 server VMs. + 2. disable the policy on all the VMIs. + 3. from client VMs,send udp traffic to servers and + verify mastership and no flow + 4. Induce mastership switch and verify no flow again + Pass criteria: + 1. flow and mastership verification should pass + """ + vn1_fixture = self.create_vns(count=1)[0] + vm1_name = get_random_name('vm1') + vm2_name = get_random_name('vm2') + result = False + vIP = self.get_random_ip_from_vn(vn1_fixture)[0] + image = 'ubuntu-traffic' + + port1_obj = self.create_port(net_id=vn1_fixture.vn_id) + port2_obj = self.create_port(net_id=vn1_fixture.vn_id) + vm1_fixture = self.create_vm(vn1_fixture, vm1_name, + image_name=image, + port_ids=[port1_obj['id']]) + vm2_fixture = self.create_vm(vn1_fixture, vm2_name, + image_name=image, + port_ids=[port2_obj['id']]) + + client_fixture = self.create_vms(vn_fixture= vn1_fixture,count=1, + image_name=image)[0] + vm_fix_list = [client_fixture, vm1_fixture, vm2_fixture] + self.verify_vms(vm_fix_list) + + proto = 'udp' + dport = 53 + baseport = random.randint(12000, 65000) + sport = str(baseport) + compute_node_ips = [] + compute_fixtures = [] + + #Get all the VMs compute IPs + for vm in vm_fix_list: + if vm.vm_node_ip not in compute_node_ips: + compute_node_ips.append(vm.vm_node_ip) + + #Get the compute fixture for all the concerned computes + for ip in compute_node_ips: + compute_fixtures.append(self.compute_fixtures_dict[ip]) + + self.disable_policy_for_vms(vm_fix_list) + + self.config_aap(port1_obj, port2_obj, vIP) + self.config_vrrp(vm1_fixture, vIP, '20') + self.config_vrrp(vm2_fixture, vIP, '10') + vrrp_master = vm1_fixture + if is_v6(vIP): + #current version of vrrpd does not support IPv6, as a workaround add the vIP + # on one of the VM and start ping6 to make the VM as master + assert vm1_fixture.add_ip_on_vm(vIP) + assert client_fixture.ping_with_certainty(vIP), 'Ping to vIP failure' + + + assert self.vrrp_mas_chk(vrrp_master, 
vn1_fixture, vIP) + + assert self.send_nc_traffic(client_fixture, vrrp_master, + sport, dport, proto, ip=vIP) + + for fixture in compute_fixtures: + vrf_id = fixture.get_vrf_id(vrrp_master.vn_fq_names[0]) + self.verify_flow_on_compute(fixture, client_fixture.vm_ip, + vIP, vrf_id, vrf_id, sport, dport, proto, + ff_exp=0, rf_exp=0) + + if is_v6(vIP): + #Skip further verification as current version of vrrpd does not support IPv6 + return True + self.logger.info('We will induce a mastership switch') + port_dict = {'admin_state_up': False} + self.update_port(port1_obj['id'], port_dict) + self.logger.info( + '%s should become the new VRRP master' % vm2_fixture.vm_name) + vrrp_master = vm2_fixture + assert self.vrrp_mas_chk(vrrp_master, vn1_fixture, vIP) + + assert self.send_nc_traffic(client_fixture, vrrp_master, + sport, dport, proto, ip=vIP) + + for fixture in compute_fixtures: + vrf_id = fixture.get_vrf_id(vrrp_master.vn_fq_names[0]) + self.verify_flow_on_compute(fixture, client_fixture.vm_ip, + vIP, vrf_id, vrf_id, sport, dport, proto, + ff_exp=0, rf_exp=0) + + self.disable_policy_for_vms(vm_fix_list, disable=False) + + assert self.send_nc_traffic(client_fixture, vrrp_master, + sport, dport, proto, ip=vIP) + + for fixture in compute_fixtures: + vrf_id = fixture.get_vrf_id(vrrp_master.vn_fq_names[0]) + self.verify_flow_on_compute(fixture, client_fixture.vm_ip, + vIP, vrf_id, vrf_id, sport, dport, proto, + ff_exp=1, rf_exp=1) + + @preposttest_wrapper + def test_disable_policy_sg_inter_vn(self): + """ + Description: Verify disabling policy for inter VN,inter/intra node traffic with SG + Steps: + 1. launch 2 VNs and launch 3 VMs in it. + 2. disable policy only on destination VMs and add/remove SG and start traffic + Pass criteria: + 1. 
traffic should go through and flow should not be created + """ + compute_hosts = self.orch.get_hosts() + if len(compute_hosts) < 2: + raise self.skipTest("Skipping test case,\ + this test needs atleast 2 compute nodes") + + vn_fixtures = self.create_vns(count=2, rt_number='10000') + self.verify_vns(vn_fixtures) + vn1_fixture = vn_fixtures[0] + vn2_fixture = vn_fixtures[1] + + #Launch 1 VM in first VN and 2 VMs in another VN + image = 'ubuntu-traffic' + src_vm_fixture = self.create_vms(vn_fixture= vn1_fixture,count=1, + node_name=compute_hosts[0], image_name=image)[0] + dst_vm_fixture1 = self.create_vms(vn_fixture= vn2_fixture,count=1, + node_name=compute_hosts[0], image_name=image)[0] + dst_vm_fixture2 = self.create_vms(vn_fixture= vn2_fixture,count=1, + node_name=compute_hosts[1], image_name=image)[0] + self.verify_vms([src_vm_fixture, dst_vm_fixture1, dst_vm_fixture2]) + + self.disable_policy_for_vms([dst_vm_fixture1, dst_vm_fixture2]) + + default_sg_id = get_secgrp_id_from_name( + self.connections, + ':'.join([self.inputs.domain_name, + self.inputs.project_name, + 'default'])) + rule = [{'direction': '<>', + 'protocol': 'udp', + 'dst_addresses': [{'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24}}], + 'dst_ports': [{'start_port': 0, 'end_port': -1}], + 'src_ports': [{'start_port': 0, 'end_port': -1}], + 'src_addresses': [{'security_group': 'local'}], + }] + sg_fixture = self.create_sg(entries=rule) + self.verify_sg(sg_fixture) + proto = 'udp' + #For Inter node traffic test, use src_vm_fixture and dst_vm_fixture2 + #For Intra node, use src_vm_fixture and dst_vm_fixture1 + for vm in [dst_vm_fixture1, dst_vm_fixture2]: + if (vm == dst_vm_fixture1): + #Intra Node + ff_exp = 1 + rf_exp = 1 + else: + #Inter Node + ff_exp = 0 + rf_exp = 0 + #1. receiver VMI SG with allow rule, use default SG + self.send_traffic_verify_flow_dst_compute(src_vm_fixture, + vm, proto, ff_exp=ff_exp, rf_exp=rf_exp) + + #2. 
receiver VMI SG with only egress rule + self.remove_sg_from_vms([vm], sg_id=default_sg_id) + self.add_sg_to_vms([vm], sg_id=sg_fixture.secgrp_id) + self.send_traffic_verify_flow_dst_compute(src_vm_fixture, + vm, proto, ff_exp=ff_exp, rf_exp=rf_exp) + + #3. receiver VMI SG without any rule + sg_fixture.delete_all_rules() + self.send_traffic_verify_flow_dst_compute(src_vm_fixture, + vm, proto, ff_exp=ff_exp, rf_exp=rf_exp) + + #4. receiver VMI without SG + self.remove_sg_from_vms([vm], sg_id=sg_fixture.secgrp_id) + self.send_traffic_verify_flow_dst_compute(src_vm_fixture, + vm, proto, ff_exp=ff_exp, rf_exp=rf_exp) + + @preposttest_wrapper + def test_disable_policy_with_vn_policy(self): + """ + Description: Verify disabling policy for inter VN,inter/intra node traffic with VNs policy + Steps: + 1. launch 2 VNs and launch 5 VMs in it + 2. disable policy only on destination VMs + 3. policy deny rule: deny udp protocol and allow others + Pass criteria: + 1. udp traffic should be denied and other proto like ping should succeed. + 2. 
flow should be created for intra node and not for inter node on dest compute + """ + compute_hosts = self.orch.get_hosts() + if len(compute_hosts) < 2: + raise self.skipTest("Skipping test case,\ + this test needs atleast 2 compute nodes") + + vn_fixtures = self.create_vns(count=2) + self.verify_vns(vn_fixtures) + vn1_fixture = vn_fixtures[0] + vn2_fixture = vn_fixtures[1] + + image = 'ubuntu-traffic' + vm_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=2, + node_name=compute_hosts[0], image_name=image) + src_vm_fixture = vm_fixtures[0] + vm_vn1_fixture1 = vm_fixtures[1] + vm_vn1_fixture2 = self.create_vms(vn_fixture= vn1_fixture,count=1, + node_name=compute_hosts[1], image_name=image)[0] + dst_vm_fixture1 = self.create_vms(vn_fixture= vn2_fixture,count=1, + node_name=compute_hosts[0], image_name=image)[0] + dst_vm_fixture2 = self.create_vms(vn_fixture= vn2_fixture,count=1, + node_name=compute_hosts[1], image_name=image)[0] + self.verify_vms(vm_fixtures) + self.verify_vms([vm_vn1_fixture2, dst_vm_fixture1, dst_vm_fixture2]) + + self.disable_policy_for_vms([vm_vn1_fixture1, vm_vn1_fixture2, + dst_vm_fixture1, dst_vm_fixture2]) + + #Inter VN without policy attached + proto = 'udp' + sport = 10000 + dport = 11000 + + assert self.send_nc_traffic(src_vm_fixture, dst_vm_fixture1, sport, dport, + proto, exp=False) + + compute_fix = self.compute_fixtures_dict[dst_vm_fixture1.vm_node_ip] + src_vrf = compute_fix.get_vrf_id(src_vm_fixture.vn_fq_names[0]) + dst_vrf = compute_fix.get_vrf_id(dst_vm_fixture1.vn_fq_names[0]) + + self.verify_flow_on_compute(compute_fix, src_vm_fixture.vm_ip, + dst_vm_fixture1.vm_ip, src_vrf, dst_vrf, sport=sport, dport=dport, proto=proto, + ff_exp=0, rf_exp=0) + + rules = [ + { + 'direction': '<>', 'simple_action': 'deny', + 'protocol': 'udp', 'src_ports': 'any', + 'dst_ports': 'any', + 'source_network': vn1_fixture.vn_fq_name, + 'dest_network': vn2_fixture.vn_fq_name, + }, + { + 'direction': '<>', + 'protocol': 'udp', + 'dest_subnet': 
str(IPNetwork(vm_vn1_fixture1.vm_ip)), + 'source_subnet': str(IPNetwork(src_vm_fixture.vm_ip)), + 'dst_ports': 'any', + 'simple_action': 'deny', + 'src_ports': 'any' + }, + { + 'direction': '<>', + 'protocol': 'udp', + 'dest_subnet': str(IPNetwork(vm_vn1_fixture2.vm_ip)), + 'source_subnet': str(IPNetwork(src_vm_fixture.vm_ip)), + 'dst_ports': 'any', + 'simple_action': 'deny', + 'src_ports': 'any' + }, + { + 'direction': '<>', 'simple_action': 'pass', + 'protocol': 'any', 'src_ports': 'any', + 'dst_ports': 'any', + 'source_network': vn1_fixture.vn_fq_name, + 'dest_network': vn2_fixture.vn_fq_name, + } + ] + policy_name = get_random_name("policy1") + policy_fixture = self.config_policy(policy_name, rules) + vn1_policy_fix = self.attach_policy_to_vn( + policy_fixture, vn1_fixture) + vn2_policy_fix = self.attach_policy_to_vn( + policy_fixture, vn2_fixture) + + for vm in [vm_vn1_fixture1, vm_vn1_fixture2, dst_vm_fixture1, dst_vm_fixture2]: + errmsg = "Ping to VM ip %s from VM ip %s failed" % ( + vm.vm_ip, src_vm_fixture.vm_ip) + + if (vm == vm_vn1_fixture1) or (vm == dst_vm_fixture1): + #Intra Node + ff_exp = 1 + rf_exp = 1 + else: + #Inter Node + ff_exp = 0 + rf_exp = 0 + self.send_traffic_verify_flow_dst_compute(src_vm_fixture, + vm, proto, ff_exp=ff_exp, rf_exp=rf_exp, exp=False) + assert src_vm_fixture.ping_with_certainty(vm.vm_ip), errmsg + +class DisablePolicyIpv6(DisablePolicy): + @classmethod + def setUpClass(cls): + super(DisablePolicyIpv6, cls).setUpClass() + cls.inputs.set_af(AF_TEST) + + def is_test_applicable(self): + if self.inputs.orchestrator == 'vcenter' and not self.orch.is_feature_supported('ipv6'): + return(False, 'Skipping IPv6 Test on vcenter setup') + return (True, None) diff --git a/scripts/vrouter/test_fat_flow.py b/scripts/vrouter/test_fat_flow.py new file mode 100644 index 000000000..64b292649 --- /dev/null +++ b/scripts/vrouter/test_fat_flow.py @@ -0,0 +1,353 @@ +from tcutils.wrappers import preposttest_wrapper +from common.vrouter.base 
import BaseVrouterTest +import test +from tcutils.util import get_random_name, is_v6 +import random + +AF_TEST = 'v6' + +class FatFlow(BaseVrouterTest): + + @classmethod + def setUpClass(cls): + super(FatFlow, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + super(FatFlow, cls).tearDownClass() + + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_fat_flow_intra_vn_inter_node(self): + """ + Description: Verify Fat flow for intra-VN inter-Node traffic + Steps: + 1. create 1 VN and launch 3 VMs in it.client VMs on same node and server VM on different node. + 2. on server VM, config Fat flow for udp port 53. + 3. from both client VM, send UDP traffic to server on port 53 twice with diff. src ports + Pass criteria: + 1. on client VMs compute, 4 set of flows and on server compute, 2 set of flows should be created + 2. on server compute, flow's source port should be 0 for Fat flow + """ + vn_fixtures = self.create_vns(count=1) + self.verify_vns(vn_fixtures) + vn1_fixture = vn_fixtures[0] + + compute_hosts = self.orch.get_hosts() + if len(compute_hosts) < 2: + raise self.skipTest("Skipping test case," + "this test needs atleast 2 compute nodes") + client_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=2, + node_name=compute_hosts[0]) + server_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=1, + node_name=compute_hosts[1]) + self.verify_vms(client_fixtures) + self.verify_vms(server_fixtures) + + #Configure Fat flow on server VM + proto = 'udp' + port = 53 + server_vmi_id = server_fixtures[0].get_vmi_ids().values() + fat_flow_config = {'proto':proto,'port':port} + self.add_fat_flow_to_vmis(server_vmi_id, fat_flow_config) + + self.verify_fat_flow_with_traffic(client_fixtures,server_fixtures[0], + proto, port) + + @preposttest_wrapper + def test_fat_flow_intra_vn_intra_node(self): + """ + Description: Verify Fat flow for intra-VN intra-Node traffic + Steps: + 1. create 1 VN and launch 3 VMs in it.All VMs on same node. + 2. 
on server VM, config Fat flow for udp port 53. + 3. from both client VM, send UDP traffic to server on port 53 twice with diff. src ports + Pass criteria: + 1. total 4 set of flows should be created + 2. there should not be Fat flow with source port 0 + """ + vn_fixtures = self.create_vns(count=1) + self.verify_vns(vn_fixtures) + vn1_fixture = vn_fixtures[0] + + compute_hosts = self.orch.get_hosts() + client_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=2, + node_name=compute_hosts[0]) + server_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=1, + node_name=compute_hosts[0]) + self.verify_vms(client_fixtures) + self.verify_vms(server_fixtures) + + #Configure Fat flow on server VM + proto = 'udp' + port = 53 + server_vmi_id = server_fixtures[0].get_vmi_ids().values() + fat_flow_config = {'proto':proto,'port':port} + self.add_fat_flow_to_vmis(server_vmi_id, fat_flow_config) + + self.verify_fat_flow_with_traffic(client_fixtures,server_fixtures[0], + proto, port) + + @preposttest_wrapper + def test_fat_flow_icmp_error(self): + """ + Description: Verify Fat flow when server port is not reachable, bug 1542207 + Steps: + 1. launch 1 VN and launch 2 VMs in it.All VMs on same node + 2. on server VM, config Fat flow for udp port 53, but no process on that port + 3. from client VM, send UDP traffic to server on port 53 + 4. server will send icmp error for port 53 traffic + Pass criteria: + 1. Fat and non-Fat flows should be created + 2. 
there should not be HOLD flows + """ + vn_fixtures = self.create_vns(count=1) + self.verify_vns(vn_fixtures) + vn1_fixture = vn_fixtures[0] + + compute_hosts = self.orch.get_hosts() + image = 'ubuntu-traffic' + client_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=1, + node_name=compute_hosts[0], image_name=image) + server_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=1, + node_name=compute_hosts[0], image_name=image) + self.verify_vms(client_fixtures) + self.verify_vms(server_fixtures) + + #Configure Fat flow on server VM + proto = 'udp' + port = 53 + srcport = 10000 + server_vmi_id = server_fixtures[0].get_vmi_ids().values() + fat_flow_config = {'proto':proto,'port':port} + self.add_fat_flow_to_vmis(server_vmi_id, fat_flow_config) + + #Start the traffic without any receiver, dest VM will send icmp error + nc_options = '-4' if (self.inputs.get_af() == 'v4') else '-6' + nc_options = nc_options + ' -q 2 -u' + client_fixtures[0].nc_send_file_to_ip('icmp_error', server_fixtures[0].vm_ip, + local_port=srcport, remote_port=port, + nc_options=nc_options) + + #Verify the flows + assert self.verify_fat_flow(client_fixtures, server_fixtures[0], + proto, port, fat_flow_count=1, + unidirectional_traffic=False) + + compute_fix = self.compute_fixtures_dict[client_fixtures[0].vm_node_ip] + vrf_id = compute_fix.get_vrf_id(client_fixtures[0].vn_fq_names[0]) + + #Verify NO hold flows + action = 'HOLD' + self.verify_flow_action(compute_fix, action, + src_ip=client_fixtures[0].vm_ip, dst_ip=server_fixtures[0].vm_ip, + sport=srcport, dport=port, src_vrf=vrf_id, proto=proto, exp=False) + + @preposttest_wrapper + def test_fat_flow_icmp_error_inter_node(self): + """ + Description: Verify Fat flow when server port is not reachable, bug 1542207 + Steps: + 1. launch 1 VN and launch 2 VMs in it.All VMs on different node + 2. on server VM, config Fat flow for udp port 53, but no process on that port + 3. from client VM, send UDP traffic to server on port 53 + 4. 
server will send icmp error for port 53 traffic + Pass criteria: + 1. Fat and non-Fat flows should be created + 2. there should not be HOLD flows + """ + vn_fixtures = self.create_vns(count=1) + self.verify_vns(vn_fixtures) + vn1_fixture = vn_fixtures[0] + + compute_hosts = self.orch.get_hosts() + if len(compute_hosts) < 2: + raise self.skipTest("Skipping test case," + "this test needs atleast 2 compute nodes") + + image = 'ubuntu-traffic' + client_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=1, + node_name=compute_hosts[0], image_name=image) + server_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=1, + node_name=compute_hosts[1], image_name=image) + self.verify_vms(client_fixtures) + self.verify_vms(server_fixtures) + + #Configure Fat flow on server VM + proto = 'udp' + port = 53 + srcport = 10000 + server_vmi_id = server_fixtures[0].get_vmi_ids().values() + fat_flow_config = {'proto':proto,'port':port} + self.add_fat_flow_to_vmis(server_vmi_id, fat_flow_config) + + #Start the traffic without any receiver, dest VM will send icmp error + nc_options = '-4' if (self.inputs.get_af() == 'v4') else '-6' + nc_options = nc_options + ' -q 2 -u' + client_fixtures[0].nc_send_file_to_ip('icmp_error', server_fixtures[0].vm_ip, + local_port=srcport, remote_port=port, + nc_options=nc_options) + + #Verify the flows + assert self.verify_fat_flow(client_fixtures, server_fixtures[0], + proto, port, fat_flow_count=1, + unidirectional_traffic=False) + + compute_fix = self.compute_fixtures_dict[client_fixtures[0].vm_node_ip] + vrf_id = compute_fix.get_vrf_id(client_fixtures[0].vn_fq_names[0]) + + #Verify NO hold flows + action = 'HOLD' + self.verify_flow_action(compute_fix, action, + src_ip=client_fixtures[0].vm_ip, dst_ip=server_fixtures[0].vm_ip, + sport=srcport, dport=port, src_vrf=vrf_id, proto=proto, exp=False) + + @preposttest_wrapper + def test_fat_flow_no_tcp_eviction(self): + """ + Description: Verify Fat flows are not evicted on connection closure + 
Steps: + 1. launch 1 VN and launch 2 VMs in it.client VM and server VM on different node. + 2. on server VM, config Fat flow for tcp. + 3. from client VM,send tcp traffic to server + Pass criteria: + 1. Fat flow should not be evicted after connection closure + """ + compute_hosts = self.orch.get_hosts() + if len(compute_hosts) < 2: + raise self.skipTest("Skipping test case," + "this test needs atleast 2 compute nodes") + + vn_fixtures = self.create_vns(count=1) + self.verify_vns(vn_fixtures) + vn1_fixture = vn_fixtures[0] + + image = 'ubuntu' + client_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=1, + node_name=compute_hosts[0], image_name=image) + server_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=1, + node_name=compute_hosts[1], image_name=image) + self.verify_vms(client_fixtures) + self.verify_vms(server_fixtures) + + #Configure Fat flow on server VM + proto = 'tcp' + port = 10000 + sport = 9000 + server_vmi_id = server_fixtures[0].get_vmi_ids().values() + fat_flow_config = {'proto':proto,'port':port} + self.add_fat_flow_to_vmis(server_vmi_id, fat_flow_config) + + assert self.send_nc_traffic(client_fixtures[0], server_fixtures[0], + sport, port, proto) + + #FAT flow verification + assert self.verify_fat_flow(client_fixtures, server_fixtures[0], + proto, port, fat_flow_count=1) + + @preposttest_wrapper + def test_fat_flow_with_aap(self): + """ + Description: Verify Fat flows with allowed address pair + Steps: + 1. launch 1 VN and launch 4 VMs in it.2 client VMs and 2 server VMs on different node. + 2. on server VMs, config Fat flow for udp. + 3. from client VMs,send udp traffic to servers and + verify mastership and Fat flow + 4. Induce mastership switch and verify the Fat flow again + Pass criteria: + 1. 
Fat flow and mastership verification should pass + """ + compute_hosts = self.orch.get_hosts() + if len(compute_hosts) < 2: + raise self.skipTest("Skipping test case," + "this test needs atleast 2 compute nodes") + + vn1_fixture = self.create_vns(count=1)[0] + vm1_name = get_random_name('vm1') + vm2_name = get_random_name('vm2') + result = False + vIP = self.get_random_ip_from_vn(vn1_fixture)[0] + image = 'ubuntu-traffic' + + port1_obj = self.create_port(net_id=vn1_fixture.vn_id) + port2_obj = self.create_port(net_id=vn1_fixture.vn_id) + vm1_fixture = self.create_vm(vn1_fixture, vm1_name, + image_name=image, + port_ids=[port1_obj['id']], + node_name=compute_hosts[0]) + vm2_fixture = self.create_vm(vn1_fixture, vm2_name, + image_name=image, + port_ids=[port2_obj['id']], + node_name=compute_hosts[0]) + + client_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=2, + node_name=compute_hosts[1], image_name=image) + assert vm1_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' + assert vm2_fixture.wait_till_vm_is_up(), 'VM does not seem to be up' + self.verify_vms(client_fixtures) + + proto = 'udp' + dport = 53 + baseport = random.randint(12000, 65000) + sport = [str(baseport), str(baseport+1)] + fat_flow_config = {'proto':proto,'port':dport} + self.add_fat_flow_to_vmis([port1_obj['id'], port2_obj['id']], fat_flow_config) + + self.config_aap(port1_obj, port2_obj, vIP) + self.config_vrrp(vm1_fixture, vIP, '20') + self.config_vrrp(vm2_fixture, vIP, '10') + vrrp_master = vm1_fixture + if is_v6(vIP): + #current version of vrrpd does not support IPv6, as a workaround add the vIP + # on one of the VM and start ping6 to make the VM as master + assert vm1_fixture.add_ip_on_vm(vIP) + assert client_fixtures[0].ping_with_certainty(vIP), 'Ping to vIP failure' + + assert self.vrrp_mas_chk(vrrp_master, vn1_fixture, vIP) + + for vm in client_fixtures: + for port in sport: + assert self.send_nc_traffic(vm, vrrp_master, + port, dport, proto, ip=vIP) + + dst_compute_fix = 
self.compute_fixtures_dict[vrrp_master.vm_node_ip] + vrf_id_dst = dst_compute_fix.get_vrf_id(vrrp_master.vn_fq_names[0]) + for vm in client_fixtures: + self.verify_fat_flow_on_compute(dst_compute_fix, vm.vm_ip, + vIP, dport, proto, vrf_id_dst, + fat_flow_count=1) + + if is_v6(vIP): + #Skip further verification as current version of vrrpd does not support IPv6 + return True + self.logger.info('We will induce a mastership switch') + port_dict = {'admin_state_up': False} + self.update_port(port1_obj['id'], port_dict) + self.logger.info( + '%s should become the new VRRP master' % vm2_fixture.vm_name) + vrrp_master = vm2_fixture + assert self.vrrp_mas_chk(vrrp_master, vn1_fixture, vIP) + + for vm in client_fixtures: + for port in sport: + assert self.send_nc_traffic(vm, vrrp_master, + port, dport, proto, ip=vIP) + + dst_compute_fix = self.compute_fixtures_dict[vrrp_master.vm_node_ip] + vrf_id_dst = dst_compute_fix.get_vrf_id(vrrp_master.vn_fq_names[0]) + for vm in client_fixtures: + self.verify_fat_flow_on_compute(dst_compute_fix, vm.vm_ip, + vIP, dport, proto, vrf_id_dst, + fat_flow_count=1) + +class FatFlowIpv6(FatFlow): + @classmethod + def setUpClass(cls): + super(FatFlow, cls).setUpClass() + cls.inputs.set_af(AF_TEST) + + def is_test_applicable(self): + if self.inputs.orchestrator == 'vcenter' and not self.orch.is_feature_supported('ipv6'): + return(False, 'Skipping IPv6 Test on vcenter setup') + return (True, None) diff --git a/serial_scripts/__init__.py b/serial_scripts/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/serial_scripts/analytics/base.py b/serial_scripts/analytics/base.py index 4ec8f9883..22e823e56 100644 --- a/serial_scripts/analytics/base.py +++ b/serial_scripts/analytics/base.py @@ -1,4 +1,4 @@ -import test +import test_v1 from common import isolated_creds from vn_test import * from vm_test import * @@ -10,17 +10,11 @@ from traffic.core.helpers import Sender, Receiver from tcutils.util import Singleton -class 
AnalyticsBaseTest(test.BaseTestCase): +class AnalyticsBaseTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(AnalyticsBaseTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, cls.inputs, ini_file = cls.ini_file, logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib @@ -35,7 +29,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.res.cleanUp() - cls.isolated_creds.delete_tenant() super(AnalyticsBaseTest, cls).tearDownClass() #end tearDownClass diff --git a/serial_scripts/analytics/test_analytics_resource.py b/serial_scripts/analytics/test_analytics_resource.py index d3dde6959..84ec9fe97 100644 --- a/serial_scripts/analytics/test_analytics_resource.py +++ b/serial_scripts/analytics/test_analytics_resource.py @@ -504,10 +504,10 @@ def test_verify_flow_series_table_query_range(self): self.recv_host = Host(self.res.vn1_vm2_fixture.local_ip, self.res.vn1_vm2_fixture.vm_username, self.res.vn1_vm2_fixture.vm_password) + # Create traffic stream start_time = self.analytics_obj.getstarttime(self.tx_vm_node_ip) self.logger.info("start time= %s" % (start_time)) - self.logger.info("Creating streams...") dport = 11000 stream = Stream( @@ -692,6 +692,9 @@ def test_verify_flow_tables(self): direction='in') if not pkts_before_traffic: pkts_before_traffic = 0 + + self.res.vn1_vm1_fixture.wait_till_vm_is_up() + self.res.vn1_vm2_fixture.wait_till_vm_is_up() # Create traffic stream self.logger.info("Creating streams...") stream = Stream( @@ -887,7 +890,7 @@ def test_verify_flow_tables(self): self.res1 = self.analytics_obj.ops_inspect[ip].post_query( 'FlowSeriesTable', start_time=str(s_time), - 
end_time=str(e_time), + end_time='now', select_fields=[ 'sourcevn', 'sourceip', diff --git a/serial_scripts/analytics/test_analytics_serial.py b/serial_scripts/analytics/test_analytics_serial.py index 6cc235292..1e38c951b 100644 --- a/serial_scripts/analytics/test_analytics_serial.py +++ b/serial_scripts/analytics/test_analytics_serial.py @@ -24,6 +24,52 @@ def runTest(self): pass # end runTest + @test.attr(type=['sanity', 'vcenter']) + def test_cfgm_alarms(self): + ''' Test whether contrail config alarms are generated + after executing alarms triggering operations + + ''' + assert self.analytics_obj.verify_cfgm_alarms() + return True + + @test.attr(type=['sanity', 'vcenter']) + def test_db_alarms(self): + ''' Test whether contrail database alarms are generated + after executing alarms triggering operations + + ''' + assert self.analytics_obj.verify_db_alarms() + return True + + @test.attr(type=['sanity', 'vcenter']) + def test_analytics_alarms(self): + ''' Test whether contrail analytics alarms are generated + after executing alarms triggering operations + + ''' + assert self.analytics_obj.verify_analytics_alarms() + return True + + @test.attr(type=['sanity', 'vcenter']) + def test_control_alarms(self): + ''' Test whether contrail control alarms are generated + after executing alarms triggering operations + + ''' + assert self.analytics_obj.verify_control_alarms() + return True + + @test.attr(type=['sanity', 'vcenter']) + def test_vrouter_alarms(self): + ''' Test whether contrail vrouter alarms are generated + after executing alarms triggering operations + + ''' + assert self.analytics_obj.verify_vrouter_alarms() + return True + + @preposttest_wrapper def test_verify_bgp_peer_object_logs(self): ''' Test to validate bgp_peer_object logs diff --git a/serial_scripts/backup_restore/base.py b/serial_scripts/backup_restore/base.py index da8566cb4..71e7f7bcd 100644 --- a/serial_scripts/backup_restore/base.py +++ b/serial_scripts/backup_restore/base.py @@ -1,20 +1,13 @@ 
-import test +import test_v1 from common.connections import ContrailConnections from common.contrail_test_init import ContrailTestInit -from common import isolated_creds from upgrade.verify import BaseResource -class BackupRestoreBaseTest(test.BaseTestCase): +class BackupRestoreBaseTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BackupRestoreBaseTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, cls.inputs, ini_file = cls.ini_file, logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib @@ -28,7 +21,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.res.cleanUp() - cls.isolated_creds.delete_tenant() super(BackupRestoreBaseTest, cls).tearDownClass() #end tearDownClass @@ -56,4 +48,4 @@ def create(self): return TestBackupRestoreResource() def runTest(self): pass -#End resource \ No newline at end of file +#End resource diff --git a/serial_scripts/backup_restore/test_backup_restore.py b/serial_scripts/backup_restore/test_backup_restore.py index 823291394..3bc168a06 100644 --- a/serial_scripts/backup_restore/test_backup_restore.py +++ b/serial_scripts/backup_restore/test_backup_restore.py @@ -1,3 +1,4 @@ +#Define environment variable FABRIC_UTILS_PATH and provide path to fabric_utils before running import time import os from contrail_fixtures import * @@ -11,6 +12,7 @@ import test from upgrade.verify import VerifyFeatureTestCases from base import BackupRestoreBaseTest +from tcutils.contrail_status_check import ContrailStatusChecker class TestBackupRestore(BackupRestoreBaseTest,VerifyFeatureTestCases): ''' backup and restore the configurations ''' @@ -46,32 +48,35 @@ def 
test_to_backup_restore(self): host_string='%s@%s' % ( username, self.inputs.cfgm_ips[0]), password = password, warn_only=True, abort_on_prompts=False, debug=True): + + fab_path = os.environ.get('FABRIC_UTILS_PATH', '/opt/contrail/utils') + backup_cmd = "cd " +fab_path +";fab backup_data " + restore_cmd = "cd " +fab_path +";fab restore_data " + reset_cmd = "cd " +fab_path +";fab reset_config " - backup_cmd = "cd /opt/contrail/utils;fab backup_data " - restore_cmd = "cd /opt/contrail/utils;fab restore_data " - reset_cmd = "cd /opt/contrail/utils;fab reset_config " - - self.logger.info("starting backup") + self.logger.info("Starting backup") status = run(backup_cmd) self.logger.debug("LOG for fab backup_data : %s" % status) assert not(status.return_code), 'Failed while running backup_data' result = result and not(status.return_code) - self.logger.info("backup completed") + self.logger.info("Backup completed") - self.logger.info("starting reset config") + self.logger.info("Starting reset config") status = run(reset_cmd) self.logger.debug("LOG for fab reset_config : %s" % status) assert not(status.return_code), 'Failed while running reset_config' result = result and not(status.return_code) - self.logger.info("reset configuration completed") + self.logger.info("Reset configuration completed") - self.logger.info("starting restore") - restore_cmd = "cd /root/fabric-utils;fab restore_data " + self.logger.info("Starting restore") status = run(restore_cmd) self.logger.debug("LOG for fab restore_data: %s" % status) assert not(status.return_code), 'Failed while running restore_data' result=result and not(status.return_code) - self.logger.info("restore of data and configuration completed") + self.logger.info("Restore of data and configuration completed") + #Check contrail-services status + status = ContrailStatusChecker().wait_till_contrail_cluster_stable() + assert status[0],'Contrail-services are not active' return result #end test_backup_restore diff --git 
a/serial_scripts/control_node_scaling/base.py b/serial_scripts/control_node_scaling/base.py index 2e3b0c960..8bd439ecb 100644 --- a/serial_scripts/control_node_scaling/base.py +++ b/serial_scripts/control_node_scaling/base.py @@ -1,26 +1,16 @@ -import test +import test_v1 from common import isolated_creds -class BaseBGPScaleTest(test.BaseTestCase): +class BaseBGPScaleTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseBGPScaleTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() cls.inputs.set_af('v4') #end setUpClass @classmethod def tearDownClass(cls): - #cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(BaseBGPScaleTest, cls).tearDownClass() #end tearDownClass diff --git a/serial_scripts/control_node_scaling/commands.py b/serial_scripts/control_node_scaling/commands.py index c280e0e38..7dd932b59 100755 --- a/serial_scripts/control_node_scaling/commands.py +++ b/serial_scripts/control_node_scaling/commands.py @@ -18,8 +18,8 @@ def __init__(self, cmd, env = None): self.fstderr = tempfile.NamedTemporaryFile(mode='w', prefix='CMD_ERR_') self.env = copy.deepcopy(os.environ) - if not env: - self.env.update({i.split('=')[0]:i.split('=')[1] for i in e.split()}) + if env: + self.env.update({i.split('=')[0]:i.split('=')[1] for i in env.split()}) def start(self): """Launches a local command as background process.""" diff --git a/serial_scripts/control_node_scaling/setup.rb b/serial_scripts/control_node_scaling/setup.rb index b468c487a..18abfff26 100644 --- a/serial_scripts/control_node_scaling/setup.rb +++ b/serial_scripts/control_node_scaling/setup.rb @@ -194,7 +194,7 @@ def 
configure_mx_peer next if type !~ /vsrx/ rsh(@nodes["config1"][:public_ip], -"python /opt/contrail/utils/provision_mx.py --router_name #{node[:host]} --router_ip #{node[:private_ip]} --router_asn 64512 --api_server_ip #{@nodes["config1"][:private_ip]} --api_server_port 8082 --oper add --admin_user admin --admin_password c0ntrail123 --admin_tenant_name admin") +"python /usr/share/contrail-utils/provision_mx.py --router_name #{node[:host]} --router_ip #{node[:private_ip]} --router_asn 64512 --api_server_ip #{@nodes["config1"][:private_ip]} --api_server_port 8082 --oper add --admin_user admin --admin_password c0ntrail123 --admin_tenant_name admin") } end diff --git a/serial_scripts/control_node_scaling/test_flap_agent_scale.py b/serial_scripts/control_node_scaling/test_flap_agent_scale.py index b0d5be592..bff7e7bfb 100755 --- a/serial_scripts/control_node_scaling/test_flap_agent_scale.py +++ b/serial_scripts/control_node_scaling/test_flap_agent_scale.py @@ -65,7 +65,8 @@ def __init__(self, inputs=None, args_str=None, pre_scale_setup=0, params_ini_fil self.ini_file = 'sanity_params.ini' self.inputs = ContrailTestInit( self.ini_file, stack_user=self._args.username, - stack_password=self._args.password, project_fq_name=['default-domain', 'default-project']) + stack_password=self._args.password, + stack_tenant='default-project') else: self.inputs = inputs self.logger = self.inputs.logger @@ -3981,7 +3982,7 @@ def test_bgp_scale(self): # # Init # - # self._log_print("INFO:******ABC*********") + # self._log_print("INFO:%%%%%%ABC%%%%%%%%%") self.obj = FlapAgentScaleInit(args_str='', inputs=self.inputs) # diff --git a/serial_scripts/discovery_regression/base.py b/serial_scripts/discovery_regression/base.py index 77a5d3378..68f1f7bd4 100644 --- a/serial_scripts/discovery_regression/base.py +++ b/serial_scripts/discovery_regression/base.py @@ -1,22 +1,14 @@ -import test +import test_v1 from common.connections import ContrailConnections from common import isolated_creds import 
threading import time -class BaseDiscoveryTest(test.BaseTestCase): +class BaseDiscoveryTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseDiscoveryTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib @@ -28,7 +20,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - cls.isolated_creds.delete_tenant() super(BaseDiscoveryTest, cls).tearDownClass() #end tearDownClass @@ -77,8 +68,9 @@ def verify_service_status(self, service, base_ip, no_of_services=1, expected_sta result = result and True self.logger.info("Verifying if the service is " + expected_status) svc_status = self.ds_obj.get_service_status( - self.inputs.cfgm_ip, service_tuple=elem) - if (svc_status == expected_status): + self.inputs.cfgm_ip, service_tuple=elem, + expected_status = expected_status) + if svc_status: self.logger.info("svc is " + expected_status) result = result and True else: diff --git a/serial_scripts/discovery_regression/test_discovery_serial.py b/serial_scripts/discovery_regression/test_discovery_serial.py index 93eefdfaf..5e45e5a06 100644 --- a/serial_scripts/discovery_regression/test_discovery_serial.py +++ b/serial_scripts/discovery_regression/test_discovery_serial.py @@ -7,7 +7,11 @@ import base import test import time +from time import sleep import threading +from tcutils.config.discovery_util import DiscoveryServerUtils +from tcutils.contrail_status_check import ContrailStatusChecker +from multiprocessing import Process class TestDiscoverySerial(base.BaseDiscoveryTest): @@ -30,11 +34,11 @@ def 
test_control_node_restart_and_validate_status_of_the_service(self): svc_lst = [] svc_lst = self.ds_obj.get_all_control_services(self.inputs.cfgm_ip) for elem in svc_lst: - if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) == 'up'): + if self.ds_obj.get_service_status(self.inputs.cfgm_ip, elem, 'up'): self.logger.info("Service %s is up" % (elem,)) result = result and True else: - self.logger.warn("Service %s is down" % (elem,)) + self.logger.error("Service %s is down" % (elem,)) result = result and False svc_lst.remove(elem) # Stopping the control node service @@ -42,39 +46,31 @@ def test_control_node_restart_and_validate_status_of_the_service(self): ip = elem[0] self.logger.info("Stopping service %s.." % (elem,)) self.inputs.stop_service('contrail-control', [ip]) - time.sleep(20) + for elem in svc_lst: ip = elem[0] - if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) == 'up'): - self.logger.warn("Service %s is still up" % (elem,)) - result = result and False - else: + if self.ds_obj.get_service_status(self.inputs.cfgm_ip, elem, 'down', 8): self.logger.info("Service %s is down" % (elem,)) - result = result and True + result = result and True + else: + self.logger.error("Service %s is still up" % (elem,)) + result = result and False # Starting the control node service for elem in svc_lst: ip = elem[0] self.logger.info("Starting service %s.." 
% (elem,)) self.inputs.start_service('contrail-control', [ip]) - retry = 0 for elem in svc_lst: ip = elem[0] - while True: - svc_status = self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) - if svc_status == 'up': - self.logger.info( + svc_status = self.ds_obj.get_service_status(self.inputs.cfgm_ip, elem, 'up', 6) + if svc_status: + self.logger.info( "Service %s came up after service was started" % (elem,)) - result = result and True - break - else: - retry = retry + 1 - time.sleep(1) - self.logger.warn("Service %s isn't up yet " % (elem,)) - if retry > 30: - self.logger.info( - "Service %s is down even after service was started" % (elem,)) - result = result and False - break + result = result and True + else: + self.logger.error("Service %s isn't up yet " % (elem,)) + self.logger.error("Service %s is down even after service was started" % (elem,)) + result = result and False assert result return True @@ -141,15 +137,15 @@ def test_scale_test(self): result = result and True self.logger.info("Verifying if the service is up") svc_status = self.ds_obj.get_service_status( - self.inputs.cfgm_ip, service_tuple=elem) - if (svc_status == 'up'): + self.inputs.cfgm_ip, elem, "up") + if svc_status: self.logger.info("svc is up") result = result and True else: result = result and False - self.logger.warn("svc not up") + self.logger.error("svc not up") else: - self.logger.warn("%s is NOT added to discovery service" % + self.logger.error("%s is NOT added to discovery service" % (elem,)) result = result and False # Verify instnaces == 0 will send all services @@ -159,7 +155,7 @@ def test_scale_test(self): resp = resp[service] if len(resp) < 100: result = result and False - self.logger.warn("Not all services returned") + self.logger.error("Not all services returned") self.logger.info( "Sending 100 subscription message to discovery..") subs_threads = [] @@ -217,7 +213,7 @@ def test_send_admin_state_in_publish(self): no_of_services = 25 result = True msg = '' - 
self.ds_obj.change_ttl_short_and_hc_max_miss() + self.ds_obj.modify_discovery_conf_file_params(operation='change_ttl_short_and_hc_max_miss') assert self.analytics_obj.verify_cfgm_uve_module_state( self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery') self.publish_service_with_admin_state(service, base_ip, port, 'down', no_of_services) @@ -279,7 +275,8 @@ def test_send_admin_state_in_publish(self): self.logger.exception("Got exception %s"%(e)) raise finally: - self.ds_obj.change_ttl_short_and_hc_max_miss(ttl_short=1, hc_max_miss=3) + self.ds_obj.modify_discovery_conf_file_params(operation='change_ttl_short_and_hc_max_miss',\ + ttl_short=1, hc_max_miss=3) self.logger.info("%s"%(msg)) resp = self.ds_obj.cleanup_service_from_discovery( self.inputs.cfgm_ip) @@ -296,7 +293,7 @@ def test_publish(self): ''' self.logger.info( - "********TEST WILL FAIL IF RAN MORE THAN ONCE WITHOUT CLEARING THE ZOOKEEPER DATABASE*********") + "%%%%%%%%TEST WILL FAIL IF RAN MORE THAN ONCE WITHOUT CLEARING THE ZOOKEEPER DATABASE%%%%%%%%%") service = 'dummy_service23' port = 65093 result = True @@ -323,20 +320,16 @@ def test_publish(self): base_ip = '192.168.1.' expected_ttl = 2 cuuid = uuid.uuid4() - while(expected_ttl <= 32): + for i in range(1,5): resp = None resp = self.ds_obj.subscribe_service_from_discovery( self.inputs.cfgm_ip, service=service, instances=1, client_id=str(cuuid)) ttl = resp['ttl'] self.logger.info("ttl : %s" % (ttl)) - if (ttl <= expected_ttl): - result = result and True - else: - result = result and False self.logger.info("Waiting for %s sec..." % (expected_ttl)) time.sleep(expected_ttl) expected_ttl = expected_ttl * 2 - + self.logger.info("Expected ttl is %s..." 
% (expected_ttl)) self.logger.info("Verifying that the ttl sablizes at 32 sec..") resp = None resp = self.ds_obj.subscribe_service_from_discovery( @@ -375,7 +368,7 @@ def test_publish(self): resp = resp[service] if len(resp) < 3: result = result and False - self.logger.warn("Not all services returned") + self.logger.error("Not all services returned") expected_ip_list = ['192.168.1.1', '192.168.1.2', '192.168.1.3'] result1 = True @@ -431,7 +424,7 @@ def test_change_parameters_in_contrail_discovery_conf(self): -policy ''' - # Changing the hc_max_miss=5 and verifying that the services are down + # Changing the hc_max_miss=10 and verifying that the services are down # after 25 sec try: cmd = 'cd /etc/contrail;sed -i \'/hc_max_miss.*=.*/c\hc_max_miss = 10\' contrail-discovery.conf' @@ -445,11 +438,11 @@ def test_change_parameters_in_contrail_discovery_conf(self): svc_lst = [] svc_lst = self.ds_obj.get_all_control_services(self.inputs.cfgm_ip) for elem in svc_lst: - if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) == 'up'): + if self.ds_obj.get_service_status(self.inputs.cfgm_ip, elem, 'up'): self.logger.info("Service %s is up" % (elem,)) result = result and True else: - self.logger.warn("Service %s is down" % (elem,)) + self.logger.error("Service %s is down" % (elem,)) result = result and False svc_lst.remove(elem) # Stopping the control node service @@ -457,27 +450,29 @@ def test_change_parameters_in_contrail_discovery_conf(self): ip = elem[0] self.logger.info("Stopping service %s.." 
% (elem,)) self.inputs.stop_service('contrail-control', [ip]) - time.sleep(15) + first_cfgm = True for elem in svc_lst: ip = elem[0] - if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) == 'up'): - self.logger.info("Service %s is still up" % (elem,)) - result = result and True - else: - self.logger.warn("Service %s is down before 25 sec" % + retry_count = 8 if first_cfgm else 0 + first_cfgm = False + if self.ds_obj.get_service_status(self.inputs.cfgm_ip, elem, 'down', retry_count): + self.logger.error("Service %s is down before 50 sec" % (elem,)) result = result and False - time.sleep(45) + else: + self.logger.info("Service %s is still up" % + (elem,)) + result = result and True for elem in svc_lst: ip = elem[0] - if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_tuple=elem) == 'up'): - self.logger.warn("Service %s is still up after 30 secs" % + if self.ds_obj.get_service_status(self.inputs.cfgm_ip, elem, 'down', 2): + self.logger.info("Service %s is down after 50 sec" % (elem,)) - result = result and False + result = result and True else: - self.logger.info("Service %s is down after 30 sec" % + self.logger.error("Service %s is still up after 50 secs" % (elem,)) - result = result and True + result = result and False # Starting the control node service for elem in svc_lst: ip = elem[0] @@ -487,8 +482,8 @@ def test_change_parameters_in_contrail_discovery_conf(self): except Exception as e: print e finally: - # Changing the hc_max_miss=3 - cmd = 'cd /etc/contrail;sed -i \'/hc_max_miss.*=.*/c\hc_max_miss = 3\' contrail-discovery.conf' + # Changing the hc_max_miss=7 + cmd = 'cd /etc/contrail;sed -i \'/hc_max_miss.*=.*/c\hc_max_miss = 7\' contrail-discovery.conf' for ip in self.inputs.cfgm_ips: self.inputs.run_cmd_on_server( ip, cmd, username='root', password='c0ntrail123') @@ -527,5 +522,2185 @@ def test_change_parameters_in_contrail_discovery_conf(self): self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery') 
assert self.ds_obj.verify_bgp_connection() return True + + @preposttest_wrapper + def test_rule_for_vrouter_with_xmpp_server(self): + ''' Validate that applied rules takes effect correctly for + contrail-vrouter-agent and its subscription to XMPP Server. + Steps: + 1. Create rules for all contrail-vrouter-agent of 1 network + to subscribe to XMPP Servers of same network. + 2. Verify if rule is working as expected or not + Precondition: Assumption is that setup is having a vrouter connected + to 2 instances of XMPP servers running in different subnets + Also, setup requirement of this test case is to have at + least 2 publishers and 2 subscribers. + Both set of publisher and subscriber should be in different network. + ''' + self.ds_obj.skip_discovery_test("xmpp-server", min_instances=2, different_subnet_flag=True ) + result = True + ds_ip = self.inputs.cfgm_ip + self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,\ + "change_min_max_ttl") + if len(self.inputs.bgp_control_ips) > 0: + self.logger.info("Creating rules corresponding to control node *xmpp-server*") + self.logger.info(" Subscribers are *vrouter agent* running in same subnets") + for i in range(0,len(self.inputs.bgp_control_ips)): + bgp_control_ip = self.inputs.bgp_control_ips[i].split('.') + bgp_control_ip[3] = '0' + bgp_control_ip = ".".join(bgp_control_ip) + "/24" + rule_status = self.ds_obj.add_and_verify_rule(bgp_control_ip,'xmpp-server',\ + bgp_control_ip, 'contrail-vrouter-agent:0') + if rule_status == False: + result = False + self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep (30) + self.logger.debug("#### Verifying clients subscribed to publishers ###") + try: + for i in range(0,len(self.inputs.compute_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrail-vrouter-agent:0', \ + 
self.inputs.compute_control_ips[i], 'xmpp-server') + if verification == False: + result = False + except Exception as e: + self.logger.error(e) + result = False + for i in range(0,len(self.inputs.bgp_control_ips)): + bgp_control_ip = self.inputs.bgp_control_ips[i].split('.') + bgp_control_ip[3] = '0' + bgp_control_ip = ".".join(bgp_control_ip) + "/24" + rule_status = self.ds_obj.delete_and_verify_rule( bgp_control_ip,\ + 'xmpp-server', bgp_control_ip,'contrail-vrouter-agent:0') + if rule_status == False: + result = False + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_rule_for_vrouter_with_dns_server(self): + ''' Validate that applied rules takes effect correctly for + contrail-vrouter-agent and its subscription to DNS Server. + Steps: + 1. Create rules for all contrail-vrouter-agent of 1 network to + subscribe to DNS Servers of same network. + 2. Verify if rule is working as expected or not + Precondition: Assumption is that setup is having a vrouter connected + to 2 instances of DNS servers running in different subnets + Also, setup requirement of this test case is to have at least + 2 publishers and 2 subscribers. + Both set of publisher and subscriber should be in different network. 
+ ''' + self.ds_obj.skip_discovery_test("dns-server", min_instances=2, different_subnet_flag=True ) + result = True + ds_ip = self.inputs.cfgm_ip + self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + if len(self.inputs.bgp_control_ips) > 0: + self.logger.info("Creating rules corresponding to control node *DNS-Server*") + self.logger.info(" Subscribers are *vrouter agent* running in same subnets") + for i in range(0,len(self.inputs.bgp_control_ips)): + bgp_control_ip = self.inputs.bgp_control_ips[i].split('.') + bgp_control_ip[3] = '0' + bgp_control_ip = ".".join(bgp_control_ip) + "/24" + rule_status = self.ds_obj.add_and_verify_rule( bgp_control_ip,\ + 'dns-server',bgp_control_ip, 'contrail-vrouter-agent:0') + if rule_status == False: + result = False + self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep (30) + self.logger.debug("#### Verifying clients subscribed to publishers ###") + try: + for i in range(0,len(self.inputs.compute_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrail-vrouter-agent:0', \ + self.inputs.compute_control_ips[i], 'dns-server') + if verification == False: + result = False + except Exception as e: + self.logger.error(e) + result = False + for i in range(0,len(self.inputs.bgp_control_ips)): + bgp_control_ip = self.inputs.bgp_control_ips[i].split('.') + bgp_control_ip[3] = '0' + bgp_control_ip = ".".join(bgp_control_ip) + "/24" + rule_status = self.ds_obj.delete_and_verify_rule( bgp_control_ip,\ + 'dns-server', bgp_control_ip,'contrail-vrouter-agent:0') + if rule_status == False: + result = False + assert result, "Test case failed due to some error. 
Please refer to logs" + + @preposttest_wrapper + def test_rule_for_control_with_ifmap_server(self): + ''' Validate that applied rules takes effect correctly for + "contrail-control" and its subscription to IfmapServer. + Steps: + 1. Create rules for all contrail-control of 1 network to subscribe + to Ifmap Servers of same network. + 2. Verify if rule is working as expected or not + Precondition: Assumption is that setup is having a contrail-control + connected to 2 instances of Ifmap servers running in different subnets + Also, setup requirement of this test case is to have at least 2 + publishers and 2 subscribers. + Both set of publisher and subscriber should be in different network. + ''' + self.ds_obj.skip_discovery_test("IfmapServer", min_instances=2, different_subnet_flag=True ) + result = True + ds_ip = self.inputs.cfgm_ip + self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-control') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + if len(self.inputs.cfgm_control_ips) > 0: + self.logger.info("Creating rules corresponding to config node *IfmapServer*") + self.logger.info(" Subscribers are *contrail-control* running in same subnets") + for i in range(0,len(self.inputs.cfgm_control_ips)): + cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.') + cfgm_control_ip[3] = '0' + cfgm_control_ip = ".".join(cfgm_control_ip) + "/24" + rule_status = self.ds_obj.add_and_verify_rule( cfgm_control_ip,\ + 'IfmapServer', cfgm_control_ip, 'contrail-control') + if rule_status == False: + result = False + self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep (30) + self.logger.debug("#### Verifying clients subscribed to publishers ###") + try: + for i in range(0,len(self.inputs.bgp_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrail-control', self.inputs.bgp_control_ips[i],\ + 'IfmapServer') + if verification == 
False: + self.logger.error("Rule not behaving as expected") + result = False + except Exception as e: + self.logger.error(e) + result = False + for i in range(0,len(self.inputs.cfgm_control_ips)): + cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.') + cfgm_control_ip[3] = '0' + cfgm_control_ip = ".".join(cfgm_control_ip) + "/24" + rule_status = self.ds_obj.delete_and_verify_rule( cfgm_control_ip,\ + 'IfmapServer', cfgm_control_ip, 'contrail-control') + if rule_status == False: + result = False + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_rule_for_webui_with_op_server(self): + ''' Validate that applied rules takes effect correctly for + "contrailWebUI" and its subscription to Op Server. + Steps: + 1. Create rules for all contrailWebUI of 1 network to + subscribe to Op Servers of same network. + 2. Verify if rule is working as expected or not + Precondition: Assumption is that setup is having ContrailWebUI + connected to OP server running in different subnet. 
+ ''' + self.ds_obj.skip_discovery_test("OpServer", min_instances=2, different_subnet_flag=True ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'supervisor-webui') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + if len(self.inputs.collector_control_ips) > 0: + self.logger.info("Creating rules corresponding to collector node *OpServer*") + self.logger.info(" Subscribers are *contrailWebUI* running in same subnets") + for i in range(0,len(self.inputs.collector_control_ips)): + collector_control_ip = self.inputs.collector_control_ips[i].split('.') + collector_control_ip[3] = '0' + collector_control_ip = ".".join(collector_control_ip) + "/24" + rule_status = self.ds_obj.add_and_verify_rule(collector_control_ip,\ + 'OpServer', collector_control_ip,'contrailWebUI') + if rule_status == False: + result = False + self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep (30) + self.logger.debug("#### Verifying clients subscribed to publishers ###") + try: + for i in range(0,len(self.inputs.webui_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrailWebUI', self.inputs.webui_control_ips[i], 'OpServer') + if verification == False: + self.logger.error("Rule not behaving as expected") + result = False + except Exception as e: + self.logger.error(e) + result = False + for i in range(0,len(self.inputs.collector_control_ips)): + collector_control_ip = self.inputs.collector_control_ips[i].split('.') + collector_control_ip[3] = '0' + collector_control_ip = ".".join(collector_control_ip) + "/24" + rule_status = self.ds_obj.delete_and_verify_rule(collector_control_ip,\ + 'OpServer', collector_control_ip,'contrailWebUI') + if rule_status == False: + result = False + assert result, "Test case failed due to some error. 
Please refer to logs" + + @preposttest_wrapper + def test_rule_for_webui_with_api_server(self): + ''' Validate that applied rules takes effect correctly for + "contrailWebUI" and its subscription to API Server. + Steps: + 1. Create rules for all contrailWebUI of 1 network to subscribe + to Op Servers of same network. + 2. Verify if rule is working as expected or not + Precondition: Assumption is that setup is having ContrailWebUI + connected to API server running in different subnet. + ''' + self.ds_obj.skip_discovery_test("ApiServer", min_instances=2, different_subnet_flag=True ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'supervisor-webui') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + if len(self.inputs.cfgm_control_ips) > 0: + self.logger.info("Creating rules corresponding to config node *ApiServer*") + self.logger.info(" Subscribers are *contrailWebUI* running in same subnets") + for i in range(0,len(self.inputs.cfgm_control_ips)): + cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.') + cfgm_control_ip[3] = '0' + cfgm_control_ip = ".".join(cfgm_control_ip) + "/24" + rule_status = self.ds_obj.add_and_verify_rule(cfgm_control_ip,\ + 'ApiServer', cfgm_control_ip, 'contrailWebUI') + if rule_status == False: + result = False + self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep (30) + self.logger.debug("#### Verifying clients subscribed to publishers ###") + try: + for i in range(0,len(self.inputs.webui_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrailWebUI', self.inputs.webui_control_ips[i], 'ApiServer') + if verification == False: + self.logger.error("Rule not behaving as expected") + result = False + except Exception as e: + self.logger.error(e) + result = False + for i in range(0,len(self.inputs.cfgm_control_ips)): + cfgm_control_ip = 
self.inputs.cfgm_control_ips[i].split('.') + cfgm_control_ip[3] = '0' + cfgm_control_ip = ".".join(cfgm_control_ip) + "/24" + rule_status = self.ds_obj.delete_and_verify_rule(cfgm_control_ip,\ + 'ApiServer', cfgm_control_ip, 'contrailWebUI') + if rule_status == False: + result = False + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_rule_for_vrouter_with_collector(self): + ''' Validate that applied rules takes effect correctly for + "contrail-vrouter-agent" and its subscription to Collector. + Steps: + 1. Create rules for all contrail-vrouter-agent of 1 network + to subscribe to Collector of same network. + 2. Verify if rule is working as expected or not + Precondition: Assumption is that setup is having a vrouter connected + to 2 instances of Collectors running in different subnets + Also, setup requirement of this test case is to have at least + 2 publishers and 2 subscribers. + Both set of publisher and subscriber should be in different network. 
+ ''' + self.ds_obj.skip_discovery_test("Collector", min_instances=2, different_subnet_flag=True ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + if len(self.inputs.collector_control_ips) > 0: + self.logger.info("Creating rules corresponding to collector node *Collector*") + self.logger.info(" Subscribers are *vrouter-agent* running in same subnets") + for i in range(0,len(self.inputs.collector_control_ips)): + collector_control_ip = self.inputs.collector_control_ips[i].split('.') + collector_control_ip[3] = '0' + collector_control_ip = ".".join(collector_control_ip) + "/24" + rule_status = self.ds_obj.add_and_verify_rule(collector_control_ip,\ + 'Collector', collector_control_ip,'contrail-vrouter-agent:0') + if rule_status == False: + result = False + self.logger.info("#### Waiting for 60 seconds so that TTL expiry for all subscriber happens ###") + sleep (30) + self.logger.debug("#### Verifying clients subscribed to publishers ###") + try: + for i in range(0,len(self.inputs.compute_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrail-vrouter-agent:0', \ + self.inputs.compute_control_ips[i], 'Collector') + if verification == False: + self.logger.error("Rule not behaving as expected") + result = False + except Exception as e: + self.logger.error(e) + result = False + for i in range(0,len(self.inputs.collector_control_ips)): + collector_control_ip = self.inputs.collector_control_ips[i].split('.') + collector_control_ip[3] = '0' + collector_control_ip = ".".join(collector_control_ip) + "/24" + rule_status = self.ds_obj.delete_and_verify_rule(collector_control_ip,\ + 'Collector', collector_control_ip,'contrail-vrouter-agent:0') + if rule_status == False: + result = False + assert result, "Test case failed due to some error. 
Please refer to logs" + + @preposttest_wrapper + def test_rule_for_collector_with_multi_clients(self): + ''' Validate that applied rules takes effect correctly for multiple + clients mentioned sequentially in a single rule for Collector as + a Server/Publisher. + Steps: + 1. Create a single rule for multiple types of clients to subscribe + to single Publisher. Mention all subscriber in that rule. + 2. Verify if rule is working as expected or not. Verify that all + clients subscribe to single publisher only. + Precondition: Assumption is that setup is having a vrouter connected + to 2 instances of Collectors running in different subnets + Also, setup requirement of this test case is to have at least + 2 publishers and 2 subscribers. + Both set of publisher and subscriber should be in different network. + ''' + self.ds_obj.skip_discovery_test("Collector", min_instances=2, different_subnet_flag=True ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl(30, 30, 'contrail-vrouter-agent',\ + 'contrail-topology', 'contrail-control', 'contrail-api') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + if len(self.inputs.collector_control_ips) > 0: + self.logger.info("Creating rules corresponding to collector node *Collector*") + self.logger.info("Subscribers are multiple services running in same subnets") + for i in range(0,len(self.inputs.collector_control_ips)): + collector_control_ip = self.inputs.collector_control_ips[i].split('.') + collector_control_ip[3] = '0' + collector_control_ip = ".".join(collector_control_ip) + "/24" + self.ds_obj.discovery_rule_config( "add_rule", + 'default-discovery-service-assignment', collector_control_ip,\ + 'Collector', collector_control_ip, 'contrail-vrouter-agent:0',\ + collector_control_ip, 'contrail-topology', collector_control_ip,\ + 'contrail-control', collector_control_ip, 'contrail-api') + result1 = self.ds_obj.discovery_rule_config( "find_rule",\ + 
'default-discovery-service-assignment', collector_control_ip,\ + 'Collector', collector_control_ip, 'contrail-vrouter-agent:0',\ + collector_control_ip, 'contrail-topology', collector_control_ip,\ + 'contrail-control', collector_control_ip, 'contrail-api') + if result1 == False: + self.logger.error("# While searching, rule not found. Configuration failed #") + result = False + self.ds_obj.read_rule('default-discovery-service-assignment') + self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep(30) + self.logger.debug("#### Verifying clients subscribed to publishers ###") + try: + for i in range(0,len(self.inputs.compute_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher(\ + ds_ip, 'contrail-vrouter-agent:0', \ + self.inputs.compute_control_ips[i], 'Collector') + if verification == False: + self.logger.error("# Rule not behaving as expected #") + result = False + for i in range(0,len(self.inputs.bgp_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrail-control', \ + self.inputs.bgp_control_ips[i], 'Collector') + if verification == False: + self.logger.error("# Rule not behaving as expected #") + result = False + for i in range(0,len(self.inputs.collector_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrail-topology',\ + self.inputs.collector_control_ips[i], 'Collector') + if verification == False: + self.logger.error("# Rule not behaving as expected #") + result = False + for i in range(0,len(self.inputs.cfgm_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrail-api', \ + self.inputs.cfgm_control_ips[i], 'Collector') + if verification == False: + self.logger.error("# Rule not behaving as expected #") + result = False + except Exception as e: + self.logger.error(e) + result = False + for i in 
range(0,len(self.inputs.collector_control_ips)): + collector_control_ip = self.inputs.collector_control_ips[i].split('.') + collector_control_ip[3] = '0' + collector_control_ip = ".".join(collector_control_ip) + "/24" + self.ds_obj.discovery_rule_config( 'del_rule',\ + 'default-discovery-service-assignment', collector_control_ip,\ + 'Collector', collector_control_ip, 'contrail-vrouter-agent:0',\ + collector_control_ip, 'contrail-topology', collector_control_ip,\ + 'contrail-control', collector_control_ip, 'contrail-api') + result1 = self.ds_obj.discovery_rule_config( "find_rule",\ + 'default-discovery-service-assignment', collector_control_ip,\ + 'Collector', collector_control_ip, 'contrail-vrouter-agent:0',\ + collector_control_ip, 'contrail-topology', collector_control_ip,\ + 'contrail-control', collector_control_ip, 'contrail-api') + if result1 == True: + self.logger.error("# While searching for the deleted rule, it was found. Deletion failed #") + result = False + self.ds_obj.read_rule("default-discovery-service-assignment") + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_subscribe_request_with_diff_instances_rules(self): + ''' Validate that different instances of Publishers are assigned to + client based on the instance value requested by clients. + Also validate that if rules are present, requested instances are + restricted based on rules. + Steps: + 1. Use a non contrail synthetic subscribe request to test this. + 2. Use some instance value in subscribe request and verify that + requested instances of publisher are assigned. + 3. Create a rule with same requested Publisher and subscribe request. + 4. Verify that even if instances asked are more but as rule is present, + the request will be restricted to get only 1 instance of that publisher. + 5. Delete the rule. + 6. Again test that same subscribe request will again get all instances requested. 
+ Precondition: Assumption is that setup is having a subscriber + connected to 3 instances of XMPP, all running in different subnets + Also, setup requirement of this test case is to have at least 3 publishers + All publishers should be in different network. + ''' + self.ds_obj.skip_discovery_test("IfmapServer", min_instances=2, different_subnet_flag=False ) + result = True + ds_ip = self.inputs.cfgm_ip + self.logger.debug("#### Changing min and max TTL values for testing purpose ##") + assert self.ds_obj.modify_discovery_conf_file_params('change_min_max_ttl',\ + ttl_min=30, ttl_max=30) + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + try: + self.logger.info("#### Sending a dummy client request with instance value 3 ##") + self.logger.info("### Client will subscribe to IfmapServer #####") + self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer", \ + instances="3", min_instances="0",\ + client_id=self.inputs.compute_names[0]+":TestClient",\ + remote_addr= self.inputs.compute_control_ips[0], \ + client_type= "TestClient") + sleep(2) + self.logger.debug("# Verifying the number of instances of publishers granted to the client #") + ifmap_server_count = len(self.inputs.cfgm_control_ips) + client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\ + (ds_ip, client=(self.inputs.compute_control_ips[0],\ + "TestClient"), service="IfmapServer") + instances_allocated = len(client_subscribed_service_id) + self.logger.debug("# The instances of publishers allocated to TestClient are %d #" \ + % instances_allocated) + self.logger.debug("# The total number of publishers running of such types are %d #" \ + % ifmap_server_count) + if ifmap_server_count == instances_allocated or (ifmap_server_count > 3 and instances_allocated == 3): + self.logger.info("# Instance field working as expected #") + else: + self.logger.error("# Instance field not working as expected. 
#") + result = False + except Exception as e: + self.logger.error(e) + result = False + try: + self.logger.info("# Now creating a rule to verify that even if multiple\ + instances are requested but if a rule is present, it will limit the instances #") + self.ds_obj.add_and_verify_rule(self.inputs.cfgm_control_ips[0], \ + 'IfmapServer', self.inputs.compute_control_ips[0], 'TestClient') + except Exception as e: + self.logger.error(e) + result = False + self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep(30) + try: + self.logger.info("#### Sending a dummy client request with instance value 3 ##") + self.logger.info("### Client will subscribe to IfmapServer #####") + self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer",\ + instances="3", min_instances="0",\ + client_id=self.inputs.compute_names[0]+":TestClient",\ + remote_addr= self.inputs.compute_control_ips[0],\ + client_type= "TestClient") + sleep(2) + self.logger.debug("# Verifying the number of instances of publishers granted to the client #") + client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\ + (ds_ip, client=(self.inputs.compute_control_ips[0],\ + "TestClient"), service="IfmapServer") + instances_allocated = len(client_subscribed_service_id) + service_IPs = [] + for i in range (0,instances_allocated): + service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\ + (ds_ip,client_subscribed_service_id[i]) + service_IPs.append(service_endpoint[0][0]) + self.logger.debug("# Number of instances of Publishers used by TestClient are %d" \ + % (instances_allocated)) + self.logger.debug("# IPs of those publishers are %s #" % service_IPs) + if instances_allocated==1 and service_IPs[0]==self.inputs.cfgm_control_ips[0]: + self.logger.info("# As expected, TestClient is subscribed to only 1 instance of\ + IfmapServer even if it is requesting for 3 instances. 
This happened because of rule present #") + pass + else: + result = False + self.logger.error("# TestClient is subscribed to less/more than 1 instance of IfmapServer.#") + self.logger.error("#Something went wrong. Expectedly, rules are not working.#") + except Exception as e: + self.logger.error(e) + result = False + try: + self.logger.info("# Now deleting a rule to verify that after rule is deleted,\ + instances requested are granted without any restriction #") + self.ds_obj.delete_and_verify_rule(self.inputs.cfgm_control_ips[0], \ + 'IfmapServer', self.inputs.compute_control_ips[0], 'TestClient') + except Exception as e: + self.logger.error(e) + result = False + self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep (30) + try: + self.logger.info("#### Sending a dummy client request with instance value 3 ##") + self.logger.info("### Client will subscribe to IfmapServer #####") + self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer",\ + instances="3", min_instances="0",\ + client_id=self.inputs.compute_names[0]+":TestClient",\ + remote_addr= self.inputs.compute_control_ips[0],\ + client_type= "TestClient") + sleep(2) + self.logger.debug("# Verifying the number of instances of publishers granted to the client #") + ifmap_server_count = len(self.inputs.cfgm_control_ips) + client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\ + (ds_ip, client=(self.inputs.compute_control_ips[0],\ + "TestClient"), service="IfmapServer") + instances_allocated = len(client_subscribed_service_id) + self.logger.debug("# The instances of publishers allocated to TestClient are %d #" \ + % instances_allocated) + self.logger.debug("# The total number of publishers running of such types are %d #"\ + % ifmap_server_count) + if ifmap_server_count == instances_allocated or (ifmap_server_count > 3 and instances_allocated == 3): + self.logger.info("# Instance field working as expected #") + else: + 
self.logger.error(" # Instance field not working as expected.#") + result = False + except Exception as e: + self.logger.error(e) + result = False + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_rule_when_service_admin_down(self): + ''' Validate that when publisher mentioned in rule is administratively + down, the subscriber mentioned in rule, do not subscribe to any + other publisher. + Also verify that when publisher comes up, the applicable instance + of that client get a subscription from that Publisher. + For testing purpose, I have used DNS-SERVER as publisher and + contrail-vrouter-agent as client. + Steps: + 1. Create a rule using any Publisher and subscriber pair. + 2. Make the Publisher mentioned in the rule as admin down. + 3. Verify that as service is down, the subscriber will not get any + other instance of that service because rule still holds true. + 4. Make the Publisher as admin UP. + 5. Verify that as soon as Publisher is made admin UP, the subscriber + will get that instance of service. + Precondition: Assumption is that setup is having a vrouter connected + to 2 instances of DNS servers running in different subnets + Also, setup requirement of this test case is to have at least + 2 publishers and 2 subscribers. + Both set of publisher and subscriber should be in different network. 
+ ''' + self.ds_obj.skip_discovery_test("dns-server", min_instances=2, different_subnet_flag=False ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + try: + self.logger.info("# Create a rule for control node Dns-Server ##") + self.logger.info("# Subscriber in rule as contrail-vrouter-agent#") + self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[0], 'dns-server',\ + self.inputs.compute_control_ips[0], 'contrail-vrouter-agent:0') + self.logger.info("# Making the admin state of dns-server as *down*# ") + self.ds_obj.update_service(ds_ip,service="dns-server",\ + ip=self.inputs.bgp_control_ips[0],admin_state="down") + except Exception as e: + self.logger.error(e) + result = False + self.logger.debug("#### Waiting for 45 seconds so that TTL expiry for all subscriber happens ###") + sleep (45) + try: + self.logger.debug("# Verifying that as publisher is admin down,\ + the mentioned subscriber in rule do not get any instance of Publisher #") + client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\ + (ds_ip, client=(self.inputs.compute_control_ips[0],\ + "contrail-vrouter-agent:0"), service="dns-server") + instances_allocated = len(client_subscribed_service_id) + if instances_allocated==0: + self.logger.info("# \n As expected, contrail-vrouter-agent running on %s\n \ + is not subscribed to any dns-server as the rule is restricting it to do\n \ + that and publisher mentioned in rule is admin *down*. 
#" % self.inputs.compute_control_ips[0]) + pass + else: + result = False + self.logger.error("# \n Even if rule is present and publisher in rule\n \ + is admin *down*, some publisher got assigned to the subscriber\n \ + contrail-vrouter-agent running on %s .#", self.inputs.compute_control_ips[0]) + service_IPs = [] + for i in range (0,instances_allocated): + service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\ + (ds_ip,client_subscribed_service_id[i]) + service_IPs.append(service_endpoint[0][0]) + self.logger.warn("# The publisher assigned to the client are running at following IPs: %s ###"\ + % service_IPs) + self.logger.info("# Making the admin state of dns-server as *up*# ") + self.ds_obj.update_service(ds_ip,service="dns-server",\ + ip=self.inputs.bgp_control_ips[0],admin_state="up") + self.logger.debug("\n #### Waiting for 5 seconds so that the client \n \ + subscribe to the new subscriber as soon as it comes administratively up ###") + sleep(5) + self.logger.debug("\n # Verifying that as publisher is admin up,\n \ + the mentioned subscriber in rule gets the same instance of Publisher \n \ + as mentioned in rule #") + client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\ + (ds_ip, client=(self.inputs.compute_control_ips[0],\ + "contrail-vrouter-agent:0"), service="dns-server") + instances_allocated = len(client_subscribed_service_id) + service_IPs = [] + for i in range (0,instances_allocated): + service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\ + (ds_ip,client_subscribed_service_id[i]) + service_IPs.append(service_endpoint[0][0]) + if instances_allocated==1 and service_IPs[0]==self.inputs.bgp_control_ips[0]: + self.logger.info("\n # As expected, contrail-vrouter-agent running \n \ + on %s is subscribed to single dns-server as the rule is \n \ + restricting it to do that. 
#" % self.inputs.compute_control_ips[0]) + pass + else: + result = False + self.logger.error("\n # Even if rule is present and publisher in rule\n \ + is admin *up*, some different publishers or no publisher got \n \ + assigned to the subscriber contrail-vrouter-agent running on %s .#"\ + % self.inputs.compute_control_ips[0]) + self.logger.error("# The publisher assigned to the client are running at following IPs: %s###" \ + % service_IPs) + except Exception as e: + self.logger.error(e) + result = False + self.logger.info("# Now deleting the rule before starting new test case #") + self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[0], 'dns-server',\ + self.inputs.compute_control_ips[0], 'contrail-vrouter-agent:0') + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_multiple_rule_same_subscriber(self): + ''' Validate that rule restrict the subscriber irrespective of number + of instances requested by the client. + Also verify that, if multiple rules are present for same client, + more instances of service gets allocated to that client. + For testing purpose, i have used XMPP-SERVER as publisher and + contrail-vrouter-agent as client. + Steps: + 1. Create different rules with same subscriber values and different Publishers. + 2. Verify if rule is working as expected or not + Precondition: Assumption is that setup is having a vrouter connected + to 2 instances of XMPP servers running in different subnets + Also, setup requirement of this test case is to have at least 2 + publishers and 2 subscribers. + Both set of publisher and subscriber should be in different network. 
+ ''' + self.ds_obj.skip_discovery_test("xmpp-server", min_instances=2, different_subnet_flag=False ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + try: + self.logger.info("\n # Create a rule for xmpp-server running on\n \ + control node and subscriber as contrail-vrouter-agent #") + self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[0],'xmpp-server',\ + self.inputs.compute_control_ips[0], 'contrail-vrouter-agent:0') + except Exception as e: + self.logger.error(e) + result = False + self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep (30) + try: + self.logger.debug("# Verifying that client is only subscribed to mentioned Publisher in the rule #") + client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\ + (ds_ip, client=(self.inputs.compute_control_ips[0],\ + "contrail-vrouter-agent:0"), service="xmpp-server") + instances_allocated = len(client_subscribed_service_id) + service_IPs=[] + for i in range (0,instances_allocated): + service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\ + (ds_ip,client_subscribed_service_id[i]) + service_IPs.append(service_endpoint[0][0]) + if instances_allocated==1 and service_IPs[0]==self.inputs.bgp_control_ips[0]: + self.logger.info("\n # Client contrail-vrouter-agent running on %s\n \ + is subscribed to expected xmpp-server as the rule is restricting \n \ + it to do that #" % self.inputs.compute_control_ips[0]) + pass + else: + result = False + self.logger.error("\n # Even if rule is present, subscription\n \ + not happening as expected for contrail-vrouter-agent running on %s#"\ + % self.inputs.compute_control_ips[0]) + self.logger.error("\n # The publisher assigned to the client are\n \ + running at following IPs: %s ###" % service_IPs) + self.logger.error("\n # Expected was 
that client will subscribe only\n \ + to xmpp-server running on %s node" % self.inputs.bgp_control_ips[0]) + except Exception as e: + self.logger.error(e) + result = False + try: + self.logger.info("\n # Create another rule for xmpp-server running on\n \ + control node and subscriber as contrail-vrouter-agent so that \n \ + 2nd instance of xmpp-server gets a Publisher #") + self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[1],'xmpp-server',\ + self.inputs.compute_control_ips[0], 'contrail-vrouter-agent:0') + except Exception as e: + self.logger.error(e) + result = False + self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep (30) + try: + self.logger.debug("\n # Verifying that 2nd instance of the client is\n \ + subscribed to mentioned Publisher in the rule #") + client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\ + (ds_ip, client=(self.inputs.compute_control_ips[0],\ + "contrail-vrouter-agent:0"),service="xmpp-server") + instances_allocated = len(client_subscribed_service_id) + service_IPs=[] + for i in range (0,instances_allocated): + service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\ + (ds_ip,client_subscribed_service_id[i]) + service_IPs.append(service_endpoint[0][0]) + if instances_allocated==2 and service_IPs[0] in self.inputs.bgp_control_ips\ + and service_IPs[1] in self.inputs.bgp_control_ips: + self.logger.info("\n # Client contrail-vrouter-agent running on %s\n \ + is subscribed to expected xmpp-server as the rule is restricting\n \ + it to do that #" % self.inputs.compute_control_ips[0]) + pass + else: + result = False + self.logger.error("\n # Even if 2 rules are present, subscription\n \ + not happening as expected for contrail-vrouter-agent running on %s#"\ + % self.inputs.compute_control_ips[0]) + self.logger.error("\n # The publisher assigned to the client are running\n \ + at following IPs: %s ###" % service_IPs) + self.logger.error("\n # Expected was 
that client will subscribe to\n \ + xmpp-server running on %s and %s node" \ + % (self.inputs.bgp_control_ips[0],self.inputs.bgp_control_ips[1])) + except Exception as e: + self.logger.error(e) + result = False + try: + self.logger.info("# Now deleting the rule before starting new test case #") + self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[0],\ + 'xmpp-server', self.inputs.compute_control_ips[0],'contrail-vrouter-agent:0') + self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[1],\ + 'xmpp-server', self.inputs.compute_control_ips[0],'contrail-vrouter-agent:0') + except Exception as e: + self.logger.error(e) + result = False + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_rule_on_xmpp_do_not_impact_dns(self): + ''' This test case is specifically written to test Bug ID "#1548771" + [Discovery-Rel3.0-Centos-1]: Applying rule on DNS-server affects the rule + entry already applied to XMPP server and vice versa. + (Tested for client type : vrouter-agent) + Steps: + 1. Create 2 different rules with same subscriber as + "contrail-vrouter-agent" and using xmpp-server in rule + 1 and dns-server in rule 2. + 2. Verify that both the rules work independently without impacting each other. + Precondition: Assumption is that setup is having a vrouter connected + to 2 instances of XMPP and DNS servers running in different subnets + Also, setup requirement of this test case is to have at least 2 publishers + and 2 subscribers. + Both set of publisher and subscriber should be in different network. 
+ ''' + self.ds_obj.skip_discovery_test("xmpp-server", min_instances=2, different_subnet_flag=False ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + try: + self.logger.info("\n # Create 2 rules for xmpp-server and dns-server\n \ + running on control node and subscriber as contrail-vrouter-agent#") + self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[0],\ + 'xmpp-server', self.inputs.compute_control_ips[0],\ + 'contrail-vrouter-agent:0') + self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[0],\ + 'dns-server', self.inputs.compute_control_ips[0],\ + 'contrail-vrouter-agent:0') + except Exception as e: + self.logger.error(e) + result = False + self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep (30) + try: + self.logger.debug("\n# Verifying that client is only subscribed to\n \ + mentioned Publishers in the rule #") + client_subscribed_xmpp_service_id = self.ds_obj.get_subscribed_service_id\ + (ds_ip, client=(self.inputs.compute_control_ips[0],\ + "contrail-vrouter-agent:0"), service="xmpp-server") + client_subscribed_dns_service_id = self.ds_obj.get_subscribed_service_id\ + (ds_ip, client=(self.inputs.compute_control_ips[0],\ + "contrail-vrouter-agent:0"), service="dns-server") + instances_allocated_xmpp = len(client_subscribed_xmpp_service_id) + instances_allocated_dns = len(client_subscribed_dns_service_id) + service_IPs_xmpp=[] + service_IPs_dns=[] + for i in range (0,instances_allocated_xmpp): + service_endpoint_xmpp = self.ds_obj.get_service_endpoint_by_service_id\ + (ds_ip,client_subscribed_xmpp_service_id[i]) + service_IPs_xmpp.append(service_endpoint_xmpp[0][0]) + for i in range (0,instances_allocated_dns): + service_endpoint_dns = self.ds_obj.get_service_endpoint_by_service_id\ + 
(ds_ip,client_subscribed_dns_service_id[i]) + service_IPs_dns.append(service_endpoint_dns[0][0]) + if instances_allocated_xmpp==1 and service_IPs_xmpp[0]==self.inputs.bgp_control_ips[0]: + self.logger.info("\n # Client contrail-vrouter-agent running on %s\n \ + is subscribed to expected xmpp-server as the rule is restricting\n \ + it to do that #" % self.inputs.compute_control_ips[0]) + pass + else: + result = False + self.logger.error("\n # Even if rule is present, subscription not\n \ + happening as expected for contrail-vrouter-agent running on %s .#" \ + % self.inputs.compute_control_ips[0]) + self.logger.debug("\n # The publisher assigned to the client are \n \ + running at following IPs: %s ###" % service_IPs_xmpp) + self.logger.debug("\n # Expected was that client will subscribe only\n \ + to xmpp-server running on %s node" % self.inputs.bgp_control_ips[0]) + if instances_allocated_dns==1 and service_IPs_dns[0]==self.inputs.bgp_control_ips[0]: + self.logger.info("\n # Client contrail-vrouter-agent running on %s \n \ + is subscribed to expected dns-server as the rule is restricting\n \ + it to do that #" % self.inputs.compute_control_ips[0]) + pass + else: + result = False + self.logger.error("\n# Even if rule is present, subscription not\n \ + happening as expected for contrail-vrouter-agent running on %s .#" \ + % self.inputs.compute_control_ips[0]) + self.logger.debug("\n# The publisher assigned to the client are \n \ + running at following IPs: %s ###" % service_IPs_xmpp) + self.logger.debug("\n# Expected was that client will subscribe only\n \ + to dns-server running on %s node" % self.inputs.bgp_control_ips[0]) + except Exception as e: + self.logger.error(e) + result = False + try: + self.logger.info("# Now deleting the rule before starting new test case #") + self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[0],\ + 'xmpp-server', self.inputs.compute_control_ips[0],\ + 'contrail-vrouter-agent:0') + 
self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[0],\ + 'dns-server', self.inputs.compute_control_ips[0],\ + 'contrail-vrouter-agent:0') + except Exception as e: + self.logger.error(e) + result = False + assert result, "Test case failed due to some error. Please refer to logs" + + def test_rule_with_vrouter_agent_do_not_impact_other_subscriptions(self): + ''' This test case is specifically written to test Bug ID "#1541321" + [Discovery_R3.0_ubuntu_2704] : Rule mentioning contrail-vrouter-agent + affects all the subscriptions of that client with all Publishers + irrespective of the publisher mentioned in the rule. This happens for + 1/2 cycle of TTL and things recover after that. + Steps: + 1. Create a rule and mention subscriber as "contrail-vrouter-agent" + and using dns-server as publisher. + 2. Verify that the configured rule do not impact subscription of + "contrail-vrouter-agent" to xmpp-server even for one TTL cycle . + Precondition: Assumption is that setup is having a vrouter connected + to 2 instances of XMPP servers running in different subnets + Also, setup requirement of this test case is to have at least + 2 publishers and 2 subscribers. + Both set of publisher and subscriber should be in different network. 
+ ''' + self.ds_obj.skip_discovery_test("xmpp-server", min_instances=2, different_subnet_flag=False ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + try: + self.logger.info("\n # Find the instances of subscription of \n \ + contrail-vrouter-agent to the xmpp-server server #") + xmpp_vrouter_subscription_list = self.ds_obj.get_all_xmpp_servers(ds_ip) + self.logger.info("\n # Create a rule for dns-server running on \n \ + control node and subscriber as contrail-vrouter-agent #") + compute_control_ip = self.inputs.compute_control_ips[0].split('.') + compute_control_ip[2:4] = '0','0' + compute_control_ip = ".".join(compute_control_ip) + "/16" + self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[0],\ + 'dns-server', compute_control_ip, 'contrail-vrouter-agent:0') + self.logger.debug("\n # Verify that subscription of vrouter-agent\n \ + to xmpp-server is not impacted due to the above rule for 90 seconds #") + for i in range(1,60): + new_xmpp_vrouter_subscription_list=self.ds_obj.get_all_xmpp_servers(ds_ip) + sleep(1) + if xmpp_vrouter_subscription_list == new_xmpp_vrouter_subscription_list: + pass + else: + self.logger.warn("\n #### Some assignment change has happened\n \ + for vrouter agent subscription to xmpp-server #####") + self.logger.warn("\n #### Earlier service IDs in use were %s\n \ + and after waiting for %i seconds, the service ID has changed to %s #####"\ + % (xmpp_vrouter_subscription_list,i,new_xmpp_vrouter_subscription_list)) + result = False + break + except Exception as e: + self.logger.error(e) + result = False + try: + self.logger.info("# Now deleting the rule before starting new test case #") + self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[0],\ + 'dns-server', compute_control_ip, 'contrail-vrouter-agent:0') + except Exception as e: + self.logger.error(e) + 
result = False + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_discovery_server_restart_rule_present(self): + ''' Validate that rules are followed even after discovery server restarts. + Steps: + 1. Create rule for any Publisher and subscriber pair and verify + that rule is behaving properly. + 2. Restart the discovery server on all config nodes. + 3. Verify that after discovery server comes up again, rules are + still followed. + Precondition: Assumption is that setup is having a vrouter connected + to 2 instances of XMPP servers running in different subnets + Also, setup requirement of this test case is to have at least + 2 publishers and 2 subscribers. + Both set of publisher and subscriber should be in different network. + ''' + self.ds_obj.skip_discovery_test("IfmapServer", min_instances=2, different_subnet_flag=True ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-control') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + if len(self.inputs.cfgm_control_ips) > 0: + self.logger.info("\n Creating rules corresponding to *IfmapServer*\n \ + running on all Config nodes for *contrail-control* running in same subnets") + for i in range(0,len(self.inputs.cfgm_control_ips)): + cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.') + cfgm_control_ip[3] = '0' + cfgm_control_ip = ".".join(cfgm_control_ip) + "/24" + rule_status = self.ds_obj.add_and_verify_rule(cfgm_control_ip,\ + 'IfmapServer', cfgm_control_ip, 'contrail-control') + if rule_status == False: + result = False + self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep (30) + try: + self.logger.debug("#### Verifying clients subscribed to publishers ###") + for i in range(0,len(self.inputs.cfgm_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + 
(ds_ip, 'contrail-control',\ + self.inputs.cfgm_control_ips[i], 'IfmapServer') + if verification == False: + self.logger.error("Rule not behaving as expected") + result = False + self.logger.debug("#### Stopping the discovery server process on all nodes ###") + for ip in self.inputs.cfgm_ips: + self.inputs.stop_service('contrail-discovery', [ip]) + self.logger.debug("\n #### Waiting for 60 seconds so that all clients\n \ + again try to resubscribe when discovery server is down ###") + sleep(60) + self.logger.debug("#### Starting the discovery server process on all nodes ###") + for ip in self.inputs.cfgm_ips: + self.inputs.start_service('contrail-discovery', [ip]) + for ip in self.inputs.cfgm_ips: + client_status = self.inputs.confirm_service_active(\ + 'contrail-discovery',ip) + if client_status == False: + self.logger.error("Some issue happened after restart of discovery process") + result = False + assert result + self.logger.debug("\n #### Verifying clients subscribed to publishers\n \ + as per rules, after discovery server restart ###") + for i in range(0,len(self.inputs.cfgm_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher(\ + ds_ip, 'contrail-control',\ + self.inputs.cfgm_control_ips[i],'IfmapServer') + if verification == False: + self.logger.error("Rule not behaving as expected") + result = False + except Exception as e: + self.logger.error(e) + result = False + try: + self.logger.info("#### Stopping the discovery server process on all nodes ###") + for i in range(0,len(self.inputs.cfgm_control_ips)): + cfgm_control_ip = self.inputs.cfgm_control_ips[i].split('.') + cfgm_control_ip[3] = '0' + cfgm_control_ip = ".".join(cfgm_control_ip) + "/24" + rule_status = self.ds_obj.delete_and_verify_rule(cfgm_control_ip,\ + 'IfmapServer', cfgm_control_ip, 'contrail-control') + if rule_status == False: + result = False + except Exception as e: + self.logger.error(e) + result = False + assert result, "Test case failed due to 
some error. Please refer to logs" + + @preposttest_wrapper + def test_publisher_restart_rule_present(self): + ''' Validate that rules are followed even after Publisher servers restarts. + Steps: + 1. Create multiple rules for Publisher and subscriber pairs and + verify that all rules are behaving properly. + 2. Restart the Publishers mentioned in the rules on all the + corresponding nodes. + 3. Verify that after Publisher service restart, rules are still followed. + Precondition: Assumption is that setup is having a contrail-control + connected to 2 instances of Ifmap servers running in different subnets + Also, setup requirement of this test case is to have at least 2 + publishers and 2 subscribers. + Both set of publisher and subscriber should be in different network. + ''' + self.ds_obj.skip_discovery_test("xmpp-server", min_instances=2, different_subnet_flag=True ) + self.ds_obj.skip_discovery_test("Collector", min_instances=2, different_subnet_flag=True ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + self.logger.info("\n Creating rules corresponding to *xmpp-server*,\n \ + *dns-server* and *Collector* running on all control nodes for \n \ + *contrail-vrouter-agent* running in same subnets") + for i in range(0,len(self.inputs.bgp_control_ips)): + bgp_control_ip = self.inputs.bgp_control_ips[i].split('.') + bgp_control_ip[3] = '0' + bgp_control_ip = ".".join(bgp_control_ip) + "/24" + rule_status = self.ds_obj.add_and_verify_rule(bgp_control_ip, \ + 'xmpp-server', bgp_control_ip, 'contrail-vrouter-agent:0') + if rule_status == False: + result = False + rule_status = self.ds_obj.add_and_verify_rule(bgp_control_ip, \ + 'dns-server', bgp_control_ip, 'contrail-vrouter-agent:0') + if rule_status == False: + result = False + for i in range(0,len(self.inputs.collector_control_ips)): + 
collector_control_ip = self.inputs.collector_control_ips[i].split('.') + collector_control_ip[3] = '0' + collector_control_ip = ".".join(collector_control_ip) + "/24" + rule_status = self.ds_obj.add_and_verify_rule(collector_control_ip,\ + 'Collector', collector_control_ip, 'contrail-vrouter-agent:0') + if rule_status == False: + result = False + self.logger.debug("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep (30) + try: + self.logger.debug("#### Verifying clients subscribed to publishers ###") + for i in range(0,len(self.inputs.compute_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrail-vrouter-agent:0',\ + self.inputs.compute_control_ips[i], 'xmpp-server') + if verification == False: + self.logger.error("Rule not behaving as expected") + result = False + for i in range(0,len(self.inputs.compute_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrail-vrouter-agent:0',\ + self.inputs.compute_control_ips[i], 'dns-server') + if verification == False: + self.logger.error("Rule not behaving as expected") + result = False + for i in range(0,len(self.inputs.compute_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrail-vrouter-agent:0',\ + self.inputs.compute_control_ips[i], 'Collector') + if verification == False: + self.logger.error("Rule not behaving as expected") + result = False + self.logger.info("#### Restarting the xmpp, dns and Collector server process on all nodes ###") + for ip in self.inputs.collector_ips: + self.inputs.restart_service('contrail-collector', [ip]) + for ip in self.inputs.bgp_ips: + self.inputs.restart_service('contrail-control', [ip]) + self.inputs.restart_service('contrail-dns', [ip]) + for ip in self.inputs.collector_ips: + client_status = self.inputs.confirm_service_active(\ + 'contrail-collector', ip) + if client_status 
== False: + self.logger.error("Some issue happened after restart of server process") + result = False + assert result + for ip in self.inputs.bgp_ips: + client_status = self.inputs.confirm_service_active(\ + 'contrail-control', ip) + if client_status == False: + self.logger.error("Some issue happened after restart of server process") + result = False + assert result + for ip in self.inputs.bgp_ips: + client_status = self.inputs.confirm_service_active(\ + 'contrail-dns', ip) + if client_status == False: + self.logger.error("Some issue happened after restart of server process") + result = False + assert result + self.logger.debug("\n #### Waiting for 30 seconds so that all clients\n \ + again try to resubscribe when discovery server is down ###") + sleep(30) + self.logger.debug("\n #### Verifying clients subscribed to publishers\n \ + should follow rules even after publisher process restart ###") + for i in range(0,len(self.inputs.compute_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrail-vrouter-agent:0',\ + self.inputs.compute_control_ips[i], 'xmpp-server') + if verification == False: + self.logger.error("Rule not behaving as expected") + result = False + for i in range(0,len(self.inputs.compute_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrail-vrouter-agent:0',\ + self.inputs.compute_control_ips[i], 'dns-server') + if verification == False: + self.logger.error("Rule not behaving as expected") + result = False + for i in range(0,len(self.inputs.compute_control_ips)): + verification = self.ds_obj.verify_client_subscription_to_expected_publisher\ + (ds_ip, 'contrail-vrouter-agent:0',\ + self.inputs.compute_control_ips[i], 'Collector') + if verification == False: + self.logger.error("Rule not behaving as expected") + result = False + except Exception as e: + self.logger.error(e) + result = False + try: + self.logger.info("#### Deleting the 
rules at end of test acse ###") + for i in range(0,len(self.inputs.bgp_control_ips)): + bgp_control_ip = self.inputs.bgp_control_ips[i].split('.') + bgp_control_ip[3] = '0' + bgp_control_ip = ".".join(bgp_control_ip) + "/24" + rule_status = self.ds_obj.delete_and_verify_rule( bgp_control_ip,\ + 'xmpp-server', bgp_control_ip, 'contrail-vrouter-agent:0') + if rule_status == False: + result = False + rule_status = self.ds_obj.delete_and_verify_rule( bgp_control_ip,\ + 'dns-server', bgp_control_ip, 'contrail-vrouter-agent:0') + if rule_status == False: + result = False + for i in range(0,len(self.inputs.collector_control_ips)): + collector_control_ip = self.inputs.collector_control_ips[i].split('.') + collector_control_ip[3] = '0' + collector_control_ip = ".".join(collector_control_ip) + "/24" + rule_status = self.ds_obj.delete_and_verify_rule(collector_control_ip,\ + 'Collector', collector_control_ip, 'contrail-vrouter-agent:0') + if rule_status == False: + result = False + except Exception as e: + self.logger.error(e) + result = False + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_auto_load_balance_Ifmap(self): + ''' Validate that auto load balance works correctly for IfmapServer. + Steps: + 1. Verify that normal load balancing is working correctly by + default on IfmapServer. + 2. Set auto load balance as *True* and stop any one of the IfmapServers. + 3. Verify that stopped Server loses all it's subscribers. + 4. Again start the IfmapServer which was stopped earlier. + 5. Verify auto load balancing takes place. 
+ Precondition: Assumption is that setup is having at least 3 Ifmap Servers + ''' + self.ds_obj.skip_discovery_test("IfmapServer", min_instances=3, different_subnet_flag=False ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'supervisor-control') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + self.logger.info("# Setting auto load balance to true in contrail-discovery.conf file #") + assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="IFMAPSERVER",policy='dynamic-load-balance') + try: + self.logger.debug("# Verifying that discovery server auto load balance for 'IfmapServer' #") + self.logger.info("# Stopping the IfmapServer on one of the config node until it looses all subscribers #") + self.inputs.stop_service('supervisor-config',\ + host_ips=[self.inputs.cfgm_ips[0]]) + self.logger.debug("# Waiting for 45 seconds to wait for server to lose all subscriptions #") + sleep(45) + count=self.ds_obj.get_service_in_use(ds_ip,(self.inputs.cfgm_control_ips[0],\ + 'IfmapServer')) + if count == 0: + pass + else: + self.logger.error("\n # Even if Server is not running, it still\n \ + has %d *in use* subscription. 
Something is wrong #" % count) + self.inputs.start_service('supervisor-config',\ + host_ips=[self.inputs.cfgm_ips[0]]) + self.inputs.confirm_service_active(\ + 'supervisor-config',self.inputs.cfgm_ips[0]) + self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="IFMAPSERVER",policy='load-balance') + result = False + assert result + self.logger.info("\n # Starting the IfmapServer on one of the config node\n \ + expecting that subscriptions will happen again #") + self.inputs.start_service('supervisor-config',\ + host_ips=[self.inputs.cfgm_ips[0]]) + client_status = self.inputs.confirm_service_active(\ + 'supervisor-config',self.inputs.cfgm_ips[0]) + if client_status == False: + self.logger.error("# Some issue happened after restart of config server #") + self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="IFMAPSERVER",policy='load-balance') + result = False + assert result + self.logger.debug("# Waiting for 30 seconds for restarted server to again get all subscriptions #") + sleep(30) + self.logger.debug("# Verifying that auto load balance worked properly or not after service restart #") + load_balance = self.ds_obj.check_load_balance(ds_ip, 'IfmapServer') + if load_balance == False: + result=False + except Exception as e: + self.logger.error(e) + result = False + self.logger.info("# Setting policy to 'load-balance' in contrail-discovery.conf file #") + assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="IFMAPSERVER",policy='load-balance') + try: + self.logger.debug("\n # Verifying that discovery server do not do\n \ + auto load balance for *IfmapServer* as policy is set to 'load-balance' #") + self.logger.info("\n # Stopping the IfmapServer on one of the config\n \ + node until it looses all subscribers #") + self.inputs.stop_service('supervisor-config',\ + host_ips=[self.inputs.cfgm_ips[0]]) + self.logger.debug("# Waiting for 45 seconds to wait for server to lose all 
subscriptions #") + sleep(45) + count=self.ds_obj.get_service_in_use(ds_ip,(self.inputs.cfgm_control_ips[0],\ + 'IfmapServer')) + if count == 0: + pass + else: + self.logger.error("\n # Even if Server is not running, it still has %d\n \ + *in use* subscription. Something is wrong #" % count) + result = False + self.inputs.start_service('supervisor-config',\ + host_ips=[self.inputs.cfgm_ips[0]]) + self.inputs.confirm_service_active(\ + 'supervisor-config',self.inputs.cfgm_ips[0]) + assert result + self.logger.info("\n # Starting the IfmapServer on one of the config node\n \ + expecting that re-subscription will not happen again as auto load balance is off #") + self.inputs.start_service('supervisor-config',\ + host_ips=[self.inputs.cfgm_ips[0]]) + client_status = self.inputs.confirm_service_active(\ + 'supervisor-config',self.inputs.cfgm_ips[0]) + if client_status == False: + self.logger.error("# Some issue happened after restart of config server #") + result = False + assert result + self.logger.debug("\n # Waiting for 30 seconds to wait for restarted server\n \ + to give time in case any client subscribes to this server. Not expecting this to happen #") + sleep(30) + self.logger.debug("\n # Verifying that as auto load balance was off,\n \ + the restarted service is not used by any subscriber #") + count=self.ds_obj.get_service_in_use(ds_ip, (self.inputs.cfgm_control_ips[0],\ + 'IfmapServer')) + if count == 0: + pass + else: + self.logger.error("\n # Even if Server has just restarted and \n \ + auto load balance is off, it has got new subscriptions. Something is wrong #") + self.logger.error("# Total subscribers which got attached to restarted service are %d #"\ + % count) + result = False + except Exception as e: + self.logger.error(e) + result = False + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_auto_load_balance_xmpp(self): + ''' Validate that auto load balance works correctly for XmppServer. 
+ This script also validates Bug 1395099 : Trigger subscription + from discovery client for faster convergence + Steps: + 1. Verify that normal load balancing is working correctly by default + on Xmpp-Server. + 2. Set auto load balance as *True* and stop any one of the Xmpp-Server. + 3. Verify that stopped Server loses all it's subscribers. + 4. Again start the Xmpp-Server which was stopped earlier. + 5. Verify auto load balancing takes place. + Precondition: Assumption is that setup is having at least 3 XMPP Servers + ''' + self.ds_obj.skip_discovery_test("xmpp-server", min_instances=3, different_subnet_flag=False ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + self.logger.info("# Setting auto load balance to true in contrail-discovery.conf file #") + assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="XMPP-SERVER",policy='dynamic-load-balance') + try: + self.logger.debug("# Verifying that discovery server auto load balance for 'XmppServer' #") + self.logger.info("# Stopping the XmppServer on one of the control node until it looses all subscribers #") + self.inputs.stop_service('contrail-control',\ + host_ips=[self.inputs.bgp_ips[0]]) + self.logger.debug("# Waiting for 20 seconds to wait for server to lose all subscriptions #") + sleep(20) + count=self.ds_obj.get_service_in_use(ds_ip,(self.inputs.bgp_control_ips[0],\ + 'xmpp-server')) + if count == 0: + self.logger.info("## After XMPP server is made down, it looses all subscriptions within 20 seconds") + pass + else: + self.logger.error("\n # Even if Server is not running, it still has %d\n \ + *in use* subscription. 
Something is wrong #" % count) + result = False + self.inputs.start_service('contrail-control',\ + host_ips=[self.inputs.bgp_ips[0]]) + self.inputs.confirm_service_active(\ + 'contrail-control',self.inputs.bgp_ips[0]) + self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="XMPP-SERVER",policy='load-balance') + assert result + self.logger.info("\n# Starting the XmppServer on one of the control node\n \ + expecting that subscriptions will happen again #") + self.inputs.start_service('contrail-control',\ + host_ips=[self.inputs.bgp_ips[0]]) + client_status = self.inputs.confirm_service_active(\ + 'contrail-control',self.inputs.bgp_ips[0]) + if client_status == False: + self.logger.error("# Some issue happened after restart of control server #") + self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="XMPP-SERVER",policy='load-balance') + result = False + assert result + self.logger.debug("# Waiting for 30 seconds for restarted server to again get all subscriptions#") + sleep(30) + self.logger.debug("# Verifying that auto load balance worked properly or not after service restart #") + load_balance = self.ds_obj.check_load_balance(ds_ip, 'xmpp-server') + if load_balance == False: + result=False + except Exception as e: + self.logger.error(e) + result = False + self.logger.info("# Setting policy as 'load-balance' in contrail-discovery.conf file #") + assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="XMPP-SERVER",policy='load-balance') + try: + self.logger.debug("\n# Verifying that discovery server do not do\n \ + auto load balance for *XmppServer* as policy is set to 'load-balance' #") + self.logger.info("\n# Stopping the XmppServer on one of the control \n \ + node until it looses all subscribers #") + self.inputs.stop_service('contrail-control',\ + host_ips=[self.inputs.bgp_ips[0]]) + self.logger.debug("# Waiting for 20 seconds to wait for server to lose all subscriptions #") + 
sleep(20) + count=self.ds_obj.get_service_in_use(ds_ip,(self.inputs.bgp_control_ips[0],\ + 'xmpp-server')) + if count == 0: + self.logger.info("## After XMPP server is made down, it looses all subscriptions within 20 seconds") + pass + else: + self.logger.error("\n# Even if Server is not running, it still has %d\n\ + *in use* subscription. Something is wrong #" % count) + self.inputs.start_service('contrail-control',\ + host_ips=[self.inputs.bgp_ips[0]]) + self.inputs.confirm_service_active(\ + 'contrail-control',self.inputs.bgp_ips[0]) + result = False + assert result + self.logger.info("\n# Starting the XmppServer on one of the control node\n \ + expecting that re-subscription will not happen again as auto load balance is off #") + self.inputs.start_service('contrail-control',\ + host_ips=[self.inputs.bgp_ips[0]]) + client_status = self.inputs.confirm_service_active(\ + 'contrail-control',self.inputs.bgp_ips[0]) + if client_status == False: + self.logger.error("# Some issue happened after restart of control server #") + result = False + assert result + self.logger.debug("\n# Waiting for 30 seconds for restarted server\n \ + to give time in case any client subscribes to this server. \n \ + Not expecting this to happen# ") + sleep(30) + self.logger.debug("\n# Verifying that as auto load balance was off,\n \ + the restarted service is not used by any subscriber #") + count=self.ds_obj.get_service_in_use(ds_ip,(self.inputs.bgp_control_ips[0],\ + 'xmpp-server')) + if count == 0: + pass + else: + self.logger.error("\n# Even if Server has just restarted and \n \ + auto load balance is off, it has got new subscriptions. Something is wrong #") + self.logger.error("# Total subscribers which got attached to restarted service are %d #"\ + % count) + result = False + except Exception as e: + self.logger.error(e) + result = False + assert result, "Test case failed due to some error. 
Please refer to logs" + + @preposttest_wrapper + def test_auto_load_balance_collector(self): + ''' Validate that auto load balance works correctly for Collector. + Steps: + 1. Set auto load balance as *True* and stop any one of the Collector. + 2. Verify that stopped Server loses all it's subscribers. + 3. Again start the Collector which was stopped earlier. + 4. Verify auto load balancing takes place. + Precondition: Assumption is that setup is having at least 3 Collectors + ''' + self.ds_obj.skip_discovery_test("Collector", min_instances=3, different_subnet_flag=False ) + result = True + ds_ip = self.inputs.cfgm_ip + self.logger.debug("#### Changing min and max TTL values for testing purpose ##") + assert self.ds_obj.modify_discovery_conf_file_params('change_min_max_ttl',\ + ttl_min=30, ttl_max=30) + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + self.logger.info("#### Restarting the required subscriber services so that TTL takes effect immediately ###") + for ip in self.inputs.collector_ips: + self.inputs.restart_service('supervisor-analytics', [ip]) + for ip in self.inputs.compute_ips: + self.inputs.restart_service('supervisor-vrouter', [ip]) + for ip in self.inputs.bgp_ips: + self.inputs.restart_service('supervisor-control', [ip]) + for ip in self.inputs.cfgm_ips: + self.inputs.restart_service('supervisor-config', [ip]) + for ip in self.inputs.webui_ips: + self.inputs.restart_service('supervisor-webui', [ip]) + for ip in self.inputs.database_ips: + self.inputs.restart_service('contrail-database', [ip]) + self.inputs.restart_service('contrail-database-nodemgr', [ip]) + client_status = ContrailStatusChecker() + client_status.wait_till_contrail_cluster_stable(self.inputs.host_ips) + self.logger.info("# Setting auto load balance to true in contrail-discovery.conf file #") + assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="COLLECTOR",policy='dynamic-load-balance') + try: + 
self.logger.debug("# Verifying that discovery server auto load balance for 'Collector'#") + self.logger.info("# Stopping the Collector on one of the Analytic node until it looses all subscribers #") + self.inputs.stop_service('contrail-collector',\ + host_ips=[self.inputs.collector_ips[0]]) + self.logger.debug("# Waiting for 45 seconds to wait for server to lose all subscriptions #") + sleep(45) + count=self.ds_obj.get_service_in_use(ds_ip,\ + (self.inputs.collector_control_ips[0],'Collector')) + if count == 0: + pass + else: + self.logger.error("\n # Even if Server is not running,\n \ + it still has %d *in use* subscription. Something is wrong #" % count) + result = False + self.inputs.start_service('contrail-collector',\ + host_ips=[self.inputs.collector_ips[0]]) + self.inputs.confirm_service_active(\ + 'contrail-collector',self.inputs.collector_ips[0]) + self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="COLLECTOR",policy='load-balance') + assert result + self.logger.info("\n # Starting the Collector on one of the Analytic node\n \ + expecting that subscriptions will happen again #") + self.inputs.start_service('contrail-collector',\ + host_ips=[self.inputs.collector_ips[0]]) + client_status = self.inputs.confirm_service_active(\ + 'contrail-collector',self.inputs.collector_ips[0]) + if client_status == False: + self.logger.error("# Some issue happened after restart of Collector#") + self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="COLLECTOR",policy='load-balance') + result = False + assert result + self.logger.debug("# Waiting for 30 seconds for restarted server to again get all subscriptions #") + sleep(30) + self.logger.debug("# Verifying that auto load balance worked properly or not after service restart #") + load_balance = self.ds_obj.check_load_balance(ds_ip, 'Collector') + if load_balance == False: + result=False + except Exception as e: + self.logger.error(e) + result = False + 
self.logger.info("# Setting policy as 'load-balance' in contrail-discovery.conf file #") + assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="COLLECTOR",policy='load-balance') + try: + self.logger.debug("\n # Verifying that discovery server do not do\n \ + auto load balance for *Collector* as it is set to load-balance #") + self.logger.info("\n # Stopping the Collector on one of the Analytic node\n \ + until it looses all subscribers #") + self.inputs.stop_service('contrail-collector',\ + host_ips=[self.inputs.collector_ips[0]]) + self.logger.debug("# Waiting for 45 seconds to wait for server to lose all subscriptions #") + sleep(45) + count=self.ds_obj.get_service_in_use(ds_ip,\ + (self.inputs.collector_control_ips[0],'Collector')) + if count == 0: + pass + else: + self.logger.error("\n # Even if Server is not running, it still has %d\n \ + *in use* subscription. Something is wrong #" % count) + self.inputs.start_service('contrail-collector',\ + host_ips=[self.inputs.collector_ips[0]]) + self.inputs.confirm_service_active(\ + 'contrail-collector',self.inputs.collector_ips[0]) + result = False + assert result + self.logger.info("\n # Starting the Collector on one of the Analytic node\n \ + expecting that re-subscription will not happen again as auto load balance is off # ") + self.inputs.start_service('contrail-collector',\ + host_ips=[self.inputs.collector_ips[0]]) + client_status = self.inputs.confirm_service_active(\ + 'contrail-collector',self.inputs.collector_ips[0]) + if client_status == False: + self.logger.error("# Some issue happened after restart of Collector #") + result = False + assert result + self.logger.debug("\n # Waiting for 30 seconds for restarted server\n \ + to give time in case any client subscribes to this server. 
Not expecting this to happen #") + sleep(30) + self.logger.debug("\n # Verifying that as auto load balance was off,\n \ + the restarted service is not used by any subscriber #") + count = self.ds_obj.get_service_in_use(ds_ip,\ + (self.inputs.collector_control_ips[0],'Collector')) + if count == 0: + pass + else: + self.logger.error("\n # Even if Server has just restarted and \n \ + auto load balance is off, it has got new subscriptions. Something is wrong #" ) + self.logger.error("# Total subscribers which got attached to restarted service are %d #" % count) + result = False + except Exception as e: + self.logger.error(e) + result = False + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_rules_preferred_over_auto_load_balance(self): + ''' Validate that rules always takes precedence over auto load balance. + Also verify that when rules are deleted, auto load balance takes its effect. + Steps: + 1. Verify that normal load balancing is working correctly by default + on XMpp-Server. + 2. Set auto load balance as *True* and stop any one of the Xmpp-Server. + 3. Create multiple rules with single xmpp-server to subscribe to all + vrouter-agents in the topology. + 4. Verify that rule is preferred over load balancing and no other + xmpp-server in the topology gets any subscription. + 5. Delete the rules and verify that auto load balancing takes place. 
+ Precondition: Assumption is that setup is having at least 3 XMPP Servers + Also, all XMPP Servers should be in different subnet + ''' + self.ds_obj.skip_discovery_test("xmpp-server", min_instances=3, different_subnet_flag=False ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + self.logger.info("# Setting auto load balance to true in contrail-discovery.conf file #") + assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="XMPP-SERVER",policy='dynamic-load-balance') + self.logger.debug("# Waiting for 30 seconds to wait for auto load balance to happen #") + sleep(30) + try: + self.logger.info("# Verifying that discovery server is properly load balancing for 'XmppServer' # ") + load_balance = self.ds_obj.check_load_balance(ds_ip,'xmpp-server') + if load_balance == False: + result=False + except Exception as e: + self.logger.error(e) + result = False + if len(self.inputs.bgp_control_ips) > 0: + self.logger.info("\n # Creating rules corresponding to *xmpp-server*\n \ + so that all *contrail-vrouter-agent* on any network connects to\n \ + *xmpp-server* running on cfgm0 #") + for i in range(0,len(self.inputs.compute_control_ips)): + rule_status = self.ds_obj.add_and_verify_rule(\ + self.inputs.bgp_control_ips[0], 'xmpp-server',\ + self.inputs.compute_control_ips[i], 'contrail-vrouter-agent:0') + if rule_status == False: + result = False + self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriber happens ###") + sleep (30) + self.logger.info("#### Verifying that all vrouter-agents subscribe to control node xmpp-server only ###") + try: + in_use_list = [] + for i in range(0,len(self.inputs.bgp_control_ips)): + in_use_list_elem = self.ds_obj.get_service_in_use\ + (ds_ip, (self.inputs.bgp_control_ips[i],'xmpp-server')) + 
in_use_list.append(in_use_list_elem) + if in_use_list[0] > 0 and sum(in_use_list[1:len(in_use_list)]) == 0: + self.logger.info("# Rule working as expected. All clients subscribed only to cfgm0 xmpp-server #") + self.logger.info("# Even if Auto load balance is *True*, rule is taking the priority #") + pass + else: + self.logger.error("\n# Even if rule is applied, rule is not working as expected.\n \ + May be auto load balance being *True* is creating issue #") + self.logger.error("\n# It was expected that only cfgm0 xmpp-server\n \ + will have subscriptions and rest of the xmpp-servers will not have any subscriptions #") + self.logger.error("\n# The *in-use* list for all xmpp-servers is %s#"\ + % in_use_list) + result = False + except Exception as e: + self.logger.error(e) + result = False + for i in range(0,len(self.inputs.compute_control_ips)): + rule_status = self.ds_obj.delete_and_verify_rule(\ + self.inputs.bgp_control_ips[0], 'xmpp-server',\ + self.inputs.compute_control_ips[i], 'contrail-vrouter-agent:0') + if rule_status == False: + result = False + try: + self.logger.info("\n # Waiting for 60 seconds(2 TTL cycles)\n \ + to wait for re-subscription and load-balancing to happen after deleting rules #") + sleep(60) + self.logger.info("\n # Verifying that discovery server \n \ + auto load balance for 'XmppServer' as soon as rules are deleted #") + load_balance = self.ds_obj.check_load_balance(ds_ip,'xmpp-server') + if load_balance == False: + result=False + except Exception as e: + self.logger.error(e) + result = False + self.logger.info(" # Deleting the policy configuration from contrail-discovery.conf file #") + assert self.ds_obj.modify_discovery_conf_file_params( 'del_policy',\ + publisher_type="XMPP-SERVER") + assert result, "Test case failed due to some error. 
Please refer to logs" + + @preposttest_wrapper + def test_service_in_use_list(self): + ''' Validate that subscribe request with instance value as 0 and having + service-in-use-list is considered a subscription request and + publishers are assigned to it properly. + Steps: + 1. Get in-use count of publishers before sending a subscribe + request having service-in-use list + 2. Send a subscribe request with instance value as '0' and + service-in-use list present in that subscribe request. + 3. See if the in-use count of the publisher increases and client + get subscribed successfully. + Precondition: Assumption is that setup is having at least 3 Ifmap Servers + ''' + self.ds_obj.skip_discovery_test("IfmapServer", min_instances=3, different_subnet_flag=False ) + result = True + ds_ip = self.inputs.cfgm_ip + try: + self.logger.debug("#### Changing min and max TTL values for testing purpose ##") + assert self.ds_obj.modify_discovery_conf_file_params(operation='change_min_max_ttl',\ + ttl_min=30, ttl_max=30) + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + self.logger.info("\n# Verifying that if a subscriber has a service in use list,\n\ + same publishers are assigned to it as mentioned in the list.# ") + self.logger.info("\n#### Getting the in-use count of all Ifmap Servers \n\ + before sending dummy subscribe request ###") + in_use_list = [] + for i in range(0,len(self.inputs.cfgm_control_ips)): + in_use_list_elem = self.ds_obj.get_service_in_use(ds_ip,\ + (self.inputs.cfgm_control_ips[i],'IfmapServer')) + in_use_list.append(in_use_list_elem) + sum_in_use_bfr_subscribe_request = sum(in_use_list) + self.logger.info("\n#### Total in-use clients subscribed to IfmapServer are %d #####"\ + % sum_in_use_bfr_subscribe_request) + self.logger.info("\n#### Sending a dummy client request with instance value as 0\n\ + to subscribe to IfmapServer #####") + self.logger.info("\n#### The dummy request will have a service-in-use-list \n\ + 
containing IPs of all Ifmap Server present in the network #####") + self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer",\ + instances="0", min_instances=len(self.inputs.cfgm_control_ips),\ + client_id=self.inputs.compute_names[0]+":TestClient",\ + remote_addr=self.inputs.compute_control_ips[0],client_type="TestClient",\ + svc_in_use_list_present=True,svc_in_use_list=self.inputs.cfgm_control_ips) + sleep(2) + self.logger.info("\n#### Getting the in-use count of all Ifmap Servers \n\ + after sending dummy subscribe request ###") + in_use_list = [] + for i in range(0,len(self.inputs.cfgm_control_ips)): + in_use_list_elem = self.ds_obj.get_service_in_use(ds_ip, \ + (self.inputs.cfgm_control_ips[i],'IfmapServer')) + in_use_list.append(in_use_list_elem) + sum_in_use_aftr_subscribe_request = sum(in_use_list) + self.logger.info("\n Total in-use clients subscribed to IfmapServer after dummy request are %d"\ + % sum_in_use_aftr_subscribe_request) + client_subscribed_service_id = self.ds_obj.get_subscribed_service_id(\ + ds_ip, client=(self.inputs.compute_control_ips[0],\ + "TestClient"), service="IfmapServer") + instances_allocated = len(client_subscribed_service_id) + service_IPs=[] + for i in range (0,instances_allocated): + service_endpoint = self.ds_obj.get_service_endpoint_by_service_id(\ + ds_ip,client_subscribed_service_id[i]) + service_IPs.append(service_endpoint[0][0]) + self.logger.info("\n# The publishers mentioned in service-in-use list are %s\n\ + and the client is actually subscribed to following publishers %s.######## " \ + % (self.inputs.cfgm_control_ips,service_IPs)) + if instances_allocated == len(self.inputs.cfgm_control_ips) and \ + sum_in_use_aftr_subscribe_request > sum_in_use_bfr_subscribe_request: + self.logger.info("\n# The subscribe request with instance as 0 \n\ + and service-in-use list has subscribed to expected publishers.######## ") + else: + self.logger.info("\n# Something went wrong. 
\n \ + Expected Publishers not assigned to client request having service in use list ######## ") + result=False + self.logger.info("\n##### Waiting for 30 seconds so that dummy client request\n \ + ages out and do not interfere with other test cases ######") + sleep(30) + except Exception as e: + self.logger.error(e) + result = False + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_white_list_security(self): + ''' To prevent unauthorized publish or subscribe requests to effect + discovery server state (and assuming such requests are coming through + load-balancer such ha-proxy), discovery server to apply configured + publish and subscribe white-lists to incoming IP addresses as obtained + from X-Forwarded-For header. + Load-Balancer must be enabled to forward client's real IP address + in X-Forwarded-For header to discovery servers. + Steps: + 1. Configure subscriber and publisher white list and save it in + contrail-discovery.conf file. + 2. Send publish/subscribe requests with X-Forwarded-for headers with + IPs same as present in white list + 3. Verify that publish/subscribe requests are processed correctly + by discovery server + 4. Send publish/subscribe requests with X-Forwarded-for headers + with IPs not present in white list + 5. Verify that publish/subscribe requests are rejected by discovery server. + 6. Delete the white list configurations from contrail-discovery.conf file. + 7. Send publish/subscribe requests with X-Forwarded-for headers + with IPs not present in white list + 8. 
Verify that publish/subscribe requests are processed correctly + by discovery server + ''' + result = True + ds_ip = self.inputs.cfgm_ip + try: + self.logger.debug("#### Changing min and max TTL values for testing purpose ##") + assert self.ds_obj.modify_discovery_conf_file_params('change_min_max_ttl',\ + ttl_min=30, ttl_max=30) + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + self.logger.info("\n # Configure white list for publishers\n \ + and subscriber in contrail-discovery.conf file # ") + self.ds_obj.white_list_conf_file("publisher", '1.1.1.0/24', '2.2.2.0/24') + self.ds_obj.white_list_conf_file("subscriber", '1.1.1.0/24', '2.2.2.0/24') + DiscoveryServerUtils.POST_HEADERS={'Content-type': 'application/json'\ + , 'X-Forwarded-For': "1.1.1.1"} + self.logger.info("Sending a synthetic publish request to verify publishers white list") + response = self.ds_obj.publish_service_to_discovery(ds_ip,\ + service="Test_Pub_1",ip="1.1.1.1", port ="123") + if self.ds_obj.get_all_services_by_service_name(ds_ip, service="Test_Pub_1")==[]: + result = False + self.logger.error("\n#### Failure!! The requested publish request\n\ + not accepted by discovery server even if the IP was present in\n \ + Publisher white list ###") + else: + self.logger.info("\n#### Success!! The requested publish request\n\ + accepted by discovery server as IP was present in Publisher white list") + sleep(2) + DiscoveryServerUtils.POST_HEADERS = {'Content-type': 'application/json',\ + 'X-Forwarded-For': "3.3.3.3"} + response = self.ds_obj.publish_service_to_discovery(ds_ip,\ + service="Test_Pub_2",ip="3.3.3.3", port ="123") + if self.ds_obj.get_all_services_by_service_name(ds_ip,\ + service="Test_Pub_2") == []: + self.logger.info("\n#### Success!! The requested publish request\n\ + not accepted by discovery as IP was not present in Publisher white list") + else: + result = False + self.logger.error("\n#### Failure!! 
The requested publish request\n\ + accepted by discovery server even if the IP was not present in Publisher white list") + self.logger.info("Sending a synthetic subscribe request to verify subscribers white list") + DiscoveryServerUtils.POST_HEADERS = {'Content-type': 'application/json',\ + 'X-Forwarded-For': "2.2.2.2"} + self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer",\ + instances="2", client_id=self.inputs.compute_names[0]+\ + ":TestClient_1",remote_addr=self.inputs.compute_control_ips[0],\ + client_type= "TestClient_1") + if self.ds_obj.get_subscribed_service_id(ds_ip,client=(\ + self.inputs.compute_control_ips[0], "TestClient_1"),\ + service="IfmapServer") == []: + result = False + self.logger.error("\n#### Failure!! The requested subscribe request\n\ + not accepted by discovery server even if the IP was present\n\ + in Subscriber white list ###") + else: + self.logger.info("\n#### Success!! The requested subscribe request\n\ + accepted by discovery server as IP was present in Subscriber white list") + DiscoveryServerUtils.POST_HEADERS={'Content-type': 'application/json',\ + 'X-Forwarded-For': "3.3.3.3"} + self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer",\ + instances="2",client_id=self.inputs.compute_names[0]+ + ":TestClient_2",remote_addr= self.inputs.compute_control_ips[0],\ + client_type= "TestClient_2") + if self.ds_obj.get_subscribed_service_id(ds_ip, client=(self.inputs.compute_control_ips[0],\ + "TestClient_2"), service="IfmapServer") == []: + self.logger.info("\n#### Success!! The requested subscribe request \n\ + not accepted by discovery server as IP was not present in Subscriber white list") + else: + result = False + self.logger.error("\n#### Failure!! 
The requested subscribe request\n\ + accepted by discovery server even if the IP was not present in Subscriber white list") + self.logger.info("Deleting the configurations of white list to clean up for next test case") + assert self.ds_obj.modify_discovery_conf_file_params( 'delete_white_list',\ + publish=True, subscribe=True) + self.logger.info("\n# Verify that when white list is deleted, \n\ + then X-Forwarded-Header does not hold relevance and all requests are accepted") + response = self.ds_obj.publish_service_to_discovery(ds_ip,\ + service="Test_Pub_2",ip="3.3.3.3", port ="123") + if self.ds_obj.get_all_services_by_service_name(ds_ip, service="Test_Pub_2") == []: + result = False + self.logger.error("\nFailure!! The requested publish request \n\ + not accepted by discovery server even after deleting publish white list") + else: + self.logger.info("\n#### Success!! The requested publish request\n\ + accepted by discovery server as Publisher white list has been deleted") + self.ds_obj.subscribe_service_from_discovery(ds_ip, service="IfmapServer",\ + instances="2",client_id=self.inputs.compute_names[0]+\ + ":TestClient_2",remote_addr= self.inputs.compute_control_ips[0],\ + client_type= "TestClient_2") + if self.ds_obj.get_subscribed_service_id(ds_ip,client=(self.inputs.compute_control_ips[0],\ + "TestClient_2"), service="IfmapServer") == []: + result = False + self.logger.error("\nFailure!! The requested subscribe request\n\ + not accepted by discovery server even if Subscriber white list has been deleted") + else: + self.logger.info("\nSuccess!! 
The requested subscribe request\n\ + accepted by discovery server as Subscriber white list has been deleted") + self.logger.info("\nWaiting for 30 seconds so that dummy client request\n\ + ages out and do not interfere with other test cases ######") + sleep(30) + except Exception as e: + self.logger.error(e) + result = False + DiscoveryServerUtils.POST_HEADERS={'Content-type': 'application/json'} + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_keystone_auth_security(self): + ''' + Discovery server to require admin keystone credentials to perform + load-balance and setting of admin state. Discovery server will expect + admin token in X-Auth-Token header of incoming request. The token + is sent to keystone for validation and action is only performed if a + valid admin token is present. Otherwise 401 HTTP code is returned + Steps: + 1. Configure authentication as keystone in contrail-dicovery.conf file. + Don't configure the credentials + 2. Attempt admin-state change, oper-state change and load-balance + trigger and expect them to fail as only auth has been configured. + 3. Configure authentication as keystone in contrail-dicovery.conf file. + Configure the credentials as well. + 4. Attempt admin-state change, oper-state change and load-balance + trigger and expect them to pass as auth and it's credentials has + been configured. 
+ ''' + result = True + ds_ip = self.inputs.cfgm_ip + try: + self.logger.info("# Configure authentication as *True* in contrail-discovery.conf file # ") + assert self.ds_obj.modify_discovery_conf_file_params('add_keystone_auth',\ + auth="keystone", add_values = "False") + self.logger.debug("#Verify that all requests fails if Auth is True and credentials are not mentioned#") + response = self.ds_obj.publish_requests_with_keystone(ds_ip,\ + operation="oper-state",operation_status="up",\ + service_id=self.inputs.cfgm_names[0],\ + service_type="IfmapServer") + if response != 200: + self.logger.info("\nSuccess!! As authetication is True and credentials are not configured,\n\ + the oper-state change request has failed") + else: + self.logger.error("\nFailure!! Even if authetication is True and credentials are not configured,\n\ + the oper-state change request is successful") + result = False + response = self.ds_obj.publish_requests_with_keystone(ds_ip,\ + operation="admin-state",operation_status="up",\ + service_id=self.inputs.cfgm_names[0],\ + service_type="IfmapServer") + if response != 200: + self.logger.info("\nSuccess!! As authetication is True and credentials are not configured,\n\ + the admin-state change request has failed") + else: + self.logger.error("\nFailure!! Even if authetication is True and credentials are not configured,\n\ + the admin-state change request is successful") + result = False + response = self.ds_obj.publish_requests_with_keystone(ds_ip,\ + operation="load-balance",service_id=\ + self.inputs.cfgm_names[0],service_type="IfmapServer") + if response != 200: + self.logger.info("\n Success!! As authetication is True and credentials are not configured,\n\ + the load-balance request has failed") + else: + self.logger.error("\n Failure!! 
Even if authetication is True and credentials are not configured,\n\ + the load-balance request is successful") + result = False + self.logger.info("\n # Configure authentication as *True* as well as \n \ + configuring all the required credentials in contrail-discovery.conf file # ") + assert self.ds_obj.modify_discovery_conf_file_params(operation='add_keystone_auth',\ + auth="keystone", add_values = "True") + self.logger.info("\n # Verify that all requests are passed if Auth is True\n\ + and credentials are mentioned # ") + response = self.ds_obj.publish_requests_with_keystone(ds_ip,\ + operation="oper-state",operation_status="up",\ + service_id=self.inputs.cfgm_names[0],\ + service_type="IfmapServer") + if response == 200: + self.logger.info("\n Success!! As authetication is True and credentials are configured,\n\ + the oper-state change request has been processed successfully") + else: + self.logger.error("\n Failure!! Even if authetication is True and credentials are configured,\n\ + the oper-state change request has failed") + result = False + response = self.ds_obj.publish_requests_with_keystone(ds_ip\ + ,operation="admin-state",operation_status="up",\ + service_id=self.inputs.cfgm_names[0],\ + service_type="IfmapServer") + if response == 200: + self.logger.info("\n Success!! As authetication is True and credentials are configured,\n\ + the admin-state change request has been processed successfully") + else: + self.logger.error("\n Failure!! Even if authetication is True and credentials are configured,\n\ + the admin-state change request has failed") + result = False + response = self.ds_obj.publish_requests_with_keystone(ds_ip,\ + operation="load-balance",service_id=\ + self.inputs.cfgm_names[0],service_type="IfmapServer") + if response == 200: + self.logger.info("\n Success!! As authetication is True and credentials are configured,\n\ + the load-balance request has been processed successfully") + else: + self.logger.error("\n Failure!! 
Even if authetication is True and credentials are configured,\n\ + the load-balance request has failed") + result = False + except Exception as e: + self.logger.error(e) + result = False + self.logger.debug("# Deleting the auth configurations from contrail-discovery.conf file # ") + assert self.ds_obj.modify_discovery_conf_file_params(operation='delete_keystone_auth'\ + ,auth="keystone") + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_policy_fixed(self): + ''' + This test case is specifically written to automate Bug "#1401304 : + discovery fixed policy breaks if service stays down for extended period" + Discovery has fixed policy for service assignment in which services + are assigned to consumers in a fixed, static or constant manner. + For example if there are "n" publishers of a service and there are + "m" consumers that are interested in "k" instances (say 2) of service, + then all "m" consumers will get service instances. + This is akin to priority order. + If an instance, say "ni" such that 0 <= i <= k went down for an + extended period (> 15 seconds) and comes back up, it should no longer + be assigned to a new consumer because it should go to the bottom of + the prioritized list. + It should not retain its position. + Steps: + 1. Set the policy of publisher named TEST_PUB as fixed in + contrail-discovery.conf file. + 2. Create 3 different synthetic Publisher request of Publisher named + TEST_PUB. + 3. Create 3 different synthetic Subscribe request asking for 2 instances + of TEST_PUB each. Verify that policy as fixed works as expected. + 4. Now make one of the publisher which was used by subscribers as + down for more than extended period. + 5. Again send 3 different synthetic requests asking for 2 instances + each and verify that the publisher which was made down is not + assigned to the clients as it's priority got reduced in the earlier step. 
+ ''' + result = True + ds_ip = self.inputs.cfgm_ip + try: + self.logger.debug("#### Changing min and max TTL values for testing purpose ##") + assert self.ds_obj.modify_discovery_conf_file_params(operation='change_min_max_ttl',\ + ttl_min=30, ttl_max=30) + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + self.logger.info("#### Making policy as *fixed* for test publisher ##") + assert self.ds_obj.modify_discovery_conf_file_params( 'set_policy',\ + publisher_type="TEST_PUB",policy='fixed') + self.logger.info("#### Sending 3 synthetic publish requests of same Publisher type ###") + def publish_request(): + for i in range(0,100): + response_1 = self.ds_obj.publish_service_to_discovery(ds_ip,\ + service="TEST_PUB",ip="1.1.1.1",port="123") + response_2 = self.ds_obj.publish_service_to_discovery(ds_ip,\ + service="TEST_PUB",ip="2.2.2.2",port="123") + response_3 = self.ds_obj.publish_service_to_discovery(ds_ip,\ + service="TEST_PUB",ip="3.3.3.3",port="123") + sleep(5) + obj_1 = Process(target=publish_request) + obj_1.start() + sleep(1) + if self.ds_obj.get_service_status(ds_ip,\ + service_tuple=("1.1.1.1","TEST_PUB"),expected_status= 'up') \ + and self.ds_obj.get_service_status(ds_ip,\ + service_tuple=("2.2.2.2","TEST_PUB"),expected_status= 'up') \ + and self.ds_obj.get_service_status(ds_ip,\ + service_tuple=("3.3.3.3","TEST_PUB"),expected_status= 'up'): + self.logger.info("#### All publishers have registered to discovery server successfully.###") + else: + self.logger.error("\n#### Either or all Publishers have not registered to discovery server.\n \ + No sense of proceeding the test case. Exiting. 
###") + self.ds_obj.modify_discovery_conf_file_params( 'del_policy',\ + publisher_type="TEST_PUB") + obj_1.terminate() + result = False + assert result + self.logger.info("\n#### Sending 3 synthetic subscribe requests with instance value 2\n \ + to subscribe to Publisher *TEST_PUB* ###") + self.ds_obj.subscribe_service_from_discovery(ds_ip,service="TEST_PUB",\ + instances="2",client_id="1.1.1.1:TestClient",\ + remote_addr= "1.1.1.1", client_type= "TestClient") + self.ds_obj.subscribe_service_from_discovery(ds_ip,service="TEST_PUB",\ + instances="2",client_id="2.2.2.2:TestClient",\ + remote_addr= "2.2.2.2", client_type= "TestClient") + self.ds_obj.subscribe_service_from_discovery(ds_ip, service="TEST_PUB",\ + instances="2",client_id="3.3.3.3:TestClient",\ + remote_addr= "3.3.3.3", client_type= "TestClient") + self.logger.debug("#### Verifying the in use count of publishers are subscribe request ###") + p1_in_use_count = self.ds_obj.get_service_in_use(ds_ip,("1.1.1.1","TEST_PUB")) + p2_in_use_count = self.ds_obj.get_service_in_use(ds_ip,("2.2.2.2","TEST_PUB")) + p3_in_use_count = self.ds_obj.get_service_in_use(ds_ip,("3.3.3.3","TEST_PUB")) + publisher_in_use_list=[p1_in_use_count,p2_in_use_count,p3_in_use_count] + if sum(publisher_in_use_list) == 6 and 0 in publisher_in_use_list: + self.logger.info("\n#### Clients subscribed successfully to publishers\n \ + and policy as *fixed* working as expected ##") + else: + self.logger.error("#### Subscription not as expected. 
The in use list looks like %s ##"\ + % publisher_in_use_list) + result = False + self.logger.debug("\n#### Stopping one of the in use Publisher for extended period\n \ + (> 15 seconds) to decrease it's priority ##") + obj_1.terminate() + index_first_pub_used = publisher_in_use_list.index(3) + def new_publish_request(): + for i in range(0,100): + if index_first_pub_used == 0: + response_2 = self.ds_obj.publish_service_to_discovery(ds_ip,\ + service="TEST_PUB",ip="2.2.2.2", port ="123") + response_3 = self.ds_obj.publish_service_to_discovery(ds_ip,\ + service="TEST_PUB",ip="3.3.3.3", port ="123") + elif index_first_pub_used == 1: + response_1 = self.ds_obj.publish_service_to_discovery(ds_ip,\ + service="TEST_PUB",ip="1.1.1.1", port ="123") + response_3 = self.ds_obj.publish_service_to_discovery(ds_ip,\ + service="TEST_PUB",ip="3.3.3.3", port ="123") + sleep(5) + new_obj=Process(target =new_publish_request) + new_obj.start() + self.logger.debug("#### Waiting for 60 seconds so that all subscriptions are lost ##") + sleep(60) + self.logger.debug("\n#### Again starting the stopped publishers\n \ + and hoping that its priority has been reduced and it will not be used by the clients any more##") + new_obj.terminate() + obj_2 = Process(target=publish_request) + obj_2.start() + sleep(1) + self.logger.info("\n#### Again sending 3 synthetic subscribe requests\n \ + with instance value 2 to subscribe to Publisher *TEST_PUB* ###") + self.ds_obj.subscribe_service_from_discovery(ds_ip, service="TEST_PUB",\ + instances="2",client_id="1.1.1.1:TestClient",\ + remote_addr= "1.1.1.1",client_type= "TestClient") + self.ds_obj.subscribe_service_from_discovery(ds_ip, service="TEST_PUB",\ + instances="2",client_id="2.2.2.2:TestClient",\ + remote_addr= "2.2.2.2",client_type= "TestClient") + self.ds_obj.subscribe_service_from_discovery(ds_ip, service="TEST_PUB",\ + instances="2",client_id="3.3.3.3:TestClient",\ + remote_addr= "3.3.3.3",client_type= "TestClient") + self.logger.debug("#### 
Verifying the in use count of publishers are subscribe request ###") + p1_in_use_count = self.ds_obj.get_service_in_use(ds_ip,("1.1.1.1","TEST_PUB")) + p2_in_use_count = self.ds_obj.get_service_in_use(ds_ip,("2.2.2.2","TEST_PUB")) + p3_in_use_count = self.ds_obj.get_service_in_use(ds_ip,("3.3.3.3","TEST_PUB")) + publisher_in_use_list=[p1_in_use_count,p2_in_use_count,p3_in_use_count] + if sum(publisher_in_use_list) == 6 and publisher_in_use_list.index(index_first_pub_used) == 0: + self.logger.info("\n#### Clients subscribed successfully to publishers\n \ + and policy as *fixed* working as expected ##") + self.logger.info("\n#### Clients not subscribed to publisher \n \ + which went down for time more than extended period as it's priority has been decreased ##") + else: + self.logger.error("#### Subscription not as expected. The in use list looks like %s ##"\ + % publisher_in_use_list) + self.logger.error("\n#### Clients might have subscribed to publisher which went down.\n \ + This means priority of that publisher was not decreased ##") + result = False + obj_2.terminate() + self.logger.info("#### Deleting the policy configurations from .conf file ##") + assert self.ds_obj.modify_discovery_conf_file_params( 'del_policy',\ + publisher_type="TEST_PUB") + self.logger.debug("#### Waiting for dummy Publish and subscribe requests to expire ##") + sleep(30) + self.ds_obj.cleanup_service_from_discovery(ds_ip) + except Exception as e: + self.logger.error(e) + result = False + assert result, "Test case failed due to some error. Please refer to logs" + + @preposttest_wrapper + def test_rule_do_not_affect_other_dns_subscriptions(self): + ''' + This test case is specifically written to automate Bug + "#1548638 : [Discovery-Rel3.0-Centos-1]: All clients re-subscribe + to a different publisher when a rule is added which was supposed + to affect only 1 subscriber (No Auto load balance) " + Steps: + 1. Search for the DNS-Server to which vrouter agents are subscribed to. + 2. 
Create a rule entry for nay one of the vrouter agent and Publisher. + 3. Again search for DNS-Server to which vrouter agent is subscribed to + and match it to values before creating rule. + Precondition: Assumption is that setup is having a vrouter connected + to 2 instances of DNS servers running in different subnets + Also, setup requirement of this test case is to have at least + 2 publishers and 2 subscribers. + Both set of publisher and subscriber should be in different network. + ''' + self.ds_obj.skip_discovery_test("dns-server", min_instances=3, different_subnet_flag=False ) + result = True + ds_ip = self.inputs.cfgm_ip + assert self.ds_obj.resubscribe_with_new_ttl( 30, 30, 'contrail-vrouter-agent') + self.addCleanup(self.ds_obj.modify_discovery_conf_file_params,"change_min_max_ttl") + self.logger.info("# Finding the subscriptions of all vrouter-agents to DNS-server before creating a rule# ") + all_vrouter_pub_IPs_bfr_rule = [] + for i in range(0,len(self.inputs.compute_control_ips)): + client_subscribed_service_id = self.ds_obj.get_subscribed_service_id\ + (ds_ip, client=(self.inputs.compute_control_ips[i],\ + "contrail-vrouter-agent:0"),service="dns-server") + instances_allocated = len(client_subscribed_service_id) + service_IPs = [] + for k in range (0,instances_allocated): + service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\ + (ds_ip,client_subscribed_service_id[k]) + service_IPs.append(service_endpoint[0][0]) + self.logger.debug("Contrail-vrouter-agent running on %s is subscribed to DNS-server running at %s" \ + % (self.inputs.compute_control_ips[i],service_IPs)) + all_vrouter_pub_IPs_bfr_rule.append(service_IPs) + self.logger.info("## Creating a rule for 1 of the vrouter-agent subscriber") + self.ds_obj.add_and_verify_rule(self.inputs.bgp_control_ips[0], 'dns-server',\ + self.inputs.compute_control_ips[0], 'contrail-vrouter-agent:0') + self.logger.info("#### Waiting for 30 seconds so that TTL expiry for all subscriptions to 
happens###") + sleep (30) + self.logger.info("# Finding the subscriptions of all vrouter-agents to DNS-server after creating a rule# ") + all_vrouter_pub_IPs_aftr_rule = [] + for i in range(0,len(self.inputs.compute_control_ips)): + client_subscribed_service_id=self.ds_obj.get_subscribed_service_id(ds_ip,\ + client=(self.inputs.compute_control_ips[i],\ + "contrail-vrouter-agent:0"),service="dns-server") + instances_allocated = len(client_subscribed_service_id) + service_IPs = [] + for k in range (0,instances_allocated): + service_endpoint = self.ds_obj.get_service_endpoint_by_service_id\ + (ds_ip,client_subscribed_service_id[k]) + service_IPs.append(service_endpoint[0][0]) + self.logger.debug("Contrail-vrouter-agent running on %s is subscribed to DNS-server running at %s" \ + % (self.inputs.compute_control_ips[i],service_IPs)) + all_vrouter_pub_IPs_aftr_rule.append(service_IPs) + if all_vrouter_pub_IPs_aftr_rule[0][0] == self.inputs.bgp_control_ips[0] \ + and len(all_vrouter_pub_IPs_aftr_rule[0]) == 1: + self.logger.debug("The rule has worked properly") + for i in range(1,len(all_vrouter_pub_IPs_aftr_rule)): + if all_vrouter_pub_IPs_aftr_rule[i] == all_vrouter_pub_IPs_bfr_rule[i]: + self.logger.debug("No change has happened in other subscriptions due to rule.") + else: + result = False + self.logger.error("\n The publisher assigned to contrail-vrouter\n \ + running on %s were %s and has changed to %s"\ + % (self.inputs.compute_control_ips[i],\ + all_vrouter_pub_IPs_bfr_rule[i],all_vrouter_pub_IPs_aftr_rule[i])) + else: + self.logger.error("Rule has not worked as expected") + self.logger.debug("Subscriber %s has subscribed to %s Publisher instead of subscribing only to %s"\ + % (self.inputs.compute_control_ips[i],\ + all_vrouter_pub_IPs_aftr_rule[0],self.inputs.bgp_control_ips[0]) ) + result = False + self.logger.info("# Deleting the rule after the test is complete # ") + self.ds_obj.delete_and_verify_rule(self.inputs.bgp_control_ips[0], 'dns-server',\ + 
self.inputs.compute_control_ips[0], 'contrail-vrouter-agent:0') + assert result, "Test case failed due to some error. Please refer to logs" # end TestDiscoveryFixture diff --git a/serial_scripts/dynamic_vgw/__init__.py b/serial_scripts/dynamic_vgw/__init__.py new file mode 100644 index 000000000..9a38c362d --- /dev/null +++ b/serial_scripts/dynamic_vgw/__init__.py @@ -0,0 +1 @@ +"""Dynamic VGW tests.""" diff --git a/serial_scripts/dynamic_vgw/base.py b/serial_scripts/dynamic_vgw/base.py new file mode 100644 index 000000000..6114e8057 --- /dev/null +++ b/serial_scripts/dynamic_vgw/base.py @@ -0,0 +1,77 @@ +import test +from common.connections import ContrailConnections +from common import isolated_creds +from project_test import * +from vn_test import * +from vm_test import * + + +class BaseVgwTest(test.BaseTestCase): + + @classmethod + def setUpClass(cls): + super(BaseVgwTest, cls).setUpClass() + cls.connections = ContrailConnections( + cls.inputs, + project_name=cls.inputs.project_name, + username=cls.inputs.stack_user, + password=cls.inputs.stack_password, + logger=cls.logger) + cls.quantum_h = cls.connections.quantum_h + cls.nova_h = cls.connections.nova_h + cls.vnc_lib = cls.connections.vnc_lib + cls.agent_inspect = cls.connections.agent_inspect + cls.cn_inspect = cls.connections.cn_inspect + cls.analytics_obj = cls.connections.analytics_obj + cls.setup_common_objects() + # end setUpClass + + @classmethod + def tearDownClass(cls): + for vn in cls.vn_fixture_dict: + vn.verify_is_run = False + vn.cleanUp() + super(BaseVgwTest, cls).tearDownClass() + # end tearDownClass + + @classmethod + def setup_common_objects(cls): + + cls.project_fixture = ProjectFixture( + project_name=cls.inputs.project_name, + connections=cls.connections) + cls.project_fixture.setUp() + cls.logger.info( + 'Default SG to be edited for allow all on project: %s' % + cls.inputs.project_name) + cls.project_fixture.set_sec_group_for_allow_all( + cls.inputs.project_name, 'default') + + # Formin 
the VGW VN dict for further test use + cls.vgw_vn_list = {} + cls.vn_fixture_dict = [] + if cls.inputs.vgw_data != []: + for key in cls.inputs.vgw_data[0]: + for vgw in cls.inputs.vgw_data[0][key]: + cls.vgw_vn_list[cls.inputs.vgw_data[0][key][vgw]['vn']] = {} + cls.vgw_vn_list[cls.inputs.vgw_data[0][key][vgw]['vn']][ + 'subnet'] = cls.inputs.vgw_data[0][key][vgw]['ipam-subnets'] + cls.vgw_vn_list[cls.inputs.vgw_data[0] + [key][vgw]['vn']]['host'] = key + if 'gateway-routes' in cls.inputs.vgw_data[0][key][vgw]: + cls.vgw_vn_list[cls.inputs.vgw_data[0][key][vgw]['vn']][ + 'route'] = cls.inputs.vgw_data[0][key][vgw]['gateway-routes'] + + # Creating VN + cls.vn_fixture_dict = [] + for key in cls.vgw_vn_list: + vn = VNFixture( + project_name=cls.inputs.project_name, + connections=cls.connections, + inputs=cls.inputs, + vn_name=key.split(":")[3], + subnets=cls.vgw_vn_list[key]['subnet']) + cls.vn_fixture_dict.append(vn) + vn.setUp() + + # end setup_common_objects diff --git a/serial_scripts/dynamic_vgw/test_dynamic_vgw.py b/serial_scripts/dynamic_vgw/test_dynamic_vgw.py new file mode 100755 index 000000000..824627174 --- /dev/null +++ b/serial_scripts/dynamic_vgw/test_dynamic_vgw.py @@ -0,0 +1,34 @@ +# Need to import path to test/fixtures and test/scripts/ +# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/' +# +# To run tests, you can do 'python -m testtools.run tests'. To run specific tests, +# You can do 'python -m testtools.run -l tests' +# Set the env variable PARAMS_FILE to point to your ini file. 
Else it will try to pick params.ini in PWD +# +from tcutils.wrappers import preposttest_wrapper +from dynamic_vgw import base +from dynamic_vgw.verify import VerifyDynamicVgwCases + +class TestDynamicVgwCases(base.BaseVgwTest, VerifyDynamicVgwCases): + + @classmethod + def setUpClass(cls): + super(TestDynamicVgwCases, cls).setUpClass() + + def runTest(self): + pass + # end runTest + + @preposttest_wrapper + def test_dynamic_vgw_compute_ping(self): + ''' + Test to validate dynamic VGW creation and communication from overlay VM to compute IP + 1: Create VGW interface dynamicaly + 2. Create corresponding vn and launch VM + 3. Ping from VM to the compute where VGW is created + 4. Delete VGW interface + + Pass criteria: Step 3 should pass + Maintainer: chhandak@juniper.net + ''' + return self.verify_dynamic_vgw_compute_ping() diff --git a/serial_scripts/dynamic_vgw/verify.py b/serial_scripts/dynamic_vgw/verify.py new file mode 100644 index 000000000..fcf4d4cf9 --- /dev/null +++ b/serial_scripts/dynamic_vgw/verify.py @@ -0,0 +1,85 @@ +from time import sleep +import os +from vn_test import * +from vm_test import * +from floating_ip import * +from tcutils.util import get_random_name + +class VerifyDynamicVgwCases(): + + def verify_dynamic_vgw_compute_ping(self): + + result = True + host_list = [] + vgw_compute = None + vm_compute = None + vgw_intf = 'vgw1' + vgw_subnets = ['11.1.1.0/24'] + route = '0.0.0.0/0' + vgw_fq_name= 'default-domain:admin:vgwvn:vgwvn' + vm1_name= "vgw_vm" + host_list = self.connections.nova_h.get_hosts() + if len(host_list) > 1: + vm_compute = self.inputs.host_data[host_list[0]] + vgw_compute = self.inputs.host_data[host_list[1]] + else: + vm_compute = self.inputs.host_data[host_list[0]] + vgw_compute = self.inputs.host_data[host_list[0]] + + + # Configure VGW + self.logger.info("Creating VGW interface %s dynamically on %s" %(vgw_intf, vgw_compute['name'])) + self.logger.info("Configuring VGW on the Compute %s", (vgw_compute['ip'])) + cmd1 = "export 
PYTHONPATH=/usr/share/pyshared/contrail_vrouter_api/gen_py/instance_service" + + vgw_args = "--oper create --interface %s --subnets %s --vrf %s --routes %s" \ + %(vgw_intf,vgw_subnets[0],vgw_fq_name, route) + cmd2="python /usr/share/contrail-utils/provision_vgw_interface.py %s" %(vgw_args) + cmd= cmd1 + ";" + cmd2 + output = self.inputs.run_cmd_on_server(vgw_compute['ip'], cmd, + vgw_compute['username'], + vgw_compute['password']) + # Creating Virtual network with VGW FQ name + vn_fixture = self.useFixture( + VNFixture( + project_name=vgw_fq_name.split(":")[1], + connections=self.connections, + inputs=self.inputs, + vn_name=vgw_fq_name.split(":")[2], + subnets=vgw_subnets)) + # Verification of VN + assert vn_fixture.verify_on_setup() + + # Creation of VM and validation + vm1_fixture = self.useFixture( + VMFixture( + project_name=vgw_fq_name.split(":")[1], + connections=self.connections, + vn_obj=vn_fixture.obj, + vm_name=vm1_name, + node_name=vm_compute['name'])) + + # Verification on VM + assert vm1_fixture.verify_on_setup() + + self.logger.info("Now trying to ping underlay compute ip %s from VM %s" %(vgw_compute['ip'],vm1_name)) + if not vm1_fixture.ping_with_certainty(vgw_compute['ip']): + result = result and False + + # Delete VGW + self.logger.info("Deleting VGW interface %s on %s" %(vgw_intf, vgw_compute['name'])) + vgw_args = "--oper delete --interface %s --subnets %s --vrf %s --routes %s" \ + %(vgw_intf,vgw_subnets[0],vgw_fq_name, route) + cmd3="python /usr/share/contrail-utils/provision_vgw_interface.py %s" %(vgw_args) + cmd= cmd1 + ";" + cmd3 + output = self.inputs.run_cmd_on_server(vgw_compute['ip'], cmd, + vgw_compute['username'], + vgw_compute['password']) + + if not result: + self.logger.error( + 'Test ping to underlay compute ip from VM %s failed' % (vm1_name)) + + assert result + + return True diff --git a/serial_scripts/ecmp/base.py b/serial_scripts/ecmp/base.py index 7f58d8f50..316a65608 100644 --- a/serial_scripts/ecmp/base.py +++ 
b/serial_scripts/ecmp/base.py @@ -1,25 +1,15 @@ -import test +import test_v1 from common.connections import ContrailConnections from common import isolated_creds -class BaseECMPRestartTest(test.BaseTestCase): +class BaseECMPRestartTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseECMPRestartTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - #cls.connections= ContrailConnections(cls.inputs) cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib -# cls.logger= cls.inputs.logger cls.agent_inspect= cls.connections.agent_inspect cls.cn_inspect= cls.connections.cn_inspect cls.analytics_obj=cls.connections.analytics_obj @@ -27,8 +17,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - #cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(BaseECMPRestartTest, cls).tearDownClass() #end tearDownClass @@ -39,5 +27,60 @@ def remove_from_cleanups(self, fix): #break #end remove_from_cleanups + def update_hash_on_network(self, ecmp_hash, vn_fixture): + vn_config = self.vnc_lib.virtual_network_read(id = vn_fixture.uuid) + vn_config.set_ecmp_hashing_include_fields(ecmp_hash) + self.vnc_lib.virtual_network_update(vn_config) + def update_hash_on_port(self, ecmp_hash, vm_fixture): + key, vm_uuid = vm_fixture.get_vmi_ids().popitem() + vm_config = self.vnc_lib.virtual_machine_interface_read(id = str(vm_uuid)) + vm_config.set_ecmp_hashing_include_fields(ecmp_hash) + self.vnc_lib.virtual_machine_interface_update(vm_config) + + def config_all_hash(self, ecmp_hashing_include_fields): + + global_vrouter_id = 
self.vnc_lib.get_default_global_vrouter_config_id() + global_config = self.vnc_lib.global_vrouter_config_read(id = global_vrouter_id) + global_config.set_ecmp_hashing_include_fields(ecmp_hashing_include_fields) + self.vnc_lib.global_vrouter_config_update(global_config) + + def verify_if_hash_changed(self, vn1_fixture, vm1_fixture, vm2_fixture, ecmp_hashing_include_fields): + (domain, project, vn) = vn1_fixture.vn_fq_name.split(':') + inspect_h = self.agent_inspect[vm1_fixture.vm_node_ip] + agent_vrf_objs = inspect_h.get_vna_vrf_objs(domain, project, vn) + agent_vrf_obj = vm1_fixture.get_matching_vrf( + agent_vrf_objs['vrf_list'], vn1_fixture.vrf_name) + vn_vrf_id = agent_vrf_obj['ucindex'] + next_hops = inspect_h.get_vna_active_route( + vrf_id=vn_vrf_id, ip=vm2_fixture.vm_ip, prefix='32')['path_list'][0]['nh']['mc_list'] + + if not next_hops: + result = False + assert result, 'Route not found in the Agent %s' % vm2_fixture.vm_node_ip + else: + self.logger.info('Route found in the Agent %s' % vm2_fixture.vm_node_ip) + next_hop_values = [] + for each_nh in next_hops: + next_hop_values.append(each_nh['dip']) + + ecmp_field = inspect_h.get_vna_active_route( + vrf_id=vn_vrf_id, ip=vm2_fixture.vm_ip, prefix='32')['path_list'][0]['ecmp_hashing_fields'] + if not(ecmp_field == ecmp_hashing_include_fields): + return False + ecmp_keys = ecmp_hashing_include_fields.split(',') + ri_fq_name = [self.inputs.domain_name, self.inputs.project_name, vn1_fixture.vn_name, vn1_fixture.vn_name] + ri_obj = self.vnc_lib.routing_instance_read(fq_name=ri_fq_name) + for node in self.inputs.bgp_ips: + route_entry = self.cn_inspect[node].get_cn_route_table_entry( + ri_name=ri_fq_name, prefix=vm2_fixture.vm_ip) + if route_entry: + for each_route_entry in route_entry: + if each_route_entry['protocol'] == 'ServiceChain': + if not(each_route_entry['next_hop'] in next_hop_values): + return False + for ecmp_key in ecmp_keys: + if ecmp_key: + if not('field-hash' in 
each_route_entry['load_balance'].values()[0]['decision_type'] and ecmp_key in each_route_entry['load_balance'].values()[0]['fields']): + return False diff --git a/serial_scripts/ecmp/test_ecmp.py b/serial_scripts/ecmp/test_ecmp.py index 973247f33..629e6dd22 100644 --- a/serial_scripts/ecmp/test_ecmp.py +++ b/serial_scripts/ecmp/test_ecmp.py @@ -51,7 +51,7 @@ def test_ecmp_svc_in_network_nat_scale_max_instances(self): if len(self.inputs.compute_ips) > 1: for i in range(4, 17, 4): self.logger.info( - '***** Will launch %s instances in the Service Chain *****' % i) + '%%%%%%%%%% Will launch %s instances in the Service Chain %%%%%%%%%%' % i) self.verify_svc_in_network_datapath( si_count=1, svc_scaling=True, max_inst=i, svc_mode='in-network-nat') svm_ids = self.si_fixtures[0].svm_ids diff --git a/serial_scripts/ecmp/test_ecmp_hash.py b/serial_scripts/ecmp/test_ecmp_hash.py new file mode 100644 index 000000000..b6b47effc --- /dev/null +++ b/serial_scripts/ecmp/test_ecmp_hash.py @@ -0,0 +1,466 @@ +import sys +import os +import fixtures +import testtools +import unittest +import time +from vn_test import * +from floating_ip import * +from quantum_test import * +from vnc_api_test import * +from vnc_api import vnc_api as my_vnc_api +from nova_test import * +from vm_test import * +from tcutils.wrappers import preposttest_wrapper +from tcutils.commands import ssh, execute_cmd, execute_cmd_out +from common.servicechain.firewall.verify import VerifySvcFirewall +from common.ecmp.ecmp_traffic import ECMPTraffic +from common.ecmp.ecmp_verify import ECMPVerify +sys.path.append(os.path.realpath('tcutils/pkgs/Traffic')) +from traffic.core.stream import Stream +from traffic.core.profile import create, ContinuousProfile, ContinuousSportRange +from traffic.core.helpers import Host +from traffic.core.helpers import Sender, Receiver +from fabric.state import connections as fab_connections +from common.ecmp.ecmp_test_resource import ECMPSolnSetup +from base import BaseECMPRestartTest +from 
common import isolated_creds +import inspect +import test +from tcutils.contrail_status_check import * +from tcutils.tcpdump_utils import * +from tcutils.commands import * +import re + +class TestECMPHash(BaseECMPRestartTest, VerifySvcFirewall, ECMPSolnSetup, ECMPTraffic, ECMPVerify): + + @classmethod + def setUpClass(cls): + super(TestECMPHash, cls).setUpClass() + + def runTest(self): + pass + #end runTest + + def is_test_applicable(self): + if len(self.inputs.compute_ips) < 1: + return (False, 'Scaling test. Will run only on multiple node setup') + return (True, None) + + def setUp(self): + super(TestECMPHash, self).setUp() + result = self.is_test_applicable() + if result[0]: + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True, "source_ip": True, "source_port": True} + self.config_all_hash(ecmp_hashing_include_fields) + else: + return + + @preposttest_wrapper + def test_ecmp_hash_svc_transparent(self): + + self.verify_svc_transparent_datapath( + si_count=1, svc_scaling=True, max_inst=2, svc_img_name='tiny_trans_fw', ci=True) + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True, "source_ip": True, "source_port": True} + self.config_all_hash(ecmp_hashing_include_fields) + self.update_hash_on_network(ecmp_hash = ecmp_hashing_include_fields, vn_fixture = self.vn1_fixture) + self.update_hash_on_port(ecmp_hash = ecmp_hashing_include_fields, vm_fixture = self.vm1_fixture) + self.verify_traffic_flow(self.vm1_fixture, [self.vm2_fixture], self.si_fixtures[0], self.vn1_fixture) + self.addCleanup(self.config_all_hash(ecmp_hashing_include_fields)) + return True + + @preposttest_wrapper + def test_ecmp_hash_svc_in_network(self): + """ + Description: Validate ECMP Hash with service chaining in-network mode + Test steps: + 1. Creating vm's - vm1 and vm2 in networks vn1 and vn2. + 2. 
Creating a service instance in in-network mode with 4 instances and + left-interface of the service instances sharing the IP. + 3. Creating a service chain by applying the service instance as a service in a policy between the VNs. + 4. Checking for ping and tcp traffic between vm1 and vm2. + 5. Delete the Service Instances and Service Template. + 6. This testcase will be run in only multiple compute node scenario. + Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 from vm1. + """ + + self.verify_svc_in_network_datapath( + si_count=1, svc_scaling=True, max_inst=3) + svm_ids = self.si_fixtures[0].svm_ids + self.get_rt_info_tap_intf_list( + self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids) + dst_vm_list = [self.vm2_fixture] + + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True, "source_ip": True, "source_port": True} + self.config_all_hash(ecmp_hashing_include_fields) + self.update_hash_on_network(ecmp_hash = ecmp_hashing_include_fields, vn_fixture = self.vn1_fixture) + self.update_hash_on_port(ecmp_hash = ecmp_hashing_include_fields, vm_fixture = self.vm1_fixture) + self.verify_traffic_flow( + self.vm1_fixture, [self.vm2_fixture], self.si_fixtures[0], self.vn1_fixture) + + self.addCleanup(self.config_all_hash(ecmp_hashing_include_fields)) + return True + # end test_ecmp_svc_in_network + + @preposttest_wrapper + def test_ecmp_hash_svc_in_network_nat(self): + """ + Description: Validate ECMP Hash with service chaining in-network-nat mode + Test steps: + 1. Creating vm's - vm1 and vm2 in networks vn1 and vn2. + 2. Creating a service instance in in-network-nat mode with 4 instances and + left-interface of the service instances sharing the IP. + 3. Creating a service chain by applying the service instance as a service in a policy between the VNs. + 4. Checking for ping and tcp traffic between vm1 and vm2. + 5. 
Delete the Service Instances and Service Template. + 6. This testcase will be run in only multiple compute node scenario. + Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 from vm1. + """ + + self.verify_svc_in_network_datapath( + si_count=1, svc_scaling=True, max_inst=2, svc_mode='in-network-nat', svc_img_name='tiny_nat_fw', ci=True) + + svm_ids = self.si_fixtures[0].svm_ids + self.get_rt_info_tap_intf_list( + self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids) + dst_vm_list = [self.vm2_fixture] + + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True, "source_ip": True, "source_port": True} + self.config_all_hash(ecmp_hashing_include_fields) + self.update_hash_on_network(ecmp_hash = ecmp_hashing_include_fields, vn_fixture = self.vn1_fixture) + self.update_hash_on_port(ecmp_hash = ecmp_hashing_include_fields, vm_fixture = self.vm1_fixture) + self.verify_traffic_flow( + self.vm1_fixture, [self.vm2_fixture], self.si_fixtures[0], self.vn1_fixture) + + self.addCleanup(self.config_all_hash(ecmp_hashing_include_fields)) + return True + # end test_ecmp_svc_in_network_nat + + @preposttest_wrapper + def test_ecmp_hash_svc_precedence(self): + """ + Description: Validate ECMP Hash with service chaining in-network mode + Test steps: + 1. Creating vm's - vm1 and vm2 in networks vn1 and vn2. + 2. Creating a service instance in in-network mode with 4 instances and + left-interface of the service instances sharing the IP. + 3. Creating a service chain by applying the service instance as a service in a policy between the VNs. + 4. Checking for ping and tcp traffic between vm1 and vm2. + 5. Delete the Service Instances and Service Template. + 6. This testcase will be run in only multiple compute node scenario. + Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 from vm1. 
+ """ + + self.verify_svc_in_network_datapath( + si_count=1, svc_scaling=True, max_inst=3) + svm_ids = self.si_fixtures[0].svm_ids + self.get_rt_info_tap_intf_list( + self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids) + dst_vm_list = [self.vm2_fixture] + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True, "source_ip": True} + self.config_all_hash(ecmp_hashing_include_fields) + ecmp_hashing_include_fields = 'l3-source-address,l3-destination-address,l4-protocol,l4-destination-port,' + self.verify_if_hash_changed(self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, ecmp_hashing_include_fields) + self.verify_traffic_flow( + self.vm1_fixture, [self.vm2_fixture], self.si_fixtures[0], self.vn1_fixture) + + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True} + self.update_hash_on_network(ecmp_hash = ecmp_hashing_include_fields, vn_fixture = self.vn2_fixture) + ecmp_hashing_include_fields = 'l3-destination-address,l4-protocol,l4-destination-port,' + self.verify_if_hash_changed(self.vn1_fixture,self.vm1_fixture, self.vm2_fixture, ecmp_hashing_include_fields) + self.verify_traffic_flow( + self.vm1_fixture, [self.vm2_fixture], self.si_fixtures[0], self.vn1_fixture) + + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True} + self.update_hash_on_port(ecmp_hash = ecmp_hashing_include_fields, vm_fixture = self.vm2_fixture) + ecmp_hashing_include_fields = 'l3-destination-address,l4-destination-port,' + self.verify_if_hash_changed(self.vn1_fixture, self.vm1_fixture, self.vm2_fixture,ecmp_hashing_include_fields) + self.verify_traffic_flow( + self.vm1_fixture, [self.vm2_fixture], self.si_fixtures[0], self.vn1_fixture) + key, vm_uuid = self.vm1_fixture.get_vmi_ids().popitem() + vm_uuid = str(vm_uuid) + add_static_route_cmd = 'python provision_static_route.py 
--prefix ' + self.vm2_fixture.vm_ip + '/32' + ' --virtual_machine_interface_id ' + vm_uuid + \ + ' --tenant_name ' + self.inputs.project_name + ' --api_server_ip 127.0.0.1 --api_server_port 8082 --oper add --route_table_name my_route_table' + \ + ' --user ' + "admin" + ' --password ' + "contrail123" + with settings( + host_string='%s@%s' % ( + self.inputs.username, self.inputs.cfgm_ips[0]), + password=self.inputs.password, warn_only=True, abort_on_prompts=False, debug=True): + + status = run('cd /opt/contrail/utils;' + add_static_route_cmd) + self.logger.debug("%s" % status) + m = re.search(r'Creating Route table', status) + assert m, 'Failed in Creating Route table' + + (domain, project, vn) = self.vn1_fixture.vn_fq_name.split(':') + inspect_h = self.agent_inspect[self.vm1_fixture.vm_node_ip] + agent_vrf_objs = inspect_h.get_vna_vrf_objs(domain, project, vn) + agent_vrf_obj = self.vm1_fixture.get_matching_vrf( + agent_vrf_objs['vrf_list'], self.vn1_fixture.vrf_name) + vn_vrf_id = agent_vrf_obj['ucindex'] + next_hops = inspect_h.get_vna_active_route( + vrf_id=vn_vrf_id, ip=self.vm2_fixture.vm_ip, prefix='32')['path_list'][0]['nh']['mc_list'] + + if not next_hops: + result = False + assert result, 'Route not found in the Agent %s' % vm2_fixture.vm_node_ip + else: + self.logger.info('Route found in the Agent %s' % vm2_fixture.vm_node_ip) + + if (len(next_hops) != 3): + result = False + assert result, 'Agent does not reflect the static route addition' + else: + self.logger.info('Agent reflects the static route addition') + + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True, "source_ip": True, "source_port": True} + self.addCleanup(self.config_all_hash(ecmp_hashing_include_fields)) + return True + + # end test_ecmp_svc_precedence + + @preposttest_wrapper + def test_static_table(self): + + self.verify_svc_in_network_datapath( + si_count=1, svc_scaling=True, max_inst=3) + svm_ids = 
self.si_fixtures[0].svm_ids + self.get_rt_info_tap_intf_list( + self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids) + dst_vm_list = [self.vm2_fixture] + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True, "source_ip": True} + self.config_all_hash(ecmp_hashing_include_fields) + ecmp_hashing_include_fields = 'l3-source-address,l3-destination-address,l4-protocol,l4-destination-port,' + self.verify_if_hash_changed(self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, ecmp_hashing_include_fields) + self.verify_traffic_flow( + self.vm1_fixture, [self.vm2_fixture], self.si_fixtures[0], self.vn1_fixture) + + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True} + self.update_hash_on_network(ecmp_hash = ecmp_hashing_include_fields, vn_fixture = self.vn2_fixture) + ecmp_hashing_include_fields = 'l3-destination-address,l4-protocol,l4-destination-port,' + self.verify_if_hash_changed(self.vn1_fixture,self.vm1_fixture, self.vm2_fixture, ecmp_hashing_include_fields) + self.verify_traffic_flow( + self.vm1_fixture, [self.vm2_fixture], self.si_fixtures[0], self.vn1_fixture) + + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True} + self.update_hash_on_port(ecmp_hash = ecmp_hashing_include_fields, vm_fixture = self.vm2_fixture) + ecmp_hashing_include_fields = 'l3-destination-address,l4-destination-port,' + self.verify_if_hash_changed(self.vn1_fixture, self.vm1_fixture, self.vm2_fixture,ecmp_hashing_include_fields) + self.verify_traffic_flow( + self.vm1_fixture, [self.vm2_fixture], self.si_fixtures[0], self.vn1_fixture) + key, vm_uuid = self.vm1_fixture.get_vmi_ids().popitem() + vm_uuid = str(vm_uuid) + add_static_route_cmd = 'python provision_static_route.py --prefix ' + self.vm2_fixture.vm_ip + '/32' + ' --virtual_machine_interface_id ' + vm_uuid + \ + ' 
--tenant_name ' + self.inputs.project_name + ' --api_server_ip 127.0.0.1 --api_server_port 8082 --oper add --route_table_name my_route_table' + \ + ' --user ' + "admin" + ' --password ' + "contrail123" + with settings( + host_string='%s@%s' % ( + self.inputs.username, self.inputs.cfgm_ips[0]), + password=self.inputs.password, warn_only=True, abort_on_prompts=False, debug=True): + + status = run('cd /opt/contrail/utils;' + add_static_route_cmd) + self.logger.debug("%s" % status) + m = re.search(r'Creating Route table', status) + assert m, 'Failed in Creating Route table' + + (domain, project, vn) = self.vn1_fixture.vn_fq_name.split(':') + inspect_h = self.agent_inspect[self.vm1_fixture.vm_node_ip] + agent_vrf_objs = inspect_h.get_vna_vrf_objs(domain, project, vn) + agent_vrf_obj = self.vm1_fixture.get_matching_vrf( + agent_vrf_objs['vrf_list'], self.vn1_fixture.vrf_name) + vn_vrf_id = agent_vrf_obj['ucindex'] + next_hops = inspect_h.get_vna_active_route( + vrf_id=vn_vrf_id, ip=self.vm2_fixture.vm_ip, prefix='32')['path_list'][0]['nh']['mc_list'] + if not next_hops: + result = False + assert result, 'Route not found in the Agent %s' % vm2_fixture.vm_node_ip + else: + self.logger.info('Route found in the Agent %s' % vm2_fixture.vm_node_ip) + + if (len(next_hops) != 3): + result = False + assert result, 'Agent does not reflect the static route addition' + else: + self.logger.info('Agent reflects the static route addition') + + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, +"ip_protocol": True, "source_ip": True, "source_port": True} + self.addCleanup(self.config_all_hash(ecmp_hashing_include_fields)) + return True + + # end test_static_table + + @preposttest_wrapper + def test_ecmp_hardcode_path(self): + """ + Description: Validate ECMP Hash with service chaining in-network mode + Test steps: + 1. Creating vm's - vm1 and vm2 in networks vn1 and vn2. + 2. 
Creating a service instance in in-network mode with 4 instances and + left-interface of the service instances sharing the IP. + 3. Creating a service chain by applying the service instance as a service in a policy between the VNs. + 4. Checking for ping and tcp traffic between vm1 and vm2. Delete "source-port" hash field in ecmp. + 5. Delete the Service Instances and Service Template. + 6. This testcase will be run in only multiple compute node scenario. + Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 from vm1. + """ + + self.verify_svc_transparent_datapath( + si_count=1, svc_scaling=True, max_inst=2, svc_img_name='tiny_trans_fw', ci=True) + + svm_ids = self.si_fixtures[0].svm_ids + dst_vm_list = [self.vm2_fixture] + svms = self.get_svms_in_si(self.si_fixtures[0], self.inputs.project_name) + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True, "source_ip": True, "source_port": True} + self.config_all_hash(ecmp_hashing_include_fields) + i = 0 + session = [] + pcap = [] + filters = '-nn' + for svm in svms: + tap_if_of_svm = self.get_bridge_svm_tapintf(svm.name, 'right') + vm_nodeip = self.inputs.host_data[self.nova_h.get_nova_host_of_vm(self.get_svm_obj(svm.name))]['host_ip'] + compute_user = self.inputs.host_data[vm_nodeip]['username'] + compute_password = self.inputs.host_data[vm_nodeip]['password'] + session_item, pcap_item = start_tcpdump_for_intf(vm_nodeip, compute_user, compute_password, tap_if_of_svm, filters=filters) + session.append(session_item) + pcap.append(pcap_item) + i = i + 1 + cmds = ['nslookup %s %s' % (self.vm1_fixture.vm_ip, self.vm1_fixture.vm_ip)] + output = self.vm2_fixture.run_cmd_on_vm(cmds=cmds, as_sudo=True) + i = 0 + sleep(10) + for svm in svms: + cmd = 'tcpdump -r %s' % pcap[i] + cmd_check_nslookup, err = execute_cmd_out(session[i], cmd, self.logger) + send_ns = re.search("IP (.+ > .+): \d\+ PTR" , cmd_check_nslookup) + 
stop_tcpdump_for_vm_intf(self, session[i], pcap[i]) + if not (send_ns and (self.vm2_fixture.vm_ip in send_ns.group(0)) and (self.vm1_fixture.vm_ip in send_ns.group(0))): + self.logger.error("nslookup packets did not get ecmped across si vms") + else: + self.logger.info("nslookup packets got ecmped across si vms") + i = i + 1 + + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True, "source_ip": True} + self.config_all_hash(ecmp_hashing_include_fields) + i = 0 + sess = [] + pcaps = [] + for svm in svms: + tap_if_of_svm = self.get_bridge_svm_tapintf(svm.name, 'right') + vm_nodeip = self.inputs.host_data[self.nova_h.get_nova_host_of_vm(self.get_svm_obj(svm.name))]['host_ip'] + compute_user = self.inputs.host_data[vm_nodeip]['username'] + compute_password = self.inputs.host_data[vm_nodeip]['password'] + sess_item, pcaps_item = start_tcpdump_for_intf(vm_nodeip, compute_user, compute_password, tap_if_of_svm, filters = filters) + sess.append(sess_item) + pcaps.append(pcaps_item) + i = i + 1 + cmds = ['nslookup %s %s' % (self.vm1_fixture.vm_ip, self.vm1_fixture.vm_ip)] + output = self.vm2_fixture.run_cmd_on_vm(cmds=cmds, as_sudo=True) + i = 0 + sleep(10) + some_have_nslookup = False + atleast_one_has_nslookup = False + for svm in svms: + cmd = 'tcpdump -r %s' % pcaps[i] + cmd_check_nslookup, err = execute_cmd_out(sess[i], cmd, self.logger) + send_ns = re.search("IP (.+ > .+): \d\+ PTR" , cmd_check_nslookup) + if not (send_ns and (self.vm2_fixture.vm_ip in send_ns.group(0)) and (self.vm1_fixture.vm_ip in send_ns.group(0))): + some_have_nslookup = True + else: + atleast_one_has_nslookup = True + stop_tcpdump_for_vm_intf(self, sess[i], pcaps[i]) + i = i + 1 + if not (some_have_nslookup and atleast_one_has_nslookup): + self.logger.error("nslookup packets should be hardcoded to one of the paths as source-port is removed") + + self.verify_traffic_flow( + self.vm1_fixture, [self.vm2_fixture], 
self.si_fixtures[0], self.vn1_fixture) + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True, "source_ip": True, "source_port": True} + self.addCleanup(self.config_all_hash(ecmp_hashing_include_fields)) + return True + # end test_ecmp_hardcode_path + + + @preposttest_wrapper + def test_ecmp_hash_svc_in_network_restart_vrouter(self): + """ + Description: Validate ECMP Hash with service chaining in-network mode + Test steps: + 1. Creating vm's - vm1 and vm2 in networks vn1 and vn2. + 2. Creating a service instance in in-network mode with 4 instances and + left-interface of the service instances sharing the IP. + 3. Creating a service chain by applying the service instance as a service in a policy between the VNs. + 4. Checking for ping and tcp traffic between vm1 and vm2. + 5. Delete the Service Instances and Service Template. + 6. This testcase will be run in only multiple compute node scenario. + Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 from vm1. 
+ """ + + self.verify_svc_in_network_datapath( + si_count=1, svc_scaling=True, max_inst=3) + svm_ids = self.si_fixtures[0].svm_ids + self.get_rt_info_tap_intf_list( + self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids) + dst_vm_list = [self.vm2_fixture] + + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True, "source_ip": True, "source_port": True} + self.config_all_hash(ecmp_hashing_include_fields) + self.update_hash_on_network(ecmp_hash = ecmp_hashing_include_fields, vn_fixture = self.vn1_fixture) + self.update_hash_on_port(ecmp_hash = ecmp_hashing_include_fields, vm_fixture = self.vm1_fixture) + self.verify_traffic_flow( + self.vm1_fixture, [self.vm2_fixture], self.si_fixtures[0], self.vn1_fixture) + + for node in self.inputs.compute_ips: + self.inputs.restart_service('supervisor-vrouter', [node]) + cluster_status, error_nodes = ContrailStatusChecker().wait_till_contrail_cluster_stable(nodes = [node]) + assert cluster_status, 'Hash of error nodes and services : %s' % (error_nodes) + + self.verify_traffic_flow( + self.vm1_fixture, [self.vm2_fixture], self.si_fixtures[0], self.vn1_fixture) + + self.addCleanup(self.config_all_hash(ecmp_hashing_include_fields)) + return True + # end test_ecmp_svc_in_network_restart_vrouter + + @preposttest_wrapper + def test_ecmp_hash_svc_in_network_restart_schema(self): + """ + Description: Validate ECMP Hash with service chaining in-network mode + Test steps: + 1. Creating vm's - vm1 and vm2 in networks vn1 and vn2. + 2. Creating a service instance in in-network mode with 4 instances and + left-interface of the service instances sharing the IP. + 3. Creating a service chain by applying the service instance as a service in a policy between the VNs. + 4. Checking for ping and tcp traffic between vm1 and vm2. + 5. Delete the Service Instances and Service Template. + 6. This testcase will be run in only multiple compute node scenario. 
+ Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2 from vm1. + """ + + self.verify_svc_in_network_datapath( + si_count=1, svc_scaling=True, max_inst=3) + svm_ids = self.si_fixtures[0].svm_ids + self.get_rt_info_tap_intf_list( + self.vn1_fixture, self.vm1_fixture, self.vm2_fixture, svm_ids) + dst_vm_list = [self.vm2_fixture] + + ecmp_hashing_include_fields = {"destination_ip": True, "destination_port": True, "hashing_configured": True, "ip_protocol": True, "source_ip": True, "source_port": True} + self.config_all_hash(ecmp_hashing_include_fields) + self.update_hash_on_network(ecmp_hash = ecmp_hashing_include_fields, vn_fixture = self.vn1_fixture) + self.update_hash_on_port(ecmp_hash = ecmp_hashing_include_fields, vm_fixture = self.vm1_fixture) + self.verify_traffic_flow( + self.vm1_fixture, [self.vm2_fixture], self.si_fixtures[0], self.vn1_fixture) + + for node in self.inputs.cfgm_ips: + self.inputs.restart_service('contrail-schema', [node]) + cluster_status, error_nodes = ContrailStatusChecker().wait_till_contrail_cluster_stable(nodes = [node]) + assert cluster_status, 'Hash of error nodes and services : %s' % (error_nodes) + + self.verify_traffic_flow( + self.vm1_fixture, [self.vm2_fixture], self.si_fixtures[0], self.vn1_fixture) + + self.addCleanup(self.config_all_hash(ecmp_hashing_include_fields)) + return True + # end test_ecmp_svc_in_network_restart_schema + diff --git a/serial_scripts/encap/base.py b/serial_scripts/encap/base.py index d97501487..618cd5323 100644 --- a/serial_scripts/encap/base.py +++ b/serial_scripts/encap/base.py @@ -1,28 +1,16 @@ -import test +import test_v1 import fixtures from common import isolated_creds -class BaseEncapTest(test.BaseTestCase): +class BaseEncapTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseEncapTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - 
cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - #cls.connections= ContrailConnections(cls.inputs) cls.quantum_h = cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib = cls.connections.vnc_lib -# cls.logger= cls.inputs.logger cls.agent_inspect = cls.connections.agent_inspect cls.cn_inspect = cls.connections.cn_inspect cls.analytics_obj = cls.connections.analytics_obj @@ -30,7 +18,5 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - # cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(BaseEncapTest, cls).tearDownClass() # end tearDownClass diff --git a/serial_scripts/encap/test_encap.py b/serial_scripts/encap/test_encap.py index c98ef0d3e..af653bf47 100644 --- a/serial_scripts/encap/test_encap.py +++ b/serial_scripts/encap/test_encap.py @@ -81,7 +81,6 @@ def test_encaps_mx_gateway(self): self.project_fixture = self.useFixture( ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) self.logger.info( @@ -233,7 +232,6 @@ def test_apply_policy_fip_on_same_vn_gw_mx(self): self.project_fixture = self.useFixture( ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) self.logger.info( @@ -467,7 +465,6 @@ def test_apply_policy_fip_vn_with_encaps_change_gw_mx(self): self.project_fixture = self.useFixture( ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) self.logger.info( diff --git a/serial_scripts/evpn/base.py b/serial_scripts/evpn/base.py index 93fc05ede..773a196e8 100644 --- a/serial_scripts/evpn/base.py +++ b/serial_scripts/evpn/base.py @@ -1,25 +1,15 @@ -import test +import test_v1 import fixtures from common import isolated_creds -class 
BaseEvpnTest(test.BaseTestCase): +class BaseEvpnTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseEvpnTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - #cls.connections= ContrailConnections(cls.inputs) cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib -# cls.logger= cls.inputs.logger cls.agent_inspect= cls.connections.agent_inspect cls.cn_inspect= cls.connections.cn_inspect cls.analytics_obj=cls.connections.analytics_obj @@ -27,8 +17,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - #cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(BaseEvpnTest, cls).tearDownClass() #end tearDownClass diff --git a/serial_scripts/evpn/test_evpn.py b/serial_scripts/evpn/test_evpn.py index f99838a5d..815e9b0ff 100644 --- a/serial_scripts/evpn/test_evpn.py +++ b/serial_scripts/evpn/test_evpn.py @@ -31,11 +31,11 @@ def test_with_gre_encap_dns_disabled_for_l2_vn(self): return self.verify_dns_disabled(encap='gre') @preposttest_wrapper - def test_with_gre_encap_l2_ipv6_multicast_traffic(self): + def test_with_gre_encap_l2_multicast_traffic(self): '''Test l2 multicast with gre encap Maintainer: hkumar@juniper.net ''' - return self.verify_l2_ipv6_multicast_traffic(encap='gre') + return self.verify_l2_multicast_traffic(encap='gre') #@preposttest_wrapper #def test_with_gre_encap_l2l3_ipv6_multicast_traffic(self): @@ -44,16 +44,16 @@ def test_with_gre_encap_l2_ipv6_multicast_traffic(self): # ''' # return self.verify_l2l3_ipv6_multicast_traffic(encap='gre') - #@preposttest_wrapper - #def 
test_with_gre_encap_change_of_l2_vn_forwarding_mode(self): - # '''Test to verify change of vn forwarding mode from l2 to l2l3 with gre encap - # Maintainer: hkumar@juniper.net - # ''' - # return self.verify_change_of_l2_vn_forwarding_mode(encap='gre') + @preposttest_wrapper + def test_with_gre_encap_change_of_l2_vn_forwarding_mode(self): + '''Test to verify change of vn forwarding mode from l2 to l2l3 with gre encap + Maintainer: hkumar@juniper.net + ''' + return self.verify_change_of_l2_vn_forwarding_mode(encap='gre') @preposttest_wrapper def test_with_gre_encap_change_of_l2l3_vn_forwarding_mode(self): - '''Test to verify change of vn forwarding mode from l2l3 to l2 with gre encap + '''Test to verify change of vn forwarding mode from l2l3 to l2 with gre encap Maintainer: hkumar@juniper.net ''' return self.verify_change_of_l2l3_vn_forwarding_mode(encap='gre') @@ -64,6 +64,12 @@ def test_with_gre_encap_to_verify_l2_vm_file_trf_by_scp(self): Maintainer: hkumar@juniper.net ''' return self.verify_l2_vm_file_trf_by_scp(encap='gre') + + @preposttest_wrapper + def test_with_gre_encap_change_of_l3_vn_forwarding_mode(self): + '''Test to verify change of vn forwarding mode from l2l3 to l3 with gre encap''' + + return self.verify_change_of_l3_vn_forwarding_mode(encap='gre') @preposttest_wrapper def test_with_gre_encap_to_verify_l2_vm_file_trf_by_tftp(self): @@ -72,12 +78,6 @@ def test_with_gre_encap_to_verify_l2_vm_file_trf_by_tftp(self): ''' return self.verify_l2_vm_file_trf_by_tftp(encap='gre') - @preposttest_wrapper - def test_with_gre_encap_ipv6_ping_for_non_ip_communication(self): - '''Test ping to to IPV6 link local address of VM to check non ip traffic communication using GRE (L2 Unicast) - ''' - return self.verify_ipv6_ping_for_non_ip_communication(encap='gre') - #@preposttest_wrapper #def test_with_gre_encap_ipv6_ping_for_configured_ipv6_address(self): # '''Test ping to to configured IPV6 address of VM with encap gre @@ -89,6 +89,12 @@ def 
test_with_gre_l2_mode(self): '''Test L2 forwarding mode with GRE Encap ''' return self.verify_epvn_l2_mode(encap='gre') + + @preposttest_wrapper + def test_with_gre_arp_resolution(self): + '''Test arp resolution for different forwarding modes with GRE Encap + ''' + return self.verify_l2_only_and_l3_only_arp_resolution(encap='gre') class TestEvpnCasesMplsoUdp(base.BaseEvpnTest, VerifyEvpnCases): @@ -111,11 +117,11 @@ def test_with_udp_encap_dns_disabled_for_l2_vn(self): return self.verify_dns_disabled(encap='udp') @preposttest_wrapper - def test_with_udp_encap_l2_ipv6_multicast_traffic(self): + def test_with_udp_encap_l2_multicast_traffic(self): '''Test l2 multicast with udp encap Maintainer: hkumar@juniper.net ''' - return self.verify_l2_ipv6_multicast_traffic(encap='udp') + return self.verify_l2_multicast_traffic(encap='udp') #@preposttest_wrapper #def test_with_udp_encap_l2l3_ipv6_multicast_traffic(self): @@ -124,12 +130,12 @@ def test_with_udp_encap_l2_ipv6_multicast_traffic(self): # ''' # return self.verify_l2l3_ipv6_multicast_traffic(encap='udp') - #@preposttest_wrapper - #def test_with_udp_encap_change_of_l2_vn_forwarding_mode(self): - # '''Test to verify change of vn forwarding mode from l2 to l2l3 with udp encap - # Maintainer: hkumar@juniper.net - # ''' - # return self.verify_change_of_l2_vn_forwarding_mode(encap='udp') + @preposttest_wrapper + def test_with_udp_encap_change_of_l2_vn_forwarding_mode(self): + '''Test to verify change of vn forwarding mode from l2 to l2l3 with udp encap + Maintainer: hkumar@juniper.net + ''' + return self.verify_change_of_l2_vn_forwarding_mode(encap='udp') @preposttest_wrapper def test_with_udp_encap_change_of_l2l3_vn_forwarding_mode(self): @@ -137,6 +143,12 @@ def test_with_udp_encap_change_of_l2l3_vn_forwarding_mode(self): Maintainer: hkumar@juniper.net ''' return self.verify_change_of_l2l3_vn_forwarding_mode(encap='udp') + + @preposttest_wrapper + def test_with_udp_encap_change_of_l3_vn_forwarding_mode(self): + '''Test to 
verify change of vn forwarding mode from l2l3 to l3 with udp encap''' + + return self.verify_change_of_l3_vn_forwarding_mode(encap='udp') @preposttest_wrapper def test_with_udp_encap_to_verify_l2_vm_file_trf_by_scp(self): @@ -152,12 +164,6 @@ def test_with_udp_encap_to_verify_l2_vm_file_trf_by_tftp(self): ''' return self.verify_l2_vm_file_trf_by_tftp(encap='udp') - @preposttest_wrapper - def test_with_udp_encap_ipv6_ping_for_non_ip_communication(self): - '''Test ping to to IPV6 link local address of VM to check non ip traffic communication using UDP(L2 Unicast) - ''' - return self.verify_ipv6_ping_for_non_ip_communication(encap='udp') - #@preposttest_wrapper #def test_with_udp_encap_ipv6_ping_for_configured_ipv6_address(self): # '''Test ping to to configured IPV6 address of VM with encap udp @@ -169,6 +175,12 @@ def test_with_udp_l2_mode(self): '''Test L2 forwarding mode with UDP Encap ''' return self.verify_epvn_l2_mode(encap='udp') + + @preposttest_wrapper + def test_with_udp_arp_resolution(self): + '''Test arp resolution for different forwarding modes with UDP Encap + ''' + return self.verify_l2_only_and_l3_only_arp_resolution(encap='udp') class TestEvpnCasesVxlan(base.BaseEvpnTest, VerifyEvpnCases): @@ -199,11 +211,11 @@ def test_with_vxlan_encap_dns_disabled_for_l2_vn(self): # return self.verify_l2l3_ipv6_multicast_traffic(encap='vxlan') @preposttest_wrapper - def test_with_vxlan_encap_l2_ipv6_multicast_traffic(self): + def test_with_vxlan_encap_l2_multicast_traffic(self): '''Test l2 multicast with vxlan encap Maintainer: hkumar@juniper.net ''' - return self.verify_l2_ipv6_multicast_traffic(encap='vxlan') + return self.verify_l2_multicast_traffic(encap='vxlan') #@preposttest_wrapper #def test_with_vxlan_encap_change_of_l2_vn_forwarding_mode(self): @@ -218,6 +230,12 @@ def test_with_vxlan_encap_change_of_l2l3_vn_forwarding_mode(self): Maintainer: hkumar@juniper.net ''' return self.verify_change_of_l2l3_vn_forwarding_mode(encap='vxlan') + + @preposttest_wrapper 
+ def test_with_vxlan_encap_change_of_l3_vn_forwarding_mode(self): + '''Test to verify change of vn forwarding mode from l2l3 to l3 with vxlan encap''' + + return self.verify_change_of_l3_vn_forwarding_mode(encap='vxlan') @test.attr(type=['serial', 'sanity' ]) @preposttest_wrapper @@ -248,17 +266,11 @@ def test_with_vxlan_encap_to_verify_vlan_qinq_tagged_packets_for_l2_vn(self): 1. Setup eth1.100 and eth1.200 on both vms 2. Setup qinq vlans eth1.100.1000, eth1.100.2000, eth1.200.1000, eth1.200.2000 on both vms 3. Ping different vlans and expext ping to pass and verify in traffic that corresponding vlan tags show up - 4. Try to ping between vlans with different outer vlan tag and expect ping to fai + 4. Try to ping between vlans with different outer vlan tag and expect ping to fail Maintainer: hkumar@juniper.net ''' return self.verify_vlan_qinq_tagged_packets_for_l2_vn(encap='vxlan') - @preposttest_wrapper - def test_with_vxlan_encap_ipv6_ping_for_non_ip_communication(self): - '''Test ping to to IPV6 link local address of VM to check non_ip traffic communication using VXLAN(L2 Unicast) - ''' - return self.verify_ipv6_ping_for_non_ip_communication(encap='vxla') - #@preposttest_wrapper #def test_with_vxlan_encap_ipv6_ping_for_configured_ipv6_address(self): # '''Test ping to to configured IPV6 address of VM with encap VXLAN @@ -281,8 +293,8 @@ def test_with_vxlan_l2_mode(self): 1.VXLAN configured as highest encapsulation priority. 2.Configured 2 VN . EVPN-MGMT-VN(configured with default l2-l3 mode ) and EVPN-L2-VN (configured with L2 only mode) 3.Create 2 Vms. Both connected to all 2 VN. Connection with EVPN-MGMT-VN is only to access to VM - 4.Configured IPv6 address on interface which is connected L2 only vn - 5.Check the IPv6 communication between them. + 4.Send L2 Traffic on interface which is connected L2 only vn + 5.Check the communication between them. 
Pass criteria: Step 5 should pass Maintainer: chhandak@juniper.net @@ -296,6 +308,13 @@ def test_with_vxlan_l2_mode(self): # ''' # return self.verify_vxlan_mode_with_configured_vxlan_id_l2l3_vn() + @preposttest_wrapper + def test_with_vxlan_arp_resolution(self): + '''Test arp resolution for different forwarding modes with VXLAN Encap + Maintainer: aswanikr@juniper.net + ''' + return self.verify_l2_only_and_l3_only_arp_resolution(encap='vxlan') + class TestEvpnCasesRestart(base.BaseEvpnTest, VerifyEvpnCases): @classmethod @@ -325,9 +344,9 @@ def test_with_vxlan_encap_agent_restart(self): Description:Test agent restart with VXLAN Encap 1. Configure VXLAN as highest priority 2. Configure 2 VM under a VN configured with l2-l3 mode - 3. Check IPV6 (non ip) communication between them + 3. Send L2 traffic and verify communication between vms 4. Restart the contrail-grouter service. - 5. Again check the IPV6 (non ip) communication between them. + 5. Send L2 Traffic and verify communication between vms Pass criteria: Step 3 and 5 should pass Maintainer: chhandak@juniper.net ''' diff --git a/serial_scripts/evpn/verify.py b/serial_scripts/evpn/verify.py index 59c254551..121e55001 100644 --- a/serial_scripts/evpn/verify.py +++ b/serial_scripts/evpn/verify.py @@ -10,8 +10,10 @@ from contrail_fixtures import * import random import socket +from tcutils.tcpdump_utils import verify_tcpdump_count, search_in_pcap from tcutils.commands import ssh, execute_cmd, execute_cmd_out from fabric.operations import get, put +from string import Template class VerifyEvpnCases(): @@ -50,13 +52,16 @@ def verify_dns_disabled(self, encap): vn3_fixture = self.useFixture( VNFixture( project_name=self.inputs.project_name, connections=self.connections, - vn_name=self.vn3_name, option='api', inputs=self.inputs, subnets=self.vn3_subnets)) + vn_name=self.vn3_name,option='contrail', inputs=self.inputs, subnets=self.vn3_subnets)) vn4_fixture = self.useFixture( VNFixture( project_name=self.inputs.project_name, 
connections=self.connections, - vn_name=self.vn4_name, option='api', inputs=self.inputs, subnets=self.vn4_subnets, enable_dhcp=False, dhcp_option_list=dhcp_option_list)) - + vn_name=self.vn4_name, option='contrail', inputs=self.inputs, + subnets=self.vn4_subnets, dhcp_option_list=dhcp_option_list, + enable_dhcp=False,forwarding_mode='l2')) + assert vn3_fixture.verify_on_setup() + assert vn4_fixture.verify_on_setup() self.connections.vnc_lib_fixture.set_rpf_mode(vn4_fixture.vn_fq_name, 'disable') vn_l2_vm1_name = 'testvm1' @@ -66,11 +71,10 @@ def verify_dns_disabled(self, encap): VMFixture( project_name=self.inputs.project_name, connections=self.connections, - flavor='contrail_flavor_large', vn_objs=[ vn3_fixture.obj, vn4_fixture.obj], - image_name='ubuntu-dhcpdns-server', + image_name='ubuntu-dns-server', vm_name=vm1_name, node_name=compute_2)) @@ -79,7 +83,6 @@ def verify_dns_disabled(self, encap): VMFixture( project_name=self.inputs.project_name, connections=self.connections, - flavor='contrail_flavor_large', vn_objs=[ vn3_fixture.obj, vn4_fixture.obj], @@ -103,12 +106,10 @@ def verify_dns_disabled(self, encap): assert vm2_fixture.wait_till_vm_is_up() assert vn_l2_vm1_fixture.wait_till_vm_is_up() - assert vn3_fixture.verify_on_setup() - assert vn4_fixture.verify_on_setup() assert vm1_fixture.verify_on_setup() assert vm2_fixture.verify_on_setup() assert vn_l2_vm1_fixture.verify_on_setup() - + # Configure dhcp-server vm on eth1 and bring the intreface up # forcefully self.bringup_interface_forcefully(vm1_fixture) @@ -198,147 +199,8 @@ def verify_dns_disabled(self, encap): return result - def verify_ipv6_ping_for_non_ip_communication(self, encap): - - # Setting up default encapsulation - self.logger.info('Setting new Encap before continuing') - if (encap == 'gre'): - self.update_encap_priority('gre') - elif (encap == 'udp'): - self.update_encap_priority('udp') - elif (encap == 'vxlan'): - self.update_encap_priority('vxlan') - host_list = 
self.connections.nova_h.get_hosts() - compute_1 = host_list[0] - compute_2 = host_list[0] - if len(host_list) > 1: - compute_1 = host_list[0] - compute_2 = host_list[1] - (self.vn1_name, self.vn1_subnets) = ("EVPN-VN1", ["11.1.1.0/24"]) - vn1_fixture = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, - connections=self.connections, - inputs=self.inputs, - vn_name=self.vn1_name, - subnets=self.vn1_subnets)) - - vm1_name = 'EVPN_VN1_VM1' - vm2_name = 'EVPN_VN1_VM2' - - vn1_vm1_fixture = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn1_fixture.obj, - image_name='ubuntu', - vm_name=vm1_name, - node_name=compute_1)) - vn1_vm2_fixture = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn1_fixture.obj, - image_name='ubuntu', - vm_name=vm2_name, - node_name=compute_2)) - - assert vn1_fixture.verify_on_setup() - assert vn1_vm1_fixture.verify_on_setup() - assert vn1_vm2_fixture.verify_on_setup() - for i in range(0, 20): - vm2_ipv6 = vn1_vm2_fixture.get_vm_ipv6_addr_from_vm() - if vm2_ipv6 is not None: - break - if vm2_ipv6 is None: - self.logger.error('Not able to get VM link local address') - return False - self.tcpdump_start_on_all_compute() - assert vn1_vm1_fixture.ping_to_ipv6( - vm2_ipv6.split("/")[0].strip(), count='15', other_opt='-I eth0') - comp_vm2_ip = vn1_vm2_fixture.vm_node_ip - self.tcpdump_analyze_on_compute(comp_vm2_ip, encap.upper()) - self.tcpdump_stop_on_all_compute() - - return True - # End verify_ipv6_ping_for_non_ip_communication - - def verify_ping_to_configured_ipv6_address(self, encap): - '''Configure IPV6 address to VM. Test IPv6 ping to that address. 
- ''' - result = True - # Setting up default encapsulation - self.logger.info('Setting new Encap before continuing') - if (encap == 'gre'): - self.update_encap_priority('gre') - elif (encap == 'udp'): - self.update_encap_priority('udp') - elif (encap == 'vxlan'): - self.update_encap_priority('vxlan') - - host_list = self.connections.nova_h.get_hosts() - compute_1 = host_list[0] - compute_2 = host_list[0] - if len(host_list) > 1: - compute_1 = host_list[0] - compute_2 = host_list[1] - - vn1_vm1 = '1001::1/64' - vn1_vm2 = '1001::2/64' - - (self.vn1_name, self.vn1_subnets) = ("EVPN-VN1", ["11.1.1.0/24"]) - vn1_fixture = self.useFixture( - VNFixture( - project_name=self.inputs.project_name, - connections=self.connections, - inputs=self.inputs, - vn_name=self.vn1_name, - subnets=self.vn1_subnets)) - - vm1_name = 'EVPN_VN1_VM1' - vm2_name = 'EVPN_VN1_VM2' - - vn1_vm1_fixture = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn1_fixture.obj, - image_name='ubuntu', - vm_name=vm1_name, - node_name=compute_1)) - vn1_vm2_fixture = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn1_fixture.obj, - image_name='ubuntu', - vm_name=vm2_name, - node_name=compute_2)) - - assert vn1_fixture.verify_on_setup() - assert vn1_vm1_fixture.verify_on_setup() - assert vn1_vm2_fixture.verify_on_setup() - # Waiting for VM to boots up - assert vn1_vm1_fixture.wait_till_vm_is_up() - assert vn1_vm2_fixture.wait_till_vm_is_up() - cmd_to_pass1 = ['sudo ifconfig eth0 inet6 add %s' % (vn1_vm1)] - vn1_vm1_fixture.run_cmd_on_vm(cmds=cmd_to_pass1, as_sudo=True, timeout=60) - cmd_to_pass2 = ['sudo ifconfig eth0 inet6 add %s' % (vn1_vm2)] - vn1_vm2_fixture.run_cmd_on_vm(cmds=cmd_to_pass2, as_sudo=True, timeout=60) - vm1_ipv6 = vn1_vm1_fixture.get_vm_ipv6_addr_from_vm(addr_type='global') - vm2_ipv6 = vn1_vm2_fixture.get_vm_ipv6_addr_from_vm(addr_type='global') - 
self.tcpdump_start_on_all_compute() - assert vn1_vm1_fixture.ping_to_ipv6( - vm2_ipv6.split("/")[0].strip(), count='15') - comp_vm2_ip = vn1_vm2_fixture.vm_node_ip - self.tcpdump_analyze_on_compute(comp_vm2_ip, encap.upper()) - self.tcpdump_stop_on_all_compute() - - return True - # End verify_ping_to_configured_ipv6_address - - def verify_l2_ipv6_multicast_traffic(self, encap): + def verify_l2_multicast_traffic(self, encap): '''Test ping to all hosts ''' # Setting up default encapsulation @@ -362,9 +224,7 @@ def verify_l2_ipv6_multicast_traffic(self, encap): compute_1 = host_list[0] compute_2 = host_list[1] compute_3 = host_list[1] - vn1_vm1 = '1001::1/64' - vn1_vm2 = '1001::2/64' - vn1_vm3 = '1001::3/64' + (self.vn3_name, self.vn3_subnets) = ("EVPN-MGMT-VN", ["33.1.1.0/24"]) (self.vn4_name, self.vn4_subnets) = ("EVPN-L2-VN", ["44.1.1.0/24"]) @@ -374,7 +234,7 @@ def verify_l2_ipv6_multicast_traffic(self, encap): connections=self.connections, inputs=self.inputs, vn_name=self.vn3_name, - subnets=self.vn3_subnets,)) + subnets=self.vn3_subnets)) vn4_fixture = self.useFixture( VNFixture( @@ -383,7 +243,8 @@ def verify_l2_ipv6_multicast_traffic(self, encap): inputs=self.inputs, vn_name=self.vn4_name, subnets=self.vn4_subnets, - enable_dhcp=False)) + enable_dhcp=False, + forwarding_mode='l2')) vn_l2_vm1_name = 'EVPN_VN_L2_VM1' vn_l2_vm2_name = 'EVPN_VN_L2_VM2' @@ -430,54 +291,30 @@ def verify_l2_ipv6_multicast_traffic(self, encap): assert vn_l2_vm1_fixture.wait_till_vm_is_up() assert vn_l2_vm2_fixture.wait_till_vm_is_up() assert vn_l2_vm3_fixture.wait_till_vm_is_up() + assert not self.verify_eth1_ip_from_vm(vn_l2_vm1_fixture),'L2 VM got IP when dhcp is disabled' + assert not self.verify_eth1_ip_from_vm(vn_l2_vm2_fixture),'L2 VM got IP when dhcp is disabled' + assert not self.verify_eth1_ip_from_vm(vn_l2_vm3_fixture),'L2 VM got IP when dhcp is disabled' + #send l2 multicast traffic + self.mac1 = vn_l2_vm1_fixture.mac_addr[vn4_fixture.vn_fq_name] + self.mac2 = 
'01:00:00:00:00:00' + vm2_intf = vn_l2_vm2_fixture.tap_intf[vn4_fixture.vn_fq_name]['name'] + vm3_intf = vn_l2_vm3_fixture.tap_intf[vn4_fixture.vn_fq_name]['name'] + filters = 'ether src %s' %(self.mac1) + session1,pcap1 = vn_l2_vm2_fixture.start_tcpdump(filters=filters,interface=vm2_intf) + session2,pcap2 = vn_l2_vm3_fixture.start_tcpdump(filters=filters,interface=vm3_intf) + self.logger.info('waiting to get tcpdump started') + sleep(10) + self.send_l2_traffic(vn_l2_vm1_fixture,iface='eth1') + result1 = verify_tcpdump_count(self, session1, pcap1, exp_count=10,mac=self.mac2) + result2 = verify_tcpdump_count(self, session2, pcap2, exp_count=10,mac=self.mac2) + result = result1 and result2 + assert result,'Failed to send multicast traffic' + # End verify_l2_multicast_traffic - # Bring the intreface up forcefully - self.bringup_interface_forcefully(vn_l2_vm1_fixture) - self.bringup_interface_forcefully(vn_l2_vm2_fixture) - self.bringup_interface_forcefully(vn_l2_vm3_fixture) - - # Configured IPV6 address - cmd_to_pass1 = ['ifconfig eth1 inet6 add %s' % (vn1_vm1)] - vn_l2_vm1_fixture.run_cmd_on_vm(cmds=cmd_to_pass1, as_sudo=True, timeout=60) - cmd_to_pass2 = ['ifconfig eth1 inet6 add %s' % (vn1_vm2)] - vn_l2_vm2_fixture.run_cmd_on_vm(cmds=cmd_to_pass2, as_sudo=True, timeout=60) - cmd_to_pass3 = ['ifconfig eth1 inet6 add %s' % (vn1_vm3)] - vn_l2_vm3_fixture.run_cmd_on_vm(cmds=cmd_to_pass3, as_sudo=True, timeout=60) - - ping_count = '10' - if encap != 'vxlan': - self.tcpdump_start_on_all_compute() - ping_output = vn_l2_vm1_fixture.ping_to_ipv6( - 'ff02::1', return_output=True, count=ping_count, other_opt='-I eth1') - self.logger.info("ping output : \n %s" % (ping_output)) - expected_result = ' 0% packet loss' - assert (expected_result in ping_output) - vm1_ipv6 = vn_l2_vm1_fixture.get_vm_ipv6_addr_from_vm( - intf='eth1', addr_type='link').split('/')[0] - vm2_ipv6 = vn_l2_vm2_fixture.get_vm_ipv6_addr_from_vm( - intf='eth1', addr_type='link').split('/')[0] - vm3_ipv6 = 
vn_l2_vm3_fixture.get_vm_ipv6_addr_from_vm( - intf='eth1', addr_type='link').split('/')[0] - ip_list = [vm1_ipv6, vm2_ipv6, vm3_ipv6] - # getting count of ping response from each vm - string_count_dict = {} - string_count_dict = get_string_match_count(ip_list, ping_output) - self.logger.info("output %s" % (string_count_dict)) - self.logger.info("There should be atleast 9 echo reply from each ip") - for k in ip_list: - # this is a workaround : ping utility exist as soon as it gets one - # response''' - assert (string_count_dict[k] >= (int(ping_count) - 1)) - if encap != 'vxlan': - comp_vm2_ip = vn_l2_vm2_fixture.vm_node_ip - self.tcpdump_analyze_on_compute(comp_vm2_ip, encap.upper()) - self.tcpdump_stop_on_all_compute() - return result - # End verify_l2_ipv6_multicast_traffic - def verify_l2l3_ipv6_multicast_traffic(self, encap): - '''Test ping to all hosts + def verify_change_of_l2_vn_forwarding_mode(self, encap): + '''Change the vn forwarding mode from l2 only to l2l3 and verify l2_l3 routes get updated ''' # Setting up default encapsulation self.logger.info('Setting new Encap before continuing') @@ -492,19 +329,9 @@ def verify_l2l3_ipv6_multicast_traffic(self, encap): host_list = self.connections.nova_h.get_hosts() compute_1 = host_list[0] compute_2 = host_list[0] - compute_3 = host_list[0] - if len(host_list) > 2: - compute_1 = host_list[0] - compute_2 = host_list[1] - compute_3 = host_list[2] - elif len(host_list) > 1: + if len(host_list) > 1: compute_1 = host_list[0] compute_2 = host_list[1] - compute_3 = host_list[1] - - vn1_vm1 = '1001::1/64' - vn1_vm2 = '1001::2/64' - vn1_vm3 = '1001::3/64' (self.vn3_name, self.vn3_subnets) = ("EVPN-MGMT-VN", ["33.1.1.0/24"]) vn3_fixture = self.useFixture( VNFixture( @@ -512,88 +339,95 @@ def verify_l2l3_ipv6_multicast_traffic(self, encap): connections=self.connections, inputs=self.inputs, vn_name=self.vn3_name, - subnets=self.vn3_subnets,)) + subnets=self.vn3_subnets)) vn_l2_vm1_name = 'EVPN_VN_L2_VM1' vn_l2_vm2_name = 
'EVPN_VN_L2_VM2' - vn_l2_vm3_name = 'EVPN_VN_L2_VM3' + (self.vn1_name, self.vn1_subnets) = ("EVPN-Test-VN1", ["55.1.1.0/24"]) + + self.vn1_fixture = self.useFixture( + VNFixture( + project_name=self.inputs.project_name, + connections=self.connections, + inputs=self.inputs, + vn_name=self.vn1_name, + subnets=self.vn1_subnets, + enable_dhcp=False, + forwarding_mode='l2')) + + assert self.vn1_fixture.verify_on_setup() vn_l2_vm1_fixture = self.useFixture( VMFixture( project_name=self.inputs.project_name, connections=self.connections, - vn_obj=vn3_fixture.obj, + vn_objs=[ + vn3_fixture.obj, + self.vn1_fixture.obj], image_name='ubuntu', vm_name=vn_l2_vm1_name, node_name=compute_1)) + vn_l2_vm2_fixture = self.useFixture( VMFixture( project_name=self.inputs.project_name, connections=self.connections, - vn_obj=vn3_fixture.obj, + vn_objs=[ + vn3_fixture.obj, + self.vn1_fixture.obj], image_name='ubuntu', vm_name=vn_l2_vm2_name, node_name=compute_2)) - vn_l2_vm3_fixture = self.useFixture( - VMFixture( - project_name=self.inputs.project_name, - connections=self.connections, - vn_obj=vn3_fixture.obj, - image_name='ubuntu', - vm_name=vn_l2_vm3_name, - node_name=compute_3)) - assert vn3_fixture.verify_on_setup() assert vn_l2_vm1_fixture.verify_on_setup() assert vn_l2_vm2_fixture.verify_on_setup() - assert vn_l2_vm3_fixture.verify_on_setup() # Wait till vm is up assert vn_l2_vm1_fixture.wait_till_vm_is_up() assert vn_l2_vm2_fixture.wait_till_vm_is_up() - assert vn_l2_vm3_fixture.wait_till_vm_is_up() - # Configured IPV6 address - cmd_to_pass1 = ['ifconfig eth0 inet6 add %s' % (vn1_vm1)] - vn_l2_vm1_fixture.run_cmd_on_vm(cmds=cmd_to_pass1, as_sudo=True, timeout=60) - cmd_to_pass2 = ['ifconfig eth0 inet6 add %s' % (vn1_vm2)] - vn_l2_vm2_fixture.run_cmd_on_vm(cmds=cmd_to_pass2, as_sudo=True, timeout=60) - cmd_to_pass3 = ['ifconfig eth0 inet6 add %s' % (vn1_vm3)] - vn_l2_vm3_fixture.run_cmd_on_vm(cmds=cmd_to_pass3, as_sudo=True, timeout=60) - # ping with multicast ipv6 ip on eth0 - 
ping_count = '10' - if encap != 'vxlan': - self.tcpdump_start_on_all_compute() - ping_output = vn_l2_vm1_fixture.ping_to_ipv6( - 'ff02::1', return_output=True, count=ping_count) - self.logger.info("ping output : \n %s" % (ping_output)) - expected_result = ' 0% packet loss' - assert (expected_result in ping_output) - vm1_ipv6 = vn_l2_vm1_fixture.get_vm_ipv6_addr_from_vm( - addr_type='link').split('/')[0] - vm2_ipv6 = vn_l2_vm2_fixture.get_vm_ipv6_addr_from_vm( - addr_type='link').split('/')[0] - vm3_ipv6 = vn_l2_vm3_fixture.get_vm_ipv6_addr_from_vm( - addr_type='link').split('/')[0] - ip_list = [vm1_ipv6, vm2_ipv6, vm3_ipv6] - # getting count of ping response from each vm - string_count_dict = {} - string_count_dict = get_string_match_count(ip_list, ping_output) - self.logger.info("output %s" % (string_count_dict)) - self.logger.info("There should be atleast 9 echo reply from each ip") - for k in ip_list: - # this is a workaround : ping utility exist as soon as it gets one - # response''' - assert (string_count_dict[k] >= (int(ping_count) - 1)) - if encap != 'vxlan': - comp_vm2_ip = vn_l2_vm2_fixture.vm_node_ip - self.tcpdump_analyze_on_compute(comp_vm2_ip, encap.upper()) + assert not self.verify_eth1_ip_from_vm(vn_l2_vm1_fixture),'L2 VM got IP when dhcp is disabled' + assert not self.verify_eth1_ip_from_vm(vn_l2_vm2_fixture),'L2 VM got IP when dhcp is disabled' + vn1_subnet_id=self.vn1_fixture.get_subnets()[0]['id'] + vn1_dhcp_dict = {'enable_dhcp': True} + self.vn1_fixture.update_subnet(vn1_subnet_id,vn1_dhcp_dict) + self.logger.info( + "Changing vn1 forwarding mode from l2 only to l2l3 followed by calling verify_on_setup for vms which checks if l3 routes are there or not ") + self.vn1_fixture.add_forwarding_mode( + project_fq_name=self.inputs.project_fq_name, + vn_name=self.vn1_name, + forwarding_mode='l2_l3') + assert self.verify_eth1_ip_from_vm(vn_l2_vm1_fixture),'VM did not got IP after enabling dhcp' + assert self.verify_eth1_ip_from_vm(vn_l2_vm2_fixture),'VM 
did not got IP after enabling dhcp' + assert self.vn1_fixture.verify_on_setup() + assert vn_l2_vm1_fixture.verify_on_setup() + assert vn_l2_vm2_fixture.verify_on_setup() + #removing ipv6 verification + #send l3 only traffic and verify + self.vn_l2_vm1_ip = vn_l2_vm1_fixture.vm_ip_dict[self.vn1_fixture.vn_fq_name][0] + self.vn_l2_vm2_ip = vn_l2_vm2_fixture.vm_ip_dict[self.vn1_fixture.vn_fq_name][0] + filters = '\'(src host %s and dst host %s and not arp)\'' \ + % (self.vn_l2_vm1_ip, self.vn_l2_vm2_ip) + tap_intf = vn_l2_vm2_fixture.tap_intf[self.vn1_fixture.vn_fq_name]['name'] + session, pcap = vn_l2_vm2_fixture.start_tcpdump(filters = filters,interface = tap_intf) + sleep(20) + self.send_l3_traffic(vn_l2_vm1_fixture) + assert verify_tcpdump_count(self,session, pcap,exp_count=10) + + #send l2 traffic and verify + self.mac1=vn_l2_vm1_fixture.mac_addr[self.vn1_fixture.vn_fq_name] + self.mac2=vn_l2_vm2_fixture.mac_addr[self.vn1_fixture.vn_fq_name] + filters = 'ether src %s' %(self.mac1) + tap_intf = vn_l2_vm2_fixture.tap_intf[self.vn1_fixture.vn_fq_name]['name'] + session,pcap = vn_l2_vm2_fixture.start_tcpdump(filters=filters,interface=tap_intf) + sleep(20) + self.send_l2_traffic(vn_l2_vm1_fixture,iface='eth1') + result = verify_tcpdump_count(self, session, pcap, exp_count=10,mac=self.mac2) - self.tcpdump_stop_on_all_compute() return result - # End verify_l2l3_ipv6_multicast_traffic + # End verify_change_of_l2_vn_forwarding_mode - def verify_change_of_l2_vn_forwarding_mode(self, encap): - '''Change the vn forwarding mode from l2 only to l2l3 and verify l3 routes get updated + def verify_change_of_l2l3_vn_forwarding_mode(self, encap): + '''Change the vn forwarding mode from l2l3 only to l2 and verify l3 routes gets deleted and check with l2 traffic ''' # Setting up default encapsulation self.logger.info('Setting new Encap before continuing') @@ -611,8 +445,6 @@ def verify_change_of_l2_vn_forwarding_mode(self, encap): if len(host_list) > 1: compute_1 = host_list[0] 
compute_2 = host_list[1] - vm1_ip6 = '1001::1/64' - vm2_ip6 = '1001::2/64' (self.vn3_name, self.vn3_subnets) = ("EVPN-MGMT-VN", ["33.1.1.0/24"]) vn3_fixture = self.useFixture( VNFixture( @@ -620,7 +452,7 @@ def verify_change_of_l2_vn_forwarding_mode(self, encap): connections=self.connections, inputs=self.inputs, vn_name=self.vn3_name, - subnets=self.vn3_subnets,)) + subnets=self.vn3_subnets)) vn_l2_vm1_name = 'EVPN_VN_L2_VM1' vn_l2_vm2_name = 'EVPN_VN_L2_VM2' @@ -633,8 +465,7 @@ def verify_change_of_l2_vn_forwarding_mode(self, encap): connections=self.connections, inputs=self.inputs, vn_name=self.vn1_name, - subnets=self.vn1_subnets, - enable_dhcp=False)) + subnets=self.vn1_subnets)) assert self.vn1_fixture.verify_on_setup() vn_l2_vm1_fixture = self.useFixture( VMFixture( @@ -663,45 +494,48 @@ def verify_change_of_l2_vn_forwarding_mode(self, encap): # Wait till vm is up assert vn_l2_vm1_fixture.wait_till_vm_is_up() assert vn_l2_vm2_fixture.wait_till_vm_is_up() + vn1_subnet_id=self.vn1_fixture.get_subnets()[0]['id'] + cmd = 'ip addr flush dev eth1' + vn_l2_vm1_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True, timeout=60) + vn_l2_vm2_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True, timeout=60) + vn1_dhcp_dict = {'enable_dhcp': False} + self.vn1_fixture.update_subnet(vn1_subnet_id,vn1_dhcp_dict) self.logger.info( - "Changing vn1 forwarding mode from l2 only to l2l3 followed by calling verify_on_setup for vms which checks if l3 routes are there or not ") - disable_subnet_dhcp = {'enable_dhcp':False} - self.quantum_h.update_subnet(self.vn1_fixture.vn_subnet_objs[0]['id'], disable_subnet_dhcp) + "Changing vn1 forwarding mode from l2l3 to l2 only followed by calling verify_on_setup for vms which checks l2 routes and explicity check l3 routes are removed ") + self.vn1_fixture.add_forwarding_mode( + project_fq_name=self.inputs.project_fq_name, + vn_name=self.vn1_name, + forwarding_mode='l2') + cmd = 'dhclient eth1' + vn_l2_vm1_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True, 
timeout=10) + vn_l2_vm2_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True, timeout=10) + assert not self.verify_eth1_ip_from_vm(vn_l2_vm1_fixture),'L2 VM got IP when dhcp is disabled' + assert not self.verify_eth1_ip_from_vm(vn_l2_vm2_fixture),'L2 VM got IP when dhcp is disabled' assert self.vn1_fixture.verify_on_setup() assert vn_l2_vm1_fixture.verify_on_setup() assert vn_l2_vm2_fixture.verify_on_setup() - - # Bring the intreface up forcefully - self.bringup_interface_forcefully(vn_l2_vm1_fixture) - self.bringup_interface_forcefully(vn_l2_vm2_fixture) - - # Configure IPV6 address - cmd_to_pass1 = ['ifconfig eth1 inet6 add %s' % (vm1_ip6)] - vn_l2_vm1_fixture.run_cmd_on_vm(cmds=cmd_to_pass1, as_sudo=True, timeout=60) - cmd_to_pass2 = ['ifconfig eth1 inet6 add %s' % (vm2_ip6)] - vn_l2_vm2_fixture.run_cmd_on_vm(cmds=cmd_to_pass2, as_sudo=True, timeout=60) - - vm1_ipv6 = vn_l2_vm1_fixture.get_vm_ipv6_addr_from_vm( - intf='eth1', addr_type='global').split('/')[0].strip() - vm2_ipv6 = vn_l2_vm2_fixture.get_vm_ipv6_addr_from_vm( - intf='eth1', addr_type='global').split('/')[0].strip() - - self.tcpdump_start_on_all_compute() - assert vn_l2_vm1_fixture.ping_to_ipv6(vm2_ipv6, count='15', - other_opt='-I eth1') - assert vn_l2_vm2_fixture.ping_to_ipv6(vm1_ipv6, count='15', - other_opt='-I eth1') - comp_vm1_ip = vn_l2_vm1_fixture.vm_node_ip - comp_vm2_ip = vn_l2_vm2_fixture.vm_node_ip - self.tcpdump_analyze_on_compute(comp_vm1_ip, encap.upper()) - self.tcpdump_analyze_on_compute(comp_vm2_ip, encap.upper()) - - self.tcpdump_stop_on_all_compute() + + #send l2 traffic and verify + self.mac1=vn_l2_vm1_fixture.mac_addr[self.vn1_fixture.vn_fq_name] + self.mac2=vn_l2_vm2_fixture.mac_addr[self.vn1_fixture.vn_fq_name] + filters = 'ether src %s' %(self.mac1) + tap_intf = vn_l2_vm2_fixture.tap_intf[self.vn1_fixture.vn_fq_name]['name'] + session,pcap = vn_l2_vm2_fixture.start_tcpdump(filters=filters,interface=tap_intf) + self.logger.info('waiting to get tcpdump started') + sleep(20) + 
self.send_l2_traffic(vn_l2_vm1_fixture,iface='eth1') + result = verify_tcpdump_count(self, session, pcap, exp_count=10,mac=self.mac2) + + #for bug-id 1514703 + #check ping working between l2 vms + #assert vn_l2_vm1_fixture.ping_with_certainty(dst_vm_fixture=vn_l2_vm2_fixture, + # vn_fq_name=self.vn1_fixture.vn_fq_name) + return result - # End verify_change_of_l2_vn_forwarding_mode + # End verify_change_of_l2l3_vn_forwarding_mode - def verify_change_of_l2l3_vn_forwarding_mode(self, encap): - '''Change the vn forwarding mode from l2l3 only to l2 and verify l3 routes gets deleted + def verify_change_of_l3_vn_forwarding_mode(self, encap): + '''Change the vn forwarding mode from l2_l3 to l3_only and verify l3 mode ''' # Setting up default encapsulation self.logger.info('Setting new Encap before continuing') @@ -719,8 +553,6 @@ def verify_change_of_l2l3_vn_forwarding_mode(self, encap): if len(host_list) > 1: compute_1 = host_list[0] compute_2 = host_list[1] - vm1_ip6 = '1001::1/64' - vm2_ip6 = '1001::2/64' (self.vn3_name, self.vn3_subnets) = ("EVPN-MGMT-VN", ["33.1.1.0/24"]) vn3_fixture = self.useFixture( VNFixture( @@ -728,7 +560,7 @@ def verify_change_of_l2l3_vn_forwarding_mode(self, encap): connections=self.connections, inputs=self.inputs, vn_name=self.vn3_name, - subnets=self.vn3_subnets,)) + subnets=self.vn3_subnets)) vn_l2_vm1_name = 'EVPN_VN_L2_VM1' vn_l2_vm2_name = 'EVPN_VN_L2_VM2' @@ -771,36 +603,29 @@ def verify_change_of_l2l3_vn_forwarding_mode(self, encap): assert vn_l2_vm1_fixture.wait_till_vm_is_up() assert vn_l2_vm2_fixture.wait_till_vm_is_up() self.logger.info( - "Changing vn1 forwarding mode from l2l3 to l2 only followed by calling verify_on_setup for vms which checks l2 routes and explicity check l3 routes are removed ") - enable_subnet_dhcp = {'enable_dhcp':False} - self.quantum_h.update_subnet(self.vn1_fixture.vn_subnet_objs[0]['id'], enable_subnet_dhcp) + "Changing vn1 forwarding mode from l2l3 to l3 only followed by calling verify_on_setup ") + 
self.vn1_fixture.add_forwarding_mode( + project_fq_name=self.inputs.project_fq_name, + vn_name=self.vn1_name, + forwarding_mode='l3') assert self.vn1_fixture.verify_on_setup() assert vn_l2_vm1_fixture.verify_on_setup() assert vn_l2_vm2_fixture.verify_on_setup() - - # Bring the intreface up forcefully - self.bringup_interface_forcefully(vn_l2_vm1_fixture) - self.bringup_interface_forcefully(vn_l2_vm2_fixture) - - # Configure IPV6 address - cmd_to_pass1 = ['ifconfig eth1 inet6 add %s' % (vm1_ip6)] - vn_l2_vm1_fixture.run_cmd_on_vm(cmds=cmd_to_pass1, as_sudo=True, timeout=60) - cmd_to_pass2 = ['ifconfig eth1 inet6 add %s' % (vm2_ip6)] - vn_l2_vm2_fixture.run_cmd_on_vm(cmds=cmd_to_pass2, as_sudo=True, timeout=60) - - vm1_ipv6 = vn_l2_vm1_fixture.get_vm_ipv6_addr_from_vm( - intf='eth1', addr_type='global').split('/')[0].strip() - vm2_ipv6 = vn_l2_vm2_fixture.get_vm_ipv6_addr_from_vm( - intf='eth1', addr_type='global').split('/')[0].strip() - - assert vn_l2_vm1_fixture.ping_to_ipv6(vm2_ipv6, count='15', - other_opt='-I eth1') - assert vn_l2_vm2_fixture.ping_to_ipv6(vm1_ipv6, count='15', - other_opt='-I eth1') - - self.tcpdump_stop_on_all_compute() + + #send l3 only traffic and verify + self.vn_l2_vm1_ip = vn_l2_vm1_fixture.vm_ip_dict[self.vn1_fixture.vn_fq_name][0] + self.vn_l2_vm2_ip = vn_l2_vm2_fixture.vm_ip_dict[self.vn1_fixture.vn_fq_name][0] + filters = '\'(src host %s and dst host %s and not arp)\'' \ + % (self.vn_l2_vm1_ip, self.vn_l2_vm2_ip) + tap_intf = vn_l2_vm2_fixture.tap_intf[self.vn1_fixture.vn_fq_name]['name'] + session, pcap = vn_l2_vm2_fixture.start_tcpdump(filters = filters,interface = tap_intf) + sleep(10) + output = self.send_l3_traffic(vn_l2_vm1_fixture) + assert verify_tcpdump_count(self,session, pcap,exp_count=10) + return result - # End verify_change_of_l2l3_vn_forwarding_mode + + # End verify_change_of_l3_vn_forwarding_mode def get_matching_vrf(self, vrf_objs, vrf_name): return [x for x in vrf_objs if x['name'] == vrf_name][0] @@ -813,7 +638,6 @@ 
def verify_vxlan_mode_with_configured_vxlan_id_l2_vn(self): # Setting up default encapsulation self.logger.info('Setting new Encap before continuing') self.update_encap_priority(encap) - result = True host_list = self.connections.nova_h.get_hosts() compute_1 = host_list[0] @@ -821,9 +645,7 @@ def verify_vxlan_mode_with_configured_vxlan_id_l2_vn(self): if len(host_list) > 1: compute_1 = host_list[0] compute_2 = host_list[1] - - vm1_ip6 = '1001::1/64' - vm2_ip6 = '1001::2/64' + (self.vn3_name, self.vn3_subnets) = ("EVPN-MGMT-VN", ["33.1.1.0/24"]) vn3_fixture = self.useFixture( VNFixture( @@ -831,7 +653,7 @@ def verify_vxlan_mode_with_configured_vxlan_id_l2_vn(self): connections=self.connections, inputs=self.inputs, vn_name=self.vn3_name, - subnets=self.vn3_subnets,)) + subnets=self.vn3_subnets)) vn_l2_vm1_name = 'EVPN_VN_L2_VM1' vn_l2_vm2_name = 'EVPN_VN_L2_VM2' @@ -845,8 +667,8 @@ def verify_vxlan_mode_with_configured_vxlan_id_l2_vn(self): self.vxlan_id = str(vxlan_random_id) self.connections.vnc_lib_fixture.set_vxlan_mode('configured') - self.addCleanup(self.connections.vnc_lib_fixture.set_vxlan_mode( - 'automatic')) + self.addCleanup(self.connections.vnc_lib_fixture.set_vxlan_mode, + vxlan_mode='automatic') self.vn1_fixture = self.useFixture( VNFixture( project_name=self.inputs.project_name, @@ -854,8 +676,9 @@ def verify_vxlan_mode_with_configured_vxlan_id_l2_vn(self): inputs=self.inputs, vn_name=self.vn1_name, subnets=self.vn1_subnets, + vxlan_id=self.vxlan_id, enable_dhcp=False, - vxlan_id=self.vxlan_id)) + forwarding_mode='l2')) assert self.vn1_fixture.verify_on_setup() vn_l2_vm1_fixture = self.useFixture( @@ -882,7 +705,7 @@ def verify_vxlan_mode_with_configured_vxlan_id_l2_vn(self): assert vn3_fixture.verify_on_setup() assert vn_l2_vm1_fixture.verify_on_setup() assert vn_l2_vm2_fixture.verify_on_setup() - + # Verify that configured vxlan_id shows up in agent introspect for compute_ip in self.inputs.compute_ips: inspect_h = self.agent_inspect[compute_ip] @@ 
-925,29 +748,20 @@ def verify_vxlan_mode_with_configured_vxlan_id_l2_vn(self): # Wait till vm is up assert vn_l2_vm1_fixture.wait_till_vm_is_up() assert vn_l2_vm2_fixture.wait_till_vm_is_up() - - # Bring the intreface up forcefully - self.bringup_interface_forcefully(vn_l2_vm1_fixture) - self.bringup_interface_forcefully(vn_l2_vm2_fixture) - - # Configure IPV6 address - cmd_to_pass1 = ['ifconfig eth1 inet6 add %s' % (vm1_ip6)] - vn_l2_vm1_fixture.run_cmd_on_vm(cmds=cmd_to_pass1, as_sudo=True, timeout=60) - cmd_to_pass2 = ['ifconfig eth1 inet6 add %s' % (vm2_ip6)] - vn_l2_vm2_fixture.run_cmd_on_vm(cmds=cmd_to_pass2, as_sudo=True, timeout=60) - - vm1_ipv6 = vn_l2_vm1_fixture.get_vm_ipv6_addr_from_vm( - intf='eth1', addr_type='global').split('/')[0].strip() - vm2_ipv6 = vn_l2_vm2_fixture.get_vm_ipv6_addr_from_vm( - intf='eth1', addr_type='global').split('/')[0].strip() - + #send l2 traffic and verify + self.mac1=vn_l2_vm1_fixture.mac_addr[self.vn1_fixture.vn_fq_name] + self.mac2=vn_l2_vm2_fixture.mac_addr[self.vn1_fixture.vn_fq_name] + filters = 'ether src %s' %(self.mac1) + tap_intf = vn_l2_vm2_fixture.tap_intf[self.vn1_fixture.vn_fq_name]['name'] self.tcpdump_start_on_all_compute() - assert vn_l2_vm1_fixture.ping_to_ipv6(vm2_ipv6, count='15', - other_opt='-I eth1') - assert vn_l2_vm2_fixture.ping_to_ipv6(vm1_ipv6, count='15', - other_opt='-I eth1') + session,pcap = vn_l2_vm2_fixture.start_tcpdump(filters=filters,interface=tap_intf) + self.logger.info('waiting to get tcpdump started') + sleep(10) + self.send_l2_traffic(vn_l2_vm1_fixture,iface='eth1') + result = verify_tcpdump_count(self, session, pcap, exp_count=10,mac=self.mac2) comp_vm1_ip = vn_l2_vm1_fixture.vm_node_ip comp_vm2_ip = vn_l2_vm2_fixture.vm_node_ip + # Pad vxlan_hex_id to length of 4 and grep it in tcpdump if vxlan_random_id < 15: vxlan_hex_id = '0' + vxlan_hex_id @@ -976,9 +790,6 @@ def verify_vxlan_mode_with_configured_vxlan_id_l2l3_vn(self): compute_1 = host_list[0] compute_2 = host_list[1] - 
vm1_ip6 = '1001::1/64' - vm2_ip6 = '1001::2/64' - vn_l2_vm1_name = 'EVPN_VN_L2_VM1' vn_l2_vm2_name = 'EVPN_VN_L2_VM2' @@ -991,8 +802,8 @@ def verify_vxlan_mode_with_configured_vxlan_id_l2l3_vn(self): self.vxlan_id = str(vxlan_random_id) self.connections.vnc_lib_fixture.set_vxlan_mode('configured') - self.addCleanup(self.connections.vnc_lib_fixture.set_vxlan_mode( - 'automatic')) + self.addCleanup(self.connections.vnc_lib_fixture.set_vxlan_mode, + vxlan_mode='automatic') self.vn1_fixture = self.useFixture( VNFixture( project_name=self.inputs.project_name, @@ -1064,27 +875,24 @@ def verify_vxlan_mode_with_configured_vxlan_id_l2l3_vn(self): # Wait till vm is up assert vn_l2_vm1_fixture.wait_till_vm_is_up() assert vn_l2_vm2_fixture.wait_till_vm_is_up() - - # Configure IPV6 address - cmd_to_pass1 = ['ifconfig eth0 inet6 add %s' % (vm1_ip6)] - vn_l2_vm1_fixture.run_cmd_on_vm(cmds=cmd_to_pass1, as_sudo=True, timeout=60) - cmd_to_pass2 = ['ifconfig eth0 inet6 add %s' % (vm2_ip6)] - vn_l2_vm2_fixture.run_cmd_on_vm(cmds=cmd_to_pass2, as_sudo=True, timeout=60) - - vm1_ipv6 = vn_l2_vm1_fixture.get_vm_ipv6_addr_from_vm( - addr_type='global').split('/')[0].strip() - vm2_ipv6 = vn_l2_vm2_fixture.get_vm_ipv6_addr_from_vm( - addr_type='global').split('/')[0].strip() - + #removing ipv6 verification + #send l3 only traffic and verify + self.vn_l2_vm1_ip = vn_l2_vm1_fixture.vm_ip_dict[self.vn1_fixture.vn_fq_name][0] + self.vn_l2_vm2_ip = vn_l2_vm2_fixture.vm_ip_dict[self.vn1_fixture.vn_fq_name][0] + filters = '\'(src host %s and dst host %s and not arp)\'' \ + % (self.vn_l2_vm1_ip, self.vn_l2_vm2_ip) + tap_intf = vn_l2_vm2_fixture.tap_intf[self.vn1_fixture.vn_fq_name]['name'] self.tcpdump_start_on_all_compute() - assert vn_l2_vm1_fixture.ping_to_ipv6(vm2_ipv6, count='15') - assert vn_l2_vm2_fixture.ping_to_ipv6(vm1_ipv6, count='15') + session, pcap = vn_l2_vm2_fixture.start_tcpdump(filters = filters,interface = tap_intf) + sleep(10) + self.send_l3_traffic(vn_l2_vm1_fixture) + assert 
verify_tcpdump_count(self,session, pcap,exp_count=10) comp_vm1_ip = vn_l2_vm1_fixture.vm_node_ip comp_vm2_ip = vn_l2_vm2_fixture.vm_node_ip + # Pad vxlan_hex_id to length of 4 and grep it in tcpdump if vxlan_random_id < 15: - vxlan_hex_id = '0' + vxlan_hex_id - + vxlan_hex_id = '0' + vxlan_hex_id self.tcpdump_analyze_on_compute( comp_vm1_ip, encap.upper(), vxlan_id=vxlan_hex_id) self.tcpdump_analyze_on_compute( @@ -1096,7 +904,7 @@ def verify_vxlan_mode_with_configured_vxlan_id_l2l3_vn(self): def get_matching_vrf(self, vrf_objs, vrf_name): return [x for x in vrf_objs if x['name'] == vrf_name][0] - + def verify_l2_vm_file_trf_by_scp(self, encap): '''Description: Test to validate File Transfer using scp between VMs. Files of different sizes. L2 forwarding mode is used for scp. ''' @@ -1132,7 +940,7 @@ def verify_l2_vm_file_trf_by_scp(self, encap): connections=self.connections, inputs=self.inputs, vn_name=self.vn3_name, - subnets=self.vn3_subnets,)) + subnets=self.vn3_subnets)) vn4_fixture = self.useFixture( VNFixture( @@ -1141,7 +949,9 @@ def verify_l2_vm_file_trf_by_scp(self, encap): inputs=self.inputs, vn_name=self.vn4_name, subnets=self.vn4_subnets, - enable_dhcp=False)) + enable_dhcp=False, + forwarding_mode='l2' + )) self.connections.vnc_lib_fixture.set_rpf_mode(vn4_fixture.vn_fq_name, 'disable') @@ -1192,7 +1002,7 @@ def verify_l2_vm_file_trf_by_scp(self, encap): assert vm1_fixture.verify_on_setup() assert vn_l2_vm1_fixture.verify_on_setup() assert vn_l2_vm2_fixture.verify_on_setup() - + # Configure dhcp-server vm on eth1 and bring the intreface up # forcefully self.bringup_interface_forcefully(vm1_fixture) @@ -1274,10 +1084,11 @@ def verify_l2_vm_file_trf_by_scp(self, encap): by scp !! 
Pls check logs' % (size, dest_vm_ip)) result = False assert result - + self.tcpdump_stop_on_all_compute() return result - + + @retry(delay=2, tries=5) def verify_eth1_ip_from_vm(self, vm_fix): i = 'ifconfig eth1' cmd_to_pass5 = [i] @@ -1323,7 +1134,7 @@ def verify_l2_vm_file_trf_by_tftp(self, encap): connections=self.connections, inputs=self.inputs, vn_name=self.vn3_name, - subnets=self.vn3_subnets,)) + subnets=self.vn3_subnets)) vn4_fixture = self.useFixture( VNFixture( @@ -1332,7 +1143,8 @@ def verify_l2_vm_file_trf_by_tftp(self, encap): inputs=self.inputs, vn_name=self.vn4_name, subnets=self.vn4_subnets, - enable_dhcp=False)) + enable_dhcp=False, + forwarding_mode='l2')) self.connections.vnc_lib_fixture.set_rpf_mode(vn4_fixture.vn_fq_name, 'disable') @@ -1392,7 +1204,7 @@ def verify_l2_vm_file_trf_by_tftp(self, encap): assert vm1_fixture.verify_on_setup() assert vn_l2_vm1_fixture.verify_on_setup() assert vn_l2_vm2_fixture.verify_on_setup() - + # Configure dhcp-server vm on eth1 and bring the intreface up # forcefully self.bringup_interface_forcefully(vm1_fixture) @@ -1507,7 +1319,7 @@ def verify_vlan_tagged_packets_for_l2_vn(self, encap): connections=self.connections, inputs=self.inputs, vn_name=self.vn3_name, - subnets=self.vn3_subnets,)) + subnets=self.vn3_subnets)) vn4_fixture = self.useFixture( VNFixture( @@ -1516,7 +1328,8 @@ def verify_vlan_tagged_packets_for_l2_vn(self, encap): inputs=self.inputs, vn_name=self.vn4_name, subnets=self.vn4_subnets, - enable_dhcp=False)) + enable_dhcp=False, + forwarding_mode='l2')) vn_l2_vm1_name = 'EVPN_VN_L2_VM1' vn_l2_vm2_name = 'EVPN_VN_L2_VM2' @@ -1525,22 +1338,20 @@ def verify_vlan_tagged_packets_for_l2_vn(self, encap): VMFixture( project_name=self.inputs.project_name, connections=self.connections, - flavor='contrail_flavor_large', vn_objs=[ vn3_fixture.obj, vn4_fixture.obj], - image_name='ubuntu-with-vlan8021q', + image_name='ubuntu-traffic', vm_name=vn_l2_vm1_name, node_name=compute_1)) vn_l2_vm2_fixture = 
self.useFixture( VMFixture( project_name=self.inputs.project_name, connections=self.connections, - flavor='contrail_flavor_large', vn_objs=[ vn3_fixture.obj, vn4_fixture.obj], - image_name='ubuntu-with-vlan8021q', + image_name='ubuntu-traffic', vm_name=vn_l2_vm2_name, node_name=compute_2)) @@ -1552,7 +1363,7 @@ def verify_vlan_tagged_packets_for_l2_vn(self, encap): # Wait till vm is up assert vn_l2_vm1_fixture.wait_till_vm_is_up() assert vn_l2_vm2_fixture.wait_till_vm_is_up() - + # Bring the intreface up forcefully self.bringup_interface_forcefully(vn_l2_vm1_fixture) self.bringup_interface_forcefully(vn_l2_vm2_fixture) @@ -1669,7 +1480,7 @@ def verify_vlan_qinq_tagged_packets_for_l2_vn(self, encap): connections=self.connections, inputs=self.inputs, vn_name=self.vn3_name, - subnets=self.vn3_subnets,)) + subnets=self.vn3_subnets)) vn4_fixture = self.useFixture( VNFixture( @@ -1678,7 +1489,8 @@ def verify_vlan_qinq_tagged_packets_for_l2_vn(self, encap): inputs=self.inputs, vn_name=self.vn4_name, subnets=self.vn4_subnets, - enable_dhcp=False)) + enable_dhcp=False, + forwarding_mode='l2')) vn_l2_vm1_name = 'EVPN_VN_L2_VM1' vn_l2_vm2_name = 'EVPN_VN_L2_VM2' @@ -1687,11 +1499,10 @@ def verify_vlan_qinq_tagged_packets_for_l2_vn(self, encap): VMFixture( project_name=self.inputs.project_name, connections=self.connections, - flavor='contrail_flavor_large', vn_objs=[ vn3_fixture.obj, vn4_fixture.obj], - image_name='ubuntu-with-vlan8021q', + image_name='ubuntu-traffic', vm_name=vn_l2_vm1_name, node_name=compute_1)) vn_l2_vm2_fixture = self.useFixture( @@ -1702,10 +1513,9 @@ def verify_vlan_qinq_tagged_packets_for_l2_vn(self, encap): vn_objs=[ vn3_fixture.obj, vn4_fixture.obj], - image_name='ubuntu-with-vlan8021q', + image_name='ubuntu-traffic', vm_name=vn_l2_vm2_name, node_name=compute_2)) - assert vn3_fixture.verify_on_setup() assert vn4_fixture.verify_on_setup() assert vn_l2_vm1_fixture.verify_on_setup() @@ -1714,7 +1524,7 @@ def 
verify_vlan_qinq_tagged_packets_for_l2_vn(self, encap): # Wait till vm is up assert vn_l2_vm1_fixture.wait_till_vm_is_up() assert vn_l2_vm2_fixture.wait_till_vm_is_up() - + # Bring the intreface up forcefully self.bringup_interface_forcefully(vn_l2_vm1_fixture) self.bringup_interface_forcefully(vn_l2_vm2_fixture) @@ -1984,8 +1794,6 @@ def verify_epvn_l2_mode_control_node_switchover(self, encap): compute_1 = host_list[0] compute_2 = host_list[1] - vn1_vm1 = '1001::1/64' - vn1_vm2 = '1001::2/64' (self.vn3_name, self.vn3_subnets) = ("EVPN-MGMT-VN", ["33.1.1.0/24"]) (self.vn4_name, self.vn4_subnets) = ("EVPN-L2-VN", ["44.1.1.0/24"]) vn3_fixture = self.useFixture( @@ -1994,7 +1802,7 @@ def verify_epvn_l2_mode_control_node_switchover(self, encap): connections=self.connections, inputs=self.inputs, vn_name=self.vn3_name, - subnets=self.vn3_subnets,)) + subnets=self.vn3_subnets)) vn4_fixture = self.useFixture( VNFixture( @@ -2003,7 +1811,8 @@ def verify_epvn_l2_mode_control_node_switchover(self, encap): inputs=self.inputs, vn_name=self.vn4_name, subnets=self.vn4_subnets, - enable_dhcp=False)) + enable_dhcp=False, + forwarding_mode='l2')) vn_l2_vm1_name = 'EVPN_VN_L2_VM1' vn_l2_vm2_name = 'EVPN_VN_L2_VM2' @@ -2038,25 +1847,17 @@ def verify_epvn_l2_mode_control_node_switchover(self, encap): assert vn_l2_vm1_fixture.wait_till_vm_is_up() assert vn_l2_vm2_fixture.wait_till_vm_is_up() - # Bring the intreface up forcefully - self.bringup_interface_forcefully(vn_l2_vm1_fixture) - self.bringup_interface_forcefully(vn_l2_vm2_fixture) - - # Configured IPV6 address - cmd_to_pass1 = ['ifconfig eth1 inet6 add %s' % (vn1_vm1)] - vn_l2_vm1_fixture.run_cmd_on_vm(cmds=cmd_to_pass1, as_sudo=True, timeout=60) - cmd_to_pass2 = ['ifconfig eth1 inet6 add %s' % (vn1_vm2)] - vn_l2_vm2_fixture.run_cmd_on_vm(cmds=cmd_to_pass2, as_sudo=True, timeout=60) - - vm1_ipv6 = vn_l2_vm1_fixture.get_vm_ipv6_addr_from_vm( - intf='eth1', addr_type='global') - vm2_ipv6 = vn_l2_vm2_fixture.get_vm_ipv6_addr_from_vm( 
- intf='eth1', addr_type='global') + #removed ipv6 verification self.tcpdump_start_on_all_compute() - assert vn_l2_vm1_fixture.ping_to_ipv6( - vm2_ipv6.split("/")[0].strip(), count='15') - assert vn_l2_vm2_fixture.ping_to_ipv6( - vm1_ipv6.split("/")[0].strip(), count='15') + self.mac1=vn_l2_vm1_fixture.mac_addr[vn4_fixture.vn_fq_name] + self.mac2=vn_l2_vm2_fixture.mac_addr[vn4_fixture.vn_fq_name] + filters = 'ether src %s' %(self.mac1) + tap_intf = vn_l2_vm2_fixture.tap_intf[vn4_fixture.vn_fq_name]['name'] + session,pcap = vn_l2_vm2_fixture.start_tcpdump(filters=filters,interface=tap_intf) + self.logger.info('waiting to get tcpdump started') + sleep(20) + self.send_l2_traffic(vn_l2_vm1_fixture,iface='eth1') + comp_vm1_ip = vn_l2_vm1_fixture.vm_node_ip comp_vm2_ip = vn_l2_vm2_fixture.vm_node_ip self.tcpdump_analyze_on_compute(comp_vm1_ip, encap.upper()) @@ -2126,17 +1927,23 @@ def verify_epvn_l2_mode_control_node_switchover(self, encap): 'With Peer %s peering is not Established. Current State %s ' % (entry['peer'], entry['state'])) # Check ping + #removed ipv6 verification + #send l2_traffic self.tcpdump_start_on_all_compute() - assert vn_l2_vm1_fixture.ping_to_ipv6( - vm2_ipv6.split("/")[0].strip(), count='15') - assert vn_l2_vm2_fixture.ping_to_ipv6( - vm1_ipv6.split("/")[0].strip(), count='15') + self.mac1=vn_l2_vm1_fixture.mac_addr[vn4_fixture.vn_fq_name] + self.mac2=vn_l2_vm2_fixture.mac_addr[vn4_fixture.vn_fq_name] + filters = 'ether src %s' %(self.mac1) + tap_intf = vn_l2_vm2_fixture.tap_intf[vn4_fixture.vn_fq_name]['name'] + session,pcap = vn_l2_vm2_fixture.start_tcpdump(filters=filters,interface=tap_intf) + self.logger.info('waiting to get tcpdump started') + sleep(20) + self.send_l2_traffic(vn_l2_vm1_fixture,iface='eth1') comp_vm1_ip = vn_l2_vm1_fixture.vm_node_ip comp_vm2_ip = vn_l2_vm2_fixture.vm_node_ip self.tcpdump_analyze_on_compute(comp_vm1_ip, encap.upper()) self.tcpdump_analyze_on_compute(comp_vm2_ip, encap.upper()) 
self.tcpdump_stop_on_all_compute() - + return result # verify_epvn_l2_mode_control_node_switchover @@ -2191,20 +1998,17 @@ def verify_epvn_with_agent_restart(self, encap): assert vn1_vm2_fixture.verify_on_setup() assert vn1_vm1_fixture.wait_till_vm_is_up() assert vn1_vm2_fixture.wait_till_vm_is_up() - - # Bug 1374192: Removing all traffic test from this case. - # This test case will only veirfy L2 route after vrouter restart - # Will add new test case for L2 fallback - #for i in range(0, 20): - # vm2_ipv6 = vn1_vm2_fixture.get_vm_ipv6_addr_from_vm() - # if vm2_ipv6 is not None: - # break - #if vm2_ipv6 is None: - # self.logger.error('Not able to get VM link local address') - # return False - #self.logger.info( - # 'Checking the communication between 2 VM using ping6 to VM link local address from other VM') - #assert vn1_vm1_fixture.ping_to_ipv6(vm2_ipv6.split("/")[0]) + + #send l2 traffic and verify + self.mac1=vn1_vm1_fixture.mac_addr[vn1_fixture.vn_fq_name] + self.mac2=vn1_vm2_fixture.mac_addr[vn1_fixture.vn_fq_name] + filters = 'ether src %s' %(self.mac1) + tap_intf = vn1_vm2_fixture.tap_intf[vn1_fixture.vn_fq_name]['name'] + session,pcap = vn1_vm2_fixture.start_tcpdump(filters=filters,interface=tap_intf) + self.logger.info('waiting to get tcpdump started') + sleep(20) + self.send_l2_traffic(vn1_vm1_fixture,iface='eth0') + result = verify_tcpdump_count(self, session, pcap, exp_count=10,mac=self.mac2) self.logger.info('Will restart compute services now') for compute_ip in self.inputs.compute_ips: self.inputs.restart_service('contrail-vrouter', [compute_ip]) @@ -2213,28 +2017,25 @@ def verify_epvn_with_agent_restart(self, encap): 'Verifying L2 route and other VM verification after restart') assert vn1_vm1_fixture.verify_on_setup(force=True) assert vn1_vm2_fixture.verify_on_setup(force=True) - #for i in range(0, 20): - # vm2_ipv6 = vn1_vm2_fixture.get_vm_ipv6_addr_from_vm() - # if vm2_ipv6 is not None: - # break - #if vm2_ipv6 is None: - # self.logger.error('Not 
able to get VM link local address') - # return False - #self.logger.info( - # 'Checking the communication between 2 VM after vrouter restart') - #self.tcpdump_start_on_all_compute() - #assert vn1_vm1_fixture.ping_to_ipv6( - # vm2_ipv6.split("/")[0], count='15') - #comp_vm2_ip = vn1_vm2_fixture.vm_node_ip - #if len(set(self.inputs.compute_ips)) >= 2: - # self.tcpdump_analyze_on_compute(comp_vm2_ip, encap.upper()) - #self.tcpdump_stop_on_all_compute() + #send l2 traffic and verify + self.mac1=vn1_vm1_fixture.mac_addr[vn1_fixture.vn_fq_name] + self.mac2=vn1_vm2_fixture.mac_addr[vn1_fixture.vn_fq_name] + filters = 'ether src %s' %(self.mac1) + tap_intf = vn1_vm2_fixture.tap_intf[vn1_fixture.vn_fq_name]['name'] + session,pcap = vn1_vm2_fixture.start_tcpdump(filters=filters,interface=tap_intf) + self.logger.info('waiting to get tcpdump started') + sleep(20) + self.send_l2_traffic(vn1_vm1_fixture,iface='eth0') + result = verify_tcpdump_count(self, session, pcap, exp_count=10,mac=self.mac2) + self.logger.info('Checking the communication between 2 VM after vrouter restart') + assert vn1_vm1_fixture.ping_with_certainty(dst_vm_fixture=vn1_vm2_fixture, + vn_fq_name=vn1_fixture.vn_fq_name) return True # End test_epvn_with_agent_restart def verify_epvn_l2_mode(self, encap): - '''Restart the vrouter service and verify the impact on L2 route + '''verify the impact on L2 route with each encapsulation ''' # Setting up default encapsulation self.logger.info('Setting new Encap before continuing') @@ -2253,8 +2054,6 @@ def verify_epvn_l2_mode(self, encap): compute_1 = host_list[0] compute_2 = host_list[1] - vn1_vm1 = '1001::1/64' - vn1_vm2 = '1001::2/64' (self.vn3_name, self.vn3_subnets) = ("EVPN-MGMT-VN", ["33.1.1.0/24"]) (self.vn4_name, self.vn4_subnets) = ("EVPN-L2-VN", ["44.1.1.0/24"]) @@ -2264,7 +2063,7 @@ def verify_epvn_l2_mode(self, encap): connections=self.connections, inputs=self.inputs, vn_name=self.vn3_name, - subnets=self.vn3_subnets,)) + subnets=self.vn3_subnets)) 
vn4_fixture = self.useFixture( VNFixture( @@ -2273,7 +2072,8 @@ def verify_epvn_l2_mode(self, encap): inputs=self.inputs, vn_name=self.vn4_name, subnets=self.vn4_subnets, - enable_dhcp=False)) + enable_dhcp=False, + forwarding_mode='l2')) vn_l2_vm1_name = 'EVPN_VN_L2_VM1' vn_l2_vm2_name = 'EVPN_VN_L2_VM2' @@ -2298,7 +2098,6 @@ def verify_epvn_l2_mode(self, encap): image_name='ubuntu', vm_name=vn_l2_vm2_name, node_name=compute_2)) - assert vn3_fixture.verify_on_setup() assert vn4_fixture.verify_on_setup() assert vn_l2_vm1_fixture.verify_on_setup() @@ -2308,41 +2107,165 @@ def verify_epvn_l2_mode(self, encap): vn_l2_vm1_fixture.wait_till_vm_is_up() vn_l2_vm2_fixture.wait_till_vm_is_up() - # Bring the intreface up forcefully - self.bringup_interface_forcefully(vn_l2_vm1_fixture) - self.bringup_interface_forcefully(vn_l2_vm2_fixture) - - # Configured IPV6 address - cmd_to_pass1 = ['ifconfig eth1 inet6 add %s' % (vn1_vm1)] - vn_l2_vm1_fixture.run_cmd_on_vm(cmds=cmd_to_pass1, as_sudo=True, timeout=60) - cmd_to_pass2 = ['ifconfig eth1 inet6 add %s' % (vn1_vm2)] - vn_l2_vm2_fixture.run_cmd_on_vm(cmds=cmd_to_pass2, as_sudo=True, timeout=60) - - vm1_ipv6 = vn_l2_vm1_fixture.get_vm_ipv6_addr_from_vm( - intf='eth1', addr_type='global') - vm2_ipv6 = vn_l2_vm2_fixture.get_vm_ipv6_addr_from_vm( - intf='eth1', addr_type='global') + #send l2 traffic and verify + self.mac1=vn_l2_vm1_fixture.mac_addr[vn4_fixture.vn_fq_name] + self.mac2=vn_l2_vm2_fixture.mac_addr[vn4_fixture.vn_fq_name] + filters = 'ether src %s' %(self.mac1) + tap_intf = vn_l2_vm2_fixture.tap_intf[vn4_fixture.vn_fq_name]['name'] self.tcpdump_start_on_all_compute() - assert vn_l2_vm1_fixture.ping_to_ipv6( - vm2_ipv6.split("/")[0].strip(), count='15', other_opt='-I eth1') + session,pcap = vn_l2_vm2_fixture.start_tcpdump(filters=filters,interface=tap_intf) + self.logger.info('waiting to get tcpdump started') + sleep(10) + self.send_l2_traffic(vn_l2_vm1_fixture,iface='eth1') + result = verify_tcpdump_count(self, session, 
pcap, exp_count=10,mac=self.mac2) + comp_vm2_ip = vn_l2_vm2_fixture.vm_node_ip if len(self.connections.nova_h.get_hosts()) >= 2: self.tcpdump_analyze_on_compute(comp_vm2_ip, encap.upper()) self.tcpdump_stop_on_all_compute() - #self.logger.info('Will restart compute services now') - # for compute_ip in self.inputs.compute_ips: - # self.inputs.restart_service('contrail-vrouter',[compute_ip]) - # sleep(10) - - # TODO - #assert vn1_vm1_fixture.verify_on_setup() - #assert vn1_vm2_fixture.verify_on_setup() - - #self.logger.info('Checking the communication between 2 VM after vrouter restart') - #assert vn_l2_vm1_fixture.ping_to_ipv6(vm2_ipv6.split("/")[0]) return True # End verify_epvn_l2_mode + + def verify_l2_only_and_l3_only_arp_resolution(self,encap): + + # Setting up default encapsulation + self.logger.info('Setting new Encap before continuing') + if (encap == 'gre'): + self.update_encap_priority('gre') + elif (encap == 'udp'): + self.update_encap_priority('udp') + elif (encap == 'vxlan'): + self.update_encap_priority('vxlan') + + result = True + host_list = self.connections.nova_h.get_hosts() + compute_1 = host_list[0] + compute_2 = host_list[0] + if len(host_list) > 1: + compute_1 = host_list[0] + compute_2 = host_list[1] + (self.vn3_name, self.vn3_subnets) = ("EVPN-MGMT-VN", ["33.1.1.0/24"]) + vn3_fixture = self.useFixture( + VNFixture( + project_name=self.inputs.project_name, + connections=self.connections, + inputs=self.inputs, + vn_name=self.vn3_name, + subnets=self.vn3_subnets)) + + vn_l2_vm1_name = 'EVPN_VN_L2_VM1' + vn_l2_vm2_name = 'EVPN_VN_L2_VM2' + + (self.vn1_name, self.vn1_subnets) = ("EVPN-Test-VN1", ["55.1.1.0/24"]) + + self.vn1_fixture = self.useFixture( + VNFixture( + project_name=self.inputs.project_name, + connections=self.connections, + inputs=self.inputs, + vn_name=self.vn1_name, + subnets=self.vn1_subnets)) + assert self.vn1_fixture.verify_on_setup() + vn_l2_vm1_fixture = self.useFixture( + VMFixture( + project_name=self.inputs.project_name, + 
connections=self.connections, + vn_objs=[ + vn3_fixture.obj, + self.vn1_fixture.obj], + image_name='ubuntu-traffic', + vm_name=vn_l2_vm1_name, + node_name=compute_1)) + vn_l2_vm2_fixture = self.useFixture( + VMFixture( + project_name=self.inputs.project_name, + connections=self.connections, + vn_objs=[ + vn3_fixture.obj, + self.vn1_fixture.obj], + image_name='ubuntu-traffic', + vm_name=vn_l2_vm2_name, + node_name=compute_2)) + + assert vn_l2_vm1_fixture.verify_on_setup() + assert vn_l2_vm2_fixture.verify_on_setup() + + # Wait till vm is up + assert vn_l2_vm1_fixture.wait_till_vm_is_up() + assert vn_l2_vm2_fixture.wait_till_vm_is_up() + self.bringup_interface_forcefully(vn_l2_vm1_fixture) + cmd = 'dhclient eth1' + vn_l2_vm1_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True) + assert self.verify_eth1_ip_from_vm(vn_l2_vm1_fixture) + + self.bringup_interface_forcefully(vn_l2_vm2_fixture) + cmd = 'dhclient eth1' + vn_l2_vm2_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True) + assert self.verify_eth1_ip_from_vm(vn_l2_vm2_fixture) + self.logger.info( + "Changing vn1 forwarding mode from l2l3 to l2 only followed by calling verify_on_setup for vms which checks l2 routes and explicity check l3 routes are removed ") + self.vn1_fixture.add_forwarding_mode( + project_fq_name=self.inputs.project_fq_name, + vn_name=self.vn1_name, + forwarding_mode='l2') + assert self.vn1_fixture.verify_on_setup() + assert vn_l2_vm1_fixture.verify_on_setup() + assert vn_l2_vm2_fixture.verify_on_setup() + cmd = 'ip -s -s neigh flush all' + vn_l2_vm1_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True) + mac1=vn_l2_vm1_fixture.mac_addr[self.vn1_fixture.vn_fq_name] + mac2=vn_l2_vm2_fixture.mac_addr[self.vn1_fixture.vn_fq_name] + self.logger.info('verify l2_only arp resolution') + filters = 'arp' + tap_intf = vn_l2_vm2_fixture.tap_intf[self.vn1_fixture.vn_fq_name]['name'] + session,pcap = vn_l2_vm2_fixture.start_tcpdump(filters=filters,interface=tap_intf) + self.logger.info('waiting to get tcpdump started') + 
sleep(20) + + self.vn_l2_vm1_ip = vn_l2_vm1_fixture.vm_ip_dict[self.vn1_fixture.vn_fq_name][0] + self.vn_l2_vm2_ip = vn_l2_vm2_fixture.vm_ip_dict[self.vn1_fixture.vn_fq_name][0] + intf_name = vn_l2_vm1_fixture.get_vm_interface_name(mac1) + output,form_output = vn_l2_vm1_fixture.arping(self.vn_l2_vm2_ip,intf_name) + search_string = self.vn_l2_vm1_ip + status = search_in_pcap(session, pcap,search_string) + vn_l2_vm2_fixture.stop_tcpdump(session,pcap) + if status and mac2 in output: + self.logger.info('arp resolution was done by end vm') + else: + assert status,'arp got proxied which is not expected in L2 mode' + + #for bug-id 1513718 + #clear arps and do arping + #check vrouter proxy arp request or not + self.logger.info('Verifying l3_only arp resolution') + self.logger.info( + "Changing vn1 forwarding mode from l2l3 to l3 only followed by calling verify_on_setup ") + self.vn1_fixture.add_forwarding_mode( + project_fq_name=self.inputs.project_fq_name, + vn_name=self.vn1_name, + forwarding_mode='l3') + cmd = 'ip -s -s neigh flush all' + vn_l2_vm1_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True) + filters = 'arp' + session,pcap = vn_l2_vm2_fixture.start_tcpdump(filters=filters,interface=tap_intf) + self.logger.info('waiting to get tcpdump started') + sleep(20) + intf_name = vn_l2_vm1_fixture.get_vm_interface_name(mac1) + output,form_output = vn_l2_vm1_fixture.arping(self.vn_l2_vm2_ip,intf_name) + vn_l2_vm2_fixture.stop_tcpdump(session, pcap) + search_string = self.vn_l2_vm1_ip + status = search_in_pcap(session, pcap,search_string) + vn_l2_vm2_fixture.stop_tcpdump(session,pcap) + if not status and ('00:00:5e:00:01:00' in output): + self.logger.info('arp proxied by vrouter in L3 mode') + else: + assert status,'arp not proxied by vrouter' + + return result + #end verify_l2_l3_and_l3_only_arp_resolution + + def bringup_interface_forcefully(self, vm_fixture, intf='eth1'): cmd = 'ifconfig %s up'%(intf) @@ -2386,12 +2309,12 @@ def tcpdump_start_on_all_compute(self): pcap1 = 
'/tmp/encap-udp.pcap' pcap2 = '/tmp/encap-gre.pcap' pcap3 = '/tmp/encap-vxlan.pcap' - cmd1 = 'tcpdump -ni %s udp port 51234 and less 170 -w %s -s 0' % ( + cmd1 = 'tcpdump -ni %s -U udp port 51234 and less 170 and ether[100:4]==0x5a5a5a5a -w %s -s 0' % ( comp_intf, pcap1) cmd_udp = "nohup " + cmd1 + " >& /dev/null < /dev/null &" - cmd2 = 'tcpdump -ni %s proto 47 -w %s -s 0' % (comp_intf, pcap2) + cmd2 = 'tcpdump -ni %s -U proto 47 and ether[100:4]==0x5a5a5a5a -w %s -s 0' % (comp_intf, pcap2) cmd_gre = "nohup " + cmd2 + " >& /dev/null < /dev/null &" - cmd3 = 'tcpdump -ni %s dst port 4789 -w %s -s 0' % ( + cmd3 = 'tcpdump -ni %s -U dst port 4789 and ether[100:4]==0x5a5a5a5a -w %s -s 0' % ( comp_intf, pcap3) cmd_vxlan = "nohup " + cmd3 + " >& /dev/null < /dev/null &" @@ -2542,5 +2465,27 @@ def tcpdump_analyze_on_compute( return True - # return True # end tcpdump_analyze_on_compute + + def send_l3_traffic(self,vm1_fixture): + python_code = Template(''' +from scapy.all import * +payload = 'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ' +a=IP(src='$src_ip',dst='$dst_ip')/payload +send(a, count=10) + ''') + python_code = python_code.substitute(src_ip=self.vn_l2_vm1_ip, dst_ip=self.vn_l2_vm2_ip) + return vm1_fixture.run_python_code(python_code) + #end send_l3_traffic + def send_l2_traffic(self,vm1_fixture,iface): + + python_code = Template(''' +from scapy.all import * +payload = 'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ' +a=Ether(src='$mac1',dst='$mac2')/payload +sendp(a, count=10, inter=0, iface='$iface') + ''') + python_code = python_code.substitute(mac1=self.mac1,mac2=self.mac2,iface=iface) + return vm1_fixture.run_python_code(python_code) + #end send_l2_traffic + diff --git a/serial_scripts/floatingip/base.py b/serial_scripts/floatingip/base.py index 8c7cb04dd..1f1b87985 100644 --- a/serial_scripts/floatingip/base.py +++ b/serial_scripts/floatingip/base.py @@ -1,46 +1,36 @@ -import test +import test_v1 from common import isolated_creds, 
create_public_vn from vn_test import * from vm_test import * import fixtures -class FloatingIpBaseTest(test.BaseTestCase): +class FloatingIpBaseTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(FloatingIpBaseTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - cls.admin_inputs = cls.isolated_creds.get_admin_inputs() - cls.admin_connections = cls.isolated_creds.get_admin_connections() cls.quantum_h = cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib = cls.connections.vnc_lib cls.agent_inspect = cls.connections.agent_inspect cls.cn_inspect = cls.connections.cn_inspect cls.analytics_obj = cls.connections.analytics_obj + if cls.inputs.admin_username: + public_creds = cls.admin_isolated_creds + else: + public_creds = cls.isolated_creds cls.public_vn_obj = create_public_vn.PublicVn( - cls.__name__, - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) + public_creds, + cls.inputs, + ini_file=cls.ini_file, + logger=cls.logger) cls.public_vn_obj.configure_control_nodes() # end setUpClass @classmethod def tearDownClass(cls): - cls.isolated_creds.delete_tenant() super(FloatingIpBaseTest, cls).tearDownClass() # end tearDownClass diff --git a/serial_scripts/floatingip/test_floatingip.py b/serial_scripts/floatingip/test_floatingip.py index d56b36497..5aa715783 100644 --- a/serial_scripts/floatingip/test_floatingip.py +++ b/serial_scripts/floatingip/test_floatingip.py @@ -32,7 +32,7 @@ from fabric.api import run import base import test - +from compute_node_test import * class FloatingipTestSanity_restart(base.FloatingIpBaseTest): @@ -386,14 +386,24 @@ def 
test_traffic_with_control_node_switchover(self): # Verify Ingress Traffic self.logger.info('Verifying Ingress Flow Record') vn_fq_name=vn1_vm1_traffic_fixture.vn_fq_name - flow_rec1 = inspect_h1.get_vna_fetchflowrecord( - nh=vn1_vm1_traffic_fixture.tap_intf[vn_fq_name]['flow_key_idx'], - sip=vn1_vm1_traffic_fixture.vm_ip, - dip=fvn1_vm1_traffic_fixture.vm_ip, - sport='0', - dport='0', - protocol='1') - + compute_node_fixture = self.useFixture(ComputeNodeFixture( + self.connections, vn1_vm1_traffic_fixture.vm_node_ip)) + fwd_flow, rev_flow = compute_node_fixture.get_flow_entry( + source_ip = vn1_vm1_traffic_fixture.vm_ip, + dest_ip = fvn1_vm1_traffic_fixture.vm_ip, + dest_port = '0', + proto = '1') + if fwd_flow: + sport = fwd_flow.source_port + flow_rec1 = inspect_h1.get_vna_fetchflowrecord( + nh=vn1_vm1_traffic_fixture.tap_intf[vn_fq_name]['flow_key_idx'], + sip=vn1_vm1_traffic_fixture.vm_ip, + dip=fvn1_vm1_traffic_fixture.vm_ip, + sport=sport, + dport='0', + protocol='1') + else: + flow_rec1 = None if flow_rec1 is not None: self.logger.info('Verifying NAT in flow records') match = inspect_h1.match_item_in_flowrecord( @@ -420,14 +430,16 @@ def test_traffic_with_control_node_switchover(self): # Check VMs are in same agent or not. 
Need to compute source vrf # accordingly self.logger.info('Verifying Egress Flow Records') - flow_rec2 = inspect_h1.get_vna_fetchflowrecord( - nh=vn1_vm1_traffic_fixture.tap_intf[vn_fq_name]['flow_key_idx'], - sip=fvn1_vm1_traffic_fixture.vm_ip, - dip=fip_fixture1.fip[fip_id1], - sport='0', - dport='0', - protocol='1') - + if fwd_flow: + flow_rec2 = inspect_h1.get_vna_fetchflowrecord( + nh=vn1_vm1_traffic_fixture.tap_intf[vn_fq_name]['flow_key_idx'], + sip=fvn1_vm1_traffic_fixture.vm_ip, + dip=fip_fixture1.fip[fip_id1], + sport=sport, + dport='0', + protocol='1') + else: + flow_rec2 = False if flow_rec2 is not None: self.logger.info('Verifying NAT in flow records') match = inspect_h1.match_item_in_flowrecord( diff --git a/serial_scripts/flow_tests/ReleaseToFlowSetupRateMapping.py b/serial_scripts/flow_tests/ReleaseToFlowSetupRateMapping.py deleted file mode 100644 index 89fe9e605..000000000 --- a/serial_scripts/flow_tests/ReleaseToFlowSetupRateMapping.py +++ /dev/null @@ -1,7 +0,0 @@ -# Here the rate is set for Policy flows, local to a compute, which is -# lesser than policy flows across computes -expected_flow_setup_rate = {} -expected_flow_setup_rate['policy'] = { - '1.04': 6000, '1.05': 9000, '1.06': 10000, '1.10': 10000} -expected_flow_setup_rate['nat'] = {'1.04': 4200, - '1.05': 6300, '1.06': 7500, '1.10': 7500} diff --git a/serial_scripts/flow_tests/base.py b/serial_scripts/flow_tests/base.py deleted file mode 100644 index 9b36331a2..000000000 --- a/serial_scripts/flow_tests/base.py +++ /dev/null @@ -1,39 +0,0 @@ -import test -from common import isolated_creds - - -class BaseFlowTest(test.BaseTestCase): - - @classmethod - def setUpClass(cls): - super(BaseFlowTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = 
cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - cls.admin_inputs = cls.isolated_creds.get_admin_inputs() - cls.admin_connections = cls.isolated_creds.get_admin_connections() - cls.quantum_h= cls.connections.quantum_h - cls.nova_h = cls.connections.nova_h - cls.vnc_lib= cls.connections.vnc_lib - cls.agent_inspect= cls.connections.agent_inspect - cls.cn_inspect= cls.connections.cn_inspect - cls.api_s_inspect = cls.connections.api_server_inspect - cls.analytics_obj=cls.connections.analytics_obj - # end setUpClass - - @classmethod - def tearDownClass(cls): - cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() - super(BaseFlowTest, cls).tearDownClass() - # end tearDownClass - -#end BaseFlowTest class - diff --git a/serial_scripts/flow_tests/flow_test_topo.py b/serial_scripts/flow_tests/flow_test_topo.py deleted file mode 100755 index 1e6cdb26f..000000000 --- a/serial_scripts/flow_tests/flow_test_topo.py +++ /dev/null @@ -1,178 +0,0 @@ -'''*******AUTO-GENERATED TOPOLOGY*********''' - - -class systest_topo_single_project (): - - def __init__(self, compute_node_list=None, domain='default-domain', project=None, username=None, password=None): - self.project_list = ['project2'] - - # Define the vm to compute node mapping to pin a vm to a particular - # compute node or else leave empty. - #self.vm_node_map = {} - self.vm_node_map = {} - if compute_node_list is not None: - if len(compute_node_list) == 2: - self.vm_node_map = { - 'vmc1': 'CN0', 'vmc2': 'CN0', 'vmc3': 'CN0', 'vmc4': 'CN0', 'vmc8': 'CN0', 'vmc9': 'CN0', - 'vmc5': 'CN1', 'vmc6': 'CN1', 'vmc7': 'CN1', 'vmd10': 'CN1', 'vmd11': 'CN1'} - elif len(compute_node_list) > 2: - self.vm_node_map = { - 'vmc1': 'CN0', 'vmc2': 'CN0', 'vmc3': 'CN0', 'vmc4': 'CN0', - 'vmc5': 'CN1', 'vmc6': 'CN1', 'vmc7': 'CN1', - 'vmc8': 'CN2', 'vmc9': 'CN2', 'vmd10': 'CN2', 'vmd11': 'CN2'} - - # Logic to create a vm to Compute node mapping. 
- if self.vm_node_map: - CN = [] - for cn in self.vm_node_map.keys(): - if self.vm_node_map[cn] not in CN: - CN.append(self.vm_node_map[cn]) - my_node_dict = {} - if compute_node_list is not None: - if len(compute_node_list) >= len(CN): - my_node_dict = dict(zip(CN, compute_node_list)) - - if my_node_dict: - for key in my_node_dict: - for key1 in self.vm_node_map: - if self.vm_node_map[key1] == key: - self.vm_node_map[key1] = my_node_dict[key] - - # - # Define traffic profile. - self.traffic_profile = { - 'TrafficProfile1': {'src_vm': 'vmc1', 'dst_vm': 'vmc2', 'num_flows': 100000, 'num_pkts': 900000}, # Intra VN,Intra Node - # Intra VN,Inter Node - 'TrafficProfile2': {'src_vm': 'vmc4', 'dst_vm': 'vmc7', 'num_flows': 100000, 'num_pkts': 900000}, - # Inter VN,Intra Node,Pol - 'TrafficProfile3': {'src_vm': 'vmc3', 'dst_vm': 'vmc4', 'num_flows': 100000, 'num_pkts': 900000}, - # Inter VN,Inter Node,Pol - 'TrafficProfile4': {'src_vm': 'vmc3', 'dst_vm': 'vmc7', 'num_flows': 100000, 'num_pkts': 900000}, - # Inter VN,Intra Node,FIP - 'TrafficProfile5': {'src_vm': 'vmc5', 'dst_vm': 'vmc6', 'num_flows': 100000, 'num_pkts': 900000}, - 'TrafficProfile6': {'src_vm': 'vmc8', 'dst_vm': 'vmc5', 'num_flows': 100000, 'num_pkts': 900000}} # Inter VN,Inter Node,FIP - - #self.traffic_profile = { - # 'TrafficProfile1': {'src_vm': 'vmc1', 'dst_vm': 'vmc2', 'num_flows': 9000, 'num_pkts': 90000}, # Intra VN,Intra Node - # 'TrafficProfile6': {'src_vm': 'vmc8', 'dst_vm': 'vmc5', 'num_flows': 9000, 'num_pkts': 90000}} # Inter VN,Inter Node,FIP - # - # A master list of all the vm static routes defined. 
- self.vm_static_route_master = { - 'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28', 'vmc3': '111.3.1.0/28', - 'vmc4': '111.4.1.0/28', 'vmc5': '111.5.1.0/28', 'vmc7': '111.7.1.0/28', 'vmc8': '111.8.1.0/28'} - self.vm_static_route_test = {} - - # - # Define FIP pool - self.fip_pools = {'project2': { - 'p1-vn3-pool1': {'host_vn': 'vnet3', 'target_projects': ['project2']}, - 'p1-vn4-pool2': {'host_vn': 'vnet4', 'target_projects': ['project2']}, - 'p1-vn5-pool3': {'host_vn': 'vnet5', 'target_projects': ['project2']}, - } - } - #self.fvn_vm_map = {'vnet3':['vmc6', 'vmc8'], 'vnet4':['vmc5'], 'vnet5':['vmc5']} - self.fvn_vm_map = {'project2': { - 'vnet3': {'project2': ['vmc6', 'vmc8']}, - 'vnet4': {'project2': ['vmc5']}, - 'vnet5': {'project2': ['vmc5']}, - } - } - # end __init__ - - def build_topo_project2(self, domain='default-domain', project='project2', username='juniper', password='juniper123'): - # - # Topo for project: project2 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet1', 'vnet2', 'vnet3', 'vnet4', 'vnet5', 'vnet6'] - # - # Define network info for each VN: - self.vn_nets = { - 'vnet1': ['10.1.1.0/30', '10.1.1.4/30', '10.1.1.8/30'], - 'vnet2': ['10.2.1.0/30', '10.2.1.4/30'], 'vnet3': ['10.3.1.0/30', '10.3.1.4/30', '10.3.1.8/30'], - 'vnet4': ['10.4.1.0/30', '10.4.1.4/30', '10.5.1.8/30'], - 'vnet5': ['10.5.1.0/30', '10.5.1.4/30', '10.5.1.8/30'], 'vnet6': ['10.6.1.0/30', '10.6.1.4/30']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet1': 'ipam1', 'vnet2': 'ipam2', 'vnet3': - 'ipam3', 'vnet4': 'ipam4', 'vnet5': 'ipam5', 'vnet6': 'ipam6'} - # - # Define network policies - self.policy_list = ['policy1'] - self.vn_policy = {'vnet1': ['policy1'], 'vnet2': ['policy1'], - 'vnet3': [], 'vnet4': [], 'vnet5': [], 'vnet6': []} - # - # Define VM's 
- # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = { - 'vmc1': 'vnet1', 'vmc2': 'vnet1', 'vmc3': 'vnet1', 'vmc4': 'vnet2', 'vmc5': 'vnet3', - 'vmc6': 'vnet4', 'vmc7': 'vnet2', 'vmc8': 'vnet5', 'vmc9': 'vnet4', 'vmd10': 'vnet6'} - # - # Define static route behind vms. - self.vm_static_route = { - 'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28', 'vmc3': '111.3.1.0/28', - 'vmc4': '111.4.1.0/28', 'vmc5': '111.5.1.0/28', 'vmc7': '111.7.1.0/28', 'vmc8': '111.8.1.0/28'} - self.vm_static_route_test.update(self.vm_static_route) - - # Not defining service instances for flow rate testing - ## Define Service template & instances - #self.st_list = ['st_trans_left', 'st_inNet_left'] - #self.si_list = ['si-mirror-1', 'si-mirror-2'] - - # - # Define network policy rules - self.rules = {} - self.rules['policy1'] = [ - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project2:vnet2', 'source_network': - 'default-domain:project2:vnet1', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}, - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project2:vnet1', 'source_network': - 'default-domain:project2:vnet2', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - - #ST and SI topology - self.st_params = {} - self.si_params = {} - - # Define security_group name - self.sg_list = ['test_sg_p1'] - # - # Define security_group with vm - self.sg_of_vm = { - 'vmc1': ['test_sg_p1'], 'vmc2': ['test_sg_p1'], 'vmc3': ['test_sg_p1'], 'vmc4': ['test_sg_p1'], 'vmc5': ['test_sg_p1'], - 'vmc6': ['test_sg_p1'], 'vmc7': ['test_sg_p1'], 'vmc8': ['test_sg_p1'], 'vmc9': ['test_sg_p1'], 'vmd10': ['test_sg_p1']} - # Define the security_group rules - import uuid - uuid_1 = uuid.uuid1().urn.split(':')[2] - uuid_2 = uuid.uuid1().urn.split(':')[2] - uuid_3 = uuid.uuid1().urn.split(':')[2] - self.sg_rules = {} - self.sg_rules['test_sg_p1'] = [ - {'direction': '>', - 
'protocol': 'any', 'rule_uuid': uuid_1, - 'dst_addresses': [{'security_group': 'local'}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - }, {'direction': '>', - 'protocol': 'any', 'rule_uuid': uuid_2, - 'src_addresses': [{'security_group': 'local'}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - }, {'direction': '>', - 'protocol': 'any', 'rule_uuid': uuid_3, - 'src_addresses': [{'security_group': 'default-domain:project2:test_sg_p1'}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_addresses': [{'security_group': 'local'}]}] - - return self - # end build_topo_project2 - -# end sdn_flow_test_topo_single_project diff --git a/serial_scripts/flow_tests/flow_test_utils.py b/serial_scripts/flow_tests/flow_test_utils.py deleted file mode 100644 index 23b9b07dc..000000000 --- a/serial_scripts/flow_tests/flow_test_utils.py +++ /dev/null @@ -1,162 +0,0 @@ -from time import sleep - -from common.servicechain.config import ConfigSvcChain -from tcutils.commands import ssh, execute_cmd, execute_cmd_out - - -class VerifySvcMirror(ConfigSvcChain): - - def start_tcpdump(self, session, tap_intf): - pcap = '/tmp/mirror-%s.pcap' % tap_intf - cmd = "tcpdump -ni %s udp port 8099 -w %s" % (tap_intf, pcap) - self.logger.info("Staring tcpdump to capture the mirrored packets.") - execute_cmd(session, cmd, self.logger) - return pcap - - def stop_tcpdump(self, session, pcap): - self.logger.info("Waiting for the tcpdump write to complete.") - sleep(30) - cmd = 'kill $(pidof tcpdump)' - execute_cmd(session, cmd, self.logger) - cmd = 'tcpdump -r %s | wc -l' % pcap - out, err = execute_cmd_out(session, cmd, self.logger) - count = 
int(out.strip('\n')) - cmd = 'rm -f %s' % pcap - execute_cmd(session, cmd, self.logger) - return count - - def tcpdump_on_analyzer(self, si_prefix): - sessions = {} - svm_name = si_prefix + '_1' - host = self.get_svm_compute(svm_name) - tapintf = self.get_svm_tapintf(svm_name) - session = ssh(host['host_ip'], host['username'], host['password']) - pcap = self.start_tcpdump(session, tapintf) - sessions.update({svm_name: (session, pcap)}) - - return sessions - - def verify_mirror(self, svm_name, session, pcap): - mirror_pkt_count = self.stop_tcpdump(session, pcap) - errmsg = "Packets not mirrored to the analyzer VM %s," % (svm_name) - if mirror_pkt_count == 0: - self.logger.error(errmsg) - return [False, errmsg] - self.logger.info("%s packets are mirrored to the analyzer " - "service VM '%s'", mirror_pkt_count, svm_name) - - return [True, None] - - -def get_flow_data(config_topo, src_vm_name, dst_vm_name, proto, src_proj, dst_proj): - '''Flows can be of following types: - i. intra-VN, intra-Node - ii. intra-VN, inter-Node - iii. inter-VN, intra-Node, by policy - iv. inter-VN, inter-node, by policy - v. inter-VN, intra-Node, by FIP - vi. inter-VN, inter-Node, by FIP - Need to consider these in preparing flow data... - Fields to be used for populating flow data: - src_ip, dst_ip, protocol, src_vrf, dst_vrf [can be same or different based on node] - if src/dst in different vn, need 2 flows with different src_vrf. - if src/dst are connected by FIP, need 2 flows with different pair of IPs.. 
- fwd_flow: src_ip->dst_ip, reverse_flow: fip_ip->src_ip - Sample: - 236<=>521432 10.3.1.2:1809 10.4.1.6:9100 17 (3->2) - (K(nh):31, Action:N(S), S(nh):31, Statistics:3311/423808) - - 260<=>500380 10.4.1.6:9100 10.5.1.10:3306 17 (2->3) - (K(nh):19, Action:N(D), S(nh):19, Statistics:0/0) - - 276 10.4.1.6:9100 10.5.1.10:4149 17 (2) - (K(nh):19, Action:D(FlowLim), S(nh):19, Statistics:0/0) - ''' - proto_map = {'icmp': 1, 1: 1, 'udp': 17, 17: 17, 'tcp': 6, 6: 6} - src_vm_fixture = config_topo[src_proj]['vm'][src_vm_name] - dst_vm_fixture = config_topo[dst_proj]['vm'][dst_vm_name] - src_vm_vn_name = src_vm_fixture.vn_names[0] - dst_vm_vn_name = dst_vm_fixture.vn_names[0] - src_vm_vn_fixt = config_topo[src_proj]['vn'][src_vm_vn_name] - dst_vm_vn_fixt = config_topo[dst_proj]['vn'][dst_vm_vn_name] - src_vm_node_ip = src_vm_fixture.vm_ip - dst_vm_node_ip = dst_vm_fixture.vm_ip - src_vrf = src_vm_fixture.get_vrf_id( - src_vm_vn_fixt.vn_fq_name, - src_vm_vn_fixt.vrf_name) - if src_vm_fixture.vm_node_ip == dst_vm_fixture.vm_node_ip: - dst_vrf = src_vm_fixture.get_vrf_id( - dst_vm_vn_fixt.vn_fq_name, - dst_vm_vn_fixt.vrf_name) - else: - dst_vrf = dst_vm_fixture.get_vrf_id( - dst_vm_vn_fixt.vn_fq_name, - dst_vm_vn_fixt.vrf_name) - fip_flow = False - src_vm_in_dst_vn_fip = src_vm_fixture.chk_vmi_for_fip( - dst_vm_vn_fixt.vn_fq_name) - if src_vm_in_dst_vn_fip is not None: - fip_flow = True - if fip_flow: - # For FIP case, always get dst_vrf from src node - dst_vrf = src_vm_fixture.get_vrf_id( - dst_vm_vn_fixt.vn_fq_name, - dst_vm_vn_fixt.vrf_name) - # inter-VN, connected by fip scenario, vrf trnslation happens only in this case - f_flow = {'src_ip': src_vm_node_ip, 'dst_ip': dst_vm_node_ip, - 'proto': proto_map[proto], 'vrf': src_vrf} - r_flow = {'src_ip': dst_vm_node_ip, 'dst_ip': src_vm_in_dst_vn_fip, - 'proto': proto_map[proto], 'vrf': dst_vrf} - return [f_flow, r_flow] - else: - # intra-VN scenario - if src_vm_vn_name == dst_vm_vn_name: - f_flow = {'src_ip': src_vm_node_ip, 
'dst_ip': dst_vm_node_ip, - 'proto': proto_map[proto], 'vrf': src_vrf} - return [f_flow] - else: - # inter-VN, connected by policy scenario - f_flow = {'src_ip': src_vm_node_ip, 'dst_ip': dst_vm_node_ip, - 'proto': proto_map[proto], 'vrf': src_vrf} - r_flow = {'src_ip': dst_vm_node_ip, 'dst_ip': src_vm_node_ip, - 'proto': proto_map[proto], 'vrf': dst_vrf} - return [f_flow, r_flow] -# end get_flow_data - -def vm_vrouter_flow_count(self): - cmd = 'flow -l | grep Action | grep -E "F|N" | wc -l ' - result = '' - output = self.inputs.run_cmd_on_server( - self.vm_node_ip, cmd, self.inputs.host_data[ - self.vm_node_ip]['username'], self.inputs.host_data[ - self.vm_node_ip]['password']) - for s in output: - if s.isdigit(): - result = result + s - - return int(result) -# end vm_vrouter_flow_count - -def get_max_flow_removal_time(generated_flows, flow_cache_timeout): - '''Based on total flows in the node & flow_cache_timeout''' - max_stats_pass_interval = 1000 - num_entries_inspected_per_stats_pass = (max_stats_pass_interval*generated_flows )/(1000*flow_cache_timeout) - num_passes_needed_for_total_flows = generated_flows/num_entries_inspected_per_stats_pass - time_to_complete_all_passes = num_passes_needed_for_total_flows*max_stats_pass_interval - flow_removal_time_in_secs = time_to_complete_all_passes/1000 - return flow_removal_time_in_secs -# end get_max_flow_removal_time - -def update_vm_mdata_ip(compute_node, self): - '''Once vrouter service is restarted in compute_node, update VM metadata IPs''' - if 'project_list' in dir(self.topo): - self.projectList = self.topo.project_list - else: - self.projectList = [self.inputs.project_name] - for project in self.projectList: - vm_fixtures = self.config_topo[project]['vm'] - for name,vm_fixt in vm_fixtures.items(): - if vm_fixt.vm_node_data_ip == compute_node: - vm_fixt.wait_till_vm_is_up() - # end for vm fixture - # end for project diff --git a/serial_scripts/flow_tests/mini_flow_test_topo.py 
b/serial_scripts/flow_tests/mini_flow_test_topo.py deleted file mode 100755 index c66835518..000000000 --- a/serial_scripts/flow_tests/mini_flow_test_topo.py +++ /dev/null @@ -1,141 +0,0 @@ -'''*******AUTO-GENERATED TOPOLOGY*********''' - - -class systest_topo_single_project (): - - def __init__(self, compute_node_list=None, domain='default-domain', project=None, username=None, password=None): - self.project_list = ['project2'] - - # Define the vm to compute node mapping to pin a vm to a particular - # compute node or else leave empty. - #self.vm_node_map = {} - self.vm_node_map = {} - if compute_node_list is not None: - if len(compute_node_list) == 2: - self.vm_node_map = { - 'vmc1': 'CN0', 'vmc2': 'CN0', - } - - # Logic to create a vm to Compute node mapping. - if self.vm_node_map: - CN = [] - for cn in self.vm_node_map.keys(): - if self.vm_node_map[cn] not in CN: - CN.append(self.vm_node_map[cn]) - my_node_dict = {} - if compute_node_list is not None: - if len(compute_node_list) >= len(CN): - my_node_dict = dict(zip(CN, compute_node_list)) - - if my_node_dict: - for key in my_node_dict: - for key1 in self.vm_node_map: - if self.vm_node_map[key1] == key: - self.vm_node_map[key1] = my_node_dict[key] - - # - # Define traffic profile. - self.traffic_profile = {'TrafficProfile1': {'src_vm': 'vmc1', 'dst_vm': 'vmc2', 'num_flows': 100000, 'num_pkts': 900000}} - # - # A master list of all the vm static routes defined. 
- self.vm_static_route_master = { - 'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28' - } - self.vm_static_route_test = {} - - # - # Define FIP pool - self.fip_pools = {} - self.fvn_vm_map = {} - # end __init__ - - def build_topo_project2(self, domain='default-domain', project='project2', username='juniper', password='juniper123'): - # - # Topo for project: project2 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet1'] - # - # Define network info for each VN: - self.vn_nets = { - 'vnet1': ['10.1.1.0/30', '10.1.1.4/30', '10.1.1.8/30']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet1': 'ipam1'} - # - # Define network policies - #self.policy_list = ['policy1', 'policy-si-1', 'policy-si-2'] - #self.vn_policy = {'vnet1': ['policy1', 'policy-si-1'] - # } - self.policy_list = ['policy1'] - self.vn_policy = {'vnet1': ['policy1']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = { - 'vmc1': 'vnet1', 'vmc2': 'vnet1' - } - # - # Define static route behind vms. 
- self.vm_static_route = { - 'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28'} - self.vm_static_route_test.update(self.vm_static_route) - - ## Define Service template & instances - #self.st_list = ['st_trans_left', 'st_inNet_left'] - #self.si_list = ['si-mirror-1', 'si-mirror-2'] - - # - # Define network policy rules - self.rules = {} - self.rules['policy1'] = [ - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project2:vnet1', - 'source_network': 'local', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - - #ST and SI topology - self.st_params = {} - self.si_params = {} - - # Define security_group name - self.sg_list = ['test_sg_p1'] - # - # Define security_group with vm - self.sg_of_vm = { - 'vmc1': ['test_sg_p1'], 'vmc2': ['test_sg_p1']} - # Define the security_group rules - import uuid - uuid_1 = uuid.uuid1().urn.split(':')[2] - uuid_2 = uuid.uuid1().urn.split(':')[2] - uuid_3 = uuid.uuid1().urn.split(':')[2] - self.sg_rules = {} - self.sg_rules['test_sg_p1'] = [ - {'direction': '>', - 'protocol': 'any', 'rule_uuid': uuid_1, - 'dst_addresses': [{'security_group': 'local'}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - }, {'direction': '>', - 'protocol': 'any', 'rule_uuid': uuid_2, - 'src_addresses': [{'security_group': 'local'}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - }, {'direction': '>', - 'protocol': 'any', 'rule_uuid': uuid_3, - 'src_addresses': [{'security_group': 'default-domain:project2:test_sg_p1'}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_addresses': [{'security_group': 'local'}]}] - - return self - # end build_topo_project2 - -# end 
sdn_flow_test_topo_single_project diff --git a/serial_scripts/flow_tests/mini_system_test_topo.py b/serial_scripts/flow_tests/mini_system_test_topo.py deleted file mode 100755 index b4acae12c..000000000 --- a/serial_scripts/flow_tests/mini_system_test_topo.py +++ /dev/null @@ -1,151 +0,0 @@ -'''*******AUTO-GENERATED TOPOLOGY*********''' - - -class systest_topo_single_project (): - - def __init__(self, compute_node_list=None, domain='default-domain', project=None, username=None, password=None): - self.project_list = ['project2'] - - # Define the vm to compute node mapping to pin a vm to a particular - # compute node or else leave empty. - #self.vm_node_map = {} - self.vm_node_map = {} - if compute_node_list is not None: - if len(compute_node_list) == 2: - self.vm_node_map = { - 'vmc1': 'CN0', 'vmc2': 'CN0', - } - - # Logic to create a vm to Compute node mapping. - if self.vm_node_map: - CN = [] - for cn in self.vm_node_map.keys(): - if self.vm_node_map[cn] not in CN: - CN.append(self.vm_node_map[cn]) - my_node_dict = {} - if compute_node_list is not None: - if len(compute_node_list) >= len(CN): - my_node_dict = dict(zip(CN, compute_node_list)) - - if my_node_dict: - for key in my_node_dict: - for key1 in self.vm_node_map: - if self.vm_node_map[key1] == key: - self.vm_node_map[key1] = my_node_dict[key] - - # - # Define traffic profile. - self.traffic_profile = {'TrafficProfile1': {'src_vm': 'vmc1', 'dst_vm': 'vmc2', 'num_flows': 9000, 'num_pkts': 90000}} - # - # A master list of all the vm static routes defined. 
- self.vm_static_route_master = { - 'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28' - } - self.vm_static_route_test = {} - - # - # Define FIP pool - self.fip_pools = {} - self.fvn_vm_map = {} - # end __init__ - - def build_topo_project2(self, domain='default-domain', project='project2', username='juniper', password='juniper123'): - # - # Topo for project: project2 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet1'] - # - # Define network info for each VN: - self.vn_nets = { - 'vnet1': ['10.1.1.0/30', '10.1.1.4/30', '10.1.1.8/30']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet1': 'ipam1'} - # - # Define network policies - self.policy_list = ['policy1', 'policy-si-1', 'policy-si-2'] - self.vn_policy = {'vnet1': ['policy1', 'policy-si-1'] - } - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = { - 'vmc1': 'vnet1', 'vmc2': 'vnet1' - } - # - # Define static route behind vms. 
- self.vm_static_route = { - 'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28'} - self.vm_static_route_test.update(self.vm_static_route) - - ## Define Service template & instances - self.st_list = ['st_trans_left', 'st_inNet_left'] - self.si_list = ['si-mirror-1', 'si-mirror-2'] - - # - # Define network policy rules - self.rules = {} - self.rules['policy1'] = [ - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project2:vnet1', 'source_network': 'local', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - self.rules['policy-si-1'] = [ - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'any', 'source_network': 'any', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any', 'action_list': {'simple_action':'pass', 'mirror_to': {'analyzer_name' : ':'.join([self.domain,self.project,self.si_list[0]])}}}] - self.rules['policy-si-2'] = [ - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'any', 'source_network': 'any', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any', 'action_list': {'simple_action':'pass', 'mirror_to': {'analyzer_name' : ':'.join([self.domain,self.project,self.si_list[1]])}}}] - - #ST and SI topology - self.st_params = {} - self.si_params = {} - - self.st_params[self.st_list[0]]={'svc_img_name': 'analyzer', 'svc_type':'analyzer', 'if_list':[['left', False, False]], 'svc_mode':'transparent', 'svc_scaling':False, 'flavor':'m1.medium', 'ordered_interfaces': True} - self.st_params[self.st_list[1]] = {'svc_img_name': 'analyzer', 'svc_type':'analyzer', 'if_list':[['left', False, False]], 'svc_mode':'in-network', 'svc_scaling':False, 'flavor':'m1.medium', 'ordered_interfaces': True} - - self.si_params[self.si_list[0]] = {'svc_template':self.st_list[0], 'if_list':self.st_params[self.st_list[0]]['if_list'], 'left_vn':None} - self.si_params[self.si_list[1]] = {'svc_template':self.st_list[1], 'if_list':self.st_params[self.st_list[1]]['if_list'], 'left_vn':None} - - self.pol_si= 
{self.policy_list[1]:self.si_list[0], self.policy_list[2]:self.si_list[1]} - self.si_pol = {self.si_list[0]:self.policy_list[1], self.si_list[1]:self.policy_list[2]} - - # Define security_group name - self.sg_list = ['test_sg_p1'] - # - # Define security_group with vm - self.sg_of_vm = { - 'vmc1': ['test_sg_p1'], 'vmc2': ['test_sg_p1']} - # Define the security_group rules - import uuid - uuid_1 = uuid.uuid1().urn.split(':')[2] - uuid_2 = uuid.uuid1().urn.split(':')[2] - uuid_3 = uuid.uuid1().urn.split(':')[2] - self.sg_rules = {} - self.sg_rules['test_sg_p1'] = [ - {'direction': '>', - 'protocol': 'any', 'rule_uuid': uuid_1, - 'dst_addresses': [{'security_group': 'local'}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - }, {'direction': '>', - 'protocol': 'any', 'rule_uuid': uuid_2, - 'src_addresses': [{'security_group': 'local'}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - }, {'direction': '>', - 'protocol': 'any', 'rule_uuid': uuid_3, - 'src_addresses': [{'security_group': 'default-domain:project2:test_sg_p1'}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_addresses': [{'security_group': 'local'}]}] - - return self - # end build_topo_project2 - -# end sdn_flow_test_topo_single_project diff --git a/serial_scripts/flow_tests/sdn_flow_test_topo.py b/serial_scripts/flow_tests/sdn_flow_test_topo.py deleted file mode 100755 index aeb5514c4..000000000 --- a/serial_scripts/flow_tests/sdn_flow_test_topo.py +++ /dev/null @@ -1,259 +0,0 @@ -'''*******AUTO-GENERATED TOPOLOGY*********''' - - -class sdn_flow_test_topo_single_project (): - - def __init__(self, domain='default-domain', compute_node_list=None): - 
self.project_list = ['project1'] - - # Define the vm to compute node mapping to pin a vm to a particular - # compute node or else leave empty. - #self.vm_node_map = {} - self.vm_node_map = {} - if compute_node_list is not None: - if len(compute_node_list) == 2: - self.vm_node_map = { - 'vmc1': 'CN0', 'vmc2': 'CN0', 'vmc3': 'CN0', 'vmc4': 'CN0', 'vmc8': 'CN0', 'vmc9': 'CN0', - 'vmc5': 'CN1', 'vmc6': 'CN1', 'vmc7': 'CN1', 'vmd10': 'CN1', 'vmd11': 'CN1'} - elif len(compute_node_list) > 2: - self.vm_node_map = { - 'vmc1': 'CN0', 'vmc2': 'CN0', 'vmc3': 'CN0', 'vmc4': 'CN0', - 'vmc5': 'CN1', 'vmc6': 'CN1', 'vmc7': 'CN1', - 'vmc8': 'CN2', 'vmc9': 'CN2', 'vmd10': 'CN2', 'vmd11': 'CN2'} - - # Logic to create a vm to Compute node mapping. - if self.vm_node_map: - CN = [] - for cn in self.vm_node_map.keys(): - if self.vm_node_map[cn] not in CN: - CN.append(self.vm_node_map[cn]) - my_node_dict = {} - if compute_node_list is not None: - if len(compute_node_list) >= len(CN): - my_node_dict = dict(zip(CN, compute_node_list)) - - if my_node_dict: - for key in my_node_dict: - for key1 in self.vm_node_map: - if self.vm_node_map[key1] == key: - self.vm_node_map[key1] = my_node_dict[key] - - # - # Define traffic profile. 
- self.traffic_profile = {'TrafficProfile1': {'src_vm': 'vmc1', 'dst_vm': 'vmc2', 'num_flows': 300000, 'num_pkts': 2000000}, # Intra VN,Intra Node - # Intra VN,Inter Node - 'TrafficProfile2': {'src_vm': 'vmc4', 'dst_vm': 'vmc7', 'num_flows': 300000, 'num_pkts': 2000000}, - # Inter VN,Intra Node,Pol - 'TrafficProfile3': {'src_vm': 'vmc3', 'dst_vm': 'vmc4', 'num_flows': 300000, 'num_pkts': 2000000}, - # Inter VN,Inter Node,Pol - 'TrafficProfile4': {'src_vm': 'vmc3', 'dst_vm': 'vmc7', 'num_flows': 300000, 'num_pkts': 2000000}, - # Inter VN,Intra Node,FIP - 'TrafficProfile5': {'src_vm': 'vmc5', 'dst_vm': 'vmc6', 'num_flows': 100000, 'num_pkts': 2000000}, - 'TrafficProfile6': {'src_vm': 'vmc8', 'dst_vm': 'vmc5', 'num_flows': 100000, 'num_pkts': 2000000}} # Inter VN,Inter Node,FIP - # - # A master list of all the vm static routes defined. - self.vm_static_route_master = { - 'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28', 'vmc3': '111.3.1.0/28', - 'vmc4': '111.4.1.0/28', 'vmc5': '111.5.1.0/28', 'vmc7': '111.7.1.0/28', 'vmc8': '111.8.1.0/28'} - self.vm_static_route_test = {} - - # end __init__ - - def build_topo_project1(self, domain='default-domain', project='project1', username='juniper', password='juniper123'): - # - # Topo for project: project1 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet1', 'vnet2', 'vnet3', 'vnet4', 'vnet5', 'vnet6'] - # - # Define network info for each VN: - self.vn_nets = { - 'vnet1': ['10.1.1.0/24'], 'vnet2': ['10.2.1.0/24'], 'vnet3': ['10.3.1.0/24'], - 'vnet4': ['10.4.1.0/24'], 'vnet5': ['10.5.1.0/24'], 'vnet6': ['10.6.1.0/24']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet1': 'ipam1', 'vnet2': 'ipam2', 'vnet3': - 'ipam3', 'vnet4': 'ipam4', 'vnet5': 'ipam5', 'vnet6': 'ipam6'} - # - # Define network policies - 
self.policy_list = ['policy1', 'policy2'] - self.vn_policy = {'vnet1': ['policy1'], 'vnet2': ['policy1'], - 'vnet3': [], 'vnet4': ['policy2'], 'vnet5': [], 'vnet6': []} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = { - 'vmc1': 'vnet1', 'vmc2': 'vnet1', 'vmc3': 'vnet1', 'vmc4': 'vnet2', 'vmc5': 'vnet3', - 'vmc6': 'vnet4', 'vmc7': 'vnet2', 'vmc8': 'vnet5', 'vmc9': 'vnet4', 'vmd10': 'vnet6'} - # - # Define static route behind vms. - self.vm_static_route = { - 'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28', 'vmc3': '111.3.1.0/28', - 'vmc4': '111.4.1.0/28', 'vmc5': '111.5.1.0/28', 'vmc7': '111.7.1.0/28', 'vmc8': '111.8.1.0/28'} - self.vm_static_route_test.update(self.vm_static_route) - - # - # Define network policy rules - self.rules = {} - self.rules['policy1'] = [ - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project1:vnet2', 'source_network': - 'default-domain:project1:vnet1', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}, - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project1:vnet1', 'source_network': 'default-domain:project1:vnet2', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - self.rules[ - 'policy2'] = [{'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project2:vnet7', - 'source_network': 'default-domain:project1:vnet4', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - - # - # Define VN to VM mappings for each of the floating ip pools to be - # created. 
- self.fvn_vm_map = {'vnet3': ['vmc6', 'vmc8'], - 'vnet4': ['vmc5'], 'vnet5': ['vmc5']} - - return self - # end build_topo_project1 - -# end sdn_flow_test_topo_single_project - - -class sdn_flow_test_topo_multi_project (): - - def __init__(self, domain='default-domain', compute_node_list=None): - self.project_list = ['project1', 'project2'] - - # Define the vm to compute node mapping to pin a vm to a particular - # compute node or else leave empty. - #self.vm_node_map = {} - self.vm_node_map = { - 'vmc1': 'CN0', 'vmc2': 'CN0', 'vmc3': 'CN0', 'vmc4': 'CN0', 'vmd12': 'CN0', - 'vmc5': 'CN1', 'vmc6': 'CN1', 'vmc7': 'CN1', 'vmd13': 'CN1', - 'vmc8': 'CN2', 'vmc9': 'CN2', 'vmd10': 'CN2', 'vmd11': 'CN2'} - - # Logic to create a vm to Compute node mapping. - if self.vm_node_map: - CN = [] - for cn in self.vm_node_map.keys(): - if self.vm_node_map[cn] not in CN: - CN.append(self.vm_node_map[cn]) - my_node_dict = {} - if compute_node_list is not None: - if len(compute_node_list) >= len(CN): - my_node_dict = dict(zip(CN, compute_node_list)) - - if my_node_dict: - for key in my_node_dict: - for key1 in self.vm_node_map: - if self.vm_node_map[key1] == key: - self.vm_node_map[key1] = my_node_dict[key] - - # - # Define traffic profile. - self.traffic_profile = { - 'TrafficProfile1': {'src_vm': 'vmc1', 'dst_vm': 'vmc2', 'num_flows': 50000, 'num_pkts': 2000000}, - 'TrafficProfile2': {'src_vm': 'vmc4', 'dst_vm': 'vmc7', 'num_flows': 50000, 'num_pkts': 2000000}, - 'TrafficProfile3': {'src_vm': 'vmc3', 'dst_vm': 'vmc4', 'num_flows': 50000, 'num_pkts': 2000000}, - 'TrafficProfile4': {'src_vm': 'vmc3', 'dst_vm': 'vmc7', 'num_flows': 50000, 'num_pkts': 2000000}} - # - # A master list of all the vm static routes defined. 
- self.vm_static_route_master = { - 'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28', - 'vmc3': '111.3.1.0/28', 'vmc4': '111.4.1.0/28', 'vmc7': '111.7.1.0/28'} - self.vm_static_route_test = {} - - # end __init__ - - def build_topo_project1(self, domain='default-domain', project='project1', username='juniper', password='juniper123'): - # - # Topo for project: project1 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet1', 'vnet2', 'vnet3', 'vnet4', 'vnet5', 'vnet6'] - # - # Define network info for each VN: - self.vn_nets = { - 'vnet1': ['10.1.1.0/24'], 'vnet2': ['10.2.1.0/24'], 'vnet3': ['10.3.1.0/24'], - 'vnet4': ['10.4.1.0/24'], 'vnet5': ['10.5.1.0/24'], 'vnet6': ['10.6.1.0/24']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet1': 'ipam1', 'vnet2': 'ipam2', 'vnet3': - 'ipam3', 'vnet4': 'ipam4', 'vnet5': 'ipam5', 'vnet6': 'ipam6'} - # - # Define network policies - self.policy_list = ['policy1', 'policy2'] - self.vn_policy = {'vnet1': ['policy1'], 'vnet2': ['policy1'], - 'vnet3': [], 'vnet4': ['policy2'], 'vnet5': [], 'vnet6': []} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = { - 'vmc1': 'vnet1', 'vmc2': 'vnet1', 'vmc3': 'vnet1', 'vmc4': 'vnet2', 'vmc5': 'vnet3', - 'vmc6': 'vnet4', 'vmc7': 'vnet2', 'vmc8': 'vnet5', 'vmc9': 'vnet4', 'vmd10': 'vnet6'} - # - # Define static route behind vms. 
- self.vm_static_route = {'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28', - 'vmc3': '111.3.1.0/28', 'vmc4': '111.4.1.0/28', 'vmc7': '111.7.1.0/28'} - self.vm_static_route_test.update(self.vm_static_route) - - # - # Define network policy rules - self.rules = {} - self.rules['policy1'] = [ - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project1:vnet2', 'source_network': - 'default-domain:project1:vnet1', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}, - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project1:vnet1', 'source_network': 'default-domain:project1:vnet2', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - self.rules[ - 'policy2'] = [{'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project2:vnet7', - 'source_network': 'default-domain:project1:vnet4', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - return self - # end build_topo_project1 - - def build_topo_project2(self, domain='default-domain', project='project2', username=None, password=None): - # - # Topo for project: project2 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet7'] - # - # Define network info for each VN: - self.vn_nets = {'vnet7': ['10.7.1.0/24']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet7': 'ipam7'} - # - # Define network policies - self.policy_list = ['policy3'] - self.vn_policy = {'vnet7': ['policy3']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = {'vmd11': 'vnet7'} - # - # Define network policy rules - self.rules = {} - self.rules[ - 'policy3'] = [{'direction': '<>', 'protocol': 'udp', 'dest_network': 
'default-domain:project1:vnet4', - 'source_network': 'default-domain:project2:vnet7', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - return self - # end build_topo_project2 - -# end sdn_flow_test_topo_multi_project diff --git a/serial_scripts/flow_tests/sdn_flow_test_topo_multiple_projects.py b/serial_scripts/flow_tests/sdn_flow_test_topo_multiple_projects.py deleted file mode 100755 index e34e01f5d..000000000 --- a/serial_scripts/flow_tests/sdn_flow_test_topo_multiple_projects.py +++ /dev/null @@ -1,138 +0,0 @@ -class multi_project_topo (): - - def __init__(self, domain='default-domain', compute_node_list=None): - self.project_list = ['project1', 'project2'] - - # Define the vm to compute node mapping to pin a vm to a particular - # compute node or else leave empty. - self.vm_node_map = {} - if compute_node_list is not None: - if len(compute_node_list) == 2: - self.vm_node_map = { - 'vmc1': 'CN0', 'vmc2': 'CN0', 'vmc3': 'CN0', 'vmc4': 'CN0', 'vmc8': 'CN0', 'vmc9': 'CN0', - 'vmc5': 'CN1', 'vmc6': 'CN1', 'vmc7': 'CN1', 'vmd10': 'CN1', 'vmd11': 'CN1'} - elif len(compute_node_list) > 2: - self.vm_node_map = { - 'vmc1': 'CN0', 'vmc2': 'CN0', 'vmc3': 'CN0', 'vmc4': 'CN0', - 'vmc5': 'CN1', 'vmc6': 'CN1', 'vmc7': 'CN1', - 'vmc8': 'CN2', 'vmc9': 'CN2', 'vmd10': 'CN2', 'vmd11': 'CN2'} - - # Logic to create a vm to Compute node mapping. - if self.vm_node_map: - CN = [] - for cn in self.vm_node_map.keys(): - if self.vm_node_map[cn] not in CN: - CN.append(self.vm_node_map[cn]) - my_node_dict = {} - if compute_node_list is not None: - if len(compute_node_list) >= len(CN): - my_node_dict = dict(zip(CN, compute_node_list)) - - if my_node_dict: - for key in my_node_dict: - for key1 in self.vm_node_map: - if self.vm_node_map[key1] == key: - self.vm_node_map[key1] = my_node_dict[key] - - # - # Define traffic profile. 
- self.traffic_profile = { - 'TrafficProfile1': {'src_vm': 'vmc3', 'dst_vm': 'vmd11', 'num_flows': 50000, 'num_pkts': 100000, 'src_proj': 'project1', 'dst_proj': 'project2'}} - # - # A master list of all the vm static routes defined. - self.vm_static_route_master = { - 'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28', - 'vmc3': '111.3.1.0/28', 'vmc4': '111.4.1.0/28', 'vmc7': '111.7.1.0/28'} - self.vm_static_route_test = {} - - # end __init__ - - def build_topo_project1(self, domain='default-domain', project='project1', username='juniper', password='juniper123'): - # - # Topo for project: project1 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet1', 'vnet2', 'vnet3', 'vnet4', 'vnet5', 'vnet6'] - # - # Define network info for each VN: - self.vn_nets = { - 'vnet1': ['10.1.1.0/24'], 'vnet2': ['10.2.1.0/24'], 'vnet3': ['10.3.1.0/24'], - 'vnet4': ['10.4.1.0/24'], 'vnet5': ['10.5.1.0/24'], 'vnet6': ['10.6.1.0/24']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet1': 'ipam1', 'vnet2': 'ipam2', 'vnet3': - 'ipam3', 'vnet4': 'ipam4', 'vnet5': 'ipam5', 'vnet6': 'ipam6'} - # - # Define network policies - self.policy_list = ['policy1', 'policy2'] - self.vn_policy = {'vnet1': ['policy1', 'policy2'], 'vnet2': ['policy1'], - 'vnet3': [], 'vnet4': ['policy2'], 'vnet5': [], 'vnet6': []} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = { - 'vmc1': 'vnet1', 'vmc2': 'vnet1', 'vmc3': 'vnet1', 'vmc4': 'vnet2', 'vmc5': 'vnet3', - 'vmc6': 'vnet4', 'vmc7': 'vnet2', 'vmc8': 'vnet5', 'vmc9': 'vnet4', 'vmd10': 'vnet6'} - # - # Define static route behind vms. 
- self.vm_static_route = {'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28', - 'vmc3': '111.3.1.0/28', 'vmc4': '111.4.1.0/28', 'vmc7': '111.7.1.0/28'} - self.vm_static_route_test.update(self.vm_static_route) - - # - # Define network policy rules - self.rules = {} - self.rules['policy1'] = [ - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project1:vnet2', 'source_network': - 'default-domain:project1:vnet1', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}, - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project1:vnet1', 'source_network': 'default-domain:project1:vnet2', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - self.rules[ - 'policy2'] = [{'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project2:vnet7', - 'source_network': 'default-domain:project1:vnet1', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - return self - # end build_topo_project1 - - def build_topo_project2(self, domain='default-domain', project='project2', username='project2-user', password='juniper123'): - # - # Topo for project: project2 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet7'] - # - # Define network info for each VN: - self.vn_nets = {'vnet7': ['10.7.1.0/24']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet7': 'ipam7'} - # - # Define network policies - self.policy_list = ['policy3'] - self.vn_policy = {'vnet7': ['policy3']} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = {'vmd11': 'vnet7'} - # - # Define network policy rules - self.rules = {} - self.rules[ - 'policy3'] = [{'direction': '<>', 'protocol': 'udp', 'dest_network': 
'default-domain:project1:vnet1', - 'source_network': 'default-domain:project2:vnet7', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - return self - # end build_topo_project2 - -# end multi_project_topo diff --git a/serial_scripts/flow_tests/system_test_topo.py b/serial_scripts/flow_tests/system_test_topo.py deleted file mode 100755 index 04892be45..000000000 --- a/serial_scripts/flow_tests/system_test_topo.py +++ /dev/null @@ -1,184 +0,0 @@ -'''*******AUTO-GENERATED TOPOLOGY*********''' - - -class systest_topo_single_project (): - - def __init__(self, compute_node_list=None, domain='default-domain', project=None, username=None, password=None): - self.project_list = ['project2'] - - # Define the vm to compute node mapping to pin a vm to a particular - # compute node or else leave empty. - #self.vm_node_map = {} - self.vm_node_map = {} - if compute_node_list is not None: - if len(compute_node_list) == 2: - self.vm_node_map = { - 'vmc1': 'CN0', 'vmc2': 'CN0', 'vmc3': 'CN0', 'vmc4': 'CN0', 'vmc8': 'CN0', 'vmc9': 'CN0', - 'vmc5': 'CN1', 'vmc6': 'CN1', 'vmc7': 'CN1', 'vmd10': 'CN1', 'vmd11': 'CN1'} - elif len(compute_node_list) > 2: - self.vm_node_map = { - 'vmc1': 'CN0', 'vmc2': 'CN0', 'vmc3': 'CN0', 'vmc4': 'CN0', - 'vmc5': 'CN1', 'vmc6': 'CN1', 'vmc7': 'CN1', - 'vmc8': 'CN2', 'vmc9': 'CN2', 'vmd10': 'CN2', 'vmd11': 'CN2'} - - # Logic to create a vm to Compute node mapping. - if self.vm_node_map: - CN = [] - for cn in self.vm_node_map.keys(): - if self.vm_node_map[cn] not in CN: - CN.append(self.vm_node_map[cn]) - my_node_dict = {} - if compute_node_list is not None: - if len(compute_node_list) >= len(CN): - my_node_dict = dict(zip(CN, compute_node_list)) - - if my_node_dict: - for key in my_node_dict: - for key1 in self.vm_node_map: - if self.vm_node_map[key1] == key: - self.vm_node_map[key1] = my_node_dict[key] - - # - # Define traffic profile. 
- self.traffic_profile = {'TrafficProfile1': {'src_vm': 'vmc1', 'dst_vm': 'vmc2', 'num_flows': 9000, 'num_pkts': 90000}, # Intra VN,Intra Node - # Intra VN,Inter Node - 'TrafficProfile2': {'src_vm': 'vmc4', 'dst_vm': 'vmc7', 'num_flows': 9000, 'num_pkts': 90000}, - # Inter VN,Intra Node,Pol - 'TrafficProfile3': {'src_vm': 'vmc3', 'dst_vm': 'vmc4', 'num_flows': 9000, 'num_pkts': 90000}, - # Inter VN,Inter Node,Pol - 'TrafficProfile4': {'src_vm': 'vmc3', 'dst_vm': 'vmc7', 'num_flows': 9000, 'num_pkts': 90000}, - # Inter VN,Intra Node,FIP - 'TrafficProfile5': {'src_vm': 'vmc5', 'dst_vm': 'vmc6', 'num_flows': 9000, 'num_pkts': 90000}, - 'TrafficProfile6': {'src_vm': 'vmc8', 'dst_vm': 'vmc5', 'num_flows': 9000, 'num_pkts': 90000}} # Inter VN,Inter Node,FIP - #self.traffic_profile = {'TrafficProfile1': {'src_vm': 'vmc1', 'dst_vm': 'vmc2', 'num_flows': 9000, 'num_pkts': 90000}, # Intra VN,Intra Node - # 'TrafficProfile6': {'src_vm': 'vmc8', 'dst_vm': 'vmc5', 'num_flows': 9000, 'num_pkts': 90000}} # Inter VN,Inter Node,FIP - # - # A master list of all the vm static routes defined. 
- self.vm_static_route_master = { - 'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28', 'vmc3': '111.3.1.0/28', - 'vmc4': '111.4.1.0/28', 'vmc5': '111.5.1.0/28', 'vmc7': '111.7.1.0/28', 'vmc8': '111.8.1.0/28'} - self.vm_static_route_test = {} - - # - # Define FIP pool - self.fip_pools = {'project2': { - 'p1-vn3-pool1': {'host_vn': 'vnet3', 'target_projects': ['project2']}, - 'p1-vn4-pool2': {'host_vn': 'vnet4', 'target_projects': ['project2']}, - 'p1-vn5-pool3': {'host_vn': 'vnet5', 'target_projects': ['project2']}, - } - } - #self.fvn_vm_map = {'vnet3':['vmc6', 'vmc8'], 'vnet4':['vmc5'], 'vnet5':['vmc5']} - self.fvn_vm_map = {'project2': { - 'vnet3': {'project2': ['vmc6', 'vmc8']}, - 'vnet4': {'project2': ['vmc5']}, - 'vnet5': {'project2': ['vmc5']}, - } - } - # end __init__ - - def build_topo_project2(self, domain='default-domain', project='project2', username='juniper', password='juniper123'): - # - # Topo for project: project2 - # Define Domain and project - self.domain = domain - self.project = project - self.username = username - self.password = password - # - # Define VN's in the project: - self.vnet_list = ['vnet1', 'vnet2', 'vnet3', 'vnet4', 'vnet5', 'vnet6'] - # - # Define network info for each VN: - self.vn_nets = { - 'vnet1': ['10.1.1.0/30', '10.1.1.4/30', '10.1.1.8/30'], 'vnet2': ['10.2.1.0/30', '10.2.1.4/30'], 'vnet3': ['10.3.1.0/30', '10.3.1.4/30', '10.3.1.8/30'], - 'vnet4': ['10.4.1.0/30', '10.4.1.4/30', '10.5.1.8/30'], 'vnet5': ['10.5.1.0/30', '10.5.1.4/30', '10.5.1.8/30'], 'vnet6': ['10.6.1.0/30', '10.6.1.4/30']} - # - # Define netowrk IPAM for each VN, if not defined default-user-created - # ipam will be created and used - self.vn_ipams = {'vnet1': 'ipam1', 'vnet2': 'ipam2', 'vnet3': - 'ipam3', 'vnet4': 'ipam4', 'vnet5': 'ipam5', 'vnet6': 'ipam6'} - # - # Define network policies - self.policy_list = ['policy1', 'policy-si-1', 'policy-si-2'] - self.vn_policy = {'vnet1': ['policy1', 'policy-si-1'], 'vnet2': ['policy1'], - 'vnet3': ['policy-si-2'], 
'vnet4': [], 'vnet5': [], 'vnet6': []} - # - # Define VM's - # VM distribution on available compute nodes is handled by nova - # scheduler or contrail vm naming scheme - self.vn_of_vm = { - 'vmc1': 'vnet1', 'vmc2': 'vnet1', 'vmc3': 'vnet1', 'vmc4': 'vnet2', 'vmc5': 'vnet3', - 'vmc6': 'vnet4', 'vmc7': 'vnet2', 'vmc8': 'vnet5', 'vmc9': 'vnet4', 'vmd10': 'vnet6'} - # - # Define static route behind vms. - self.vm_static_route = { - 'vmc1': '111.1.1.0/28', 'vmc2': '111.2.1.0/28', 'vmc3': '111.3.1.0/28', - 'vmc4': '111.4.1.0/28', 'vmc5': '111.5.1.0/28', 'vmc7': '111.7.1.0/28', 'vmc8': '111.8.1.0/28'} - self.vm_static_route_test.update(self.vm_static_route) - - ## Define Service template & instances - self.st_list = ['st_trans_left', 'st_inNet_left'] - self.si_list = ['si-mirror-1', 'si-mirror-2'] - - # - # Define network policy rules - self.rules = {} - self.rules['policy1'] = [ - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project2:vnet2', 'source_network': - 'default-domain:project2:vnet1', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}, - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'default-domain:project2:vnet1', 'source_network': 'default-domain:project2:vnet2', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any'}] - self.rules['policy-si-1'] = [ - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'any', 'source_network': 'any', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any', 'action_list': {'simple_action':'pass', 'mirror_to': {'analyzer_name' : ':'.join([self.domain,self.project,self.si_list[0]])}}}] - self.rules['policy-si-2'] = [ - {'direction': '<>', 'protocol': 'udp', 'dest_network': 'any', 'source_network': 'any', 'dst_ports': 'any', 'simple_action': 'pass', 'src_ports': 'any', 'action_list': {'simple_action':'pass', 'mirror_to': {'analyzer_name' : ':'.join([self.domain,self.project,self.si_list[1]])}}}] - - #ST and SI topology - self.st_params = {} - self.si_params = {} 
- - self.st_params[self.st_list[0]]={'svc_img_name': 'analyzer', 'svc_type':'analyzer', 'if_list':[['left', False, False]], 'svc_mode':'transparent', 'svc_scaling':False, 'flavor':'contrail_flavor_2cpu', 'ordered_interfaces': True} - self.st_params[self.st_list[1]] = {'svc_img_name': 'analyzer', 'svc_type':'analyzer', 'if_list':[['left', False, False]], 'svc_mode':'in-network', 'svc_scaling':False, 'flavor':'contrail_flavor_2cpu', 'ordered_interfaces': True} - - self.si_params[self.si_list[0]] = {'svc_template':self.st_list[0], 'if_list':self.st_params[self.st_list[0]]['if_list'], 'left_vn':None} - self.si_params[self.si_list[1]] = {'svc_template':self.st_list[1], 'if_list':self.st_params[self.st_list[1]]['if_list'], 'left_vn':None} - - self.pol_si= {self.policy_list[1]:self.si_list[0], self.policy_list[2]:self.si_list[1]} - self.si_pol = {self.si_list[0]:self.policy_list[1], self.si_list[1]:self.policy_list[2]} - - # Define security_group name - self.sg_list = ['test_sg_p1'] - # - # Define security_group with vm - self.sg_of_vm = { - 'vmc1': ['test_sg_p1'], 'vmc2': ['test_sg_p1'], 'vmc3': ['test_sg_p1'], 'vmc4': ['test_sg_p1'], 'vmc5': ['test_sg_p1'], - 'vmc6': ['test_sg_p1'], 'vmc7': ['test_sg_p1'], 'vmc8': ['test_sg_p1'], 'vmc9': ['test_sg_p1'], 'vmd10': ['test_sg_p1']} - # Define the security_group rules - import uuid - uuid_1 = uuid.uuid1().urn.split(':')[2] - uuid_2 = uuid.uuid1().urn.split(':')[2] - uuid_3 = uuid.uuid1().urn.split(':')[2] - self.sg_rules = {} - self.sg_rules['test_sg_p1'] = [ - {'direction': '>', - 'protocol': 'any', 'rule_uuid': uuid_1, - 'dst_addresses': [{'security_group': 'local'}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - }, {'direction': '>', - 'protocol': 'any', 'rule_uuid': uuid_2, - 'src_addresses': [{'security_group': 'local'}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 
'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_addresses': [{'subnet': {'ip_prefix': '0.0.0.0', 'ip_prefix_len': 0}}], - }, {'direction': '>', - 'protocol': 'any', 'rule_uuid': uuid_3, - 'src_addresses': [{'security_group': 'default-domain:project2:test_sg_p1'}], - 'dst_ports': [{'start_port': 0, 'end_port': 65535}], - 'src_ports': [{'start_port': 0, 'end_port': 65535}], - 'dst_addresses': [{'security_group': 'local'}]}] - - return self - # end build_topo_project2 - -# end sdn_flow_test_topo_single_project diff --git a/serial_scripts/flow_tests/test_flow_scenarios.py b/serial_scripts/flow_tests/test_flow_scenarios.py deleted file mode 100644 index 2b2af069f..000000000 --- a/serial_scripts/flow_tests/test_flow_scenarios.py +++ /dev/null @@ -1,48 +0,0 @@ -from string import Template - -from common.neutron.base import BaseNeutronTest -from tcutils.wrappers import preposttest_wrapper -from tcutils.util import skip_because - -class ExtendedFlowTests(BaseNeutronTest): - - @classmethod - def setUpClass(cls): - super(ExtendedFlowTests, cls).setUpClass() - cls.vnc_api_h = cls.vnc_lib - # end setUpClass - - @classmethod - def tearDownClass(cls): - super(ExtendedFlowTests, cls).tearDownClass() - # end tearDownClass - - @preposttest_wrapper - @skip_because( bug='1530034') - def test_with_fuzz_bug_1504710(self): - ''' - This test makes sure that the vrouter doesnt misbehave - with various IP protocols - ''' - - # TODO - # Unable to figure out what scapy profile can fuzz - # packets. 
Currently use raw scapy code itself - python_code = Template(''' -from scapy.all import * -a=fuzz(IP(dst='$dest_ip')/Raw(RandString(size=300))) -send(a, count=1000, inter=0, iface='eth0') -''') - - vn_fixture = self.create_vn() - vm1_fixture = self.create_vm(vn_fixture) - vm2_fixture = self.create_vm(vn_fixture) - vm1_fixture.wait_till_vm_is_up() - vm2_fixture.wait_till_vm_is_up() - python_code = python_code.substitute(dest_ip=vm2_fixture.vm_ip) - vm1_fixture.run_python_code(python_code) - - # Now validate that later pings between vms work - self.do_ping_test(vm1_fixture, vm1_fixture.vm_ip, vm2_fixture.vm_ip) - # end test_with_fuzz_bug_1504710 - diff --git a/serial_scripts/flow_tests/test_system_flows.py b/serial_scripts/flow_tests/test_system_flows.py deleted file mode 100644 index bc3244cf9..000000000 --- a/serial_scripts/flow_tests/test_system_flows.py +++ /dev/null @@ -1,918 +0,0 @@ -from flow_tests.base import BaseFlowTest -from common.contrail_test_init import ContrailTestInit -from vn_test import * -from quantum_test import * -from vnc_api_test import * -from nova_test import * -from vm_test import * -from floating_ip import * -from policy_test import * -from contrail_fixtures import * -from tcutils.agent.vna_introspect_utils import * -from tcutils.topo.topo_helper import * -from tcutils.wrappers import preposttest_wrapper -from tcutils.commands import ssh, execute_cmd, execute_cmd_out -from tcutils.topo.sdn_topo_setup import * -from common.policy.get_version import * -from common.system.system_verification import verify_system_parameters -import sdn_flow_test_topo -import traffic_tests -import time -import datetime -import threading -import socket -import flow_test_utils -from compute_node_test import ComputeNodeFixture -import system_test_topo -import flow_test_topo -import sdn_flow_test_topo_multiple_projects -from tcutils.test_lib.test_utils import assertEqual, get_ip_list_from_prefix -import math - - -class SDNFlowTests(BaseFlowTest, 
flow_test_utils.VerifySvcMirror): - _interface = 'json' - max_system_flows = 512000 - - @classmethod - def setUpClass(cls): - super(SDNFlowTests, cls).setUpClass() - # end setUpClass - - def cleanUp(self): - super(SDNFlowTests, self).cleanUp() - # end cleanUp - - def runTest(self): - pass - # end runTest - - def set_flow_tear_time(self): - # Get flow-cache_timeout from one node and use as reference... - # Assumption is all nodes set to same value... - cmp_node = self.inputs.compute_ips[0] - self.agent_obj = self.useFixture( - ComputeNodeFixture( - self.connections, - cmp_node)) - self.flow_cache_timeout = self.agent_obj.get_config_flow_aging_time() - self.flow_teardown_time = 60 - self.time_to_retire_flows = int( - self.flow_cache_timeout) + self.flow_teardown_time - - def delete_agent_flows(self): - for comp_node in self.inputs.compute_ips: - comp_node_fixt = self.useFixture( - ComputeNodeFixture(self.connections, comp_node)) - self.logger.info( - "flows now in %s: %s" % - (comp_node, comp_node_fixt.get_vrouter_flow_count())) - comp_inspect = self.agent_inspect[comp_node] - comp_inspect.delete_all_flows() - self.logger.info( - "flows after deleting in %s: %s" % - (comp_node, comp_node_fixt.get_vrouter_flow_count())) - self.logger.info("wait for 10 secs for the flows to tear down") - time.sleep(10) - - # get source min, max ip's and destination max port. - def src_min_max_ip_and_dst_max_port( - self, - ips, - no_of_ip, - dst_min_port, - flows): - """ Called by test_flow_single_project or test_flow_multi_project to get the min source ip, max source ip and - Max port number of the destination. This helps to create certain no of flows as expected by test_flow_single_project - or test_flow_multi_project routines, from where it is called. 
- """ - ip_list = list() - for index in range(no_of_ip): - ip_list.append(ips[index]) - src_min_ip = ip_list[0] - src_max_ip = ip_list[-1] - dst_max_port = dst_min_port + (flows / no_of_ip) - result_dict = {'src_min_ip': src_min_ip, 'src_max_ip': - src_max_ip, 'dst_max_port': dst_max_port} - return result_dict - # end src_min_max_ip_and_dst_max_port - - def create_traffic_profiles(self, topo_obj, config_topo): - - # Create traffic based on traffic profile defined in topology. - traffic_profiles = {} - count = 0 - num_ports_per_ip = 50000.00 - # forward flows = (total no. of flows / 2), so fwd_flow_factor = 2 - fwd_flow_factor = 2 - for profile, data in topo_obj.traffic_profile.items(): - src_min_ip = 0 - src_max_ip = 0 - dst_ip = 0 - pkt_cnt = 0 - dst_min_port = 5000 - dst_max_port = 55000 - count += 1 - profile = 'profile' + str(count) - src_vm = data['src_vm'] - src_vm_obj = None - dst_vm_obj = None - pkt_cnt = data['num_pkts'] - for proj in config_topo: - for vm in config_topo[proj]: - for vm_name in config_topo[proj][vm]: - if data['dst_vm'] == vm_name: - dst_ip = config_topo[proj][vm][vm_name].vm_ip - dst_vm_obj = config_topo[proj][vm][vm_name] - if src_vm == vm_name: - src_vm_obj = config_topo[proj][vm][vm_name] - - prefix = topo_obj.vm_static_route_master[src_vm] - ip_list = get_ip_list_from_prefix(prefix) - no_of_ip = int( - math.ceil( - (data['num_flows'] / - fwd_flow_factor) / - num_ports_per_ip)) - forward_flows = data['num_flows'] / fwd_flow_factor - result_dict = self.src_min_max_ip_and_dst_max_port( - ip_list, no_of_ip, dst_min_port, forward_flows) - if int(no_of_ip) == 1: - # Use the src VM IP to create the flows no need of static IP's - # that have been provisioned to the VM route table. 
- traffic_profiles[profile] = [src_vm_obj, - src_vm_obj.vm_ip, # src_ip_min - src_vm_obj.vm_ip, # src_ip_max - dst_ip, # dest_vm_ip - dst_min_port, # dest_port_min - # dest_port_max - result_dict['dst_max_port'], - data['num_pkts'], dst_vm_obj] - else: - # Use thestatic IP's that have been provisioned to the VM route - # table as src IP range. - traffic_profiles[profile] = [src_vm_obj, - # src_ip_min - result_dict['src_min_ip'], - # src_ip_max - result_dict['src_max_ip'], - dst_ip, # dest_vm_ip - dst_min_port, # dest_port_min - # dest_port_max - result_dict['dst_max_port'], - data['num_pkts'], dst_vm_obj] - return traffic_profiles - - # end create_traffic_profiles - - def start_traffic( - self, - vm, - src_min_ip='', - src_max_ip='', - dest_ip='', - dest_min_port='', - dest_max_port='', - pkt_cnt=''): - """ This routine is for generation of UDP flows using pktgen. Only UDP packets are generated using this routine. - """ - self.logger.info("Sending traffic...") - try: - cmd = '~/flow_test_pktgen.sh %s %s %s %s %s %s' % ( - src_min_ip, src_max_ip, dest_ip, dest_min_port, dest_max_port, pkt_cnt) - self.logger.info("Traffic cmd: %s" % (cmd)) - vm.run_cmd_on_vm(cmds=[cmd], as_sudo=True) - except Exception as e: - self.logger.exception("Got exception at start_traffic as %s" % (e)) - # end start_traffic - - def generate_udp_flows_and_do_verification( - self, - traffic_profile, - build_version): - """ Routine to generate UDP flows by calling the start_traffic routine in a thread and do parallel verification of - flow setup rate. - @inputs : - traffic_profile - a list of traffic generation parameters as explained in test_flow_single_project and test_flow_multi_project routines. - build_version - os_version, release_version and build_version for logging purposes. 
- """ - for cmp_node in self.inputs.compute_ips: - comp_node_fixt = self.useFixture(ComputeNodeFixture( - self.connections, cmp_node)) - flows_now = comp_node_fixt.get_vrouter_flow_count() - for action, count in flows_now.iteritems(): - # Any flows set by previous traffic tests should have retired - # by now.. - if int(count) > 1000: - self.logger.error( - "unexpected flow count of %s with action as %s" % - (count, action)) - return False - - Shost = socket.gethostbyaddr(traffic_profile[0].vm_node_ip) - Dhost = socket.gethostbyaddr(traffic_profile[7].vm_node_ip) - self.logger.info( - "Src_VM = %s, Src_IP_Range = %s to %s, Dest_VM = %s, Dest_IP = %s, Src_VN = %s, Dest_VN = %s," - " Port_Range = %s to %s, Src_Node = %s, Dst_Node = %s." % - (traffic_profile[0].vm_name, - traffic_profile[1], - traffic_profile[2], - traffic_profile[7].vm_name, - traffic_profile[3], - traffic_profile[0].vn_name, - traffic_profile[7].vn_name, - traffic_profile[4], - traffic_profile[5], - Shost[0], - Dhost[0])) - - th = threading.Thread( - target=self.start_traffic, args=( - traffic_profile[0], traffic_profile[1], traffic_profile[2], - traffic_profile[3], traffic_profile[ - 4], traffic_profile[ - 5], - traffic_profile[6])) - th.start() - - # - # Flow setup rate calculation. - NoOfFlows = [] - FlowRatePerInterval = [] - AverageFlowSetupRate = 0 - default_setup_rate = 7000 # A default value of 7K flows per second. - src_vm_obj = traffic_profile[0] - dst_vm_obj = traffic_profile[7] - - # - # Decide the test is for NAT Flow or Policy Flow. - PolNatSI = 'NONE' - srcFIP = src_vm_obj.chk_vmi_for_fip(src_vm_obj.vn_fq_name) - dstFIP = dst_vm_obj.chk_vmi_for_fip(dst_vm_obj.vn_fq_name) - if srcFIP is None: - if dstFIP is None: - PolNatSI = 'Policy Flow' - else: - PolNatSI = 'NAT Flow' - - # - # Get or calculate the sleep_interval/wait time before getting the no of flows in vrouter for each release based - # on a file defining a release to average flow setup rate mapping. 
The threshold defined in the file is for Policy Flows, - # so NAT flow is calculated at 70% of the average flow setup rate - # defined. - RelVer = build_version.split('-')[1] - import ReleaseToFlowSetupRateMapping - #from ReleaseToFlowSetupRateMapping import * - try: - DefinedSetupRate = ReleaseToFlowSetupRateMapping.expected_flow_setup_rate['policy'][RelVer] - except KeyError: - # A default value of 7K flows per second is set. - DefinedSetupRate = default_setup_rate - - # - # Set Expected NAT Flow Rate - if PolNatSI == 'NAT Flow': - DefinedSetupRate = ReleaseToFlowSetupRateMapping.expected_flow_setup_rate['nat'][RelVer] - # - # The flow setup rate is calculated based on setup time required for first 100K flows. So TotalFlows is set to 100K and 5 - # samples (NoOfIterations) are taken within the time required to setup 100K flows. The time interval (sleep_interval) is - # calculated based on DefinedSetupRate for the particular release - # version. - TotalFlows = 100000 - NoOfIterations = 5 - sleep_interval = (float(TotalFlows) / float(DefinedSetupRate)) / \ - float(NoOfIterations) - - # For scaled flows & low profile VM, it takes time for VM/tool to start sending packets... - #self.logger.info("Sleeping for 20 sec, for VM to start sending packets.") - #time.sleep(20) - # - # After each sleep_interval we get the number of active forward or nat flows setup on the vrouter which is repeated for - # NoOfIterations times. and the average is calculated in each - # iteration. 
- for ind in range(NoOfIterations): - time.sleep(sleep_interval) - flows_now = flow_test_utils.vm_vrouter_flow_count(src_vm_obj) - NoOfFlows.append(flows_now) - if ind == 0: - FlowRatePerInterval.append(NoOfFlows[ind]) - AverageFlowSetupRate = FlowRatePerInterval[ind] - elif ind > 0: - FlowRatePerInterval.append(NoOfFlows[ind] - NoOfFlows[ind - 1]) - AverageFlowSetupRate = ( - AverageFlowSetupRate + FlowRatePerInterval[ind]) / 2 - self.logger.info("Flows setup in last %s sec = %s" % - (sleep_interval, FlowRatePerInterval[ind])) - self.logger.info( - "Average flow setup rate per %s sec till this iteration = %s" % - (sleep_interval, AverageFlowSetupRate)) - self.logger.info("Flow samples so far: %s" % (NoOfFlows)) - self.logger.info(" ") - if flows_now > 90000: - self.logger.info("Flows setup so far: %s" % (flows_now)) - self.logger.info("Close to 100k flows setup, no need to wait") - break - - # @setup rate of 9000 flows per sec, 30*9000=270k flows can be setup - # with ~10s over with above loop, wait for another 20s - # self.logger.info("Sleeping for 20 sec, for all the flows to be setup.") - # time.sleep(20) - # Calculate the flow setup rate per second = average flow setup in - # sleep interval over the above iterations / sleep interval. - AverageFlowSetupRate = int(AverageFlowSetupRate / sleep_interval) - self.logger.info("Flow setup rate seen in this test is = %s" % - (AverageFlowSetupRate)) - if (AverageFlowSetupRate < (0.9 * DefinedSetupRate)): - self.logger.warn( - "Flow setup rate seen in this test fell below 90 percent of the defined flow setup rate for this release - %s." % - (DefinedSetupRate)) - else: - self.logger.info( - "Flow setup rate seen in this test is close to or above the defined flow setup rate for this release - %s." % - (DefinedSetupRate)) - - # write to a file to do record keeping of the flow rate on a particular - # node. 
- ts = time.time() - mtime = datetime.datetime.fromtimestamp( - ts).strftime('%Y-%m-%d %H:%M:%S') - - fh = open("Flow_Test_Data.xls", "a") - localflow = 'Remote Flow' - # Check if it's a remote or local flow to log the data accordingly. - if Shost[0] == Dhost[0]: - localflow = 'Local Flow' - # if source and destination VN are same then it's not a NAT/Policy flow - # else it is a NAT/Policy flow and needs to be logged accordingly. - if src_vm_obj.vn_name == dst_vm_obj.vn_name: - mystr = "%s\t%s\t%s\t%s\t%s\n" % ( - build_version, mtime, Shost[0], AverageFlowSetupRate, localflow) - else: - mystr = "%s\t%s\t%s\t%s\t%s\t%s\n" % ( - build_version, mtime, Shost[0], AverageFlowSetupRate, localflow, PolNatSI) - - fh.write(mystr) - fh.close() - - self.logger.info("Joining thread") - th.join() - - # - # Fail the test if the actual flow setup rate is < 70% of the defined - # flow setup rate for the release. - if (AverageFlowSetupRate < (0.6 * DefinedSetupRate)): - self.logger.error( - "The Flow setup rate seen in this test is below 70% of the defined (expected) flow setup rate for this release.") - self.logger.error( - "The Actual Flow setup rate = %s and the Defined Flow setup rate = %s." % - (AverageFlowSetupRate, DefinedSetupRate)) - self.logger.error( - "This clearly indicates there is something wrong here and thus the test will execute no further test cases.") - self.logger.error("Exiting Now!!!") - return False - - return True - # end generate_udp_flows_and_do_verification - - def generate_udp_flows(self, traffic_profile, build_version): - """ Routine to generate UDP flows by calling the start_traffic routine in a thread .. - @inputs : - traffic_profile - a list of traffic generation parameters as explained in test_flow_single_project and test_flow_multi_project routines. 
- """ - Shost = socket.gethostbyaddr(traffic_profile[0].vm_node_ip) - Dhost = socket.gethostbyaddr(traffic_profile[7].vm_node_ip) - self.logger.info( - "Src_VM = %s, Src_IP_Range = %s to %s, Dest_VM = %s, Dest_IP = %s, Src_VN = %s, Dest_VN = %s," - " Port_Range = %s to %s, Src_Node = %s, Dst_Node = %s." % - (traffic_profile[0].vm_name, - traffic_profile[1], - traffic_profile[2], - traffic_profile[7].vm_name, - traffic_profile[3], - traffic_profile[0].vn_name, - traffic_profile[7].vn_name, - traffic_profile[4], - traffic_profile[5], - Shost[0], - Dhost[0])) - - th = threading.Thread( - target=self.start_traffic, args=( - traffic_profile[0], traffic_profile[1], traffic_profile[2], - traffic_profile[3], traffic_profile[ - 4], traffic_profile[ - 5], - traffic_profile[6])) - th.start() - - # single project topo, retrieve topo obj for the project - # Need to specify the project which has mirror service instance.. - proj_topo = self.topo.values()[0] - for idx, si in enumerate(proj_topo.si_list): - self.logger.info("Starting tcpdump in mirror instance %s" % (si)) - sessions = self.tcpdump_on_analyzer(si) - for svm_name, (session, pcap) in sessions.items(): - out, msg = self.verify_mirror(svm_name, session, pcap) - self.logger.info( - "Mirror check status in %s is %s" % - (svm_name, out)) - src_vm_obj = traffic_profile[0] - NoOfFlows = flow_test_utils.vm_vrouter_flow_count(src_vm_obj) - self.logger.info("No. 
of flows in source compute is %s" % (NoOfFlows)) - - self.logger.info("Joining thread") - th.join() - - # - # Fail the test if flows are not generated, use 5-tuple check and not - # vrouter flow count with flow -l - - return True - # end generate_udp_flows - - @preposttest_wrapper - def test_flow_single_project(self): - """Tests related to flow setup rate and flow table stability accross various triggers for verification - accross VN's within a single project""" - result = True - #self.agent_objs = {} - #self.set_flow_tear_time() - # - # Check if there are enough nodes i.e. atleast 2 compute nodes to run this test. - # else report that minimum 2 compute nodes are needed for this test and - # exit. - if len(self.inputs.compute_ips) < 2: - self.logger.warn( - "Minimum 2 compute nodes are needed for this test to run") - self.logger.warn( - "Exiting since this test can't be run on single compute node") - return True - # - # Get config for test from topology - topology_class_name = flow_test_topo.systest_topo_single_project - # mini topo for testing script - # topology_class_name = mini_flow_test_topo.systest_topo_single_project - self.logger.info( - "Scenario for the test used is: %s" % - (topology_class_name)) - - topo = topology_class_name( - compute_node_list=self.inputs.compute_ips) - # - # Test setup: Configure policy, VN, & VM - # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]} - # Returned topo is of following format: - # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': - # vm_fixture} - setup_obj = self.useFixture( - sdnTopoSetupFixture(self.connections, topo)) - out = setup_obj.sdn_topo_setup() - assertEqual(out['result'], True, out['msg']) - if out['result']: - topo, config_topo = out['data'][0], out['data'][1] - proj = list(topo.keys())[0] - - # Get the vrouter build version for logging purposes. 
- BuildTag = get_OS_Release_BuildVersion(self) - - # Create traffic profile with all details like IP addresses, port - # numbers and no of flows, from the profile defined in the topology. - traffic_profiles = self.create_traffic_profiles( - topo[proj], - config_topo) - - self.topo, self.config_topo = topo, config_topo - for each_profile in traffic_profiles: - result = self.generate_udp_flows_and_do_verification( - traffic_profiles[each_profile], str(BuildTag)) - # verify_system_parameters(self, out) - self.delete_agent_flows() - if not result: - False - - return True - - # end test_flow_single_project - - @preposttest_wrapper - def test_system_single_project(self): - """Basic systest with single project with many features & traffic.. - """ - result = True - #self.agent_objs = {} - #self.set_flow_tear_time() - # - # Check if there are enough nodes i.e. atleast 2 compute nodes to run this test. - # else report that minimum 2 compute nodes are needed for this test and - # exit. - if len(self.inputs.compute_ips) < 2: - self.logger.warn( - "Minimum 2 compute nodes are needed for this test to run") - self.logger.warn( - "Exiting since this test can't be run on single compute node") - return True - # - # Get config for test from topology - topology_class_name = system_test_topo.systest_topo_single_project - # For testing script, use mini topology - # topology_class_name = - # mini_system_test_topo.systest_topo_single_project - self.logger.info( - "Scenario for the test used is: %s" % - (topology_class_name)) - - topo = topology_class_name( - compute_node_list=self.inputs.compute_ips) - # - # Test setup: Configure policy, VN, & VM - # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]} - # Returned topo is of following format: - # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': - # vm_fixture} - setup_obj = self.useFixture( - sdnTopoSetupFixture(self.connections, topo)) - out = setup_obj.sdn_topo_setup() - assertEqual(out['result'], 
True, out['msg']) - if out['result']: - topo, config_topo = out['data'][0], out['data'][1] - proj = list(topo.keys())[0] - - # Get the vrouter build version for logging purposes. - BuildTag = get_OS_Release_BuildVersion(self) - - # Create traffic profile with all details like IP addresses, port - # numbers and no of flows, from the profile defined in the topology. - traffic_profiles = self.create_traffic_profiles( - topo[proj], - config_topo) - - self.topo, self.config_topo = topo, config_topo - for each_profile in traffic_profiles: - result = self.generate_udp_flows( - traffic_profiles[each_profile], str(BuildTag)) - #verify_system_parameters(self, out) - self.delete_agent_flows() - if not result: - return False - - return True - - # end test_system_single_project - - @preposttest_wrapper - def test_flow_multi_projects(self): - """Tests related to flow setup rate and flow table stability accross various triggers for verification - accross VN's and accross multiple projects""" - result = True - self.comp_node_fixt = {} - for cmp_node in self.inputs.compute_ips: - self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture( - self.connections, cmp_node)) - # - # Check if there are enough nodes i.e. atleast 2 compute nodes to run this test. - # else report that minimum 2 compute nodes are needed for this test and - # exit. - if len(self.inputs.compute_ips) < 2: - self.logger.warn( - "Minimum 2 compute nodes are needed for this test to run") - self.logger.warn( - "Exiting since this test can't be run on single compute node") - return True - # - # Get config for test from topology - msg = [] - topology_class_name = sdn_flow_test_topo_multiple_projects.multi_project_topo - - self.logger.info("Scenario for the test used is: %s" % - (topology_class_name)) - # - # Create a list of compute node IP's and pass it to topo if you want to pin - # a vm to a particular node - topo = topology_class_name( - compute_node_list=self.inputs.compute_ips) - # - # 1. 
Test setup: Configure policy, VN, & VM - # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]} - # Returned topo is of following format: - # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': - # vm_fixture} - setup_obj = self.useFixture( - sdnTopoSetupFixture(self.connections, topo)) - out = setup_obj.sdn_topo_setup() - assertEqual(out['result'], True, out['msg']) - self.topo, self.config_topo = out['data'][0], out['data'][1] - self.proj = list(self.topo.keys())[0] - # 2. Start Traffic - for profile, details in self.topo[self.proj].traffic_profile.items(): - self.logger.info("Profile under test: %s, details: %s" %(profile, details)) - self.src_vm = details['src_vm'] - self.dst_vm = details['dst_vm'] - self.src_proj = details['src_proj'] - self.dst_proj = details['dst_proj'] - # Not flow scaling test, limit num_flows to low number.. - num_flows = 15000 - self.generated_flows = 2*num_flows - self.flow_gen_rate = 1000 - src_vm_fixture = self.config_topo[self.src_proj]['vm'][self.src_vm] - src_vm_vn = src_vm_fixture.vn_names[0] - src_vm_vn_fix = self.config_topo[self.src_proj]['vn'][src_vm_vn] - dst_vm_fixture = self.config_topo[self.dst_proj]['vm'][self.dst_vm] - self.proto = 'udp' - self.cmp_node = src_vm_fixture.vm_node_ip - self.comp_node_fixt[self.cmp_node].get_config_per_vm_flow_limit() - self.comp_node_fixt[self.cmp_node].get_config_flow_aging_time() - self.max_vm_flows = self.comp_node_fixt[self.cmp_node].max_vm_flows - self.flow_cache_timeout = self.comp_node_fixt[self.cmp_node].flow_cache_timeout - self.traffic_obj = self.useFixture( - traffic_tests.trafficTestFixture(self.connections)) - # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None, - # stream_proto= 'udp', start_sport= 8000, - # total_single_instance_streams= 20): - startStatus = self.traffic_obj.startTraffic( - total_single_instance_streams=num_flows, - pps=self.flow_gen_rate, - start_sport=1000, - cfg_profile='ContinuousSportRange', - 
tx_vm_fixture=src_vm_fixture, - rx_vm_fixture=dst_vm_fixture, - stream_proto=self.proto) - msg1 = "Status of start traffic : %s, %s, %s" % ( - self.proto, src_vm_fixture.vm_ip, startStatus['status']) - self.logger.info(msg1) - assert startStatus['status'], msg1 - # 3. Poll live traffic & verify VM flow count - self.verify_node_flow_setup() - # 4. Stop Traffic - self.logger.info("Proceed to stop traffic..") - self.traffic_obj.stopTraffic(wait_for_stop=False) - start_time = time.time() - # 5. Verify flow ageing - self.logger.info( - "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing") - sleep(self.flow_cache_timeout) - while True: - begin_flow_count = self.comp_node_fixt[ - self.cmp_node].get_vrouter_matching_flow_count( - self.flow_data) - self.logger.debug('begin_flow_count: %s' %(begin_flow_count)) - if begin_flow_count['all'] == 0: - break - flow_teardown_time = math.ceil(flow_test_utils.get_max_flow_removal_time(begin_flow_count['all'], self.flow_cache_timeout)) - # flow_teardown_time is not the actual time to remove flows - # Based on flow_count at this time, teardown_time is calculated to the value - # which will vary with agent's poll, which is done at regular intervals.. 
- self.logger.info('Sleeping for %s secs' %(flow_teardown_time)) - sleep(flow_teardown_time) - # at the end of wait, actual_flows should be atleast < 50% of total flows before start of teardown - current_flow_count = self.comp_node_fixt[ - self.cmp_node].get_vrouter_matching_flow_count( - self.flow_data) - self.logger.debug('current_flow_count: %s' %(current_flow_count)) - if current_flow_count['all'] > (0.5*begin_flow_count['all']): - msg = ['Flow removal not happening as expected in node %s' %self.cmp_node] - msg.append('Flow count before wait: %s, after wait of %s secs, its: %s' % - (begin_flow_count['all'], flow_teardown_time, current_flow_count['all'])) - assert False, msg - if current_flow_count['all'] < (0.1*begin_flow_count['all']): - break - # end of while loop - elapsed_time = time.time() - start_time - self.logger.info( - "Flows aged out as expected in configured flow_cache_timeout") - # end of profile for loop - return True - # end test_flow_multi_projects - - def reset_vm_flow_limit_and_stop_traffic(self): - self.max_vm_flows = 100 - self.comp_node_fixt[ - self.cmp_node].set_per_vm_flow_limit( - self.max_vm_flows) - self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart() - self.traffic_obj.stopTraffic(wait_for_stop=False) - - def verify_node_flow_setup(self): - '''Written for agent params test to test flow setup data - generated_flows: generated by tool - allowed_flows: set by max_vm_flows value in agent conf - expected_flows: expected non FlowLimited flows based on above 2 values - ''' - self.flow_data = flow_test_utils.get_flow_data( - self.config_topo, - self.src_vm, - self.dst_vm, - self.proto, - self.src_proj, - self.dst_proj) - self.logger.info( - "Received flow_data for checking: %s" % - self.flow_data) - self.comp_node_fixt[ - self.cmp_node].get_vrouter_flow_count() - # keep generated flow info for processing flow removal - allowed_flows = int( - float(self.max_system_flows) * (float(self.max_vm_flows) / 100)) - self.logger.info( - "In 
node %s, allowed_flows is set to %s" % - (self.cmp_node, allowed_flows)) - if self.generated_flows < allowed_flows: - expected_flows = self.generated_flows - # assert if flows_beyond_limit found, as we don't expect to see - flow_limit_assert = False - else: - expected_flows = allowed_flows - # don't assert if flows_beyond_limit found, as we expect to see - flow_limit_assert = True - node_flow_data = self.comp_node_fixt[ - self.cmp_node].get_vrouter_matching_flow_count(self.flow_data) - actual_flows = int(node_flow_data['allowed']) - retries = 0 - retry_wait_time = 2 - max_retries = math.ceil(self.generated_flows / self.flow_gen_rate) - while retries < max_retries and actual_flows < expected_flows: - self.logger.info( - "Wait for flows to be setup completely, flows so far: %s, expected: %s" % - (actual_flows, expected_flows)) - sleep(retry_wait_time) - retries += 1 - node_flow_data = self.comp_node_fixt[ - self.cmp_node].get_vrouter_matching_flow_count(self.flow_data) - actual_flows = int(node_flow_data['allowed']) - diff_flows = actual_flows - expected_flows - # allow 5% diff on upper side.. - allowed_upper_threshold = expected_flows * 0.05 - if diff_flows > allowed_upper_threshold: - msg = "Seeing more flows in system than expected - node: %s, expected_flows: %s, actual_flows: %s" % ( - self.cmp_node, expected_flows, actual_flows) - self.reset_vm_flow_limit_and_stop_traffic() - assert False, msg - # allow 5% diff on lower side.. - allowed_lower_threshold = expected_flows * 0.05 - if diff_flows < -allowed_lower_threshold: - msg = "Don't see expected flows in node %s, expected_flows: %s, actual_flows: %s" % ( - self.cmp_node, expected_flows, actual_flows) - self.reset_vm_flow_limit_and_stop_traffic() - assert False, msg - else: - self.logger.info( - "Flow count good as configured.., expected %s, actual %s" % - (expected_flows, actual_flows)) - # If generated_flows < allowed_flows, flows_beyond_limit is not - # expected.. 
- flows_beyond_limit = int(node_flow_data['dropped_by_limit']) - if flows_beyond_limit > 0: - msg = "Seeing dropped flows due to FlowLimit in system than expected - node: %s, flows_beyond_limit: %s, allowed_flows: %s" % ( - self.cmp_node, flows_beyond_limit, allowed_flows) - assert flow_limit_assert, msg - else: - self.logger.info( - "Dont see Flow Limited Dropped flows as expected, flow limited drop count is %s.." % - flows_beyond_limit) - - @preposttest_wrapper - def test_agent_flow_settings(self): - """Basic systest with single project with many features & traffic.. - """ - # - # Check if there are enough nodes i.e. atleast 2 compute nodes to run this test. - # else report that minimum 2 compute nodes are needed for this test and - # exit. - if len(self.inputs.compute_ips) < 2: - self.logger.warn( - "Minimum 2 compute nodes are needed for this test to run") - self.logger.warn( - "Exiting since this test can't be run on single compute node") - return True - # - # Get config for test from topology - # import mini_flow_test_topo - # topology_class_name = mini_flow_test_topo.systest_topo_single_project - topology_class_name = flow_test_topo.systest_topo_single_project - self.logger.info( - "Scenario for the test used is: %s" % - (topology_class_name)) - - topo = topology_class_name( - compute_node_list=self.inputs.compute_ips) - # - # 1. Test setup: Configure policy, VN, & VM - # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]} - # Returned topo is of following format: - # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': - # vm_fixture} - setup_obj = self.useFixture( - sdnTopoSetupFixture(self.connections, topo)) - out = setup_obj.sdn_topo_setup() - assertEqual(out['result'], True, out['msg']) - if out['result']: - config_topo = out['data'][1] - self.proj = list(config_topo.keys())[0] - self.topo, self.config_topo = topo, config_topo - - # 2. 
set agent flow_cache_timeout to 60s - # set max_vm_flows to 1% of 500k, comes to 5000 - self.comp_node_fixt = {} - self.flow_cache_timeout = 60 - for cmp_node in self.inputs.compute_ips: - self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture( - self.connections, cmp_node)) - self.comp_node_fixt[cmp_node].set_flow_aging_time( - self.flow_cache_timeout) - self.comp_node_fixt[cmp_node].sup_vrouter_process_restart() - - # 3. Start Traffic - for profile, details in self.topo.traffic_profile.items(): - self.logger.info("Profile under test: %s, details: %s" %(profile, details)) - self.src_vm = details['src_vm'] - self.dst_vm = details['dst_vm'] - self.src_proj = self.proj - self.dst_proj = self.proj - # Set num_flows to fixed, smaller value but > 1% of - # system max flows - num_flows = 5555 - self.generated_flows = 2*num_flows - self.flow_gen_rate = 1000 - src_vm_fixture = self.config_topo[self.proj]['vm'][self.src_vm] - src_vm_vn = src_vm_fixture.vn_names[0] - src_vm_vn_fix = self.config_topo[self.proj]['vn'][src_vm_vn] - dst_vm_fixture = self.config_topo[self.proj]['vm'][self.dst_vm] - self.proto = 'udp' - self.cmp_node = src_vm_fixture.vm_node_ip - # 3a. 
Set max_vm_flows to 1% in TX VM node - self.max_vm_flows = 1 - self.comp_node_fixt[ - self.cmp_node].set_per_vm_flow_limit( - self.max_vm_flows) - self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart() - self.logger.info( - "Wait for 2s for flow setup to start after service restart") - sleep(2) - flow_test_utils.update_vm_mdata_ip(self.cmp_node, self) - self.traffic_obj = self.useFixture( - traffic_tests.trafficTestFixture(self.connections)) - # def startTraffic (tx_vm_fixture= None, rx_vm_fixture= None, - # stream_proto= 'udp', start_sport= 8000, - # total_single_instance_streams= 20): - startStatus = self.traffic_obj.startTraffic( - total_single_instance_streams=num_flows, - pps=self.flow_gen_rate, - start_sport=1000, - cfg_profile='ContinuousSportRange', - tx_vm_fixture=src_vm_fixture, - rx_vm_fixture=dst_vm_fixture, - stream_proto=self.proto) - msg1 = "Status of start traffic : %s, %s, %s" % ( - self.proto, src_vm_fixture.vm_ip, startStatus['status']) - self.logger.info(msg1) - assert startStatus['status'], msg1 - # 4. Poll live traffic & verify VM flow count - self.verify_node_flow_setup() - # 5. Increase max_vm_flows to 50% in TX VM node - self.max_vm_flows = 50 - self.comp_node_fixt[ - self.cmp_node].set_per_vm_flow_limit( - self.max_vm_flows) - self.comp_node_fixt[self.cmp_node].sup_vrouter_process_restart() - self.logger.info( - "Wait for 2s for flow setup to start after service restart") - sleep(2) - # 6. Poll live traffic - self.verify_node_flow_setup() - # 7. Stop Traffic - self.logger.info("Proceed to stop traffic..") - self.traffic_obj.stopTraffic(wait_for_stop=False) - start_time = time.time() - # 8. 
Verify flow ageing - self.logger.info( - "With traffic stopped, wait for flow_cache_timeout to trigger flow ageing") - sleep(self.flow_cache_timeout) - retries = 0 - retry_wait_time = 10 - flow_teardown_time = math.ceil(flow_test_utils.get_max_flow_removal_time(self.generated_flows, self.flow_cache_timeout)) - self.logger.debug("flow tear down time based on calcualtion: %s" %flow_teardown_time) - max_retries = math.ceil(self.flow_cache_timeout / retry_wait_time) - while retries < max_retries: - actual_flows = self.comp_node_fixt[ - self.cmp_node].get_vrouter_matching_flow_count( - self.flow_data) - actual_flows = int(actual_flows['all']) - if actual_flows > 10: - self.logger.info("Waiting for flows to age out") - sleep(retry_wait_time) - retries += 1 - else: - break - elapsed_time = time.time() - start_time - if actual_flows > 50: - msg = "Expected flows to age-out as configured, Seeing flows still active after elapsed time %s in node: %s, actual_flows: %s" % ( - elapsed_time, self.cmp_node, actual_flows) - assert False, msg - else: - self.logger.info( - "Flows aged out as expected in configured flow_cache_timeout") - self.logger.info( - "elapsed_time after stopping traffic is %s, flow_count is %s" % - (elapsed_time, actual_flows)) - # end of profile for loop - # end of test_agent_flow_settings -# end SDNFlowTests diff --git a/common/ecmp/__init__.py b/serial_scripts/forwarding_mode/__init__.py similarity index 100% rename from common/ecmp/__init__.py rename to serial_scripts/forwarding_mode/__init__.py diff --git a/serial_scripts/forwarding_mode/base.py b/serial_scripts/forwarding_mode/base.py new file mode 100644 index 000000000..298109d6e --- /dev/null +++ b/serial_scripts/forwarding_mode/base.py @@ -0,0 +1,60 @@ +import test +from common.connections import ContrailConnections +from common import isolated_creds +from vm_test import VMFixture +from vn_test import VNFixture + + +class BaseForwardingMode(test.BaseTestCase): + + @classmethod + def setUpClass(cls): + 
super(BaseForwardingMode, cls).setUpClass() + cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ + cls.inputs, ini_file = cls.ini_file, \ + logger = cls.logger) + cls.isolated_creds.setUp() + cls.project = cls.isolated_creds.create_tenant() + cls.isolated_creds.create_and_attach_user_to_tenant() + cls.inputs = cls.isolated_creds.get_inputs() + cls.inputs.set_af('v4') + cls.connections = cls.isolated_creds.get_conections() + cls.orch = cls.connections.orch + cls.quantum_h= cls.connections.quantum_h + cls.nova_h = cls.connections.nova_h + cls.vnc_lib_fixture=cls.connections.vnc_lib_fixture + cls.vnc_lib= cls.connections.vnc_lib + cls.agent_inspect= cls.connections.agent_inspect + cls.cn_inspect= cls.connections.cn_inspect + cls.analytics_obj=cls.connections.analytics_obj + cls.api_s_inspect = cls.connections.api_server_inspect + cls.gl_forwarding_mode = None + + #end setUpClass + + @classmethod + def tearDownClass(cls): + cls.isolated_creds.delete_tenant() + super(BaseForwardingMode, cls).tearDownClass() + cls.vnc_lib_fixture.set_global_forwarding_mode(None) + #end tearDownClass + + def create_vn(self, *args, **kwargs): + return self.useFixture( + VNFixture(project_name=self.inputs.project_name, + connections=self.connections, + inputs=self.inputs, + *args, **kwargs + )) + + def create_vm(self, vn_fixture, image_name='ubuntu', *args, **kwargs): + return self.useFixture( + VMFixture( + project_name=self.inputs.project_name, + connections=self.connections, + vn_obj=vn_fixture.obj, + image_name=image_name, + *args, **kwargs + )) + + diff --git a/serial_scripts/forwarding_mode/test_forwarding_mode.py b/serial_scripts/forwarding_mode/test_forwarding_mode.py new file mode 100644 index 000000000..e7f68977e --- /dev/null +++ b/serial_scripts/forwarding_mode/test_forwarding_mode.py @@ -0,0 +1,94 @@ +from vn_test import * +from vm_test import * +from vnc_api_test import * +from tcutils.wrappers import preposttest_wrapper +from base import BaseForwardingMode 
+from common import isolated_creds +import time +import test + +class TestForwardingMode(BaseForwardingMode): + + @classmethod + def setUpClass(cls): + super(TestForwardingMode, cls).setUpClass() + + + @classmethod + def tearDownClass(cls): + super(TestForwardingMode, cls).tearDownClass() + + @preposttest_wrapper + def test_forwarding_mode_l2(self): + '''Test to check traffic between VM's when forwarding_mode set to L2''' + return self.setup_commmon_objects(vn_name='vn_l2',vm_name1='vm1_l2',vm_name2='vm2_l2',forwarding_mode='l2') + + @preposttest_wrapper + def test_forwarding_mode_l3(self): + '''Test to check traffic between VM's when forwarding_mode set to L3''' + return self.setup_commmon_objects(vn_name='vn_l3',vm_name1='vm1_l3',vm_name2='vm2_l3',forwarding_mode='l3') + + @preposttest_wrapper + def test_forwarding_mode_l2_l3(self): + '''Test to check traffic between VM's when forwarding_mode set to L2_L3''' + return self.setup_commmon_objects(vn_name='vn_l2_l3',vm_name1='vm1_l2_l3',vm_name2='vm2_l2_l3',forwarding_mode='l2_l3') + + @preposttest_wrapper + def test_forwarding_mode_global_l2(self): + '''Test to check traffic between VM's when global forwarding_mode set to L2''' + self.gl_forwarding_mode='l2' + self.vnc_lib_fixture.set_global_forwarding_mode(self.gl_forwarding_mode) + return self.setup_commmon_objects(vn_name='vn_global_l2',vm_name1='vm1_global_l2',vm_name2='vm2_global_l2',forwarding_mode=None) + + @preposttest_wrapper + def test_forwarding_mode_global_l3(self): + '''Test to check traffic between VM's when global forwarding_mode set to L3''' + self.gl_forwarding_mode='l3' + self.vnc_lib_fixture.set_global_forwarding_mode(self.gl_forwarding_mode) + return self.setup_commmon_objects(vn_name='vn_global_l3',vm_name1='vm1_global_l3',vm_name2='vm2_global_l3',forwarding_mode=None) + + @preposttest_wrapper + def test_forwarding_mode_global_l2_l3(self): + '''Test to check traffic between VM's when global forwarding_mode set to L2_L3''' + 
self.gl_forwarding_mode='l2_l3' + self.vnc_lib_fixture.set_global_forwarding_mode(self.gl_forwarding_mode) + return self.setup_commmon_objects(vn_name='vn_global_l2_l3',vm_name1='vm1_global_l2_l3',vm_name2='vm2_global_l2_l3',forwarding_mode=None) + + def setup_commmon_objects(self,vn_name,vm_name1,vm_name2,forwarding_mode): + vn_fixture = self.create_vn(vn_name=vn_name,forwarding_mode=forwarding_mode) + assert vn_fixture.verify_on_setup() + # Get all compute host + host_list = self.connections.orch.get_hosts() + vm1_fixture = self.create_vm(vn_fixture=vn_fixture,vm_name=get_random_name(vm_name1), + flavor='contrail_flavor_small', + image_name='ubuntu', + node_name=host_list[0]) + if len(host_list) > 1: + self.logger.info("Multi-Node Setup") + vm2_fixture = self.create_vm(vn_fixture=vn_fixture,vm_name=get_random_name(vm_name2), + flavor='contrail_flavor_small', + image_name='ubuntu', + node_name=host_list[1]) + else: + self.logger.info("Single-Node Setup") + vm2_fixture = self.create_vm(vn_fixture=vn_fixture,vm_name=get_random_name(vm_name2), + flavor='contrail_flavor_small', + image_name='ubuntu') + + if self.vnc_lib_fixture.get_active_forwarding_mode(vn_fixture.vn_fq_name) =='l2': + self.logger.info("sleeping until vm's comes up") + sleep(300) + vm1_fixture.wait_till_vm_is_up() + vm2_fixture.wait_till_vm_is_up() + vm1_fixture.verify_on_setup() + vm2_fixture.verify_on_setup() + + if self.vnc_lib_fixture.get_active_forwarding_mode(vn_fixture.vn_fq_name) =='l2': + self.logger.info("Skipping Ping Test between VM's as forwarding_mode is L2") + else: + assert vm1_fixture.ping_with_certainty(dst_vm_fixture=vm2_fixture),\ + "Ping from %s to %s failed" % (vm1_fixture.vm_name, vm2_fixture.vm_name) + assert vm2_fixture.ping_with_certainty(dst_vm_fixture=vm1_fixture),\ + "Ping from %s to %s failed" % (vm2_fixture.vm_name, vm1_fixture.vm_name) + return True + #end setup_common_objects \ No newline at end of file diff --git a/serial_scripts/ha/base.py 
b/serial_scripts/ha/base.py index 171302f97..3d66b3e41 100644 --- a/serial_scripts/ha/base.py +++ b/serial_scripts/ha/base.py @@ -5,7 +5,7 @@ import socket import random from fabric.state import connections as fab_connections -import test +import test_v1 import traffic_tests from common.contrail_test_init import * from common import isolated_creds @@ -13,37 +13,30 @@ from vm_test import * from floating_ip import * from tcutils.commands import * -sys.path.append(os.path.realpath('tcutils/pkgs/Traffic')) +from tcutils.util import get_random_name +trafficdir = os.path.join(os.path.dirname(__file__), '../../tcutils/pkgs/Traffic') +sys.path.append(trafficdir) from traffic.core.stream import Stream from traffic.core.profile import create, ContinuousProfile from traffic.core.helpers import Host from traffic.core.helpers import Sender, Receiver +from fabric.api import local -class HABaseTest(test.BaseTestCase): +class HABaseTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(HABaseTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() cls.nova_h = cls.connections.nova_h cls.orch = cls.connections.orch cls.vnc_lib_fixture = cls.connections.vnc_lib_fixture -# cls.logger= cls.inputs.logger cls.ipmi_list = cls.inputs.hosts_ipmi[0] #end setUpClass @classmethod def tearDownClass(cls): - cls.isolated_creds.delete_tenant() super(HABaseTest, cls).tearDownClass() - #end tearDownClass + #end tearDownClass def remove_from_cleanups(self, fix): self.remove_api_from_cleanups(fix.cleanUp) @@ -66,7 +59,7 @@ def reboot(self,ip): def cold_reboot(self,ip,option): ''' API to power clycle node for a given IP address ''' - if option != 'on': + 
if option != 'on': cmd = 'if ! grep -Rq "GRUB_RECORDFAIL_TIMEOUT" /etc/default/grub; then echo "GRUB_RECORDFAIL_TIMEOUT=10" >> /etc/default/grub; update-grub ; fi ;sed -i s/GRUB_CMDLINE_LINUX_DEFAULT.*/GRUB_CMDLINE_LINUX_DEFAULT=\"nomodeset\"/g /etc/default/grub ; update-grub;' self.logger.info('command executed %s' %cmd) self.inputs.run_cmd_on_server(ip, cmd) @@ -76,24 +69,12 @@ def cold_reboot(self,ip,option): cmd = 'echo "blacklist mei_me" > /etc/modprobe.d/mei_me.conf;' self.inputs.run_cmd_on_server(ip, cmd) cmd = 'if ! grep -Rq "mei_me" /etc/modprobe.d/blacklist.conf ; then echo "blacklist mei_me" >> /etc/modprobe.d/blacklist.conf; fi ;' - self.inputs.run_cmd_on_server(ip, cmd) + self.inputs.run_cmd_on_server(ip, cmd) ipmi_addr = self.get_ipmi_address(ip) - # ToDo: Use python based ipmi shutdown wrapper rather than ipmitool - test_ip = self.inputs.cfgm_ips[0] - cmd = 'wget http://us.archive.ubuntu.com/ubuntu/pool/universe/i/ipmitool/ipmitool_1.8.13-1ubuntu0.2_amd64.deb' - self.logger.info('command executed %s' %cmd) - self.inputs.run_cmd_on_server(test_ip,cmd) - cmd = 'dpkg -i /root/ipmitool_1.8.13-1ubuntu0.2_amd64.deb' - self.logger.info('command executed %s' %cmd) - self.inputs.run_cmd_on_server(test_ip,cmd) - cmd = 'rm -rf /root/ipmitool_1.8.13-1ubuntu0.2_amd64.deb' - self.logger.info('command executed %s' %cmd) - self.inputs.run_cmd_on_server(test_ip,cmd) - # TODO removed later , when support is there to execute test from test node. 
- cmd = '/usr/bin/ipmitool -H "%s" -U %s -P %s chassis power "%s"'%(ipmi_addr,self.inputs.ipmi_username,self.inputs.ipmi_password,option) + cmd = 'ipmitool -H "%s" -U %s -P %s chassis power "%s"'%(ipmi_addr,self.inputs.ipmi_username,self.inputs.ipmi_password,option) self.logger.info('command executed %s' %cmd) - self.inputs.run_cmd_on_server(test_ip,cmd) + local(cmd) # clear the fab connections sleep(20) self.connections.update_inspect_handles() @@ -119,7 +100,7 @@ def isolate_node(self,ctrl_ip,state): def get_ipmi_address(self,ip): ''' API to get IPMI address for a given IP address ''' self.ipmi_list = self.inputs.hosts_ipmi[0] - return self.ipmi_list[ip] + return self.ipmi_list[ip] def get_gw(self,routes): for route in routes: @@ -140,7 +121,7 @@ def get_mac(ip): def update_handles(self, hosts, service=None): ''' Updates the handles when a node is isolated or removed from list ''' - vip = self.inputs.vip['contrail'] + vip = self.inputs.contrail_internal_vip for host in hosts: if host in self.inputs.cfgm_ips: self.inputs.cfgm_ips[self.inputs.cfgm_ips.index(host)] = vip @@ -164,7 +145,7 @@ def update_handles(self, hosts, service=None): def reset_handles(self, hosts, service=None): ''' resetting cfgm_ip , bgp_ips , compute_ips required for ha testing during node failures ''' - vip = self.inputs.vip['contrail'] + vip = self.inputs.contrail_internal_vip for host in hosts: if vip in self.inputs.cfgm_ips: self.inputs.cfgm_ips[self.inputs.cfgm_ips.index(vip)] = host @@ -188,7 +169,7 @@ def reset_handles(self, hosts, service=None): def ha_start(self): ''' - ha_start will spawn VM's and starts traffic from + ha_start will spawn VM's and starts traffic from VM - VM , VM - floating IP. 
''' self.vn1_name='vn1000' @@ -200,7 +181,7 @@ def ha_start(self): self.fip_subnets = [self.inputs.fip_pool] self.vmlist = [] self.vm_fixture = [] - self.vm_num = 2 + self.vm_num = 2 self.jdaf_ip = '6.6.6.1' self.public_ip = '8.8.8.8' self.mx_rt = self.inputs.mx_rt @@ -208,23 +189,22 @@ def ha_start(self): self.sport = 39100 self.dport = 39200 self.proto_list = ['tcp','icmp'] - self.fip = "" + self.fip = "" self.count = "" - self.sender = {} - self.sender_fip = {} - self.receiver = {} - self.send_node = {} - self.send_fip_node = {} - self.recv_node = {} - self.send_fip_host = {} - self.recv_host = {} - self.send_host = {} + self.sender = {} + self.sender_fip = {} + self.receiver = {} + self.send_node = {} + self.send_fip_node = {} + self.recv_node = {} + self.send_fip_host = {} + self.recv_host = {} + self.send_host = {} # self.host_list= self.connections.nova_h.get_hosts() self.host_list = self.connections.orch.get_hosts() for i in range(0,self.vm_num): - val = random.randint(1,100000) - self.vmlist.append("vm-test"+str(val)) + self.vmlist.append(get_random_name("vm-test")) # ping gateway from VM's if self.inputs.orchestrator =='vcenter': @@ -246,10 +226,10 @@ def ha_start(self): for i in range(0,self.vm_num): node_indx = (i % host_cnt) if self.inputs.orchestrator =='vcenter': - self.vm_fixture.append(self.useFixture(VMFixture(project_name= self.inputs.project_name, connections= self.connections, vn_objs = [ self.vn1_fixture.obj ], vm_name= self.vmlist[i],flavor='contrail_flavor_large',image_name='ubuntu-traffic',node_name=self.host_list[node_indx]))) + self.vm_fixture.append(self.useFixture(VMFixture(project_name= self.inputs.project_name, connections= self.connections, vn_objs = [ self.vn1_fixture.obj ], vm_name= self.vmlist[i],image_name='ubuntu-traffic',node_name=self.host_list[node_indx]))) else: - self.vm_fixture.append(self.useFixture(VMFixture(project_name= self.inputs.project_name, connections= self.connections, vn_objs = [ 
self.vn1_fixture.obj,self.vn2_fixture.obj ], vm_name= self.vmlist[i],flavor='contrail_flavor_large',image_name='ubuntu-traffic',node_name=self.host_list[node_indx]))) + self.vm_fixture.append(self.useFixture(VMFixture(project_name= self.inputs.project_name, connections= self.connections, vn_objs = [ self.vn1_fixture.obj,self.vn2_fixture.obj ], vm_name= self.vmlist[i],image_name='ubuntu-traffic',node_name=self.host_list[node_indx]))) # self.vm_fixture.append(self.useFixture(VMFixture(project_name= self.inputs.project_name, connections= self.connections, vn_objs = [ self.vn1_fixture.obj ], vm_name= self.vmlist[i],flavor='contrail_flavor_large',image_name='ubuntu-traffic',node_name=self.host_list[node_indx]))) for i in range(0,self.vm_num): @@ -275,14 +255,14 @@ def ha_start(self): #Set VM credentials for proto in self.proto_list: self.send_node[proto] = [] - self.sender[proto] = [] + self.sender[proto] = [] if self.fip == 'True' : - self.sender_fip[proto] = [] - self.receiver[proto] = [] - self.send_node[proto] = [] - self.recv_node[proto] = [] - self.send_host[proto] = [] - self.recv_host[proto] = [] + self.sender_fip[proto] = [] + self.receiver[proto] = [] + self.send_node[proto] = [] + self.recv_node[proto] = [] + self.send_host[proto] = [] + self.recv_host[proto] = [] j = self.vm_num - 1 for i in range(0,((self.vm_num/2))): if self.fip == 'True' and proto == 'icmp' : @@ -340,7 +320,7 @@ def ha_stop(self): print("Sent: %s: Proto : %s"%(self.sender[proto][i].sent,proto)) if proto != 'icmp' : print("Received: %s Proto : %s"%(self.receiver[proto][i].recv,proto)) - else: + else: print("Received: %s Proto : %s"%(self.sender[proto][i].recv,proto)) if self.fip == 'True' and proto == 'icmp' : print("Sent FIP : %s: Proto : %s"%(self.sender_fip[proto][i].sent,proto)) @@ -354,12 +334,12 @@ def ha_basic_test(self): that the VM are spawned successfully and deletes them. 
''' vms = [] -# vm_cnt = len(self.inputs.cfgm_ips) - vm_cnt = 1 +# vm_cnt = len(self.inputs.cfgm_ips) + vm_cnt = 1 self.logger.debug("In ha_basic_test.....") for i in range(0,vm_cnt): - vms.append(self.useFixture(VMFixture(project_name= self.inputs.project_name, connections= self.connections, vn_objs = [ self.vn1_fixture.obj ], vm_name= "ha_new_vm"+str(random.randint(1,100000)) ,flavor='contrail_flavor_large',image_name='ubuntu-traffic'))) + vms.append(self.useFixture(VMFixture(project_name= self.inputs.project_name, connections= self.connections, vn_objs = [ self.vn1_fixture.obj ], vm_name= get_random_name("ha_new_vm") ,image_name='ubuntu-traffic'))) for i in range(0,vm_cnt): assert vms[i].verify_on_setup() if self.inputs.orchestrator =='vcenter': @@ -429,7 +409,7 @@ def service_command(self, operation, service, node): status = self.inputs.run_cmd_on_server(node, st, username=username ,password=password) self.logger.info("status: %s" % status) if re.search('RUNNING', status,flags=re.I): - ret = True + ret = True break if not ret: self.logger.error("Failed: %s on %s" % (cmd, node)) @@ -440,12 +420,12 @@ def service_command(self, operation, service, node): def ha_service_restart_test(self, service, nodes): ''' Test service instance crash/restart Ensure that that system is operational when a signle service - instance crashes/restarted. + instance crashes/restarted. 
Pass crietria: as defined by ha_basic_test ''' sleep(10) - self.ha_start() + assert self.ha_start(), "Basic HA setup failed" for node in nodes: if not self.service_command('restart', service, node): return False @@ -457,13 +437,13 @@ def ha_service_restart_test(self, service, nodes): def ha_service_restart(self, service, nodes): ''' Test service instance crash/restart - Pass crietria: service restarted successfully + Pass crietria: service restarted successfully ''' sleep(10) for node in nodes: if not self.service_command('restart', service, node): return False - return True + return True def ha_service_single_failure_test(self, service, nodes): ''' Test single service instance failure @@ -478,7 +458,7 @@ def ha_service_single_failure_test(self, service, nodes): self.logger.info("Failed to start contrail service") return False sleep(10) - self.ha_start() + assert self.ha_start(), "Basic HA setup failed" for node in nodes: if not self.service_command('stop', service, node): return False @@ -509,7 +489,7 @@ def ha_service_single_failure_test(self, service, nodes): if not self.ha_basic_test(): return False - return self.ha_stop() + return self.ha_stop() def check_status(self,cmd,nodes): for node in nodes: @@ -529,11 +509,11 @@ def check_status(self,cmd,nodes): def ha_reboot_test(self, nodes): ''' Test reboot of controller nodes - instance crashes/restarted. + instance crashes/restarted. 
Pass crietria: as defined by ha_basic_test ''' - self.ha_start() - + assert self.ha_start(), "Basic HA setup failed" + for node in nodes: if not self.reboot(node): @@ -550,8 +530,8 @@ def ha_cold_reboot_test(self,nodes): ''' Test cold reboot of controller nodes Pass crietria: as defined by ha_basic_test ''' - self.ha_start() - + assert self.ha_start(), "Basic HA setup failed" + for node in nodes: if not self.cold_reboot(node,'cycle'): @@ -566,8 +546,8 @@ def ha_reboot_all_test(self,nodes,mode): ''' Test cold reboot of compute nodes Pass crietria: as defined by ha_basic_test ''' - self.ha_start() - + assert self.ha_start(), "Basic HA setup failed" + for node in nodes: if mode == 'ipmi': if not self.cold_reboot(node,'cycle'): @@ -585,7 +565,7 @@ def ha_cold_shutdown_test(self,nodes): ''' Test cold reboot of controller nodes Pass crietria: as defined by ha_basic_test ''' - self.ha_start() + assert self.ha_start(), "Basic HA setup failed" for node in nodes: @@ -603,14 +583,14 @@ def ha_cold_shutdown_test(self,nodes): self.remove_api_from_cleanups(self.reset_handles) if not self.ha_basic_test(): return False - + return self.ha_stop() def ha_isolate_test(self, nodes): ''' Test isolation of controller nodes Pass crietria: as defined by ha_basic_test ''' - self.ha_start() + assert self.ha_start(), "Basic HA setup failed" for node in nodes: self.addCleanup(self.isolate_node, node,'up') if not self.isolate_node(node,"down"): diff --git a/serial_scripts/ha/test_ha_node_failures.py b/serial_scripts/ha/test_ha_node_failures.py index 899e0392a..2f886b28c 100644 --- a/serial_scripts/ha/test_ha_node_failures.py +++ b/serial_scripts/ha/test_ha_node_failures.py @@ -16,7 +16,7 @@ def setUpClass(cls): @skip_because(ha_setup = False) def test_ha_reboot(self): time.sleep(120) - ret = self.ha_reboot_test([self.inputs.cfgm_ips[1],self.inputs.cfgm_ips[2]]) + ret = self.ha_reboot_test([self.inputs.cfgm_ips[0],self.inputs.cfgm_ips[1]]) time.sleep(30) return ret @@ -25,7 +25,7 @@ def 
test_ha_reboot(self): @skip_because(ha_setup = False) def test_ha_cold_reboot(self): time.sleep(120) - ret = self.ha_cold_reboot_test([self.inputs.cfgm_ips[1],self.inputs.cfgm_ips[2]]) + ret = self.ha_cold_reboot_test([self.inputs.cfgm_ips[0],self.inputs.cfgm_ips[1]]) time.sleep(30) return ret @@ -34,7 +34,7 @@ def test_ha_cold_reboot(self): @skip_because(ha_setup = False) def test_ha_cold_shutdown(self): time.sleep(120) - ret = self.ha_cold_shutdown_test([self.inputs.cfgm_ips[1],self.inputs.cfgm_ips[2]]) + ret = self.ha_cold_shutdown_test([self.inputs.cfgm_ips[0],self.inputs.cfgm_ips[1]]) time.sleep(30) return ret @@ -42,7 +42,7 @@ def test_ha_cold_shutdown(self): @preposttest_wrapper @skip_because(ha_setup = False) def test_ha_isolate(self): - ret = self.ha_isolate_test([self.inputs.cfgm_control_ips[1],self.inputs.cfgm_control_ips[2]]) + ret = self.ha_isolate_test([self.inputs.cfgm_control_ips[0],self.inputs.cfgm_control_ips[1]]) time.sleep(120) return ret diff --git a/serial_scripts/headless_vrouter/base.py b/serial_scripts/headless_vrouter/base.py index 5143d71c9..52b03347a 100644 --- a/serial_scripts/headless_vrouter/base.py +++ b/serial_scripts/headless_vrouter/base.py @@ -1,22 +1,12 @@ -import test +import test_v1 from common import isolated_creds -class BaseHeadlessVrouterTest(test.BaseTestCase): +class BaseHeadlessVrouterTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseHeadlessVrouterTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib @@ -29,8 +19,6 @@ def setUpClass(cls): @classmethod 
def tearDownClass(cls): - cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(BaseHeadlessVrouterTest, cls).tearDownClass() # end tearDownClass diff --git a/serial_scripts/headless_vrouter/test_headless_vrouter.py b/serial_scripts/headless_vrouter/test_headless_vrouter.py index 1ad7029ef..f5d93ccf5 100644 --- a/serial_scripts/headless_vrouter/test_headless_vrouter.py +++ b/serial_scripts/headless_vrouter/test_headless_vrouter.py @@ -392,7 +392,7 @@ def test_config_add_change_while_control_nodes_go_down(self): receiver.stop() sender.stop() project1_instance = config_topo['project1']['project']['project1'] - project1_instance.get_project_connections() + project1_instance.get_project_connections(username=project1_instance.username, password=project1_instance.password) vnet2_instance = config_topo['project1']['vn']['vnet2'] # add VM to existing VN @@ -406,10 +406,12 @@ def test_config_add_change_while_control_nodes_go_down(self): # create new IPAM ipam3_obj = self.useFixture( IPAMFixture( + connections=project1_instance.project_connections, project_obj=project1_instance, name='ipam3')) ipam4_obj = self.useFixture( IPAMFixture( + connections=project1_instance.project_connections, project_obj=project1_instance, name='ipam4')) diff --git a/serial_scripts/md5/base.py b/serial_scripts/md5/base.py index b93e49b20..264ca6325 100644 --- a/serial_scripts/md5/base.py +++ b/serial_scripts/md5/base.py @@ -1,48 +1,38 @@ -import test +import test_v1 from vn_test import MultipleVNFixture -from vnc_api.vnc_api import * -#from vnc_api.vnc_api import VncApi -from vm_test import MultipleVMFixture +from common.device_connection import NetconfConnection +import physical_device_fixture from fabric.api import run, hide, settings +from tcutils.contrail_status_check import * +from physical_router_fixture import PhysicalRouterFixture +from vm_test import MultipleVMFixture from vn_test import VNFixture from vm_test import VMFixture +from vnc_api.vnc_api import * from 
policy_test import PolicyFixture +from tcutils.util import get_random_name from scripts.securitygroup.verify import VerifySecGroup -from policy_test import PolicyFixture from common.policy.config import ConfigPolicy -from security_group import SecurityGroupFixture, get_secgrp_id_from_name from common import isolated_creds -from tcutils.util import get_random_name, copy_file_to_server, fab_put_file_to_vm import os import re -from physical_router_fixture import PhysicalRouterFixture +from time import sleep -class Md5Base(test.BaseTestCase, VerifySecGroup, ConfigPolicy): +class Md5Base(test_v1.BaseTestCase_v1, VerifySecGroup, ConfigPolicy): @classmethod def setUpClass(cls): super(Md5Base, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, - cls.inputs, ini_file=cls.ini_file, - logger=cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() cls.quantum_h = cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib = cls.connections.vnc_lib cls.agent_inspect = cls.connections.agent_inspect cls.cn_inspect = cls.connections.cn_inspect cls.analytics_obj = cls.connections.analytics_obj - # end setUpClass @classmethod def tearDownClass(cls): - cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(Md5Base, cls).tearDownClass() # end tearDownClass @@ -52,40 +42,48 @@ def setUp(self): def tearDown(self): super(Md5Base, self).tearDown() - def config_basic(self): - - vn1 = "vn1" - vn2 = "vn2" - vn_s = {'vn1': '10.1.1.0/24', 'vn2': ['20.1.1.0/24']} - self.logger.info("Configure the policy with allow any") - rules = [ - { - 'direction': '<>', - 'protocol': 'any', - 'source_network': vn1, - 'src_ports': [0, -1], - 'dest_network': vn2, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - }, - ] - self.multi_vn_fixture = 
self.useFixture(MultipleVNFixture( - connections=self.connections, inputs=self.inputs, subnet_count=2, - vn_name_net=vn_s, project_name=self.inputs.project_name)) - vns = self.multi_vn_fixture.get_all_fixture_obj() - (self.vn1_name, self.vn1_fix) = self.multi_vn_fixture._vn_fixtures[0] - (self.vn2_name, self.vn2_fix) = self.multi_vn_fixture._vn_fixtures[1] - assert self.vn1_fix.verify_on_setup() - assert self.vn2_fix.verify_on_setup() - self.config_policy_and_attach_to_vn(rules) - - self.multi_vm_fixture = self.useFixture(MultipleVMFixture( - project_name=self.inputs.project_name, connections=self.connections, - vm_count_per_vn=1, vn_objs=vns, image_name='cirros-0.3.0-x86_64-uec', - flavor='m1.tiny')) - vms = self.multi_vm_fixture.get_all_fixture() - (self.vm1_name, self.vm1_fix) = vms[0] - (self.vm2_name, self.vm2_fix) = vms[1] + def config_basic(self, is_mx_present): + #mx config using device manager + if is_mx_present: + if self.inputs.ext_routers: + if self.inputs.use_devicemanager_for_md5: + for i in range(len(self.inputs.physical_routers_data.values())): + router_params = self.inputs.physical_routers_data.values()[i] + if router_params['model'] == 'mx': + self.phy_router_fixture = self.useFixture(PhysicalRouterFixture( + router_params['name'], router_params['mgmt_ip'], + model=router_params['model'], + vendor=router_params['vendor'], + asn=router_params['asn'], + ssh_username=router_params['ssh_username'], + ssh_password=router_params['ssh_password'], + mgmt_ip=router_params['mgmt_ip'], + connections=self.connections)) + else: + if self.inputs.ext_routers: + for i in range(len(self.inputs.physical_routers_data.values())): + router_params = self.inputs.physical_routers_data.values()[i] + if router_params['model'] == 'mx': + cmd = [] + cmd.append('set groups md5_tests routing-options router-id %s' % router_params['mgmt_ip']) + cmd.append('set groups md5_tests routing-options route-distinguisher-id %s' % router_params['mgmt_ip']) + cmd.append('set groups 
md5_tests routing-options autonomous-system %s' % router_params['asn']) + cmd.append('set groups md5_tests protocols bgp group md5_tests type internal') + cmd.append('set groups md5_tests protocols bgp group md5_tests multihop') + cmd.append('set groups md5_tests protocols bgp group md5_tests local-address %s' % router_params['mgmt_ip']) + cmd.append('set groups md5_tests protocols bgp group md5_tests hold-time 90') + cmd.append('set groups md5_tests protocols bgp group md5_tests keep all') + cmd.append('set groups md5_tests protocols bgp group md5_tests family inet-vpn unicast') + cmd.append('set groups md5_tests protocols bgp group md5_tests family inet6-vpn unicast') + cmd.append('set groups md5_tests protocols bgp group md5_tests family evpn signaling') + cmd.append('set groups md5_tests protocols bgp group md5_tests family route-target') + cmd.append('set groups md5_tests protocols bgp group md5_tests local-as %s' % router_params['asn']) + for node in self.inputs.bgp_control_ips: + cmd.append('set groups md5_tests protocols bgp group md5_tests neighbor %s peer-as %s' % (node, router_params['asn'])) + cmd.append('set apply-groups md5_tests') + mx_handle = NetconfConnection(host = router_params['mgmt_ip']) + mx_handle.connect() + cli_output = mx_handle.config(stmts = cmd, timeout = 120) vn61_name = "test_vnv6sr" vn61_net = ['2001::101:0/120'] @@ -93,14 +91,12 @@ def config_basic(self): vn61_fixture = self.useFixture(VNFixture( project_name=self.inputs.project_name, connections=self.connections, vn_name=vn61_name, inputs=self.inputs, subnets=vn61_net)) - assert vn61_fixture.verify_on_setup() vn62_name = "test_vnv6dn" vn62_net = ['2001::201:0/120'] #vn2_fixture = self.config_vn(vn2_name, vn2_net) vn62_fixture = self.useFixture(VNFixture( project_name=self.inputs.project_name, connections=self.connections, vn_name=vn62_name, inputs=self.inputs, subnets=vn62_net)) - assert vn62_fixture.verify_on_setup() vm61_name = 'source_vm' vm62_name = 'dest_vm' #vm1_fixture = 
self.config_vm(vn1_fixture, vm1_name) @@ -108,14 +104,12 @@ def config_basic(self): vm61_fixture = self.useFixture(VMFixture( project_name=self.inputs.project_name, connections=self.connections, vn_obj=vn61_fixture.obj, vm_name=vm61_name, node_name=None, - image_name='cirros-0.3.0-x86_64-uec', flavor='m1.tiny')) + image_name='cirros', flavor='m1.tiny')) vm62_fixture = self.useFixture(VMFixture( project_name=self.inputs.project_name, connections=self.connections, vn_obj=vn62_fixture.obj, vm_name=vm62_name, node_name=None, - image_name='cirros-0.3.0-x86_64-uec', flavor='m1.tiny')) - assert vm61_fixture.verify_on_setup() - assert vm62_fixture.verify_on_setup() + image_name='cirros', flavor='m1.tiny')) vm61_fixture.wait_till_vm_is_up() vm62_fixture.wait_till_vm_is_up() @@ -137,23 +131,43 @@ def config_basic(self): policy_fixture, vn61_fixture) vn62_policy_fix = self.attach_policy_to_vn( policy_fixture, vn62_fixture) - #mx config using device manager - router_params = self.inputs.physical_routers_data.values()[0] - self.phy_router_fixture = self.useFixture(PhysicalRouterFixture( - router_params['name'], router_params['mgmt_ip'], - model=router_params['model'], - vendor=router_params['vendor'], - asn=router_params['asn'], - ssh_username=router_params['ssh_username'], - ssh_password=router_params['ssh_password'], - mgmt_ip=router_params['mgmt_ip'], - connections=self.connections)) + + vn1 = "vn1" + vn2 = "vn2" + vn_s = {'vn1': '10.1.1.0/24', 'vn2': ['20.1.1.0/24']} + rules = [ + { + 'direction': '<>', + 'protocol': 'any', + 'source_network': vn1, + 'src_ports': [0, -1], + 'dest_network': vn2, + 'dst_ports': [0, -1], + 'simple_action': 'pass', + }, + ] + + self.logger.info("Configure the policy with allow any") + self.multi_vn_fixture = self.useFixture(MultipleVNFixture( + connections=self.connections, inputs=self.inputs, subnet_count=2, + vn_name_net=vn_s, project_name=self.inputs.project_name)) + vns = self.multi_vn_fixture.get_all_fixture_obj() + (self.vn1_name, 
self.vn1_fix) = self.multi_vn_fixture._vn_fixtures[0] + (self.vn2_name, self.vn2_fix) = self.multi_vn_fixture._vn_fixtures[1] + self.config_policy_and_attach_to_vn(rules) + + self.multi_vm_fixture = self.useFixture(MultipleVMFixture( + project_name=self.inputs.project_name, connections=self.connections, + vm_count_per_vn=1, vn_objs=vns, image_name='cirros', + flavor='m1.tiny')) + vms = self.multi_vm_fixture.get_all_fixture() + (self.vm1_name, self.vm1_fix) = vms[0] + (self.vm2_name, self.vm2_fix) = vms[1] def config_policy_and_attach_to_vn(self, rules): randomname = get_random_name() policy_name = "sec_grp_policy_" + randomname policy_fix = self.config_policy(policy_name, rules) - assert policy_fix.verify_on_setup() policy_vn1_attach_fix = self.attach_policy_to_vn( policy_fix, self.vn1_fix) policy_vn2_attach_fix = self.attach_policy_to_vn( @@ -166,21 +180,45 @@ def config_md5(self, host, auth_data): list_uuid.set_bgp_router_parameters(rparam) self.vnc_lib.bgp_router_update(list_uuid) - def check_bgp_status(self): + def check_bgp_status(self, is_mx_present=False): result = True self.cn_inspect = self.connections.cn_inspect # Verify the connection between all control nodes and MX(if # present) host = self.inputs.bgp_ips[0] cn_bgp_entry = self.cn_inspect[host].get_cn_bgp_neigh_entry() + if not is_mx_present: + if self.inputs.ext_routers: + for bgpnodes in cn_bgp_entry: + bgpnode = str(bgpnodes) + if self.inputs.ext_routers[0][0] in bgpnode: + cn_bgp_entry.remove(bgpnodes) + cn_bgp_entry = str(cn_bgp_entry) + cn_bgp_entry = str(cn_bgp_entry) - est = re.findall(' \'state\': \'(\w+)\', \'local', cn_bgp_entry) + est = re.findall(' \'state\': \'(\w+)\', \'flap_count', cn_bgp_entry) for ip in est: if not ('Established' in ip): result = False self.logger.debug("Check the BGP connection on %s", host) return result + def check_tcp_status(self): + result = True + #testcases which check tcp status quickly change keys and check for tcp status. 
+ #internally, tcp session is restarted when md5 keys are changed, + #as tcp session may take some time to come up, adding some sleep. + sleep(10) + for node in self.inputs.bgp_control_ips: + cmd = 'netstat -tnp | grep :179 | awk \'{print $6}\'' + tcp_status = self.inputs.run_cmd_on_server(node, cmd) + tcp_status=tcp_status.split('\n') + for status in tcp_status: + if not ('ESTABLISHED' in status): + result = False + self.logger.debug("Check the TCP connection on %s", node) + return result + def config_per_peer(self, auth_data): uuid = self.vnc_lib.bgp_routers_list() uuid = str(uuid) @@ -192,10 +230,318 @@ def config_per_peer(self, auth_data): for str1 in iterrrefs: sess = str1['attr'].get_session() firstsess = sess[0] - #import pdb;pdb.set_trace() firstattr = firstsess.get_attributes() firstattr[0].set_auth_data(auth_data) list_uuid1._pending_field_updates.add('bgp_router_refs') self.vnc_lib.bgp_router_update(list_uuid1) + @classmethod + def remove_mx_group_config(cls): + if cls.inputs.ext_routers: + router_params = cls.inputs.physical_routers_data.values()[0] + cmd = [] + cmd.append('delete groups md5_tests') + cmd.append('delete apply-groups md5_tests') + mx_handle = NetconfConnection(host = router_params['mgmt_ip']) + mx_handle.connect() + cli_output = mx_handle.config(stmts = cmd, timeout = 120) + + def remove_configured_md5(self): + auth_data=None + for host in self.list_uuid: + self.config_per_peer(auth_data=auth_data) + self.config_md5( host=host, auth_data=auth_data ) + + def create_md5_config(self): + auth_data=None + for host in self.list_uuid: + self.config_per_peer(auth_data=auth_data) + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up before md5" + for host in self.list_uuid: + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert 
(self.check_bgp_status(self.is_mx_present)), "BGP between nodes after basic md5 config not up" + return True + + def add_delete_md5_config(self): + auth_data=None + for host in self.list_uuid: + self.config_per_peer(auth_data=auth_data) + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up before md5" + host=self.list_uuid[1] + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + self.config_md5(host=host, auth_data=auth_data) + sleep(95) + assert not (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should not be up as only one side has md5" + + for host in self.list_uuid: + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + self.config_md5( host=host, auth_data=auth_data ) + + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after both sides have md5" + host=self.list_uuid[1] + auth_data=None + self.config_md5(host=host, auth_data=auth_data) + sleep(95) + assert not (self.check_bgp_status(self.is_mx_present)), "BGP between nodes 2 should not be up as others have md5" + + for host in self.list_uuid: + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after 2 both sides have md5" + + for host in self.list_uuid: + auth_data=None + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up" + return True + + def different_keys_md5_config(self): + auth_data=None + for host in self.list_uuid: + self.config_per_peer(auth_data=auth_data) + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up before md5" + for host in 
self.list_uuid: + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after md5 config" + i=1 + for host in self.list_uuid: + key = "juniper" + i.__str__() + auth_data={'key_items': [ { 'key':key,"key_id":0 } ], "key_type":"md5"} + self.config_md5( host=host, auth_data=auth_data ) + i += 1 + sleep(95) + assert not (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should not be up as keys are different" + + for host in self.list_uuid: + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after md5 config on all sides" + + for host in self.list_uuid: + auth_data=None + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up" + return True + + def check_per_peer_md5_config(self): + auth_data=None + for host in self.list_uuid: + self.config_per_peer(auth_data=auth_data) + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up before md5" + + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + host=self.list_uuid[1] + self.config_per_peer(auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after per peer config" + return True + + def add_delete_per_peer_md5_config(self): + auth_data=None + for host in self.list_uuid: + self.config_per_peer(auth_data=auth_data) + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up before md5" + + auth_data={'key_items': [ { 
'key':"juniper","key_id":0 } ], "key_type":"md5"} + host=self.list_uuid[1] + self.config_per_peer(auth_data=auth_data) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after per peer with mx" + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + host=self.list_uuid[1] + self.config_per_peer(auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after different per peer value" + + auth_data=None + host=self.list_uuid[1] + self.config_per_peer(auth_data=auth_data) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up" + + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + host=self.list_uuid[1] + self.config_per_peer(auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after reconfig per peer with mx" + auth_data=None + host=self.list_uuid[1] + self.config_per_peer(auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after removing md5 with control" + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + host=self.list_uuid[1] + self.config_per_peer(auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after reconfiguring md5 with control" + return True + + def diff_keys_per_peer_md5_config(self): + auth_data=None + for host in self.list_uuid: + self.config_per_peer(auth_data=auth_data) + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up before md5" + + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + host=self.list_uuid[1] + self.config_per_peer(auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP 
between nodes not up after per peer with mx" + + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + host=self.list_uuid[1] + self.config_per_peer( auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up" + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + host=self.list_uuid[1] + self.config_per_peer(auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after reconfiguring key with mx" + return True + + def precedence_per_peer_md5_config(self): + auth_data=None + host=self.list_uuid[1] + self.config_per_peer(auth_data=auth_data) + for host in self.list_uuid: + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up before md5" + auth_data={'key_items': [ { 'key':"simple","key_id":0 } ], "key_type":"md5"} + host=self.list_uuid[1] + self.config_per_peer( auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after per peer with mx" + + auth_data=None + host=self.list_uuid[1] + self.config_per_peer(auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after removing md5 with control" + + i=1 + for host in self.list_uuid: + key = "juniper" + i.__str__() + auth_data={'key_items': [ { 'key':key,"key_id":0 } ], "key_type":"md5"} + self.config_md5( host=host, auth_data=auth_data ) + i += 1 + sleep(95) + assert not (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should not be up after global md5 key mismatch" + auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} + host=self.list_uuid[1] + self.config_per_peer( auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after global mismatch, 
but per peer match" + + + auth_data=None + host=self.list_uuid[1] + self.config_per_peer( auth_data=auth_data ) + + sleep(95) + assert not (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should not be up as global mismatch still exists" + for host in self.list_uuid: + auth_data={'key_items': [ { 'key':"trialbyerror","key_id":0 } ], "key_type":"md5"} + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after reconfiguring global match" + + for host in self.list_uuid: + auth_data=None + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after having no md5 between control" + + return True + + def iter_keys_per_peer_md5_config(self): + auth_data=None + for host in self.list_uuid: + self.config_per_peer(auth_data=auth_data) + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up before md5" + auth_data={'key_items': [ { 'key':"iter","key_id":0 } ], "key_type":"md5"} + host=self.list_uuid[1] + self.config_per_peer(auth_data=auth_data ) + sleep(95) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after per peer with mx" + + for i in range(1, 11): + for host in self.list_uuid: + key = "juniper" + i.__str__() + auth_data={'key_items': [ { 'key':key,"key_id":0 } ], "key_type":"md5"} + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_tcp_status()), "TCP connection should be up after key change" + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up 1 as keys are the same everywhere" + with settings( + host_string='%s@%s' % ( + self.inputs.username, self.inputs.cfgm_ips[0]), + password=self.inputs.password, warn_only=True, abort_on_prompts=False, debug=True): + conrt = run('service 
contrail-control restart') + cluster_status, error_nodes = ContrailStatusChecker().wait_till_contrail_cluster_stable() + assert cluster_status, 'Hash of error nodes and services : %s' % (error_nodes) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up 2 as keys are the same everywhere" + + for i in range(1, 11): + for host in self.list_uuid: + key = "juniper" + i.__str__() + auth_data={'key_items': [ { 'key':key,"key_id":0 } ], "key_type":"md5"} + self.config_md5( host=host, auth_data=auth_data ) + sleep(95) + assert (self.check_tcp_status()), "TCP connection should be up after key change" + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up 3 as keys are the same everywhere" + with settings( + host_string='%s@%s' % ( + self.inputs.username, self.inputs.cfgm_ips[0]), + password=self.inputs.password, warn_only=True, abort_on_prompts=False, debug=True): + conrt = run('service contrail-control restart') + cluster_status, error_nodes = ContrailStatusChecker().wait_till_contrail_cluster_stable() + assert cluster_status, 'Hash of error nodes and services : %s' % (error_nodes) + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes should be up 4 as keys are the same everywhere" + + for i in range(1, 11): + key = "juniper" + i.__str__() + auth_data={'key_items': [ { 'key':key,"key_id":0 } ], "key_type":"md5"} + host=self.list_uuid[1] + self.config_per_peer( auth_data=auth_data ) + sleep(95) + assert (self.check_tcp_status()), "TCP connection should be up after key change" + assert (self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after per peer match" + + for i in range(1, 11): + key = "juniper" + i.__str__() + auth_data={'key_items': [ { 'key':key,"key_id":0 } ], "key_type":"md5"} + host=self.list_uuid[1] + notmx=1 + self.config_per_peer(auth_data=auth_data ) + sleep(95) + assert (self.check_tcp_status()), "TCP connection should be up after key change" + assert 
(self.check_bgp_status(self.is_mx_present)), "BGP between nodes not up after per peer match" + + return True + # end class Md5Base diff --git a/serial_scripts/md5/test_md5.py b/serial_scripts/md5/test_md5.py index da3ec401e..cb29dfda4 100644 --- a/serial_scripts/md5/test_md5.py +++ b/serial_scripts/md5/test_md5.py @@ -19,6 +19,7 @@ from tcutils.tcpdump_utils import * from time import sleep from tcutils.util import get_random_name +from tcutils.contrail_status_check import * class TestMd5tests(Md5Base, VerifySecGroup, ConfigPolicy): @@ -39,7 +40,15 @@ def is_test_applicable(self): def setUp(self): super(TestMd5tests, self).setUp() - self.config_basic() + result = self.is_test_applicable() + if result[0]: + self.is_mx_present=True + self.config_basic(self.is_mx_present) + uuid = self.vnc_lib.bgp_routers_list() + self.uuid = str(uuid) + self.list_uuid = re.findall('u\'uuid\': u\'([a-zA-Z0-9-]+)\'', self.uuid) + else: + return @test.attr(type=['sanity']) @preposttest_wrapper @@ -47,23 +56,8 @@ def test_create_md5(self): """ Description: Verify md5 with allow specific protocol on all ports and policy with allow all between VN's """ - - uuid = self.vnc_lib.bgp_routers_list() - uuid = str(uuid) - list_uuid = re.findall('u\'uuid\': u\'([a-zA-Z0-9-]+)\'', uuid) - auth_data=None - for host in list_uuid: - self.config_per_peer(auth_data=auth_data) - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up before md5" - for host in list_uuid: - auth_data={'key_items': [ { 'key':"7","key_id":0 } ], "key_type":"md5"} - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes after basic md5 config not up" - return True - + self.addCleanup(self.remove_configured_md5) + assert self.create_md5_config() #end create_md5 @preposttest_wrapper @@ -71,45 +65,8 @@ def test_add_delete_md5(self): """ Description: Verify md5 with add,delete and specific 
protocol on all ports and policy with allow all between VN's """ - uuid = self.vnc_lib.bgp_routers_list() - uuid = str(uuid) - list_uuid = re.findall('u\'uuid\': u\'([a-zA-Z0-9-]+)\'', uuid) - auth_data=None - for host in list_uuid: - self.config_per_peer(auth_data=auth_data) - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up before md5" - host=list_uuid[1] - auth_data={'key_items': [ { 'key':"7","key_id":0 } ], "key_type":"md5"} - self.config_md5(host=host, auth_data=auth_data) - sleep(95) - assert not (self.check_bgp_status()), "BGP between nodes should not be up as only one side has md5" - - for host in list_uuid: - auth_data={'key_items': [ { 'key':"7","key_id":0 } ], "key_type":"md5"} - self.config_md5( host=host, auth_data=auth_data ) - - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after both sides have md5" - host=list_uuid[1] - auth_data=None - self.config_md5(host=host, auth_data=auth_data) - sleep(95) - assert not (self.check_bgp_status()), "BGP between nodes 2 should not be up as others have md5" - - for host in list_uuid: - auth_data={'key_items': [ { 'key':"7","key_id":0 } ], "key_type":"md5"} - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after 2 both sides have md5" - - for host in list_uuid: - auth_data=None - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up" - return True + self.addCleanup(self.remove_configured_md5) + assert self.add_delete_md5_config() #end add_delete_md5 @preposttest_wrapper @@ -117,41 +74,8 @@ def test_different_keys_md5(self): """ Description: Verify md5 with add,delete and specific protocol on all ports and policy with allow all between VN's """ - uuid = self.vnc_lib.bgp_routers_list() - uuid = str(uuid) - list_uuid = re.findall('u\'uuid\': 
u\'([a-zA-Z0-9-]+)\'', uuid) - auth_data=None - for host in list_uuid: - self.config_per_peer(auth_data=auth_data) - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up before md5" - for host in list_uuid: - auth_data={'key_items': [ { 'key':"7","key_id":0 } ], "key_type":"md5"} - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after md5 config" - i=1 - for host in list_uuid: - key = i.__str__() - auth_data={'key_items': [ { 'key':key,"key_id":0 } ], "key_type":"md5"} - self.config_md5( host=host, auth_data=auth_data ) - i += 1 - sleep(95) - assert not (self.check_bgp_status()), "BGP between nodes should not be up as keys are different" - - for host in list_uuid: - auth_data={'key_items': [ { 'key':"7","key_id":0 } ], "key_type":"md5"} - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after md5 config on all sides" - - for host in list_uuid: - auth_data=None - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up" - return True + self.addCleanup(self.remove_configured_md5) + assert self.different_keys_md5_config() #end different_keys_md5 @test.attr(type=['sanity']) @@ -160,23 +84,8 @@ def test_check_per_peer(self): """ Description: Verify per peer md5 and specific protocol on all ports and policy with allow all between VN's """ - uuid = self.vnc_lib.bgp_routers_list() - uuid = str(uuid) - list_uuid = re.findall('u\'uuid\': u\'([a-zA-Z0-9-]+)\'', uuid) - - auth_data=None - for host in list_uuid: - self.config_per_peer(auth_data=auth_data) - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up before md5" - - auth_data={'key_items': [ { 'key':"7","key_id":0 } ], "key_type":"md5"} - 
host=list_uuid[1] - self.config_per_peer(auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after per peer config" - return True + self.addCleanup(self.remove_configured_md5) + assert self.check_per_peer_md5_config() #end check_per_peer @preposttest_wrapper @@ -184,50 +93,8 @@ def test_add_delete_per_peer(self): """ Description: Verify add delete per peer md5 and specific protocol on all ports and policy with allow all between VN's """ - uuid = self.vnc_lib.bgp_routers_list() - uuid = str(uuid) - list_uuid = re.findall('u\'uuid\': u\'([a-zA-Z0-9-]+)\'', uuid) - - auth_data=None - for host in list_uuid: - self.config_per_peer(auth_data=auth_data) - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up before md5" - - auth_data={'key_items': [ { 'key':"7","key_id":0 } ], "key_type":"md5"} - host=list_uuid[1] - self.config_per_peer(auth_data=auth_data) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after per peer with mx" - auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} - host=list_uuid[1] - self.config_per_peer(auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after different per peer value" - - auth_data=None - host=list_uuid[1] - self.config_per_peer(auth_data=auth_data) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up" - - auth_data={'key_items': [ { 'key':"7","key_id":0 } ], "key_type":"md5"} - host=list_uuid[1] - self.config_per_peer(auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after reconfig per peer with mx" - auth_data=None - host=list_uuid[1] - self.config_per_peer(auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after removing md5 with control" - auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], 
"key_type":"md5"} - host=list_uuid[1] - self.config_per_peer(auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after reconfiguring md5 with control" - return True + self.addCleanup(self.remove_configured_md5) + assert self.add_delete_per_peer_md5_config() #end add_delete_per_peer @preposttest_wrapper @@ -235,33 +102,8 @@ def test_diff_keys_per_peer(self): """ Description: Verify different keys per peer md5 and specific protocol on all ports and policy with allow all between VN's """ - uuid = self.vnc_lib.bgp_routers_list() - uuid = str(uuid) - list_uuid = re.findall('u\'uuid\': u\'([a-zA-Z0-9-]+)\'', uuid) - auth_data=None - for host in list_uuid: - self.config_per_peer(auth_data=auth_data) - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up before md5" - - auth_data={'key_items': [ { 'key':"7","key_id":0 } ], "key_type":"md5"} - host=list_uuid[1] - self.config_per_peer(auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after per peer with mx" - - auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} - host=list_uuid[1] - self.config_per_peer( auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up" - auth_data={'key_items': [ { 'key':"7","key_id":0 } ], "key_type":"md5"} - host=list_uuid[1] - self.config_per_peer(auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after reconfiguring key with mx" - return True + self.addCleanup(self.remove_configured_md5) + assert self.diff_keys_per_peer_md5_config() #end diff_keys_per_peer @preposttest_wrapper @@ -269,135 +111,122 @@ def test_precedence_per_peer(self): """ Description: Verify precedence per peer md5 and specific protocol on all ports and policy with allow all between VN's """ - uuid = self.vnc_lib.bgp_routers_list() - uuid = 
str(uuid) - list_uuid = re.findall('u\'uuid\': u\'([a-zA-Z0-9-]+)\'', uuid) - auth_data=None - host=list_uuid[1] - self.config_per_peer(auth_data=auth_data) - for host in list_uuid: - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up before md5" - #import pdb;pdb.set_trace() - auth_data={'key_items': [ { 'key':"simple","key_id":0 } ], "key_type":"md5"} - host=list_uuid[1] - self.config_per_peer( auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after per peer with mx" + self.addCleanup(self.remove_configured_md5) + assert self.precedence_per_peer_md5_config() + #end precedence_per_peer + @preposttest_wrapper - auth_data=None - host=list_uuid[1] - self.config_per_peer(auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after removing md5 with control" + def test_iter_keys_per_peer(self): + """ + Description: Verify iteration of same keys per peer md5 and specific protocol on all ports and policy with allow all between VN's + """ + self.addCleanup(self.remove_configured_md5) + assert self.iter_keys_per_peer_md5_config() + #end test_iter_keys_per_peer - i=1 - for host in list_uuid: - key = i.__str__() - auth_data={'key_items': [ { 'key':key,"key_id":0 } ], "key_type":"md5"} - self.config_md5( host=host, auth_data=auth_data ) - i += 1 - sleep(95) - assert not (self.check_bgp_status()), "BGP between nodes should not be up after global md5 key mismatch" - auth_data={'key_items': [ { 'key':"juniper","key_id":0 } ], "key_type":"md5"} - host=list_uuid[1] - self.config_per_peer( auth_data=auth_data ) - sleep(95) - #import pdb;pdb.set_trace() - assert (self.check_bgp_status()), "BGP between nodes not up after global mismatch, but per peer match" - - - auth_data=None - host=list_uuid[1] - self.config_per_peer( auth_data=auth_data ) +#end class md5tests - sleep(95) - assert not (self.check_bgp_status()), "BGP 
between nodes should not be up as global mismatch still exists" - for host in list_uuid: - auth_data={'key_items': [ { 'key':"trialbyerror","key_id":0 } ], "key_type":"md5"} - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after reconfiguring global match" - for host in list_uuid: - auth_data=None - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after having no md5 between control" +class TestMd5testsOnControl(Md5Base, VerifySecGroup, ConfigPolicy): - return True - #end precedence_per_peer + @classmethod + def setUpClass(cls): + super(TestMd5testsOnControl, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + super(TestMd5testsOnControl, cls).tearDownClass() + cls.remove_mx_group_config() + + def is_test_applicable(self): + if (len(self.inputs.bgp_control_ips) == 1 and len(self.inputs.ext_routers) < 1): + return (False, 'Cluster needs 2 BGP peers to configure md5. 
There are no peers here') + return (True, None) + + def setUp(self): + super(TestMd5testsOnControl, self).setUp() + result = self.is_test_applicable() + if result[0]: + self.is_mx_present=False + self.config_basic(self.is_mx_present) + uuid = self.vnc_lib.bgp_routers_list() + self.uuid = str(uuid) + self.list_uuid = re.findall('u\'uuid\': u\'([a-zA-Z0-9-]+)\'', self.uuid) + else: + return + + @test.attr(type=['sanity']) @preposttest_wrapper + def test_create_md5_on_control(self): + """ + Description: Verify md5 with allow specific protocol on all ports and policy with allow all between VN's + """ + self.addCleanup(self.remove_configured_md5) + assert self.create_md5_config() + #end create_md5 - def test_iter_keys_per_peer(self): + @preposttest_wrapper + def test_add_delete_md5_on_control(self): """ - Description: Verify iteration of same keys per peer md5 and specific protocol on all ports and policy with allow all between VN's + Description: Verify md5 with add,delete and specific protocol on all ports and policy with allow all between VN's """ - uuid = self.vnc_lib.bgp_routers_list() - uuid = str(uuid) - list_uuid = re.findall('u\'uuid\': u\'([a-zA-Z0-9-]+)\'', uuid) - auth_data=None - for host in list_uuid: - self.config_per_peer(auth_data=auth_data) - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up before md5" + self.addCleanup(self.remove_configured_md5) + assert self.add_delete_md5_config() + #end add_delete_md5 - auth_data={'key_items': [ { 'key':"iter","key_id":0 } ], "key_type":"md5"} - host=list_uuid[1] - self.config_per_peer(auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after per peer with mx" + @preposttest_wrapper + def test_different_keys_md5_on_control(self): + """ + Description: Verify md5 with add,delete and specific protocol on all ports and policy with allow all between VN's + """ + 
self.addCleanup(self.remove_configured_md5) + assert self.different_keys_md5_config() + #end different_keys_md5 - for i in range(1, 11): - for host in list_uuid: - key = i.__str__() - auth_data={'key_items': [ { 'key':key,"key_id":0 } ], "key_type":"md5"} - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up 1 as keys are the same everywhere" - with settings( - host_string='%s@%s' % ( - self.inputs.username, self.inputs.cfgm_ips[0]), - password=self.inputs.password, warn_only=True, abort_on_prompts=False, debug=True): - conrt = run('service contrail-control restart') - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up 2 as keys are the same everywhere" + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_check_per_peer_on_control(self): + """ + Description: Verify per peer md5 and specific protocol on all ports and policy with allow all between VN's + """ + self.addCleanup(self.remove_configured_md5) + assert self.check_per_peer_md5_config() + #end check_per_peer - for i in range(1, 11): - for host in list_uuid: - key = i.__str__() - auth_data={'key_items': [ { 'key':key,"key_id":0 } ], "key_type":"md5"} - self.config_md5( host=host, auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up 3 as keys are the same everywhere" - with settings( - host_string='%s@%s' % ( - self.inputs.username, self.inputs.cfgm_ips[0]), - password=self.inputs.password, warn_only=True, abort_on_prompts=False, debug=True): - conrt = run('service contrail-control restart') - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes should be up 4 as keys are the same everywhere" + @preposttest_wrapper + def test_add_delete_per_peer_on_control(self): + """ + Description: Verify add delete per peer md5 and specific protocol on all ports and policy with allow all between VN's + """ + self.addCleanup(self.remove_configured_md5) 
+ assert self.add_delete_per_peer_md5_config() + #end add_delete_per_peer - for i in range(1, 11): - key = i.__str__() - auth_data={'key_items': [ { 'key':key,"key_id":0 } ], "key_type":"md5"} - host=list_uuid[1] - self.config_per_peer( auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after per peer match" + @preposttest_wrapper + def test_diff_keys_per_peer_on_control(self): + """ + Description: Verify different keys per peer md5 and specific protocol on all ports and policy with allow all between VN's + """ + self.addCleanup(self.remove_configured_md5) + assert self.diff_keys_per_peer_md5_config() + #end diff_keys_per_peer - for i in range(1, 11): - key = i.__str__() - auth_data={'key_items': [ { 'key':key,"key_id":0 } ], "key_type":"md5"} - host=list_uuid[1] - notmx=1 - self.config_per_peer(auth_data=auth_data ) - sleep(95) - assert (self.check_bgp_status()), "BGP between nodes not up after per peer match" + @preposttest_wrapper + def test_precedence_per_peer_on_control(self): + """ + Description: Verify precedence per peer md5 and specific protocol on all ports and policy with allow all between VN's + """ + self.addCleanup(self.remove_configured_md5) + assert self.precedence_per_peer_md5_config() + #end precedence_per_peer + @preposttest_wrapper - return True + def test_iter_keys_per_peer_on_control(self): + """ + Description: Verify iteration of same keys per peer md5 and specific protocol on all ports and policy with allow all between VN's + """ + self.addCleanup(self.remove_configured_md5) + assert self.iter_keys_per_peer_md5_config() #end test_iter_keys_per_peer -#end class md5tests +#end class TestMd5testsonControl diff --git a/serial_scripts/neutron/lbaas/test_lbaas.py b/serial_scripts/neutron/lbaas/test_lbaas.py index 135f74832..a8cc82296 100644 --- a/serial_scripts/neutron/lbaas/test_lbaas.py +++ b/serial_scripts/neutron/lbaas/test_lbaas.py @@ -47,13 +47,13 @@ def test_active_standby_failover(self): 
vn_vip_fixture = self.create_vn(vn_vip, vn_vip_subnets) assert vn_vip_fixture.verify_on_setup() pool_vm1_fixture = self.create_vm(vn_pool_fixture,vm_name=pool_vm1, - flavor='contrail_flavor_small', image_name='ubuntu') + image_name='cirros') pool_vm2_fixture = self.create_vm(vn_pool_fixture,vm_name=pool_vm2, - flavor='contrail_flavor_small', image_name='ubuntu') + image_name='cirros') pool_vm3_fixture = self.create_vm(vn_pool_fixture,vm_name=pool_vm3, - flavor='contrail_flavor_small', image_name='ubuntu') + image_name='cirros') client_vm1_fixture = self.create_vm(vn_vip_fixture,vm_name=client_vm1, - flavor='contrail_flavor_small', image_name='ubuntu') + image_name='cirros') lb_pool_servers = [pool_vm1_fixture, pool_vm2_fixture, pool_vm3_fixture] @@ -148,7 +148,7 @@ def test_active_standby_failover(self): self.logger.info("clinet vm %s running on active compute, launching another client vm on standby" " %s before stopping the agent in active %s" % (client_vm1_fixture, standby, active)) client_vm2_fixture = self.create_vm(vn_vip_fixture,vm_name=client_vm2, - flavor='contrail_flavor_small', node_name=standby, image_name='ubuntu') + node_name=standby, image_name='cirros') assert client_vm2_fixture.wait_till_vm_is_up() client_vm_fixture = client_vm2_fixture else: diff --git a/serial_scripts/neutron/test_quota.py b/serial_scripts/neutron/test_quota.py index bf494bd68..365f005ae 100644 --- a/serial_scripts/neutron/test_quota.py +++ b/serial_scripts/neutron/test_quota.py @@ -24,22 +24,33 @@ def tearDownClass(cls): @preposttest_wrapper def test_update_default_quota_for_admin_tenant(self): result = True + # Get current object count and test on top of that + admin_proj_obj = self.vnc_lib.project_read( + fq_name=['default-domain', self.inputs.admin_tenant]) + subnets = self.get_subnets_count(admin_proj_obj.uuid) + 3 + vns = len(admin_proj_obj.get_virtual_networks() or []) + 3 + floating_ips = len(admin_proj_obj.get_floating_ip_back_refs() or []) + 10 + routers = 
len(admin_proj_obj.get_logical_routers() or []) + 10 + sgs = len(admin_proj_obj.get_security_groups() or []) + 5 + vmis = len(admin_proj_obj.get_virtual_machine_interfaces() or []) + 5 + self.update_default_quota_list( - subnet=3, - virtual_network=3, - floating_ip=10, - logical_router=10, + subnet=subnets, + virtual_network=vns, + floating_ip=floating_ips, + logical_router=routers, security_group_rule=10, - virtual_machine_interface=5, - security_group=5) + virtual_machine_interface=vmis, + security_group=sgs) + # Account for 1 default SG rule created resource_dict = self.create_quota_test_resources( self.admin_inputs, self.admin_connections, vn_count=3, router_count=10, - secgrp_count=4, - secgep_rule_count=4, + secgrp_count=5, + secgep_rule_count=8, fip_count=10, port_count=5) @@ -65,9 +76,11 @@ def test_update_default_quota_for_admin_tenant(self): assert result, 'Quota tests failed' + @preposttest_wrapper def test_update_default_quota_for_new_tenant(self): result = True + self.update_default_quota_list( subnet=3, virtual_network=3, @@ -79,22 +92,22 @@ def test_update_default_quota_for_new_tenant(self): project_name = 'Project' isolated_creds = IsolatedCreds( - project_name, self.admin_inputs, + project_name, ini_file=self.ini_file, logger=self.logger) - isolated_creds.setUp() - project_obj = isolated_creds.create_tenant() - isolated_creds.create_and_attach_user_to_tenant() - proj_inputs = isolated_creds.get_inputs() - proj_connection = isolated_creds.get_conections() + project_obj = self.admin_isolated_creds.create_tenant(isolated_creds.project_name) + self.admin_isolated_creds.create_and_attach_user_to_tenant(project_obj, + isolated_creds.username,isolated_creds.password) + proj_inputs = isolated_creds.get_inputs(project_obj) + proj_connection = project_obj.get_project_connections() resource_dict = self.create_quota_test_resources( proj_inputs, proj_connection, vn_count=3, router_count=10, secgrp_count=4, - secgep_rule_count=4, + secgep_rule_count=8, 
fip_count=10, port_count=5) @@ -120,7 +133,8 @@ def test_update_default_quota_for_new_tenant(self): self.logger.error("Quota limit not followed for %s " % (item)) assert result, 'Quota tests failed' - + + @preposttest_wrapper def test_update_quota_for_admin_tenant(self): '''Update quota for admin tenent using neutron quota_update @@ -137,7 +151,7 @@ def test_update_quota_for_admin_tenant(self): quota_rsp = self.admin_connections.quantum_h.update_quota( self.admin_connections.project_id, quota_dict) - + self.addCleanup(self.admin_connections.quantum_h.delete_quota, self.admin_connections.project_id) quota_show_dict = self.connections.quantum_h.show_quota( self.admin_connections.project_id) @@ -221,6 +235,10 @@ def verify_quota_limit(self, inputs, connections, vn_fix, sg_obj): response_dict['sg_rule'] = sg_rule_obj port_obj = connections.quantum_h.create_port( vn_fix.vn_id) + # Cleanup the port incase port-create works + if port_obj: + self.addCleanup(connections.quantum_h.delete_port, + port_obj['id']) response_dict['port'] = port_obj fip_obj = self.create_multiple_floatingip( inputs, @@ -264,7 +282,7 @@ def create_multiple_floatingip( fvn_fixture.vn_id, net_dict) assert net_rsp['network'][ - 'router:external'] == True, 'Failed to update router:external to True' + 'router:external'] == True, 'Failed to update router:external to True' fip_fixture = self.useFixture( FloatingIPFixture( project_name=inputs.project_name, @@ -298,13 +316,17 @@ def create_multiple_secgrp(self, connections, count=1): return secgrp_objs def create_multiple_secgrp_rule(self, connections, sg_obj_list, count=1): - proto_list = ['udp', 'tcp', 'icmp'] + proto = 'tcp' + port_range_min = 1 sg_rule_objs = [] for sg_obj in sg_obj_list: for i in range(count): + port = port_range_min + i sg_rule_obj = connections.quantum_h.create_security_group_rule( sg_obj['id'], - protocol=random.choice(proto_list)) + protocol=proto, + port_range_min=port, + port_range_max=port) sg_rule_objs.append(sg_rule_obj) 
return sg_rule_objs diff --git a/serial_scripts/policy/base.py b/serial_scripts/policy/base.py index 8ea78cf26..99a05259a 100644 --- a/serial_scripts/policy/base.py +++ b/serial_scripts/policy/base.py @@ -1,22 +1,12 @@ -import test +import test_v1 from common import isolated_creds -class BaseSerialPolicyTest(test.BaseTestCase): +class BaseSerialPolicyTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseSerialPolicyTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib @@ -29,8 +19,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(BaseSerialPolicyTest, cls).tearDownClass() # end tearDownClass diff --git a/serial_scripts/policy/test_policy_serial.py b/serial_scripts/policy/test_policy_serial.py index cec4c820e..d910c4049 100644 --- a/serial_scripts/policy/test_policy_serial.py +++ b/serial_scripts/policy/test_policy_serial.py @@ -18,6 +18,7 @@ from common.topo import sdn_policy_topo_with_multi_project from tcutils.util import get_random_name, get_random_cidr, gen_str_with_spl_char import os +from tcutils.contrail_status_check import ContrailStatusChecker class TestSerialPolicy(BaseSerialPolicyTest): _interface = 'json' @@ -139,7 +140,7 @@ def validate_flow_in_vna(self, test_flow_list, test_vn, vn_fixture): self.logger.info( "--->VNA-Flow check: Looking for following test flow: %s" % (json.dumps(flow, sort_keys=True))) - vnet_list = [flow['source_vn'], flow['dst_vn']] + vnet_list = [flow['src_vn_match'], 
flow['dst_vn_match']] policy_route_state = self.check_policy_route_available( vnet_list, vn_fixture) try: @@ -181,14 +182,14 @@ def validate_flow_in_vna(self, test_flow_list, test_vn, vn_fixture): (json.dumps(agent_flow, sort_keys=True))) # For a matching flow, check following key values - keys_to_verify = ['dst_vn', 'action'] + keys_to_verify = ['dst_vn_match', 'action'] # For matching flow, check dest_vn and action to see if they are # intact for k in keys_to_verify: err_msg = None match = True - if k == 'action': + if k == keys_to_verify[1]: if flow[k][0] == 'pass': if agent_flow[k] == 'pass' or agent_flow[k] == '32': match = match and True @@ -207,7 +208,7 @@ def validate_flow_in_vna(self, test_flow_list, test_vn, vn_fixture): (k, expected, agent_flow[k])) match = match and False break - elif k == 'dst_vn': + elif k == keys_to_verify[0]: expected_vn = "__UNKNOWN__" if policy_route_state == False else flow[ k] if expected_vn == agent_flow[k]: @@ -278,8 +279,8 @@ def build_test_flow( f = test_flow['flow_entries'] f['src'] = test_vm1_fixture.vm_ip f['dst'] = test_vm2_fixture.vm_ip - f['source_vn'] = test_vn_vm1_fix.vn_fq_name - f['dst_vn'] = test_vn_vm2_fix.vn_fq_name + f['src_vn_match'] = test_vn_vm1_fix.vn_fq_name + f['dst_vn_match'] = test_vn_vm2_fix.vn_fq_name vm1_vn_fq_name = test_vm1_fixture.vn_fq_name nh = test_vm1_fixture.tap_intf[vm1_vn_fq_name]['flow_key_idx'] f['nh_id'] = nh @@ -592,9 +593,12 @@ def test_controlnode_switchover_policy_between_vns_traffic(self): self.inputs.start_service('contrail-control', [active_controller]) time.sleep(10) + #get the management ip corresponding to new_active_controller + host = self.inputs.host_data[new_active_controller] + host_ip = self.inputs.host_data[host['name']]['host_ip'] # Check the BGP peering status from the currently active control node cn_bgp_entry = self.cn_inspect[ - new_active_controller].get_cn_bgp_neigh_entry() + host_ip].get_cn_bgp_neigh_entry() time.sleep(5) for entry in cn_bgp_entry: if 
entry['state'] != 'Established': @@ -896,7 +900,7 @@ def policy_test_with_multi_proto_traffic(self, topo): # 6. Match traffic stats against Analytics flow series data self.logger.info("-" * 80) self.logger.info( - "***Match traffic stats against Analytics flow series data***") + "%%%Match traffic stats against Analytics flow series data%%%") self.logger.info("-" * 80) msg = {} for proto in traffic_proto_l: @@ -906,7 +910,7 @@ def policy_test_with_multi_proto_traffic(self, topo): msg[proto] = proto + \ " Traffic Stats is not matching with opServer flow series data" self.logger.info( - "***Actual Traffic sent by agent %s \n\n stats shown by Analytics flow series%s" % + "%%%%%%Actual Traffic sent by agent %s \n\n stats shown by Analytics flow series%s" % (traffic_stats[proto], flow_series_data[proto])) self.assertGreaterEqual( flow_series_data[proto][0]['sum(packets)'], @@ -916,7 +920,7 @@ def policy_test_with_multi_proto_traffic(self, topo): # 6.a Let flows age out and verify analytics still shows the data self.logger.info("-" * 80) self.logger.info( - "***Let flows age out and verify analytics still shows the data in the history***") + "%%%Let flows age out and verify analytics still shows the data in the history%%%") self.logger.info("-" * 80) time.sleep(180) for proto in traffic_proto_l: @@ -2378,10 +2382,15 @@ def test_policy_with_spl_char_in_name(self): (vm2_fixture.vm_name, vm1_fixture.vm_name)) result = False - self.inputs.restart_service('ifmap', host_ips=self.inputs.cfgm_ips) + service = 'ifmap' + self.inputs.restart_service(service, host_ips=self.inputs.cfgm_ips) - sleep(120) - #Revisit this once contrail-status cli work is complete + status_checker = ContrailStatusChecker(self.inputs) + #wait for all the services,as ifmap impacts other services too + self.logger.info("Waiting for all the services to be UP on config nodes: %s" + % (self.inputs.cfgm_ips)) + assert status_checker.wait_till_contrail_cluster_stable(self.inputs.cfgm_ips, + delay=5, tries=20)[0], 
"All services could not come UP after ifmap restart" if not vm1_fixture.ping_to_ip(vm2_fixture.vm_ip): self.logger.error( diff --git a/serial_scripts/rbac/test_rbac.py b/serial_scripts/rbac/test_rbac.py new file mode 100644 index 000000000..51baf5a00 --- /dev/null +++ b/serial_scripts/rbac/test_rbac.py @@ -0,0 +1,258 @@ +import test +from base import BaseRbac +from tcutils.wrappers import preposttest_wrapper +from tcutils.util import get_random_name, get_random_ip + +class TestRbac(BaseRbac): + @preposttest_wrapper + def test_create_delete_service_chain(self): + ''' + Validate creds passed via service-monitor/schema-transformer + steps: + 1. Add user1 as role1 + 2. Update project acl with role1:CRUD perms for *.* + 3. Validate Service-Chain of type Sevice-Template Version-1 as user1 + 4. Validate Service-Chain of type Sevice-Template Version-2 as user1 + ''' + self.add_user_to_project(self.user1, self.role1) + user1_conn = self.get_connections(self.user1, self.pass1) + user1_conn.inputs.use_admin_auth = True + rules = [{'rule_object': '*', + 'rule_field': None, + 'perms': [{'role': self.role1, 'crud': 'CRUD'}] + }] + proj_rbac = self.create_rbac_acl(rules=rules) + assert self.create_sc(connections=user1_conn), 'SC v1 creation failed' + assert self.create_sc(connections=user1_conn, st_version=2), 'SC v2 creation failed' + + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_perms2_global_share(self): + ''' + Test perms2 global shared property of an object + steps: + 1. Add user1 as role1 in project1 and project2 + 2. Add *.* role1:CRUD to domain acl + 3. Create a Shared virtual-network in project1 + 4. Verify global shared flag is set on VN's perms2 + 4. 
Using shared VN try to launch a VM in project2 + ''' + project1 = self.create_project() + project2 = self.create_project() + self.add_user_to_project(self.user1, self.role1, project1.project_name) + self.add_user_to_project(self.user1, self.role1, project2.project_name) + u1_p1_conn = self.get_connections(self.user1, self.pass1, project1) + u1_p2_conn = self.get_connections(self.user1, self.pass1, project2) + rules = [{'rule_object': '*', + 'rule_field': None, + 'perms': [{'role': self.role1, 'crud': 'CRUD'}] + }] + domain_rbac = self.create_rbac_acl(rules=rules, parent_type='domain') + vn = self.create_vn(connections=u1_p1_conn, shared=True, verify=False) + assert vn, 'VN creation failed' + obj = self.read_vn(connections=u1_p1_conn, uuid=vn.uuid) + assert obj, 'Unable to read VN using user1/proj1 creds' + assert obj.is_shared, 'VN is not marked shared' + assert obj.global_access() == 7 + assert self.read_vn(connections=u1_p2_conn, uuid=vn.uuid) + vm = self.create_vm(connections=u1_p2_conn, vn_fixture=vn) + assert vm, 'VM creation failed on shared VN' + + @preposttest_wrapper + def test_perms2_share(self): + ''' + Test perms2 shared property of an object + steps: + 1. Add user1 as role1 in project1 and project2 + 2. Create VN and FIP-Pool as admin in isloated tenant + 3. Make the FIP Pool sharable with project1 + 4. launch VM on project1 and associate FIP from FIP-Pool + 5. 
fip create from associate FIP from FIP-Pool + ''' + project1 = self.create_project() + project2 = self.create_project() + self.add_user_to_project(self.user1, self.role1, project1.project_name) + self.add_user_to_project(self.user1, self.role1, project2.project_name) + u1_p1_conn = self.get_connections(self.user1, self.pass1, project1) + u1_p2_conn = self.get_connections(self.user1, self.pass1, project2) + rules = [{'rule_object': '*', + 'rule_field': None, + 'perms': [{'role': self.role1, 'crud': 'CRUD'}] + }] + domain_rbac = self.create_rbac_acl(rules=rules, parent_type='domain') + vn = self.create_vn() + fip_pool = self.create_fip_pool(vn_fixture=vn) + self.share_obj(obj=fip_pool.fip_pool_obj, project=project1) + vm1 = self.create_vm(connections=u1_p1_conn, vn_fixture=vn) + vm2 = self.create_vm(connections=u1_p2_conn, vn_fixture=vn) + (fip, fip_id) = self.create_fip(connections=u1_p1_conn, fip_pool=fip_pool, vm_fixture=vm1) + assert fip and fip_id, "FIP creation failed" + (fip, fip_id) = self.create_fip(connections=u1_p2_conn, fip_pool=fip_pool, vm_fixture=vm2) + assert not fip or not fip_id, "FIP creation should have failed" + self.share_obj(obj=fip_pool.fip_pool_obj, project=project2, perms=4) + assert self.read_fip_pool(connections=u1_p2_conn, uuid=fip_pool.fip_pool_id), "Unable to read shared FIP Pool object" + + @preposttest_wrapper + def test_delete_default_acl(self): + ''' + delete default acl recreation + steps: + 1. delete default acl + 2. restart supervisor-config service + 3. default acl should be recreated on restart + ''' + self.global_acl.delete() + # Restart one contrail-api service alone + self.inputs.restart_service('contrail-api', [self.inputs.cfgm_ip]) + self.populate_default_rules_in_global_acl() + assert not self.global_acl.created, "Global ACL didnt get auto created upon restart" + + @preposttest_wrapper + def test_rbac_rules_hierarchy(self): + ''' + Validate rules hierarchy and longest acl rule match + steps: + 1. 
Create global rule '*.* role1:R' + 2. Create domain rule 'VirtualNetwork.* role2:R' + 3. Create project rule 'VirtualNetwork.flood_unknown_unicast admin:CRUD' + 4. Add user1 as role1 and user2 as role2 + 5. ToDo: Not sure about the expected results + ''' + self.add_user_to_project(self.user1, self.role1) + self.add_user_to_project(self.user2, self.role2) + user1_conn = self.get_connections(self.user1, self.pass1) + user2_conn = self.get_connections(self.user2, self.pass2) + vn = self.create_vn() + rules = [{'rule_object': '*', + 'rule_field': None, + 'perms': [{'role': self.role1, 'crud': 'CRUD'}] + }] + self.global_acl.add_rules(rules=rules) + self._cleanups.insert(0, (self.global_acl.delete_rules, (), {'rules': rules})) + assert self.read_vn(connections=user1_conn, uuid=vn.uuid) + assert not self.read_vn(connections=user2_conn, uuid=vn.uuid) + domain_rules = [{'rule_object': 'virtual-network', + 'rule_field': None, + 'perms': [{'role': self.role2, 'crud': 'CRUD'}] + }] + domain_rbac = self.create_rbac_acl(rules=domain_rules, parent_type='domain') + assert self.read_vn(connections=user2_conn, uuid=vn.uuid) + assert not self.read_vn(connections=user1_conn, uuid=vn.uuid) + proj_rules = [{'rule_object': 'virtual-network', + 'rule_field': 'flood_unknown_unicast', + 'perms': [{'role': 'admin', 'crud': 'CRUD'}] + }] + project_rbac = self.create_rbac_acl(rules=proj_rules) + assert not self.update_vn(connections=user2_conn, uuid=vn.uuid, + prop_kv={'flood_unknown_unicast': True}) + assert self.update_vn(connections=self.connections, uuid=vn.uuid, + prop_kv={'flood_unknown_unicast': True}) + + +class RbacMode(BaseRbac): + @classmethod + def setUpClass(cls): + super(RbacMode, cls).setUpClass() + cls.inputs.api_server_port = '9100' + + @preposttest_wrapper + def test_update_aaa_mode(self): + ''' + Validate the aaa_mode rest api + steps: + 1. Add user1 as role1 + 2. change aaa_mode to no-auth + 3. user1 should be able to create VN + 4. change aaa_mode to cloud-admin + 5. 
user1 shouldnt be able to read/create VNs + 6. Admin should be able to create/read VNs + 7. change aaa_mode to rbac + 8. Add global rule *.* role1:R + 9. user1 should be able to read VN + ''' + self.add_user_to_project(self.user1, self.role1) + user1_conn = self.get_connections(self.user1, self.pass1) + self.set_aaa_mode('no-auth') + self._cleanups.insert(0, (self.set_aaa_mode, (), {'aaa_mode': 'rbac'})) + vn = self.create_vn(connections=user1_conn, verify=False) + assert vn, 'VN creation failed' + assert self.read_vn(connections=user1_conn, uuid=vn.uuid) + self.set_aaa_mode('cloud-admin') + assert not self.read_vn(connections=user1_conn, uuid=vn.uuid) + assert self.read_vn(connections=self.connections, uuid=vn.uuid) + self.set_aaa_mode('rbac') + rules = [{'rule_object': '*', + 'rule_field': None, + 'perms': [{'role': self.role1, 'crud': 'CRUD'}] + }] + self.global_acl.add_rules(rules=rules) + self._cleanups.insert(0, (self.global_acl.delete_rules, (), {'rules': rules})) + assert self.read_vn(connections=user1_conn, uuid=vn.uuid) + return True + +class RbacLbassv2(BaseRbac): + def is_test_applicable(self): + if self.inputs.orchestrator.lower() != 'openstack': + return (False, 'Skipping Test. Openstack required') + if self.inputs.get_build_sku().lower()[0] < 'l': + return (False, 'Skipping Test. LBaasV2 is supported only on liberty and up') + return (True, None) + + @preposttest_wrapper + def test_rbac_lbaasv2_plugin(self): + ''' + Validate contrail neutron lbaasv2 plugin for rbac + steps: + 1. Add user1 as role1 + 2. create project acl rule *.* role1:CRUD + 3. 
create loadbalancer as user1 + ''' + self.add_user_to_project(self.user1, self.role1) + user1_conn = self.get_connections(self.user1, self.pass1) + rules = [{'rule_object': '*', + 'rule_field': None, + 'perms': [{'role': self.role1, 'crud': 'CRUD'}] + }] + proj_rbac = self.create_rbac_acl(rules=rules) + vn = self.create_vn(connections=user1_conn) + vm_ip = get_random_ip(vn.get_cidrs()[0]) + members = {'address': [vm_ip]} + lb_name = get_random_name('rbac-lb') + assert self.create_lbaas(connections=user1_conn, + lb_name=lb_name, network_id=vn.uuid, + members=members, hm_probe_type='PING') + +class TestRbac2(BaseRbac): + @preposttest_wrapper + def test_rbac_multiple_roles(self): + ''' + validate a user(user1) having multiple roles (role1 and role2) + steps: + 1. Add user1 as both role1 and role2 + 2. Update domain acl with role1:R for VirtualNetwork + 3. Create VN and VMI as admin user + 4. Try reading the VN and VM as user1 + 5. Update domain acl with role2:R for VirtualMachineInterface + 6. 
Try reading the VN and VM as user1 + ''' + vn = self.create_vn() + vmi = self.create_vmi(vn_fixture=vn) + self.add_user_to_project(self.user1, self.role1) + self.add_user_to_project(self.user1, self.role2) + user1_conn = self.get_connections(self.user1, self.pass1) + vn_rules = [{'rule_object': 'virtual-network', + 'rule_field': None, + 'perms': [{'role': self.role1, 'crud': 'R'}] + }] + vmi_rules = [{'rule_object': 'virtual-machine-interface', + 'rule_field': None, + 'perms': [{'role': self.role2, 'crud': 'R'}] + }] + domain_rbac = self.create_rbac_acl(rules=vn_rules, parent_type='domain') + assert self.read_vn(connections=user1_conn, uuid=vn.uuid) + assert not self.read_vmi(connections=user1_conn, uuid=vmi.uuid) + domain_rbac.add_rules(rules=vmi_rules) + domain_rbac.verify_on_setup() + assert self.read_vn(connections=user1_conn, uuid=vn.uuid) + assert self.read_vmi(connections=user1_conn, uuid=vmi.uuid) + diff --git a/serial_scripts/rsyslog/base.py b/serial_scripts/rsyslog/base.py index 05d69c6a3..ef460f141 100644 --- a/serial_scripts/rsyslog/base.py +++ b/serial_scripts/rsyslog/base.py @@ -1,22 +1,12 @@ -import test +import test_v1 from common import isolated_creds -class BaseRsyslogTest(test.BaseTestCase): +class BaseRsyslogTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseRsyslogTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib @@ -29,8 +19,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() 
super(BaseRsyslogTest, cls).tearDownClass() # end tearDownClass diff --git a/serial_scripts/rsyslog/test_rsyslog.py b/serial_scripts/rsyslog/test_rsyslog.py index ce27c34c4..18230760c 100644 --- a/serial_scripts/rsyslog/test_rsyslog.py +++ b/serial_scripts/rsyslog/test_rsyslog.py @@ -54,7 +54,13 @@ def send_syslog_and_verify_in_db(self, server_ip=None): log_mesg = 'This is a test log to check rsyslog provisioning.' cmd = cmd + '"' + log_mesg + '"' for i in range(3): - reply = commands.getoutput(cmd) + with settings(host_string='%s@%s' % (self.inputs.host_data[ + self.inputs.cfgm_ips[0]]['username'], + self.inputs.cfgm_ips[0]), + password=self.inputs.host_data[ + self.inputs.cfgm_ips[0]]['password'], + warn_only=True, abort_on_prompts=False): + reply = run('%s' % (cmd), pty=True) cmd = "contrail-logs --last 2m --message-type Syslog | " cmd = cmd + "grep 'THISISMYTESTLOG'" output = self.inputs.run_cmd_on_server(server_ip, cmd, @@ -72,10 +78,16 @@ def test_rsyslog_sanity_if_provisioned(self): result = False # Check rsyslog.conf file for connections. - cmd = "grep '@\{1,2\}" - cmd = cmd+"[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}" - cmd = cmd+":[0-9]\{1,5\}' "+RSYSLOG_CONF_FILE - reply = commands.getoutput(cmd) + with settings(host_string='%s@%s' % (self.inputs.host_data[ + self.inputs.cfgm_ips[0]]['username'], + self.inputs.cfgm_ips[0]), + password=self.inputs.host_data[ + self.inputs.cfgm_ips[0]]['password'], + warn_only=True, abort_on_prompts=False): + cmd = "grep '@\{1,2\}" + cmd = cmd+"[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}" + cmd = cmd+":[0-9]\{1,5\}' "+RSYSLOG_CONF_FILE + reply = run('%s' % (cmd), pty=True) # If not present bail out. 
if not reply: diff --git a/serial_scripts/sriov/base.py b/serial_scripts/sriov/base.py index cd638bb41..0e8af6b04 100644 --- a/serial_scripts/sriov/base.py +++ b/serial_scripts/sriov/base.py @@ -1,4 +1,7 @@ import test +import time +import struct +import socket import fixtures from common import isolated_creds @@ -15,11 +18,9 @@ def setUpClass(cls): cls.isolated_creds.create_and_attach_user_to_tenant() cls.inputs = cls.isolated_creds.get_inputs() cls.connections = cls.isolated_creds.get_conections() - #cls.connections= ContrailConnections(cls.inputs) cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib -# cls.logger= cls.inputs.logger cls.agent_inspect= cls.connections.agent_inspect cls.cn_inspect= cls.connections.cn_inspect cls.analytics_obj=cls.connections.analytics_obj @@ -27,7 +28,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - #cls.isolated_creds.delete_user() cls.isolated_creds.delete_tenant() super(BaseSriovTest, cls).tearDownClass() #end tearDownClass @@ -43,9 +43,67 @@ def bringup_interface_forcefully(self, vm_fixture, intf='eth1'): break else: time.sleep(3) + def get_sriov_enabled_compute_list(self): + sriov_host_name_list=[] + sriov_host_list=self.inputs.sriov_data[0].keys() + for item in sriov_host_list: + sriov_host_name_list.append(self.inputs.host_data[item.split('@')[1]]['name']) + return sriov_host_name_list - def remove_from_cleanups(self, fix): - for cleanup in self._cleanups: - # if fix.cleanUp in cleanup: - self._cleanups.remove(cleanup) - #break + def get_sriov_physnets(self,compute_name): + host_key=self.inputs.host_data[compute_name]['username'] + '@' + self.inputs.host_data[compute_name]['ip'] + physnets_list={} + physnets_list=self.inputs.sriov_data[0][host_key][0]['physnets'] + return physnets_list + + def get_sriov_vf_number(self,compute_name): + host_key=self.inputs.host_data[compute_name]['username'] + '@' + self.inputs.host_data[compute_name]['ip'] + 
vf_number=None + vf_number=self.inputs.sriov_data[0][host_key][0]['VF'] + return vf_number + + def get_sriov_pf(self,compute_name): + host_key=self.inputs.host_data[compute_name]['username'] + '@' + self.inputs.host_data[compute_name]['ip'] + pf_intf=None + pf_intf=self.inputs.sriov_data[0][host_key][0]['interface'] + return pf_intf + + def ip_increment(self,base_ip,increase_by): + ip2int = lambda ipstr: struct.unpack('!I', socket.inet_aton(ipstr))[0] + ip_num=ip2int(base_ip) + ip_num=ip_num + int(increase_by) + int2ip = lambda n: socket.inet_ntoa(struct.pack('!I', n)) + new_ip=int2ip(ip_num) + return new_ip + + def vm_force_delete(self,vm_obj): + cmd= 'source /etc/contrail/openstackrc;nova force-delete %s' %(vm_obj.vm_id) + status=self.inputs.run_cmd_on_server(self.inputs.cfgm_ip, cmd) + return status + + def get_sriov_mac(self,vm_fix,interface): + intf_cmd='ifconfig %s| grep HWaddr'%(interface) + output=vm_fix.run_cmd_on_vm(cmds=[intf_cmd], as_sudo=True) + return output[intf_cmd].split(" ")[15] + + def get_vf_in_use(self,vm_fix,interface,mac): + host = self.inputs.get_host_ip(vm_fix.vm_node_ip) + cmd='ip link show dev %s| grep %s'%(interface,mac) + output=self.inputs.run_cmd_on_server(host, cmd) + return output.split(" ")[1] + + def set_mtu_on_vf(self,vm_fix,intf,vf_num,vlan_num,mtu): + host = self.inputs.get_host_ip(vm_fix.vm_node_ip) + cmd='ip link set %s vf %s vlan %s mtu %s'%(intf,vf_num,vlan_num,mtu) + output=self.inputs.run_cmd_on_server(host, cmd) + return output + + + def remove_from_cleanups(self, fix): + for cleanup in self._cleanups: + if fix.cleanUp in cleanup: + self._cleanups.remove(cleanup) + break + #end remove_from_cleanups + + diff --git a/serial_scripts/sriov/test_sriov.py b/serial_scripts/sriov/test_sriov.py index 45171d72a..4458de209 100644 --- a/serial_scripts/sriov/test_sriov.py +++ b/serial_scripts/sriov/test_sriov.py @@ -23,7 +23,25 @@ def runTest(self): @preposttest_wrapper def test_communication_between_two_sriov_vm(self): ''' + 
Configure two SRIOV VM in Same phynets and same VN. + VMs are configure across compute node. + Verify the commonication over SRIOV NIC. ''' return self.communication_between_two_sriov_vm() + @preposttest_wrapper + def test_communication_between_two_sriov_vm_with_large_mtu(self): + ''' + ''' + return self.communication_between_two_sriov_vm_with_large_mtu() + + @preposttest_wrapper + def test_virtual_function_exhaustion_and_resue(self): + ''' + Verify Nova can schdule VM to all the VF of a PF. + Nova should though error when VF is exhausted. + After clearing one VF that should be rsusable + ''' + return self.virtual_function_exhaustion_and_resue() + diff --git a/serial_scripts/sriov/verify.py b/serial_scripts/sriov/verify.py index 0162f7c19..3be68ecc9 100644 --- a/serial_scripts/sriov/verify.py +++ b/serial_scripts/sriov/verify.py @@ -1,5 +1,6 @@ import re import os +import time from vn_test import * from vm_test import * from quantum_test import * @@ -18,15 +19,12 @@ def communication_between_two_sriov_vm (self): Verify the commonication over SRIOV NIC. 
''' result = True - host_list = self.connections.nova_h.get_hosts() - compute_1 = host_list[0] - compute_2 = host_list[0] - if len(host_list) > 1: - compute_1 = host_list[0] - compute_2 = host_list[1] - compute_1 = 'b7s36' - compute_2 = 'b7s37' - + sriov_compute_list= self.get_sriov_enabled_compute_list() + compute_1 = sriov_compute_list[0] + compute_2 = sriov_compute_list[0] + if len(sriov_compute_list) > 1: + compute_1 = sriov_compute_list[0] + compute_2 = sriov_compute_list[1] vm1_sriov_ip = '55.1.1.11' vm1_mgmt_ip = '33.1.1.11' vm2_sriov_ip = '55.1.1.12' @@ -53,7 +51,7 @@ def communication_between_two_sriov_vm (self): vn_name=self.vn1_name, subnets=self.vn1_subnets, sriov_enable=True, - sriov_provider_network='physnet1', + sriov_provider_network=self.get_sriov_physnets(compute_1)[0], sriov_vlan='200')) assert vn1_fixture.verify_on_setup() assert vn3_fixture.verify_on_setup() @@ -103,7 +101,7 @@ def communication_between_two_sriov_vm (self): self.bringup_interface_forcefully(vm1_fixture) self.bringup_interface_forcefully(vm2_fixture) - # Configure IPV6 address + # Configure IP address cmd_to_pass1 = ['ifconfig eth1 %s' % (vm1_sriov_ip)] vm1_fixture.run_cmd_on_vm(cmds=cmd_to_pass1, as_sudo=True, timeout=60) cmd_to_pass2 = ['ifconfig eth1 %s' % (vm2_sriov_ip)] @@ -113,3 +111,249 @@ def communication_between_two_sriov_vm (self): other_opt='-I eth1') # End communication_between_two_sriov_vm + def virtual_function_exhaustion_and_resue (self): + ''' + Verify Nova can schdule VM to all the VF of a PF. + Nova should though error when VF is exhausted. 
+ After clearing one VF that should be rsusable + ''' + result = True + sriov_compute_list= self.get_sriov_enabled_compute_list() + compute_1 = sriov_compute_list[0] + compute_2 = sriov_compute_list[0] + if len(sriov_compute_list) > 1: + compute_1 = sriov_compute_list[0] + compute_2 = sriov_compute_list[1] + + vm1_sriov_ip = '55.1.1.3' + vm1_mgmt_ip = '33.1.1.3' + (self.vn3_name, self.vn3_subnets) = ("SRIOV-MGMT-VN", ["33.1.1.0/24"]) + vn3_fixture = self.useFixture( + VNFixture( + project_name=self.inputs.project_name, + connections=self.connections, + inputs=self.inputs, + vn_name=self.vn3_name, + subnets=self.vn3_subnets,)) + + (self.vn1_name, self.vn1_subnets) = ("SRIOV-Test-VN1", ["55.1.1.0/24"]) + + vn1_fixture = self.useFixture( + VNFixture( + project_name=self.inputs.project_name, + connections=self.connections, + inputs=self.inputs, + vn_name=self.vn1_name, + subnets=self.vn1_subnets, + sriov_enable=True, + sriov_provider_network=self.get_sriov_physnets(compute_1)[0], + sriov_vlan='200')) + assert vn1_fixture.verify_on_setup() + assert vn3_fixture.verify_on_setup() + + subnet1_objects = vn1_fixture.get_subnets() + subnet2_objects = vn3_fixture.get_subnets() + self.logger.info( + 'Exhausting all the VF avaiable for SRIOV NIC in compute %s' % (compute_1)) + total_vf=self.get_sriov_vf_number(compute_1) + self.logger.info( + 'Creating total %s number of SRIOV VM on compute %s' % (total_vf,compute_1)) + + vm_fixture_list=[] + for x in xrange(0, total_vf): + ports1=vn1_fixture.create_port(vn1_fixture.vn_id, + subnet_id=subnet1_objects[0]['id'], ip_address=vm1_sriov_ip, sriov=True) + ports2=vn3_fixture.create_port(vn3_fixture.vn_id, + subnet_id=subnet2_objects[0]['id'],ip_address=vm1_mgmt_ip) + + sriov_vm1_name = 'SRIOV_VM-' + str(x) + vm_fixture_list.append(self.useFixture( + VMFixture( + project_name=self.inputs.project_name, + connections=self.connections, + vn_objs=[ + vn3_fixture.obj, + vn1_fixture.obj], + image_name='ubuntu', + vm_name=sriov_vm1_name, + 
node_name=compute_1, + port_ids = [ports1['id'],ports2['id']]))) + vm1_sriov_ip=self.ip_increment(vm1_sriov_ip,1) + vm1_mgmt_ip=self.ip_increment(vm1_mgmt_ip,1) + + # Wait tillVM is UP + assert vm_fixture_list[x].wait_till_vm_is_up() + self.logger.info( + 'Further VM launch should fail on compute %s. Max number of VF utilized' % (compute_1)) + ports1=vn1_fixture.create_port(vn1_fixture.vn_id, + subnet_id=subnet1_objects[0]['id'], ip_address=vm1_sriov_ip, sriov=True) + ports2=vn3_fixture.create_port(vn3_fixture.vn_id, + subnet_id=subnet2_objects[0]['id'],ip_address=vm1_mgmt_ip) + + vm_fixture_error=self.useFixture( + VMFixture( + project_name=self.inputs.project_name, + connections=self.connections, + vn_objs=[ + vn3_fixture.obj, + vn1_fixture.obj], + image_name='ubuntu', + vm_name='VM-Error', + node_name=compute_1, + port_ids = [ports1['id'],ports2['id']])) + assert vm_fixture_error.wait_till_vm_status(status='ERROR'), "VM Status should be in Error as all VF already in use" + + # Force delete the VM + status=self.vm_force_delete(vm_fixture_error) + self.remove_from_cleanups(vm_fixture_error) + self.logger.info( + 'Delete a VM to freeup one VF on Compute %s' % (compute_1)) + vm_fixture_list[3].cleanUp(), 'Cleanup failed VM, Check logs' + self.remove_from_cleanups(vm_fixture_list[3]) + assert vm_fixture_list[3].verify_vm_not_in_nova() + + self.logger.info( + 'VM launch should be successful now on compute %s. 
Max number of VF utilized' % (compute_1)) + vm1_sriov_ip=self.ip_increment(vm1_sriov_ip,1) + vm1_mgmt_ip=self.ip_increment(vm1_mgmt_ip,1) + ports1=vn1_fixture.create_port(vn1_fixture.vn_id, + subnet_id=subnet1_objects[0]['id'], ip_address=vm1_sriov_ip, sriov=True) + ports2=vn3_fixture.create_port(vn3_fixture.vn_id, + subnet_id=subnet2_objects[0]['id'],ip_address=vm1_mgmt_ip) + + vm_fixture_new=self.useFixture( + VMFixture( + project_name=self.inputs.project_name, + connections=self.connections, + vn_objs=[ + vn3_fixture.obj, + vn1_fixture.obj], + image_name='ubuntu', + vm_name='VM-New', + node_name=compute_1, + port_ids = [ports1['id'],ports2['id']])) + assert vm_fixture_new.wait_till_vm_is_up(),"New VM failed to launch" + + # End virtual_function_exhaustion_and_resue + + def communication_between_two_sriov_vm_with_large_mtu (self): + ''' + Configure two SRIOV VM in Same phynets and same VN. + VMs are configure across compute node. + Configure higher MTU value. + Verify Ping with higher packet size. 
+ ''' + + result = True + sriov_compute_list= self.get_sriov_enabled_compute_list() + compute_1 = sriov_compute_list[0] + compute_2 = sriov_compute_list[0] + if len(sriov_compute_list) > 1: + compute_1 = sriov_compute_list[0] + compute_2 = sriov_compute_list[1] + vm1_sriov_ip = '55.1.1.11' + vm1_mgmt_ip = '33.1.1.11' + vm2_sriov_ip = '55.1.1.12' + vm2_mgmt_ip = '33.1.1.12' + (self.vn3_name, self.vn3_subnets) = ("SRIOV-MGMT-VN", ["33.1.1.0/24"]) + vn3_fixture = self.useFixture( + VNFixture( + project_name=self.inputs.project_name, + connections=self.connections, + inputs=self.inputs, + vn_name=self.vn3_name, + subnets=self.vn3_subnets,)) + + sriov_vm1_name = 'SRIOV_VM1' + sriov_vm2_name = 'SRIOV_VM2' + + (self.vn1_name, self.vn1_subnets) = ("SRIOV-Test-VN1", ["55.1.1.0/24"]) + vn3_fixture = self.useFixture( + VNFixture( + project_name=self.inputs.project_name, + connections=self.connections, + inputs=self.inputs, + vn_name=self.vn3_name, + subnets=self.vn3_subnets,)) + + sriov_vm1_name = 'SRIOV_VM1' + sriov_vm2_name = 'SRIOV_VM2' + + (self.vn1_name, self.vn1_subnets) = ("SRIOV-Test-VN1", ["55.1.1.0/24"]) + + vn1_fixture = self.useFixture( + VNFixture( + project_name=self.inputs.project_name, + connections=self.connections, + inputs=self.inputs, + vn_name=self.vn1_name, + subnets=self.vn1_subnets, + sriov_enable=True, + sriov_provider_network=self.get_sriov_physnets(compute_1)[0], + sriov_vlan='200')) + assert vn1_fixture.verify_on_setup() + assert vn3_fixture.verify_on_setup() + + subnet1_objects = vn1_fixture.get_subnets() + subnet2_objects = vn3_fixture.get_subnets() + ports1 = {} + ports2 = {} + ports1['subnet1'] = vn1_fixture.create_port(vn1_fixture.vn_id, + subnet_id=subnet1_objects[0]['id'], ip_address=vm1_sriov_ip, sriov=True) + ports1['subnet2'] = vn3_fixture.create_port(vn3_fixture.vn_id, + subnet_id=subnet2_objects[0]['id'],ip_address=vm1_mgmt_ip) + ports2['subnet1'] = vn1_fixture.create_port(vn1_fixture.vn_id, + subnet_id=subnet1_objects[0]['id'], 
ip_address=vm2_sriov_ip, sriov=True) + ports2['subnet2'] = vn3_fixture.create_port(vn3_fixture.vn_id, + subnet_id=subnet2_objects[0]['id'],ip_address=vm2_mgmt_ip) + + vm1_fixture = self.useFixture( + VMFixture( + project_name=self.inputs.project_name, + connections=self.connections, + vn_objs=[ + vn3_fixture.obj, + vn1_fixture.obj], + image_name='ubuntu', + vm_name=sriov_vm1_name, + node_name=compute_1, + port_ids = [ports1['subnet1']['id'],ports1['subnet2']['id']])) + vm2_fixture = self.useFixture( + VMFixture( + project_name=self.inputs.project_name, + connections=self.connections, + vn_objs=[ + vn3_fixture.obj, + vn1_fixture.obj], + image_name='ubuntu', + vm_name=sriov_vm2_name, + node_name=compute_2, + port_ids = [ports2['subnet1']['id'],ports2['subnet2']['id']])) + + + # Wait till vm is up + assert vm1_fixture.wait_till_vm_is_up() + assert vm2_fixture.wait_till_vm_is_up() + + # Bring the intreface up forcefully + self.bringup_interface_forcefully(vm1_fixture) + self.bringup_interface_forcefully(vm2_fixture) + + # Configure IP address + cmd_to_pass1 = ['ifconfig eth1 %s' % (vm1_sriov_ip)] + vm1_fixture.run_cmd_on_vm(cmds=cmd_to_pass1, as_sudo=True, timeout=60) + cmd_to_pass2 = ['ifconfig eth1 %s' % (vm2_sriov_ip)] + vm2_fixture.run_cmd_on_vm(cmds=cmd_to_pass2, as_sudo=True, timeout=60) + + # Configure Higher MTU value + cmd_to_increase_mtu = ['ifconfig eth1 mtu 5000'] + vm1_fixture.run_cmd_on_vm(cmds=cmd_to_increase_mtu, as_sudo=True, timeout=60) + vm2_fixture.run_cmd_on_vm(cmds=cmd_to_increase_mtu, as_sudo=True, timeout=60) + vf_in_vm1=self.get_vf_in_use(vm1_fixture,self.get_sriov_pf(compute_1),self.get_sriov_mac(vm1_fixture,'eth1')) + vf_in_vm2=self.get_vf_in_use(vm2_fixture,self.get_sriov_pf(compute_2),self.get_sriov_mac(vm2_fixture,'eth1')) + self.set_mtu_on_vf(vm1_fixture,self.get_sriov_pf(compute_1),vf_in_vm1,'200','5000') + self.set_mtu_on_vf(vm2_fixture,self.get_sriov_pf(compute_2),vf_in_vm2,'200','5000') + + assert vm2_fixture.ping_to_ip(vm1_sriov_ip, 
count='15', + other_opt='-I eth1',return_output=True,size='5000') + # End communication_between_two_sriov_vm_with_large_mtu diff --git a/serial_scripts/system_test/flow_tests/base.py b/serial_scripts/system_test/flow_tests/base.py index ca591c24f..082e9be6c 100644 --- a/serial_scripts/system_test/flow_tests/base.py +++ b/serial_scripts/system_test/flow_tests/base.py @@ -1,24 +1,12 @@ -import test +import test_v1 from common import isolated_creds -class BaseFlowTest(test.BaseTestCase): +class BaseFlowTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseFlowTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - cls.admin_inputs = cls.isolated_creds.get_admin_inputs() - cls.admin_connections = cls.isolated_creds.get_admin_connections() cls.quantum_h = cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib = cls.connections.vnc_lib @@ -30,8 +18,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(BaseFlowTest, cls).tearDownClass() # end tearDownClass diff --git a/serial_scripts/tor_scale/lib/config.py b/serial_scripts/tor_scale/lib/config.py index 3a8b2d733..e8f2b1dc6 100644 --- a/serial_scripts/tor_scale/lib/config.py +++ b/serial_scripts/tor_scale/lib/config.py @@ -43,7 +43,7 @@ def create_project(self, project_name): # time.sleep(4) try: self.project = project_test.ProjectFixture(project_name=self.project_name, auth=self.auth, - vnc_lib_h=self.vnc_lib, username=self.user, password=self.password, + username=self.user, password=self.password, connections=self.connections) self.project.setUp() except Exception 
as e: @@ -71,7 +71,7 @@ def create_vn( self.vn = vn_test.VNFixture(project_name=self.project_name, connections=self.connections, inputs=self.inputs, vn_name=self.vn_name, subnets=self.vn_subnet, project_obj=self.project_obj, - option='api', + option='contrail', vxlan_id=self.vxlan_id) self.vn.setUp() except Exception as e: diff --git a/serial_scripts/upgrade/base.py b/serial_scripts/upgrade/base.py index cc440b169..3e42e57cb 100644 --- a/serial_scripts/upgrade/base.py +++ b/serial_scripts/upgrade/base.py @@ -1,20 +1,14 @@ -import test +import test_v1 from common.connections import ContrailConnections from common.contrail_test_init import ContrailTestInit from common import isolated_creds from verify import BaseResource -class UpgradeBaseTest(test.BaseTestCase): +class UpgradeBaseTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(UpgradeBaseTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, cls.inputs, ini_file = cls.ini_file, logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib @@ -28,7 +22,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.res.cleanUp() - cls.isolated_creds.delete_tenant() super(UpgradeBaseTest, cls).tearDownClass() #end tearDownClass diff --git a/serial_scripts/upgrade/verify.py b/serial_scripts/upgrade/verify.py index fe30a3a80..e4e082fd3 100644 --- a/serial_scripts/upgrade/verify.py +++ b/serial_scripts/upgrade/verify.py @@ -23,7 +23,7 @@ from fabric.state import connections from scripts.securitygroup.config import ConfigSecGroup -class BaseResource(fixtures.Fixture, ConfigSvcChain, VerifySvcChain, BaseTestLbaas, BaseNeutronTest): +class 
BaseResource(ConfigSvcChain, VerifySvcChain, BaseTestLbaas, BaseNeutronTest): def setUp(self, inputs, connections, logger): super(BaseResource , self).setUp() @@ -150,7 +150,7 @@ def setup_common_objects(self, inputs, connections): self.action_list = [] self.if_list = [['management', False], ['left', True], ['right', True]] - self.st_name = 'in_net_svc_template_1' + self.st_name = 'test_in_net_svc_template_1' si_prefix = 'in_net_svc_instance_' self.policy_name = 'policy_in_network' diff --git a/serial_scripts/vdns/base.py b/serial_scripts/vdns/base.py deleted file mode 100644 index a6dece927..000000000 --- a/serial_scripts/vdns/base.py +++ /dev/null @@ -1,445 +0,0 @@ -import test -from common.connections import ContrailConnections -from common import isolated_creds -from random import randint - -import os -import unittest -import fixtures -import testtools -import traceback -import signal -import traffic_tests -from common.contrail_test_init import ContrailTestInit -from vn_test import * -from quantum_test import * -from vnc_api_test import * -from nova_test import * -from vm_test import * -from common.connections import ContrailConnections -from floating_ip import * -from control_node import * -from policy_test import * -from multiple_vn_vm_test import * -from vdns_fixture import * -from contrail_fixtures import * -from vnc_api import vnc_api -from vnc_api.gen.resource_test import * -from tcutils.wrappers import preposttest_wrapper - -class BasevDNSRestartTest(test.BaseTestCase): - - @classmethod - def setUpClass(cls): - super(BasevDNSRestartTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - cls.quantum_h= 
cls.connections.quantum_h - cls.nova_h = cls.connections.nova_h - cls.vnc_lib= cls.connections.vnc_lib - cls.orch = cls.connections.orch - cls.agent_inspect= cls.connections.agent_inspect - cls.cn_inspect= cls.connections.cn_inspect - cls.analytics_obj=cls.connections.analytics_obj - cls.dnsagent_inspect = cls.connections.dnsagent_inspect - cls.api_s_inspect = cls.connections.api_server_inspect - #end setUpClass - - @classmethod - def tearDownClass(cls): - #cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() - super(BasevDNSRestartTest, cls).tearDownClass() - #end tearDownClass - - def verify_dns_record_order(self, record_order, test_type='test_record_order', record_num=10): - ''' This test tests DNS record order. - Round-Robin/Fixed/Random - ''' - random_number = randint(2500,5000) - vn1_ip = '10.10.10.1/24' - vn_name = 'vn' + str(random_number) - dns_server_name = 'vdns1' + str(random_number) - domain_name = 'juniper.net' - ttl = 100 - ipam_name = 'ipam1' + str(random_number) - project_fixture = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) - dns_data = VirtualDnsType( - domain_name=domain_name, dynamic_records_from_client=True, - default_ttl_seconds=ttl, record_order=record_order) - # Create VDNS server object. - vdns_fixt1 = self.useFixture(VdnsFixture( - self.inputs, self.connections, vdns_name=dns_server_name, dns_data=dns_data)) - result, msg = vdns_fixt1.verify_on_setup() - self.assertTrue(result, msg) - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fixt1.vdns_fq_name) - ipam_mgmt_obj = IpamType( - ipam_dns_method='virtual-dns-server', ipam_dns_server=dns_server) - # Associate VDNS with IPAM. 
- ipam_fixt1 = self.useFixture(IPAMFixture(ipam_name, vdns_obj= - vdns_fixt1.obj, project_obj=project_fixture, ipamtype=ipam_mgmt_obj)) - # Launch VN with IPAM - vn_fixt = self.useFixture( - VNFixture( - self.connections, self.inputs, vn_name=vn_name, - subnets=[vn1_ip], ipam_fq_name= ipam_fixt1.fq_name, option='contrail')) - vn_quantum_obj = self.orch.get_vn_obj_if_present( - vn_name=vn_name, project_id=project_fixture.uuid) - vm_fixture = self.useFixture( - VMFixture(project_name=self.inputs.project_name, - connections=self.connections, vn_obj=vn_quantum_obj, vm_name='vm1-test')) - vm_fixture.verify_vm_launched() - vm_fixture.verify_on_setup() - vm_fixture.wait_till_vm_is_up() - - rec_ip_list = [] - i = 1 - j = 1 - k = 1 - l = 1 - verify_rec_name_list = [] - verify_rec_name_ip = {} - if test_type == 'recordscaling': - self.logger.info('Creating %s number of records', record_num) - for num in range(1, record_num): - rec = 'test-rec-' + str(j) + '-' + str(i) + str(random_number) - self.logger.info('Creating record %s', rec) - recname = 'rec' + str(j) + '-' + str(i) + str(random_number) - rec_ip = str(l) + '.' + str(k) + '.' + str(j) + '.' + str(i) - vdns_rec_data = VirtualDnsRecordType( - recname, 'A', 'IN', rec_ip, ttl) - vdns_rec_fix = self.useFixture(VdnsRecordFixture( - self.inputs, self.connections, rec, vdns_fixt1.get_fq_name(), vdns_rec_data)) - sleep(1) - i = i + 1 - if i > 253: - j = j + 1 - i = 1 - if j > 253: - k = k + 1 - j = 1 - i = 1 - # sleep for some time after configuring 10 records. 
- if num % 10 == 0: - sleep(0.5) - # pic some random records for nslookup verification - if num % 100 == 0: - verify_rec_name_list.append(recname) - verify_rec_name_ip[recname] = rec_ip - # Sleep for some time - DNS takes some time to sync with BIND - # server - self.logger.info( - 'Sleep for 180sec to sync vdns server with vdns record entry') - sleep(180) - # Verify NS look up works for some random records values - self.logger.info('****NSLook up verification****') - import re - for rec in verify_rec_name_list: - cmd = 'nslookup ' + rec - vm_fixture.run_cmd_on_vm(cmds=[cmd]) - result = vm_fixture.return_output_cmd_dict[cmd] - result = result.replace("\r", "") - result = result.replace("\t", "") - result = result.replace("\n", " ") - m_obj = re.search( - r"Address:[0-9.]*#[0-9]*\s*.*Name:(.*\.juniper\.net)\s*Address:\s*([0-9.]*)", result) - if not m_obj: - #import pdb; pdb.set_trace() - self.assertTrue( - False, 'record search is failed,please check syntax of the regular expression/NSlookup is failed') - print ('vm_name is ---> %s \t ip-address is ---> %s' % - (m_obj.group(1), m_obj.group(2))) - else: - for num in range(1, record_num): - rec = 'test-rec-' + str(j) + '-' + str(i) + str(random_number) - rec_ip = '1.' + '1.' + str(j) + '.' 
+ str(i) - vdns_rec_data = VirtualDnsRecordType( - 'test1', 'A', 'IN', rec_ip, ttl) - vdns_rec_fix = self.useFixture(VdnsRecordFixture( - self.inputs, self.connections, rec, vdns_fixt1.get_fq_name(), vdns_rec_data)) - result, msg = vdns_rec_fix.verify_on_setup() - i = i + 1 - if i > 253: - j = j + 1 - i = 1 - rec_ip_list.append(rec_ip) - sleep(2) - # Get the NS look up record Verify record order - cmd = 'nslookup test1' - vm_fixture.run_cmd_on_vm(cmds=[cmd]) - result = vm_fixture.return_output_cmd_dict[cmd] - result = result.replace("\r", "") - result = result.replace("\t", "") - result = result.replace("\n", " ") - import re - m_obj = re.search( - r"Address:[0-9.]*#[0-9]*\s*Name:test1.juniper.net\s*(Address:\s*[0-9.]*)", result) - if not m_obj: - self.assertTrue( - False, 'record search is failed,please check syntax of regular expression') - print m_obj.group(1) - dns_record = m_obj.group(1).split(':') - dns_record_ip = dns_record[1].lstrip() - next_ip = self.next_ip_in_list(rec_ip_list, dns_record_ip) - for rec in rec_ip_list: - vm_fixture.run_cmd_on_vm(cmds=[cmd]) - result = vm_fixture.return_output_cmd_dict[cmd] - result = result.replace("\r", "") - result = result.replace("\t", "") - result = result.replace("\n", " ") - m_obj = re.search( - r"Address:[0-9.]*#[0-9]*\s*Name:test1.juniper.net\s*(Address:\s*[0-9.]*)", result) - print m_obj.group(1) - dns_record = m_obj.group(1).split(':') - dns_record_ip1 = dns_record[1].lstrip() - if record_order == 'round-robin': - if next_ip != dns_record_ip1: - print "\n VDNS records are not sent in round-robin order" - self.assertTrue( - False, 'VDNS records are not sent in round-robin order') - next_ip = self.next_ip_in_list(rec_ip_list, dns_record_ip1) - if record_order == 'random': - if dns_record_ip1 not in rec_ip_list: - print "\n VDNS records are not sent in random order" - self.assertTrue( - False, 'VDNS records are not sent random order') - if record_order == 'fixed': - if dns_record_ip != dns_record_ip1: - print "\n 
VDNS records are not sent fixed in fixed order" - self.assertTrue( - False, 'VDNS records are not sent fixed in fixed order') - return True - # end test_dns_record_order - - # This Test test vdns functionality with control node restart - def vdns_with_cn_dns_agent_restart(self, restart_process): - ''' - This test test the functionality of controlnode/dns/agent restart with vdns feature. - ''' - if restart_process == 'ControlNodeRestart': - if len(set(self.inputs.bgp_ips)) < 2: - raise self.skipTest( - "Skipping Test. At least 2 control nodes required to run the control node switchover test") - vn1_ip = '10.10.10.1/24' - vm_list = ['vm1-test', 'vm2-test'] - vn_name = 'vn1' - dns_server_name = 'vdns1' - domain_name = 'juniper.net' - ttl = 100 - ipam_name = 'ipam1' - rev_zone = vn1_ip.split('.') - rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2])) - rev_zone = rev_zone + '.in-addr.arpa' - project_fixture = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) - dns_data = VirtualDnsType( - domain_name=domain_name, dynamic_records_from_client=True, - default_ttl_seconds=ttl, record_order='random') - # Create VDNS server object. - vdns_fixt1 = self.useFixture(VdnsFixture( - self.inputs, self.connections, vdns_name=dns_server_name, dns_data=dns_data)) - result, msg = vdns_fixt1.verify_on_setup() - self.assertTrue(result, msg) - dns_server = IpamDnsAddressType( - virtual_dns_server_name=vdns_fixt1.vdns_fq_name) - ipam_mgmt_obj = IpamType( - ipam_dns_method='virtual-dns-server', ipam_dns_server=dns_server) - # Associate VDNS with IPAM. 
- ipam_fixt1 = self.useFixture(IPAMFixture(ipam_name, vdns_obj= - vdns_fixt1.obj, project_obj=project_fixture, ipamtype=ipam_mgmt_obj)) - # Launch VN with IPAM - vn_fixt = self.useFixture( - VNFixture( - self.connections, self.inputs, vn_name=vn_name, - subnets=[vn1_ip], ipam_fq_name= ipam_fixt1.fq_name, option='contrail')) - vm_fixture = {} - vm_dns_exp_data = {} - # Launch VM with VN Created above. This test verifies on launch of VM agent should updated DNS 'A' and 'PTR' records - # The following code will verify the same. Also, we should be able ping - # with VM name. - for vm_name in vm_list: - vn_quantum_obj = self.orch.get_vn_obj_if_present( - vn_name=vn_name, project_id=project_fixture.uuid) - vm_fixture[vm_name] = self.useFixture( - VMFixture(project_name=self.inputs.project_name, connections=self.connections, vn_obj=vn_quantum_obj, vm_name=vm_name)) - vm_fixture[vm_name].verify_vm_launched() - vm_fixture[vm_name].verify_on_setup() - vm_fixture[vm_name].wait_till_vm_is_up() - vm_ip = vm_fixture[vm_name].get_vm_ip_from_vm( - vn_fq_name=vm_fixture[vm_name].vn_fq_name) - vm_rev_ip = vm_ip.split('.') - vm_rev_ip = '.'.join( - (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0])) - vm_rev_ip = vm_rev_ip + '.in-addr.arpa' - # Frame the Expected DNS data for VM, one for 'A' record and - # another 'PTR' record. - rec_name = vm_name + "." + domain_name - vm_dns_exp_data[vm_name] = [{'rec_data': vm_ip, 'rec_type': 'A', 'rec_class': 'IN', 'rec_ttl': str( - ttl), 'rec_name': rec_name, 'installed': 'yes', 'zone': domain_name}, {'rec_data': rec_name, 'rec_type': 'PTR', 'rec_class': 'IN', 'rec_ttl': str(ttl), 'rec_name': vm_rev_ip, 'installed': 'yes', 'zone': rev_zone}] - self.verify_vm_dns_data(vm_dns_exp_data[vm_name]) - # ping between two vms which are in same subnets by using name. 
- self.assertTrue(vm_fixture['vm1-test'] - .ping_with_certainty(ip=vm_list[1])) - active_controller = vm_fixture['vm1-test'].get_active_controller() - self.logger.info('Active control node from the Agent %s is %s' % - (vm_fixture['vm1-test'].vm_node_ip, active_controller)) - # Control node restart/switchover. - if restart_process == 'ControlNodeRestart': - # restart the Active control node - self.logger.info('restarting active control node') - self.inputs.restart_service( - 'contrail-control', [active_controller]) - sleep(5) - # Check the control node shifted to other control node - new_active_controller = vm_fixture[ - 'vm1-test'].get_active_controller() - self.logger.info('Active control node from the Agent %s is %s' % - (vm_fixture['vm1-test'].vm_node_ip, new_active_controller)) - if new_active_controller == active_controller: - self.logger.error( - 'Control node switchover fail. Old Active controlnode was %s and new active control node is %s' % - (active_controller, new_active_controller)) - return False - self.inputs.restart_service( - 'contrail-control', [new_active_controller]) - if restart_process == 'DnsRestart': - # restart the dns process in the active control node - self.logger.info( - 'restart the dns process in the active control node') - self.inputs.restart_service('contrail-dns', [active_controller]) - if restart_process == 'NamedRestart': - # restart the named process in the active control node - self.logger.info( - 'restart the named process in the active control node') - self.inputs.restart_service('contrail-named', [active_controller]) - # restart the agent process in the compute node - if restart_process == 'AgentRestart': - self.logger.info('restart the agent process') - for compute_ip in self.inputs.compute_ips: - self.inputs.restart_service('contrail-vrouter', [compute_ip]) - if restart_process == 'scp': - self.logger.info('scp using name of vm') - vm_fixture['vm1-test'].put_pub_key_to_vm() - vm_fixture['vm2-test'].put_pub_key_to_vm() - size 
= '1000' - file = 'testfile' - y = 'ls -lrt %s' % file - cmd_to_check_file = [y] - cmd_to_sync = ['sync'] - create_result = True - transfer_result = True - - self.logger.info("-" * 80) - self.logger.info("FILE SIZE = %sB" % size) - self.logger.info("-" * 80) - self.logger.info('Creating a file of the specified size on %s' % - vm_fixture['vm1-test'].vm_name) - - self.logger.info('Transferring the file from %s to %s using scp' % - (vm_fixture['vm1-test'].vm_name, vm_fixture['vm2-test'].vm_name)) - vm_fixture[ - 'vm1-test'].check_file_transfer(dest_vm_fixture=vm_fixture['vm2-test'], mode='scp', size=size) - - self.logger.info('Checking if the file exists on %s' % - vm_fixture['vm2-test'].vm_name) - vm_fixture['vm2-test'].run_cmd_on_vm(cmds=cmd_to_check_file) - output = vm_fixture['vm2-test'].return_output_cmd_dict[y] - print output - if size in output: - self.logger.info( - 'File of size %sB transferred via scp properly' % size) - else: - transfer_result = False - self.logger.error( - 'File of size %sB not transferred via scp ' % size) - assert transfer_result, 'File not transferred via scp' - # Verify after controlnode/dns/agent/named process restart ping vm's by - # using name. - for vm_name in vm_list: - msg = "Ping by using name %s is failed after controlnode/dns/agent/named process restart. 
Dns server should resolve VM name to IP" % ( - vm_name) - self.assertTrue(vm_fixture[vm_name] - .ping_with_certainty(ip=vm_name), msg) - self.verify_vm_dns_data(vm_dns_exp_data[vm_name]) - return True - # end test_vdns_controlnode_switchover - - def next_ip_in_list(self, iplist, item): - item_index = iplist.index(item) - next_item = None - # if it not end of list, return next element in the list - if item_index != (len(iplist) - 1): - next_item = iplist[item_index + 1] - # if the item is on end of list, the next element will be first element - # in the list - else: - next_item = iplist[0] - return next_item - - def verify_ns_lookup_data(self, vm_fix, cmd, expectd_data): - self.logger.info("Inside verify_ns_lookup_data") - self.logger.info( - "cmd string is %s and expected data %s for searching" % - (cmd, expectd_data)) - vm_fix.run_cmd_on_vm(cmds=[cmd]) - result = vm_fix.return_output_cmd_dict[cmd] - print ('\n result %s' % result) - if (result.find(expectd_data) == -1): - return False - return True - - def verify_vm_dns_data(self, vm_dns_exp_data): - self.logger.info("Inside verify_vm_dns_data") - result = True - dnsinspect_h = self.dnsagent_inspect[self.inputs.bgp_ips[0]] - dns_data = dnsinspect_h.get_dnsa_config() - vm_dns_act_data = [] - msg = '' - - # Traverse over expected record data - found_rec = False - for expected in vm_dns_exp_data: - # Get te actual record data from introspect - for act in dns_data: - for rec in act['records']: - if rec['rec_name'] in expected['rec_name']: - vm_dns_act_data = rec - found_rec = True - break - if found_rec: - break - if not vm_dns_act_data: - self.logger.info("DNS record match not found in dns agent") - return False - found_rec = False - # Compare the DNS entries populated dynamically on VM Creation. 
- self.logger.info( - "actual record data %s ,\n expected record data %s" % - (vm_dns_act_data, expected)) - if(vm_dns_act_data['rec_name'] not in expected['rec_name']): - result = result and False - if (vm_dns_act_data['rec_data'] not in expected['rec_data']): - msg = 'DNS record data info is not matching\n' - result = result and False - if(vm_dns_act_data['rec_type'] != expected['rec_type']): - msg = msg + 'DNS record_type info is not matching\n' - result = result and False - if(vm_dns_act_data['rec_ttl'] != expected['rec_ttl']): - msg = msg + 'DNS record ttl info is not matching\n' - result = result and False - if(vm_dns_act_data['rec_class'] != expected['rec_class']): - msg = msg + 'DNS record calss info is not matching\n' - result = result and False - vm_dns_act_data = [] - self.assertTrue(result, msg) - self.logger.info("Out of verify_vm_dns_data") - return True - # end verify_vm_dns_data diff --git a/serial_scripts/vdns/test_vdns.py b/serial_scripts/vdns/test_vdns.py index dd713a1d9..e04a576d1 100755 --- a/serial_scripts/vdns/test_vdns.py +++ b/serial_scripts/vdns/test_vdns.py @@ -10,6 +10,7 @@ import fixtures import testtools import traceback +import difflib from policy_test import * from multiple_vn_vm_test import * @@ -18,7 +19,7 @@ from tcutils.pkgs.Traffic.traffic.core.profile import create, ContinuousProfile from tcutils.pkgs.Traffic.traffic.core.helpers import Host from tcutils.pkgs.Traffic.traffic.core.helpers import Sender, Receiver -from base import BasevDNSRestartTest +from common.vdns.base import BasevDNSTest from common import isolated_creds import inspect from vnc_api import vnc_api @@ -29,8 +30,10 @@ from control_node import * from user_test import UserFixture import test +from common.contrail_test_init import ContrailTestInit +from tcutils.contrail_status_check import ContrailStatusChecker -class TestvDNSRestart(BasevDNSRestartTest): +class TestvDNSRestart(BasevDNSTest): @classmethod def setUpClass(cls): @@ -111,6 +114,612 @@ def 
test_vdns_named_restart(self): self.vdns_with_cn_dns_agent_restart(restart_process) return True + @preposttest_wrapper + def scale_vdns_records_restart_named(self): + ''' + This test verifies vdns record scaling as well as record update after named restart. + This test case is specifically for bug verification of bug ID 1583566 : [vDNS]: Records lost on named restart if scaled configuration is present + Steps: + 1. Create vDNS server + 2. Create 5000 records for the server. + 3. Create IPAM and VN objects. + 4. Restart contrail-named process on all control nodes. + 5. Wait for the zone.jnl file to restore all the 5000 VDNS records. + 6. Verify that all 5000 records are present in the zone file + Pass criteria: All records should get restored on all control nodes. + Maintainer: pulkitt@juniper.net + ''' + vn_name = 'vn-vdns' + domain_name = 'juniper.net' + ttl = 100 + dns_data = VirtualDnsType( domain_name=domain_name, dynamic_records_from_client=True, + default_ttl_seconds=ttl, record_order='random', reverse_resolution=True, + external_visible = True) + ipam_name = 'ipamTest' + dns_server_name = "vdnsTest" + # Create VDNS server object. 
+ vdns_fixt = self.useFixture(VdnsFixture( + self.inputs, self.connections, vdns_name=dns_server_name, dns_data=dns_data)) + result, msg = vdns_fixt.verify_on_setup() + self.assertTrue(result, msg) + # Create IPAM management object + dns_server = IpamDnsAddressType( + virtual_dns_server_name=vdns_fixt.vdns_fq_name) + # Subnetting 11.11.0.0/16 into maximum 1024 subnets and mask of /26 + vn_ip = '11.11.0.0/16' + network, prefix = vn_ip.split('/') + record_counter = 5000 + for x in range(0,record_counter): + record_name = "vDNSrecForAliasVM%d" % x + actual_vm_name = "VM%d" % x + vdns_rec_data = VirtualDnsRecordType(record_name, 'CNAME', 'IN', actual_vm_name, ttl) + vdns_rec_fix = self.useFixture(VdnsRecordFixture( + self.inputs, self.connections, record_name, vdns_fixt.fq_name, vdns_rec_data)) + ipam_mgmt_obj = IpamType(ipam_dns_method='virtual-dns-server', ipam_dns_server=dns_server) + # Associate VDNS with IPAM. + ipam_fixt = self.useFixture(IPAMFixture(ipam_name, vdns_obj= vdns_fixt.obj, + project_obj=self.project, ipamtype=ipam_mgmt_obj)) + assert ipam_fixt.verify_on_setup() + vn_fixt = self.useFixture(VNFixture(self.connections, self.inputs,vn_name=vn_name, + subnets=[vn_ip], ipam_fq_name=ipam_fixt.fq_name, option='contrail')) + assert vn_fixt.verify_on_setup() + self.logger.info("All configuration complete.") + # restarting contrail-named on all control nodes + self.inputs.stop_service('contrail-named', self.inputs.bgp_ips) + sleep(10) + self.inputs.start_service('contrail-named', self.inputs.bgp_ips) + for ip in self.inputs.bgp_ips: + assert self.inputs.confirm_service_active('contrail-named', ip) + zoneFile = vn_fixt.vn_fq_name.split(':')[0] +'-' + dns_server_name + '.' 
+ domain_name + '.zone.jnl' + cmd = "ls -al /etc/contrail/dns/%s" % zoneFile + for node in self.inputs.bgp_ips: + output = self.inputs.run_cmd_on_server(node,cmd) + if "No such file or directory" in output: + msg = "Zone file not found for the configured domain on control node %s" % node + self.logger.error("Zone file not found for the configured domain on control node %s" % node) + result = False + assert result, msg + else: + outputList = output.split() + fileSize = outputList[4] + while 1: + self.logger.debug("Waiting till the record file get updated completely") + sleep(10) + output = self.inputs.run_cmd_on_server(node,cmd) + outputList = output.split() + if outputList[4] == fileSize: + self.logger.debug("Size of zone file is constant now. File update completed.") + break + fileSize = outputList[4] + # Command to Sync the jnl file with zone file. + newcmd = "contrail-rndc -c /etc/contrail/dns/contrail-rndc.conf sync" + self.inputs.run_cmd_on_server(node,newcmd) + readFileCmd = "cat /etc/contrail/dns/%s" % zoneFile.rstrip('.jnl') + fileContent = self.inputs.run_cmd_on_server(node, readFileCmd) + lines = fileContent.split('\n') + count = 0 + for lineNumber in range(0,len(lines)): + line = lines[lineNumber].split() + if len(line) > 1: + if line[1] == 'CNAME': + count = count +1 + self.logger.debug("Number of records file on control node %s are %d." % (node, count)) + if count ==5000: + self.logger.info("All records restored correctly on control node %s" % node ) + else : + self.logger.error("Records lost after named restart on control node %s" % node) + msg = "records lost after restart of named." + result = False + assert result, msg + # end scale_vdns_records_restart_named + + @preposttest_wrapper + def test_agent_query_all_dns_servers_policy_fixed(self): + '''Agent to request all available named servers from Disocvery Server while + connecting to two in the list to send DNS records, but querying all. 
+ This script is specifically written to test + Bug Id 1551987 : "Agent to query all available bind server for vDNS records" + Also, this script assumes that DNS policy is *fixed* which is the default value. + Steps: + 1. Create a VN with IPAM having Virtual DNS configured. + 2. Create 2 VMs each on both compute nodes. + 3. Check that ping between 2 VMs on same compute and across different compute + 4. Ping local records and verify in introspect logs that DNS query is sent to all control nodes in cluster + 5. Search for all DNS servers assigned to vrouter agents by discovery + 6. Stop the *contrail-named* processes on both assigned DNS servers to vrouter agent of compute 1. + 7. Verify all cases of nslookup during and after subscription TTL expiry. + Pass criteria: DNS queries should reach every DNS server in network and any server can resolve it. + Entry Criteria: Minimum 3 control nodes and 2 Compute nodes are required for this test case. + Maintainer: pulkitt@juniper.net''' + if len(self.inputs.bgp_ips) <2 or len(self.inputs.compute_ips) < 2: + skip = True + msg = "Skipping this test case as minimum control nodes required are 2" + raise testtools.TestCase.skipException(msg) + vm_list = ['vm1-agent1', 'vm2-agent1', 'vm1-agent2', 'vm2-agent2'] + vn_name = 'vn1' + vn_nets = {'vn1' : '10.10.10.0/24'} + dns_server_name = 'vdns1' + domain_name = 'juniper.net' + ttl = 100 + ipam_name = 'ipam1' + dns_data = VirtualDnsType( + domain_name=domain_name, dynamic_records_from_client=True, + default_ttl_seconds=ttl, record_order='random', reverse_resolution=True) + vdns_fixt1 = self.useFixture(VdnsFixture(self.inputs, self.connections, + vdns_name=dns_server_name, dns_data=dns_data)) + result, msg = vdns_fixt1.verify_on_setup() + self.assertTrue(result, msg) + dns_server = IpamDnsAddressType( + virtual_dns_server_name=vdns_fixt1.vdns_fq_name) + ipam_mgmt_obj = IpamType( + ipam_dns_method='virtual-dns-server', ipam_dns_server=dns_server) + # Associate IPAM with VDNS server 
Object + ipam_fixt1 = self.useFixture(IPAMFixture(ipam_name, vdns_obj=vdns_fixt1.obj, + project_obj=self.project, ipamtype=ipam_mgmt_obj)) + # Launch VM with VN Created above. + vn_fixt = self.useFixture(VNFixture(self.connections, self.inputs,\ + vn_name=vn_name, subnets=[vn_nets['vn1']], \ + ipam_fq_name=ipam_fixt1.fq_name, option='contrail')) + vm_fixture = {} + for vm in vm_list: + if 'agent1' in vm: + vm_fixture[vm] = self.useFixture(VMFixture(project_name= + self.inputs.project_name, connections=self.connections, + vn_obj=vn_fixt.obj,vm_name=vm, + node_name = self.inputs.compute_names[0])) + elif 'agent2' in vm: + vm_fixture[vm] = self.useFixture(VMFixture(project_name= + self.inputs.project_name, connections=self.connections, + vn_obj=vn_fixt.obj, vm_name=vm, + node_name = self.inputs.compute_names[1])) + for vm in vm_list: + assert vm_fixture[vm].wait_till_vm_is_up() + # Verify connectivity between all Agents after configuration of VMs + self.assertTrue(vm_fixture['vm1-agent1'].ping_to_ip(ip='vm1-agent2', count=2)) + self.assertTrue(vm_fixture['vm1-agent1'].ping_to_ip(ip='vm2-agent1', count=2)) + self.assertTrue(vm_fixture['vm1-agent2'].ping_to_ip(ip='vm1-agent1', count=2)) + self.assertTrue(vm_fixture['vm1-agent2'].ping_to_ip(ip='vm2-agent2', count=2)) + # Ping from vm of agent 1 to VM of agent 2 and verify query sent to all DNS servers + inspect_h_agent1 = self.agent_inspect[vm_fixture['vm1-agent1'].vm_node_ip] + output_1 = str(inspect_h_agent1.get_vna_dns_query_to_named()) + self.assertTrue(vm_fixture['vm1-agent1'].ping_to_ip(ip='vm1-agent2', count=2)) + output_2 = str(inspect_h_agent1.get_vna_dns_query_to_named()) + diff = difflib.ndiff(output_1,output_2) + delta = ''.join(x[2:] for x in diff if x.startswith('+ ')) + # Getting the list of DNS servers in use by every Compute node as per discovery server assignment + for i in range(0,len(self.inputs.bgp_ips)): + if "DNS query sent to named server : %s" % self.inputs.bgp_control_ips[i] in delta: + 
self.logger.debug("DNS query sent successfully to DNS server on %s" % + self.inputs.bgp_control_ips[i]) + else: + self.logger.error("DNS query not sent to DNS server running on %s" % + self.inputs.bgp_control_ips[i]) + errmsg = "DNS query not sent to all DNS servers in the network" + self.logger.error(errmsg) + assert False, errmsg + dns_list_all_compute_nodes = [] + for entry in self.inputs.compute_ips: + inspect_h = self.agent_inspect[entry] + dns_list_all_compute_nodes.append( + inspect_h.get_vna_discovered_dns_server()) + self.logger.debug("The compute node %s is connected to DNS servers: %s" + %(entry,dns_list_all_compute_nodes[-1])) + # Specifically for fixed policy, verifying that all agents connected to same set of DNS servers + for i in range(0,(len(dns_list_all_compute_nodes)-1)): + if set(dns_list_all_compute_nodes[i]) == set(dns_list_all_compute_nodes[i+1]): + self.logger.info("All computes connected to same DNS server as expected") + else: + errmsg = "Computes connected to different DNS servers. 
This is not expected with policy as fixed" + self.logger.error(errmsg) + assert False, errmsg + # Making the named down on Control nodes associated with 1st vrouter agent + # Verifying that DNS resolve the queries as per the assigned DNS servers + for nodes in dns_list_all_compute_nodes[0]: + index = self.inputs.bgp_control_ips.index(nodes) + self.inputs.stop_service("contrail-named",[self.inputs.bgp_ips[index]]) + self.addCleanup(self.inputs.start_service,'contrail-named',\ + [self.inputs.bgp_ips[index]]) + verify = "once" + cmd_for_agent2 = 'nslookup -timeout=1 vm2-agent2' + '| grep ' +\ + '\'' + vm_fixture['vm2-agent2'].vm_ip + '\'' + cmd_for_agent1 = 'nslookup -timeout=1 vm2-agent1' + '| grep ' +\ + '\'' + vm_fixture['vm2-agent1'].vm_ip + '\'' + for i in range(0,360): + new_dns_list = [] + for entry in self.inputs.compute_ips[0],self.inputs.compute_ips[1]: + inspect_h = self.agent_inspect[entry] + new_dns_list.append( + inspect_h.get_vna_discovered_dns_server()) + self.logger.debug("The compute node %s is connected to DNS servers: %s" + %(entry,new_dns_list[-1])) + if i == 0 and new_dns_list[0] == new_dns_list[1] and\ + new_dns_list[0]==dns_list_all_compute_nodes[0]: + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent1'],\ + cmd_for_agent2, vm_fixture['vm2-agent2'].vm_ip, + expected_result = False) + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent1'],\ + cmd_for_agent1, vm_fixture['vm2-agent1'].vm_ip, + expected_result = False) + continue + elif new_dns_list[0] != new_dns_list[1] and verify=="once" : + if new_dns_list[0] == dns_list_all_compute_nodes[0] and \ + new_dns_list[1] != dns_list_all_compute_nodes[1]: + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent1'],\ + cmd_for_agent1, vm_fixture['vm2-agent1'].vm_ip, + expected_result = False) + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent2'],\ + cmd_for_agent1, vm_fixture['vm2-agent1'].vm_ip, + expected_result = False) + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent2'],\ 
+ cmd_for_agent2, vm_fixture['vm2-agent2'].vm_ip) + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent1'],\ + cmd_for_agent2, vm_fixture['vm2-agent2'].vm_ip) + elif new_dns_list[0] != dns_list_all_compute_nodes[0] and \ + new_dns_list[1] == dns_list_all_compute_nodes[1]: + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent2'],\ + cmd_for_agent2, vm_fixture['vm2-agent2'].vm_ip, + expected_result = False) + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent1'],\ + cmd_for_agent2, vm_fixture['vm2-agent2'].vm_ip, + expected_result = False) + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent1'],\ + cmd_for_agent1, vm_fixture['vm2-agent1'].vm_ip) + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent2'],\ + cmd_for_agent1, vm_fixture['vm2-agent1'].vm_ip) + verify="done" + continue + elif new_dns_list[0] != dns_list_all_compute_nodes[0] and \ + new_dns_list[1] != dns_list_all_compute_nodes[1]: + # Allowing some time for new DNS server to populate the records. + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent1'],\ + cmd_for_agent2, vm_fixture['vm2-agent2'].vm_ip) + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent1'],\ + cmd_for_agent1, vm_fixture['vm2-agent1'].vm_ip) + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent2'],\ + cmd_for_agent2, vm_fixture['vm2-agent2'].vm_ip) + assert self.verify_ns_lookup_data(vm_fixture['vm1-agent2'],\ + cmd_for_agent1, vm_fixture['vm2-agent1'].vm_ip) + self.assertTrue(vm_fixture['vm1-agent1'].ping_to_ip(ip='vm2-agent2', count=2)) + self.assertTrue(vm_fixture['vm1-agent1'].ping_to_ip(ip='vm2-agent1', count=2)) + self.assertTrue(vm_fixture['vm1-agent2'].ping_to_ip(ip='vm2-agent2', count=2)) + self.assertTrue(vm_fixture['vm1-agent2'].ping_to_ip(ip='vm2-agent1', count=2)) + break + else: + self.logger.debug("Waiting till new DNS server assignment takes place for agent 1") + sleep(5) + continue + + class InitForZoneTests: + ''' + Initialisation of variables to be used in 2 different test cases + 
"test_vdns_with_same_zone" and "test_vdns_with_diff_zone" + ''' + def __init__(self): + self.project_list = ['project1', + 'project2', + 'project3', + 'project4', + 'project5', + 'project6'] + self.ipam_list = {'project1': 'ipam1', + 'project2': 'ipam2', + 'project3': 'ipam3', + 'project4': 'ipam4', + 'project5': 'ipam5', + 'project6': 'ipam6'} + self.vn_list = {'project1': 'vn1', + 'project2': 'vn2', + 'project3': 'vn3', + 'project4': 'vn4', + 'project5': 'vn5', + 'project6': 'vn6'} + self.vn_nets = {'project1': ['10.10.10.0/24'], + 'project2': ['20.10.10.0/24'], + 'project3': ['30.10.10.0/24'], + 'project4': ['40.10.10.0/24'], + 'project5': ['50.10.10.0/24'], + 'project6': ['60.10.10.0/24']} + self.vm_list = {'project1': 'vm1', + 'project2': 'vm2', + 'project3': 'vm3', + 'project4': 'vm4', + 'project5': 'vm5', + 'project6': 'vm6'} + self.proj_user = {'project1': 'user1', + 'project2': 'user2', + 'project3': 'user3', + 'project4': 'user4', + 'project5': 'user5', + 'project6': 'user6'} + self.proj_pass = {'project1': 'user1', + 'project2': 'user2', + 'project3': 'user3', + 'project4': 'user4', + 'project5': 'user5', + 'project6': 'user6'} + self.proj_vdns = {'project1': 'vdns1', + 'project2': 'vdns2', + 'project3': 'vdns3', + 'project4': 'vdns4', + 'project5': 'vdns5', + 'project6': 'vdns6'} + + @preposttest_wrapper + def test_vdns_with_same_zone(self): + ''' Test vdns in same zone with multi projects/vdns-servers ''' + var_obj = self.InitForZoneTests() + vdns_fixt1 = {} + ipam_mgmt_obj = {} + for project in var_obj.project_list: + dns_server_name = var_obj.proj_vdns[project] + self.logger.info( + 'Creating vdns server:%s in project:%s', + dns_server_name, + project) + domain_name = 'juniper.net' + ttl = 100 + # VDNS creation + dns_data = VirtualDnsType( + domain_name=domain_name, dynamic_records_from_client=True, + default_ttl_seconds=ttl, record_order='random') + vdns_fixt1[project] = self.useFixture( + VdnsFixture( + self.inputs, + self.connections, + 
vdns_name=dns_server_name, + dns_data=dns_data)) + result, msg = vdns_fixt1[project].verify_on_setup() + self.assertTrue(result, msg) + dns_server = IpamDnsAddressType( + virtual_dns_server_name=vdns_fixt1[project].vdns_fq_name) + ipam_mgmt_obj[project] = IpamType( + ipam_dns_method='virtual-dns-server', + ipam_dns_server=dns_server) + ipam_fixt = {} + vn_fixt = {} + vm_fix = {} + pol_fixt = {} + for proj in var_obj.project_list: + # User creation + user_fixture = self.useFixture( + UserFixture( + connections=self.admin_connections, + username=var_obj.proj_user[proj], + password=var_obj.proj_pass[proj])) + # Project creation + project_fixture = self.useFixture( + ProjectFixture( + project_name=proj, + username=var_obj.proj_user[proj], + password=var_obj.proj_pass[proj], + connections=self.admin_connections)) + user_fixture.add_user_to_tenant(proj, var_obj.proj_user[proj], 'admin') + project_fixture.set_user_creds(var_obj.proj_user[proj], var_obj.proj_pass[proj]) + project_inputs = ContrailTestInit( + self.ini_file, + stack_user=project_fixture.project_username, + stack_password=project_fixture.project_user_password, + stack_tenant=proj, + logger=self.logger) + project_connections = ContrailConnections(project_inputs, + logger=self.logger) + self.logger.info( + 'Default SG to be edited for allow all on project: %s' % proj) + project_fixture.set_sec_group_for_allow_all(proj, 'default') + # Ipam creation + ipam_fixt[proj] = self.useFixture(IPAMFixture(var_obj.ipam_list[proj], vdns_obj= vdns_fixt1[proj].obj, + project_obj=project_fixture, ipamtype=ipam_mgmt_obj[proj])) + # VN Creation + vn_fixt[proj] = self.useFixture( + VNFixture( + project_name=proj, + connections=project_connections, + vn_name=var_obj.vn_list[proj], + inputs=project_inputs, + subnets=var_obj.vn_nets[proj], + ipam_fq_name=ipam_fixt[proj].getObj().get_fq_name())) + vn_quantum_obj = self.orch.get_vn_obj_if_present(vn_name=var_obj.vn_list[proj], project_id=project_fixture.uuid) + # VM creation + 
vm_fix[proj] = self.useFixture( + VMFixture( + project_name=proj, + connections=project_connections, + vn_obj=vn_quantum_obj, + vm_name=var_obj.vm_list[proj])) + vm_fix[proj].verify_vm_launched() + vm_fix[proj].verify_on_setup() + vm_fix[proj].wait_till_vm_is_up() + msg = "Ping by using name %s is failed. Dns server \ + should resolve VM name to IP" % (var_obj.vm_list[proj]) + self.assertTrue( + vm_fix[proj].ping_with_certainty(ip=var_obj.vm_list[proj]), msg) + vm_ip = vm_fix[proj].get_vm_ip_from_vm( + vn_fq_name=vm_fix[proj].vn_fq_name) + vm_rev_ip = vm_ip.split('.') + vm_rev_ip = '.'.join( + (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0])) + vm_rev_ip = vm_rev_ip + '.in-addr.arpa' + rev_zone = var_obj.vn_nets[proj][0].split('/')[0].split('.') + rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2])) + rev_zone = rev_zone + '.in-addr.arpa' + # Frame the Expected DNS data for VM, one for 'A' record and + # another 'PTR' record. + rec_name = var_obj.vm_list[proj] + "." + domain_name + agent_inspect_h = self.agent_inspect[vm_fix[proj].vm_node_ip] + assigned_dns_ips = agent_inspect_h.get_vna_discovered_dns_server() + vm_dns_exp_data = [{'rec_data': vm_ip, + 'rec_type': 'A', + 'rec_class': 'IN', + 'rec_ttl': str(ttl), + 'rec_name': rec_name, + 'installed': 'yes', + 'zone': domain_name}, + {'rec_data': rec_name, + 'rec_type': 'PTR', + 'rec_class': 'IN', + 'rec_ttl': str(ttl), + 'rec_name': vm_rev_ip, + 'installed': 'yes', + 'zone': rev_zone}] + self.verify_vm_dns_data(vm_dns_exp_data, assigned_dns_ips[0]) + vm_dns_exp_data = [] + self.logger.info( + 'Restart supervisor-config & supervisor-control and test ping') + for bgp_ip in self.inputs.bgp_ips: + self.inputs.restart_service('supervisor-control', [bgp_ip]) + for cfgm_ip in self.inputs.cfgm_ips: + self.inputs.restart_service('supervisor-config', [cfgm_ip]) + status_checker = ContrailStatusChecker(self.inputs) + self.logger.debug("Waiting for all the services to be UP") + assert 
status_checker.wait_till_contrail_cluster_stable()[0],\ + "All services could not come UP after restart" + for proj in var_obj.project_list: + msg = "Ping by using name %s is failed. Dns server \ + should resolve VM name to IP" % (var_obj.vm_list[proj]) + self.assertTrue( + vm_fix[proj].ping_with_certainty(ip=var_obj.vm_list[proj]), msg) + return True + # end test_vdns_with_same_zone + + + @preposttest_wrapper + def test_vdns_with_diff_zone(self): + ''' Test vdns in different zones with multi projects ''' + var_obj = self.InitForZoneTests() + vdns_fixt1 = {} + ipam_mgmt_obj = {} + for project in var_obj.project_list: + dns_server_name = var_obj.proj_vdns[project] + self.logger.info( + 'Creating vdns server:%s in project:%s', + dns_server_name, + project) + domain_name = '%s.net' % (project) + ttl = 100 + # VDNS creation + dns_data = VirtualDnsType( + domain_name=domain_name, dynamic_records_from_client=True, + default_ttl_seconds=ttl, record_order='random') + vdns_fixt1[project] = self.useFixture( + VdnsFixture( + self.inputs, + self.connections, + vdns_name=dns_server_name, + dns_data=dns_data)) + result, msg = vdns_fixt1[project].verify_on_setup() + self.assertTrue(result, msg) + dns_server = IpamDnsAddressType( + virtual_dns_server_name=vdns_fixt1[project].vdns_fq_name) + ipam_mgmt_obj[project] = IpamType( + ipam_dns_method='virtual-dns-server', + ipam_dns_server=dns_server) + ipam_fixt = {} + vn_fixt = {} + vm_fix = {} + pol_fixt = {} + for proj in var_obj.project_list: + # User creation + user_fixture = self.useFixture( + UserFixture( + connections=self.admin_connections, + username=var_obj.proj_user[proj], + password=var_obj.proj_pass[proj])) + # Project creation + project_fixture = self.useFixture( + ProjectFixture( + project_name=proj, + username=var_obj.proj_user[proj], + password=var_obj.proj_pass[proj], + connections=self.admin_connections)) + user_fixture.add_user_to_tenant(proj, var_obj.proj_user[proj], 'admin') + 
project_fixture.set_user_creds(var_obj.proj_user[proj], var_obj.proj_pass[proj]) + project_inputs = ContrailTestInit( + self.ini_file, + stack_user=project_fixture.project_username, + stack_password=project_fixture.project_user_password, + stack_tenant=proj, + logger=self.logger) + project_connections = ContrailConnections(project_inputs, + logger=self.logger) + self.logger.info( + 'Default SG to be edited for allow all on project: %s' % proj) + project_fixture.set_sec_group_for_allow_all(proj, 'default') + # Ipam creation + ipam_fixt[proj] = self.useFixture(IPAMFixture(var_obj.ipam_list[proj], vdns_obj= vdns_fixt1[proj].obj, + project_obj=project_fixture, ipamtype=ipam_mgmt_obj[proj])) + # VN Creation + vn_fixt[proj] = self.useFixture( + VNFixture( + project_name=proj, + connections=project_connections, + vn_name=var_obj.vn_list[proj], + inputs=project_inputs, + subnets=var_obj.vn_nets[proj], + ipam_fq_name=ipam_fixt[proj].getObj().get_fq_name())) + vn_quantum_obj = self.orch.get_vn_obj_if_present(vn_name=var_obj.vn_list[proj], project_id=project_fixture.uuid) + # VM creation + vm_fix[proj] = self.useFixture( + VMFixture( + project_name=proj, + connections=project_connections, + vn_obj=vn_quantum_obj, + vm_name=var_obj.vm_list[proj])) + vm_fix[proj].verify_vm_launched() + vm_fix[proj].verify_on_setup() + vm_fix[proj].wait_till_vm_is_up() + msg = "Ping by using name %s is failed. 
Dns server \ + should resolve VM name to IP" % (var_obj.vm_list[proj]) + self.assertTrue( + vm_fix[proj].ping_with_certainty(ip=var_obj.vm_list[proj]), msg) + vm_ip = vm_fix[proj].get_vm_ip_from_vm( + vn_fq_name=vm_fix[proj].vn_fq_name) + vm_rev_ip = vm_ip.split('.') + vm_rev_ip = '.'.join( + (vm_rev_ip[3], vm_rev_ip[2], vm_rev_ip[1], vm_rev_ip[0])) + vm_rev_ip = vm_rev_ip + '.in-addr.arpa' + rev_zone = var_obj.vn_nets[proj][0].split('/')[0].split('.') + rev_zone = '.'.join((rev_zone[0], rev_zone[1], rev_zone[2])) + rev_zone = rev_zone + '.in-addr.arpa' + # Frame the Expected DNS data for VM, one for 'A' record and + # another 'PTR' record. + domain_name = '%s.net' % (proj) + rec_name = var_obj.vm_list[proj] + "." + domain_name + agent_inspect_h = self.agent_inspect[vm_fix[proj].vm_node_ip] + assigned_dns_ips = agent_inspect_h.get_vna_discovered_dns_server() + vm_dns_exp_data = [{'rec_data': vm_ip, + 'rec_type': 'A', + 'rec_class': 'IN', + 'rec_ttl': str(ttl), + 'rec_name': rec_name, + 'installed': 'yes', + 'zone': domain_name}, + {'rec_data': rec_name, + 'rec_type': 'PTR', + 'rec_class': 'IN', + 'rec_ttl': str(ttl), + 'rec_name': vm_rev_ip, + 'installed': 'yes', + 'zone': rev_zone}] + self.verify_vm_dns_data(vm_dns_exp_data, assigned_dns_ips[0]) + vm_dns_exp_data = [] + self.logger.info( + 'Restart supervisor-config & supervisor-control and test ping') + for bgp_ip in self.inputs.bgp_ips: + self.inputs.restart_service('supervisor-control', [bgp_ip]) + for cfgm_ip in self.inputs.cfgm_ips: + self.inputs.restart_service('supervisor-config', [cfgm_ip]) + status_checker = ContrailStatusChecker(self.inputs) + self.logger.debug("Waiting for all the services to be UP") + assert status_checker.wait_till_contrail_cluster_stable()[0],\ + "All services could not come UP after restart" + for proj in var_obj.project_list: + msg = "Ping by using name %s is failed. 
Dns server \ + should resolve VM name to IP" % (var_obj.vm_list[proj]) + self.assertTrue( + vm_fix[proj].ping_with_certainty(ip=var_obj.vm_list[proj]), msg) + return True + # end test_vdns_with_diff_zone + if __name__ == '__main__': unittest.main() # end of TestVdnsFixture diff --git a/serial_scripts/vgw/base.py b/serial_scripts/vgw/base.py index c2c6ca35a..c016bea30 100644 --- a/serial_scripts/vgw/base.py +++ b/serial_scripts/vgw/base.py @@ -1,4 +1,4 @@ -import test +import test_v1 from common.connections import ContrailConnections from common import isolated_creds from project_test import * @@ -6,22 +6,14 @@ from vm_test import * -class BaseVgwTest(test.BaseTestCase): +class BaseVgwTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseVgwTest, cls).setUpClass() - cls.connections = ContrailConnections( - cls.inputs, - project_name=cls.inputs.project_name, - username=cls.inputs.stack_user, - password=cls.inputs.stack_password, - logger=cls.logger) - #cls.connections= ContrailConnections(cls.inputs) cls.quantum_h = cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib = cls.connections.vnc_lib - #cls.logger= cls.inputs.logger cls.agent_inspect = cls.connections.agent_inspect cls.cn_inspect = cls.connections.cn_inspect cls.analytics_obj = cls.connections.analytics_obj @@ -30,8 +22,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - # cls.isolated_creds.delete_user() - # cls.isolated_creds.delete_tenant() for vn in cls.vn_fixture_dict: vn.verify_is_run = False vn.cleanUp() @@ -42,7 +32,6 @@ def tearDownClass(cls): def setup_common_objects(cls): cls.project_fixture = ProjectFixture( - vnc_lib_h=cls.vnc_lib, project_name=cls.inputs.project_name, connections=cls.connections) cls.project_fixture.setUp() @@ -71,7 +60,8 @@ def setup_common_objects(cls): cls.vn_fixture_dict = [] for key in cls.vgw_vn_list: vn = VNFixture( - project_name=cls.inputs.project_name, + project_name=key.split(':')[1], + option='contrail', 
connections=cls.connections, inputs=cls.inputs, vn_name=key.split(":")[3], diff --git a/serial_scripts/vgw/test_vgw.py b/serial_scripts/vgw/test_vgw.py index 7e2bd0e3c..7336297c1 100755 --- a/serial_scripts/vgw/test_vgw.py +++ b/serial_scripts/vgw/test_vgw.py @@ -8,7 +8,6 @@ from tcutils.wrappers import preposttest_wrapper from vgw import base from vgw.verify import VerifyVgwCases -from vgw.verify import VerifyDynamicVgwCases class TestVgwCases(base.BaseVgwTest, VerifyVgwCases): @@ -63,27 +62,3 @@ def test_vgw_with_restart_of_vgw_node(self): return self.vgw_restart_of_vgw_node() - -class TestDynamicVgwCases(base.BaseVgwTest, VerifyDynamicVgwCases): - - @classmethod - def setUpClass(cls): - super(TestDynamicVgwCases, cls).setUpClass() - - def runTest(self): - pass - # end runTest - - @preposttest_wrapper - def test_dynamic_vgw_compute_ping(self): - ''' - Test to validate dynamic VGW creation and communication from overlay VM to compute IP - 1: Create VGW interface dynamicaly - 2. Create corresponding vn and launch VM - 3. Ping from VM to the compute where VGW is created - 4. 
Delete VGW interface - - Pass criteria: Step 3 should pass - Maintainer: chhandak@juniper.net - ''' - return self.verify_dynamic_vgw_compute_ping() diff --git a/serial_scripts/vgw/verify.py b/serial_scripts/vgw/verify.py index 32405f81c..948db1c4e 100644 --- a/serial_scripts/vgw/verify.py +++ b/serial_scripts/vgw/verify.py @@ -39,7 +39,7 @@ def verify_vgw_with_fip(self, compute_type): if compute_type == 'same': vm_compute = self.inputs.host_data[vgw_compute]['name'] else: - host_list.remove(vgw_compute) + host_list.remove(self.inputs.host_data[vgw_compute]['name']) vm_compute = self.inputs.host_data[host_list[0]]['name'] else: vm_compute = self.inputs.host_data[host_list[0]]['name'] @@ -105,7 +105,7 @@ def verify_vgw_with_native_vm(self, compute_type): if compute_type == 'same': vm_compute = self.inputs.host_data[vgw_compute]['name'] else: - host_list.remove(vgw_compute) + host_list.remove(self.inputs.host_data[vgw_compute]['name']) vm_compute = self.inputs.host_data[host_list[0]]['name'] else: vm_compute = self.inputs.host_data[host_list[0]]['name'] @@ -231,7 +231,7 @@ def vgw_restart_of_vgw_node(self): if key.split(":")[3] == self.vn_fixture_dict[0].vn_name: vgw_compute = self.vgw_vn_list[ key]['host'].split("@")[1] - host_list.remove(vgw_compute) + host_list.remove(self.inputs.host_data[vgw_compute]['name']) vm_compute = self.inputs.host_data[host_list[0]]['name'] else: vm_compute = self.inputs.host_data[host_list[0]]['name'] @@ -293,81 +293,3 @@ def vgw_restart_of_vgw_node(self): return True -class VerifyDynamicVgwCases(): - - def verify_dynamic_vgw_compute_ping(self): - - result = True - host_list = [] - vgw_compute = None - vm_compute = None - vgw_intf = 'vgw1' - vgw_subnets = ['11.1.1.0/24'] - route = '0.0.0.0/0' - vgw_fq_name= 'default-domain:admin:vgwvn:vgwvn' - vm1_name= "vgw_vm" - host_list = self.connections.nova_h.get_hosts() - if len(host_list) > 1: - vm_compute = self.inputs.host_data[host_list[0]] - vgw_compute = self.inputs.host_data[host_list[1]] 
- else: - vm_compute = self.inputs.host_data[host_list[0]] - vgw_compute = self.inputs.host_data[host_list[0]] - - - # Configure VGW - self.logger.info("Creating VGW interface %s dynamically on %s" %(vgw_intf, vgw_compute['name'])) - self.logger.info("Configuring VGW on the Compute %s", (vgw_compute['ip'])) - cmd1 = "export PYTHONPATH=/usr/share/pyshared/contrail_vrouter_api/gen_py/instance_service" - - vgw_args = "--oper create --interface %s --subnets %s --vrf %s --routes %s" \ - %(vgw_intf,vgw_subnets[0],vgw_fq_name, route) - cmd2="python /opt/contrail/utils/provision_vgw_interface.py %s" %(vgw_args) - cmd= cmd1 + ";" + cmd2 - output = self.inputs.run_cmd_on_server(vgw_compute['ip'], cmd, - vgw_compute['username'], - vgw_compute['password']) - # Creating Virtual network with VGW FQ name - vn_fixture = self.useFixture( - VNFixture( - project_name=vgw_fq_name.split(":")[1], - connections=self.connections, - inputs=self.inputs, - vn_name=vgw_fq_name.split(":")[2], - subnets=vgw_subnets)) - # Verification of VN - assert vn_fixture.verify_on_setup() - - # Creation of VM and validation - vm1_fixture = self.useFixture( - VMFixture( - project_name=vgw_fq_name.split(":")[1], - connections=self.connections, - vn_obj=vn_fixture.obj, - vm_name=vm1_name, - node_name=vm_compute['name'])) - - # Verification on VM - assert vm1_fixture.verify_on_setup() - - self.logger.info("Now trying to ping underlay compute ip %s from VM %s" %(vgw_compute['ip'],vm1_name)) - if not vm1_fixture.ping_with_certainty(vgw_compute['ip']): - result = result and False - - # Delete VGW - self.logger.info("Deleting VGW interface %s on %s" %(vgw_intf, vgw_compute['name'])) - vgw_args = "--oper delete --interface %s --subnets %s --vrf %s --routes %s" \ - %(vgw_intf,vgw_subnets[0],vgw_fq_name, route) - cmd3="python /opt/contrail/utils/provision_vgw_interface.py %s" %(vgw_args) - cmd= cmd1 + ";" + cmd3 - output = self.inputs.run_cmd_on_server(vgw_compute['ip'], cmd, - vgw_compute['username'], - 
vgw_compute['password']) - - if not result: - self.logger.error( - 'Test ping to underlay compute ip from VM %s failed' % (vm1_name)) - - assert result - - return True diff --git a/serial_scripts/vgw/vgw_test_resource.py b/serial_scripts/vgw/vgw_test_resource.py index fc4afd297..65e663393 100755 --- a/serial_scripts/vgw/vgw_test_resource.py +++ b/serial_scripts/vgw/vgw_test_resource.py @@ -36,7 +36,7 @@ def setUp(self): def setup_common_objects(self): self.project_fixture = self.useFixture(ProjectFixture( - vnc_lib_h=self.vnc_lib, project_name=self.inputs.project_name, connections=self.connections)) + project_name=self.inputs.project_name, connections=self.connections)) self.logger.info( 'Default SG to be edited for allow all on project: %s' % self.inputs.project_name) diff --git a/serial_scripts/vm_regression/base.py b/serial_scripts/vm_regression/base.py index 79cdd2ba1..9533ec13a 100644 --- a/serial_scripts/vm_regression/base.py +++ b/serial_scripts/vm_regression/base.py @@ -1,28 +1,18 @@ -import test +import test_v1 from common.connections import ContrailConnections from common import isolated_creds from vm_test import VMFixture from vn_test import VNFixture import os -class BaseVnVmTest(test.BaseTestCase): +class BaseVnVmTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(BaseVnVmTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__, \ - cls.inputs, ini_file = cls.ini_file, \ - logger = cls.logger) - cls.isolated_creds.setUp() - cls.project = cls.isolated_creds.create_tenant() - cls.isolated_creds.create_and_attach_user_to_tenant() - cls.inputs = cls.isolated_creds.get_inputs() - cls.connections = cls.isolated_creds.get_conections() - #cls.connections= ContrailConnections(cls.inputs) cls.quantum_h= cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib= cls.connections.vnc_lib -# cls.logger= cls.inputs.logger cls.agent_inspect= cls.connections.agent_inspect cls.cn_inspect= 
cls.connections.cn_inspect cls.analytics_obj=cls.connections.analytics_obj @@ -31,8 +21,6 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - #cls.isolated_creds.delete_user() - cls.isolated_creds.delete_tenant() super(BaseVnVmTest, cls).tearDownClass() #end tearDownClass diff --git a/serial_scripts/vm_regression/test_vm_serial.py b/serial_scripts/vm_regression/test_vm_serial.py index 5aa367dca..fd5d79d9b 100644 --- a/serial_scripts/vm_regression/test_vm_serial.py +++ b/serial_scripts/vm_regression/test_vm_serial.py @@ -16,7 +16,10 @@ from common import isolated_creds import inspect from tcutils.util import skip_because +from tcutils.tcpdump_utils import start_tcpdump_for_intf,\ + stop_tcpdump_for_intf, verify_tcpdump_count import test +from tcutils.contrail_status_check import ContrailStatusChecker class TestBasicVMVN0(BaseVnVmTest): @@ -184,12 +187,16 @@ def test_ipam_persistence_across_restart_reboots(self): for bgp_ip in self.inputs.bgp_ips: self.inputs.restart_service('contrail-control',[bgp_ip]) pass - sleep(30) + cluster_status, error_nodes = ContrailStatusChecker().wait_till_contrail_cluster_stable() + assert cluster_status, 'Cluster is not stable after restart' self.logger.info('Will check if the ipam persists and ping b/w VMs is still successful') - assert ipam_obj.verify_on_setup() - assert vm1_fixture.ping_to_ip( vm2_fixture.vm_ip ) + + msg = 'VM verification failed after process restarts' + assert vm1_fixture.verify_on_setup(), msg + assert vm2_fixture.verify_on_setup(), msg + assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip) return True @preposttest_wrapper @@ -308,7 +315,7 @@ def test_nova_com_sch_restart_with_multiple_vn_vm(self): create_multiple_vn_and_multiple_vm_fixture( connections=self.connections, vn_name=vn_name, vm_name=vm1_name, inputs=self.inputs, project_name=self.inputs.project_name, - subnets=vn_subnets, vn_count=vn_count_for_test, vm_count=1, subnet_count=1, image_name='cirros-0.3.0-x86_64-uec', + subnets=vn_subnets, 
vn_count=vn_count_for_test, vm_count=1, subnet_count=1, image_name='cirros', flavor='m1.tiny')) time.sleep(100) try: @@ -385,13 +392,13 @@ def test_process_restart_in_policy_between_vns(self): policy_name=policy2_name, rules_list=rev_rules, inputs=self.inputs, connections=self.connections)) - vn1_fixture = self.create_vn(vn1_name, vn1_subnets,option = 'api') + vn1_fixture = self.create_vn(vn1_name, vn1_subnets, option='contrail') assert vn1_fixture.verify_on_setup() vn1_fixture.bind_policies( [policy1_fixture.policy_fq_name], vn1_fixture.vn_id) self.addCleanup(vn1_fixture.unbind_policies, vn1_fixture.vn_id, [policy1_fixture.policy_fq_name]) - vn2_fixture = self.create_vn(vn2_name, vn2_subnets, option = 'api') + vn2_fixture = self.create_vn(vn2_name, vn2_subnets, option='contrail') assert vn2_fixture.verify_on_setup() vn2_fixture.bind_policies( [policy2_fixture.policy_fq_name], vn2_fixture.vn_id) @@ -428,7 +435,7 @@ def test_process_restart_in_policy_between_vns(self): vn3_name = get_random_name('vn3') vn3_subnets = ["192.168.4.0/24"] - vn3_fixture = self.create_vn(vn3_name, vn3_subnets,option = 'api') + vn3_fixture = self.create_vn(vn3_name, vn3_subnets, option='contrail') assert vn1_fixture.verify_on_setup() vm3_fixture = self.create_vm(vn1_fixture, vn1_vm2_name) @@ -465,7 +472,7 @@ def test_process_restart_with_multiple_vn_vm(self): create_multiple_vn_and_multiple_vm_fixture( connections=self.connections, vn_name=vn_name, vm_name=vm1_name, inputs=self.inputs, project_name=self.inputs.project_name, - subnets=vn_subnets, vn_count=vn_count_for_test, vm_count=1, subnet_count=1, image_name='cirros-0.3.0-x86_64-uec', + subnets=vn_subnets, vn_count=vn_count_for_test, vm_count=1, subnet_count=1, image_name='cirros', flavor='m1.tiny')) time.sleep(100) try: @@ -661,6 +668,8 @@ def test_control_node_switchover(self): 'With Peer %s peering is not Established. 
Current State %s ' % (entry['peer'], entry['state'])) + assert vm1_fixture.verify_on_setup(), 'VM Verification failed' + assert vm2_fixture.verify_on_setup(), 'VM Verification failed' # Check the ping self.logger.info('Checking the ping between the VM again') assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip) @@ -805,7 +814,7 @@ def test_max_vm_flows(self): sleep(3) # 4. Poll live traffic & verify VM flow count - flow_cmd = 'flow -l | grep %s -A1 |' % vm1_fixture.vm_ip + flow_cmd = 'flow -l | grep %s -A2 |' % vm1_fixture.vm_ip flow_cmd = flow_cmd + ' grep "Action" | grep -v "Action:D(FlowLim)" | wc -l' sample_time = 2 vm_flow_list=[] @@ -824,7 +833,7 @@ def test_max_vm_flows(self): % sample_time) vm_flow_list.sort(reverse=True) - if vm_flow_list[0] > int(1.1*vm_flow_limit): + if vm_flow_list[0] > int(1.4*vm_flow_limit): self.logger.error("TEST FAILED.") self.logger.error("VM flow count seen is greater than configured.") result = False @@ -851,4 +860,94 @@ def test_max_vm_flows(self): return result # end test_max_vm_flows + @test.attr(type=['sanity']) + @preposttest_wrapper + def test_underlay_broadcast_traffic_handling(self): + ''' Test the underlay brocast traffic handling by vrouter. (Bug-1545229). + 1. Send broadcast traffic from one compute node. + 2. Other compute in same subnet should receive that traffic. + 3. Receiving compute should treat this traffic as underlay. + 4. Compute should not replicate the packet and send the copy back. + Pass criteria: Step 3-4 should pass + Maintainer : chhandak@juniper.net + ''' + if (len(self.inputs.compute_ips) < 2): + raise self.skipTest( + "Skipping Test. 
At least 2 compute node required to run the test") + result = True + + # Find ignore brocast exiting value + ignore_broadcasts={} + cmd='cat /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts' + for item in self.inputs.compute_ips: + ignore_broadcasts[item]=self.inputs.run_cmd_on_server( + item, cmd, + self.inputs.host_data[item]['username'], + self.inputs.host_data[item]['password']) + + # Set ignore brocast to false + cmd='echo "0" > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts' + for item in self.inputs.compute_ips: + self.inputs.run_cmd_on_server( + item, cmd, + self.inputs.host_data[item]['username'], + self.inputs.host_data[item]['password']) + + # Find the Brocast address from first compute + cmd='ifconfig | grep %s' %(self.inputs.host_data[item]['host_control_ip']) + output=self.inputs.run_cmd_on_server( + item, cmd, + self.inputs.host_data[item]['username'], + self.inputs.host_data[item]['password']) + broadcast_address=output.split(" ")[3].split(":")[1] + + # Start tcpdump on receiving compute + inspect_h = self.agent_inspect[self.inputs.compute_ips[1]] + comp_intf = inspect_h.get_vna_interface_by_type('eth') + if len(comp_intf) == 1: + comp_intf = comp_intf[0] + self.logger.info('Agent interface name: %s' % comp_intf) + compute_ip = self.inputs.compute_ips[1] + compute_user = self.inputs.host_data[self.inputs.compute_ips[1]]['username'] + compute_password = self.inputs.host_data[self.inputs.compute_ips[1]]['password'] + filters = "host %s" %(broadcast_address) + + (session, pcap) = start_tcpdump_for_intf(compute_ip, compute_user, + compute_password, comp_intf, filters, self.logger) + + sleep(5) + + # Ping broadcast address + self.logger.info( + 'Pinging broacast address %s from compute %s' %(broadcast_address,\ + self.inputs.host_data[self.inputs.compute_ips[0]]['host_control_ip'])) + packet_count = 10 + cmd='ping -c %s -b %s' %(packet_count, broadcast_address) + output=self.inputs.run_cmd_on_server( + self.inputs.compute_ips[0], cmd, + 
self.inputs.host_data[item]['username'], + self.inputs.host_data[item]['password']) + sleep(5) + + # Stop tcpdump + stop_tcpdump_for_intf(session, pcap, self.logger) + + # Set back the ignore_broadcasts to original value + for item in self.inputs.compute_ips: + cmd='echo "%s" > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts' %(ignore_broadcasts[item]) + self.inputs.run_cmd_on_server( + item, cmd, + self.inputs.host_data[item]['username'], + self.inputs.host_data[item]['password']) + + # Analyze pcap + assert verify_tcpdump_count(self, session, pcap, exp_count=packet_count), "There should only be %s\ + packet from source %s on compute %s" %(packet_count, broadcast_address, compute_ip) + self.logger.info( + 'Packet count matched: Compute %s has receive only %s packet from source IP %s.\ + No duplicate packet seen' %(compute_ip, packet_count, broadcast_address)) + return result + + # end test_underlay_brodcast_traffic_handling + # end TestBasicVMVN0 diff --git a/serial_scripts/flow_tests/__init__.py b/serial_scripts/vrouter/__init__.py similarity index 100% rename from serial_scripts/flow_tests/__init__.py rename to serial_scripts/vrouter/__init__.py diff --git a/serial_scripts/vrouter/test_fat_flow_serial.py b/serial_scripts/vrouter/test_fat_flow_serial.py new file mode 100644 index 000000000..dda65586a --- /dev/null +++ b/serial_scripts/vrouter/test_fat_flow_serial.py @@ -0,0 +1,232 @@ +from tcutils.wrappers import preposttest_wrapper +from common.vrouter.base import BaseVrouterTest +import test +from common.servicechain.config import ConfigSvcChain +from common.servicechain.verify import VerifySvcChain +from tcutils.util import get_random_name +import random + +AF_TEST = 'v6' + +class FatFlowSerial(BaseVrouterTest, ConfigSvcChain, VerifySvcChain): + + @classmethod + def setUpClass(cls): + super(FatFlowSerial, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + super(FatFlowSerial, cls).tearDownClass() + + @preposttest_wrapper + def 
test_fat_flow_inter_vn_inter_node(self): + """ + Description: Verify Fat flow for inter-VN inter-Node traffic and with protocol based flow aging set + Steps: + 1. launch 2 VN and launch 2 client VMs on same node and server VM from other VN on different node. + 2. on server VM, config Fat flow for udp port 53. + 3. add flow aging for udp port 53 as 80 sec. + 4. from both client VM, send UDP traffic to server on port 53 twice with diff. src ports + Pass criteria: + 1. on client VMs compute, 4 set of flows and on server compute, 2 set of flows should be created + 2. on server compute, flow's source port should be 0 for Fat flow + 3. flow should be deleted after 60 sec + """ + vn_fixtures = self.create_vns(count=2, rt_number='10000') + self.verify_vns(vn_fixtures) + vn1_fixture = vn_fixtures[0] + vn2_fixture = vn_fixtures[1] + + compute_hosts = self.orch.get_hosts() + if len(compute_hosts) < 2: + raise self.skipTest("Skipping test case," + "this test needs atleast 2 compute nodes") + client_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=2, + node_name=compute_hosts[0]) + server_fixtures = self.create_vms(vn_fixture= vn2_fixture,count=1, + node_name=compute_hosts[1]) + self.verify_vms(client_fixtures) + self.verify_vms(server_fixtures) + + #Configure Fat flow on server VM + proto = 'udp' + port = 53 + server_vmi_id = server_fixtures[0].get_vmi_ids().values() + fat_flow_config = {'proto':proto,'port':port} + self.add_fat_flow_to_vmis(server_vmi_id, fat_flow_config) + + #Set udp aging timeout to 80 sec + flow_timeout = 80 + self.add_proto_based_flow_aging_time(proto, port, flow_timeout) + + self.verify_fat_flow_with_traffic(client_fixtures,server_fixtures[0], + proto, port) + + self.logger.info("Verifying if Fat flow gets " + "deleted after aging timeout, sleeping for %s seconds" % ( + flow_timeout)) + self.sleep(flow_timeout) + self.verify_fat_flow_with_traffic(client_fixtures, + server_fixtures[0], + proto, port, + traffic=False, expected_flow_count=0, + 
fat_flow_count=0) + + self.logger.info("Fat flow got deleted after aging timeout as expected") + + @preposttest_wrapper + def test_add_delete_fat_flow_config(self): + """ + Description: Verify adding and deleting Fat flow config, and verify flow after config deletion + Steps: + 1. launch 1 VN and launch 3 VMs in it.client VMs on same node and server VM on different node. + 2. on server VM, config Fat flow for udp port 53. + 3. from both client VM, send UDP traffic to server on port 53 twice with diff. src ports + 4. delete the Fat flow config and verify the flow again + Pass criteria: + 1. when Fat config is added, Fat flow should be created + 2. when Fat config is deleted, Fat flow should be not be created + """ + vn_fixtures = self.create_vns(count=1) + self.verify_vns(vn_fixtures) + vn1_fixture = vn_fixtures[0] + + compute_hosts = self.orch.get_hosts() + if len(compute_hosts) < 2: + raise self.skipTest("Skipping test case," + "this test needs atleast 2 compute nodes") + client_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=2, + node_name=compute_hosts[0]) + server_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=1, + node_name=compute_hosts[1]) + self.verify_vms(client_fixtures) + self.verify_vms(server_fixtures) + + #Configure Fat flow on server VM + proto = 'udp' + port = 53 + server_vmi_id = server_fixtures[0].get_vmi_ids().values() + fat_flow_config = {'proto':proto,'port':port} + self.add_fat_flow_to_vmis(server_vmi_id, fat_flow_config) + + self.verify_fat_flow_with_traffic(client_fixtures,server_fixtures[0], + proto, port) + + self.remove_fat_flow_on_vmis(server_vmi_id, fat_flow_config) + self.delete_all_flows_on_vms_compute(server_fixtures + client_fixtures) + self.verify_fat_flow_with_traffic(client_fixtures,server_fixtures[0], + proto, port, fat_flow_count=0) + + + @preposttest_wrapper + def test_fat_flow_with_service_chain(self): + """ + Description: Verify Fat flow with service chain + Steps: + 1. 
launch 2 VN and launch 2 client VMs on same node and server VM from other VN on different node. + 2. on server VM, config Fat flow for tcp port dport 10000. + 3. add flow aging for tcp port dport as 100 sec. + 4. create service instance, create policy and attach to both the VNs + 5. from both client VM, send TCP traffic to server on port dport twice with diff. src ports + Pass criteria: + 1. on server compute, Fat flows should be created + 2. Fat flow should be deleted after 60 sec + """ + compute_hosts = self.orch.get_hosts() + if len(compute_hosts) < 2: + raise self.skipTest("Skipping test case," + "this test needs atleast 2 compute nodes") + + vn_fixtures = self.create_vns(count=3) + self.verify_vns(vn_fixtures) + vn1_fixture = vn_fixtures[0] + vn2_fixture = vn_fixtures[1] + vn_mgmt = vn_fixtures[2] + + image = 'ubuntu' + client_fixtures = self.create_vms(vn_fixture= vn1_fixture,count=2, + node_name=compute_hosts[0], image_name=image) + server_fixtures = self.create_vms(vn_fixture= vn2_fixture,count=1, + node_name=compute_hosts[1], image_name=image) + + st_name = get_random_name("in_net_svc_template_1") + si_prefix = get_random_name("in_net_svc_instance") + "_" + policy_name = get_random_name("policy_in_network") + si_count = 1 + svc_mode = 'in-network' + + st_fixture, si_fixtures = self.config_st_si( + st_name, si_prefix, si_count, + mgmt_vn=vn_mgmt.vn_fq_name, + left_vn=vn1_fixture.vn_fq_name, + right_vn=vn2_fixture.vn_fq_name, svc_mode=svc_mode, + svc_img_name='vsrx', + project=self.inputs.project_name, st_version=1) + action_list = self.chain_si( + si_count, si_prefix, self.inputs.project_name) + rules = [ + { + 'direction': '<>', + 'protocol': 'any', + 'source_network': vn1_fixture.vn_fq_name, + 'src_ports': [0, -1], + 'dest_network': vn2_fixture.vn_fq_name, + 'dst_ports': [0, -1], + 'simple_action': None, + 'action_list': {'apply_service': action_list} + }, + ] + policy_fixture = self.config_policy(policy_name, rules) + vn1_policy_fix = 
self.attach_policy_to_vn( + policy_fixture, vn1_fixture) + vn2_policy_fix = self.attach_policy_to_vn( + policy_fixture, vn2_fixture) + + self.verify_vms(client_fixtures) + self.verify_vms(server_fixtures) + + proto = 'tcp' + dport = 10000 + baseport = random.randint(12000, 65000) + sport = [str(baseport), str(baseport+1)] + + #Configure Fat flow on server VM + server_vmi_id = server_fixtures[0].get_vmi_ids().values() + fat_flow_config = {'proto':proto,'port':dport} + self.add_fat_flow_to_vmis(server_vmi_id, fat_flow_config) + + #Set udp aging timeout to 100 sec + flow_timeout = 100 + self.add_proto_based_flow_aging_time(proto, dport, flow_timeout) + + #Start the tcp traffic + for vm in client_fixtures: + for port in sport: + assert self.send_nc_traffic(vm, server_fixtures[0], + port, dport, proto) + + #FAT flow verification + assert self.verify_fat_flow(client_fixtures, server_fixtures[0], + proto, dport, fat_flow_count=1) + + self.logger.info("Verifying if Fat flow gets " + "deleted after aging timeout, sleeping for %s seconds" % ( + flow_timeout)) + self.sleep(flow_timeout) + + assert self.verify_fat_flow(client_fixtures, server_fixtures[0], + proto, dport, fat_flow_count=0) + + self.logger.info("Fat flow got deleted after aging timeout as expected") + +class FatFlowSerialIpv6(FatFlowSerial): + @classmethod + def setUpClass(cls): + super(FatFlowSerialIpv6, cls).setUpClass() + cls.inputs.set_af(AF_TEST) + + def is_test_applicable(self): + if self.inputs.orchestrator == 'vcenter' and not self.orch.is_feature_supported('ipv6'): + return(False, 'Skipping IPv6 Test on vcenter setup') + return (True, None) + diff --git a/serial_scripts/vrouter/test_flow_scenarios.py b/serial_scripts/vrouter/test_flow_scenarios.py new file mode 100644 index 000000000..a0a3c80ca --- /dev/null +++ b/serial_scripts/vrouter/test_flow_scenarios.py @@ -0,0 +1,355 @@ +from string import Template +import time + + +from common.neutron.base import * +from tcutils.wrappers import 
preposttest_wrapper +from tcutils.util import skip_because +from tcutils.traffic_utils.hping_traffic import Hping3 +from compute_node_test import ComputeNodeFixture +from common.agent.flow_table import FlowTable +from tcutils.traffic_utils.base_traffic import BaseTraffic + + +class ExtendedFlowTestsBase(BaseNeutronTest): + + @classmethod + def setUpClass(cls): + super(ExtendedFlowTestsBase, cls).setUpClass() + cls.vnc_api_h = cls.vnc_lib + # end setUpClass + + @classmethod + def tearDownClass(cls): + super(ExtendedFlowTestsBase, cls).tearDownClass() + # end tearDownClass + + def check_flow_is_evicted(self, compute_fixture, flow_entry_obj): + ''' flow_entry_obj : FlowEntry object + ''' + (f_entry, r_flow_entry) = compute_fixture.get_flow_entry( + index=flow_entry_obj.index) + assert f_entry is None, ("TCP flow is not evicted ", + "after it is closed. Flow details: %s" % (f_entry.items)) + assert r_flow_entry is None, ("TCP flow is not evicted ", + "after it is closed. Flow details: %s" % (r_flow_entry.items)) + self.logger.info('TCP flow %s is evicted after TCP close' % ( + flow_entry_obj.index)) + self.logger.info('TCP flow %s is evicted after TCP close' % ( + flow_entry_obj.r_flow_index)) + # check_flow_is_evicted + + +class SimpleTCPFlowEvictionTests(ExtendedFlowTestsBase): + + ''' Use cirros vms to do simple tcp flow eviction tests + ''' + @classmethod + def setUpClass(cls): + super(SimpleTCPFlowEvictionTests, cls).setUpClass() + cls.vnc_api_h = cls.vnc_lib + # end setUpClass + + @classmethod + def tearDownClass(cls): + super(SimpleTCPFlowEvictionTests, cls).tearDownClass() + # end tearDownClass + + @preposttest_wrapper + def test_flow_entry_after_tcp_session(self): + ''' + Check TCP flow eviction on a regular teardown + + Do a netcat based file tcp transfer and check flows get evicted later + Repeat this 3 times + ''' + sport = '10001' + dport = '10000' + filesize = '10000' + self.vn1_fixture = self.create_vn() + self.vn2_fixture = self.create_vn() + 
self.vn1_vm1_fixture = self.create_vm(self.vn1_fixture, + image_name='cirros') + self.vn1_vm2_fixture = self.create_vm(self.vn1_fixture, + image_name='cirros') + self.vn1_vm1_fixture.wait_till_vm_is_up() + self.vn1_vm2_fixture.wait_till_vm_is_up() + self.vn1_vm1_vrouter_fixture = self.useFixture(ComputeNodeFixture( + self.connections, + self.vn1_vm1_fixture.vm_node_ip)) + self.vn1_vm2_vrouter_fixture = self.useFixture(ComputeNodeFixture( + self.connections, + self.vn1_vm2_fixture.vm_node_ip)) + + for i in range(0, 3): + self.logger.debug('Iteration : %s' % (i)) + # Do file transfer + result = self.vn1_vm1_fixture.cirros_nc_file_transfer( + self.vn1_vm2_fixture, + size=filesize, + local_port=sport, + remote_port=dport) + assert result, "File transfer between cirros vms itself failed!" + + # Check on source and dest computes that the flow is evicted + for compute in [self.vn1_vm1_vrouter_fixture, + self.vn1_vm2_vrouter_fixture]: + (flow_entry, rev_flow) = compute.get_flow_entry( + source_ip=self.vn1_vm1_fixture.vm_ip, + dest_ip=self.vn1_vm2_fixture.vm_ip, + proto='tcp', + source_port=sport, + dest_port=dport, + vrf_id=compute.get_vrf_id(self.vn1_fixture.vn_fq_name) + ) + assert flow_entry is None, ('Flow not evicted ater tcp close.', + ' Flow : %s' % (flow_entry.dump)) + assert rev_flow is None, ('Flow not evicted ater tcp close.', + ' Flow : %s' % (flow_entry.dump)) + self.logger.info('TCP flow is evicted after tcp session close') + # end for + # end for + # end test_flow_entry_after_tcp_session + +# end class SimpleTCPFlowEvictionTests + + +class TCPFlowEvictionTests(ExtendedFlowTestsBase): + + @classmethod + def setUpClass(cls): + super(TCPFlowEvictionTests, cls).setUpClass() + cls.vnc_api_h = cls.vnc_lib + # end setUpClass + + @classmethod + def tearDownClass(cls): + super(TCPFlowEvictionTests, cls).tearDownClass() + # end tearDownClass + + def setUp(self): + super(TCPFlowEvictionTests, self).setUp() + self.vn1_fixture = self.create_vn() + self.vn2_fixture = 
self.create_vn() + self.vn1_vm1_fixture = self.create_vm(self.vn1_fixture) + self.vn1_vm2_fixture = self.create_vm(self.vn1_fixture) + self.vn2_vm1_fixture = self.create_vm(self.vn2_fixture) + self.vn1_vm1_fixture.wait_till_vm_is_up() + self.vn1_vm2_fixture.wait_till_vm_is_up() + self.vn2_vm1_fixture.wait_till_vm_is_up() + + self.vn1_vm1_vrouter_fixture = self.useFixture(ComputeNodeFixture( + self.connections, + self.vn1_vm1_fixture.vm_node_ip)) + self.vn1_vm2_vrouter_fixture = self.useFixture(ComputeNodeFixture( + self.connections, + self.vn1_vm2_fixture.vm_node_ip)) + self.vn2_vm1_vrouter_fixture = self.useFixture(ComputeNodeFixture( + self.connections, + self.vn2_vm1_fixture.vm_node_ip)) + # end setUp + + @preposttest_wrapper + def test_flow_on_normal_tcp_close(self): + ''' + Check TCP flow eviction on a regular four-way teardown + Start a TCP session between vn1_vm1 and vn1_vm2 + Check if the flow is active + After the TCP session is closed, + Check that no matching flow exists after it is done + Validate that the flow is marked inactive + + Repeat this 3 times + ''' + sport = 10000 + dport = 11000 + self.vn1_vm1_fixture.wait_till_vm_is_up() + self.vn1_vm2_fixture.wait_till_vm_is_up() + + f_flow_index = None + r_flow_index = None + + for i in range(0, 3): + traffic_obj = BaseTraffic.factory(proto='tcp') + traffic_obj.start( + self.vn1_vm1_fixture, self.vn1_vm2_fixture, 'tcp', + sport, dport) + time.sleep(3) + flow_table = self.vn1_vm1_vrouter_fixture.get_flow_table( + show_evicted=True) + (flow_entry, junk) = self.vn1_vm1_vrouter_fixture.get_flow_entry( + flow_table=flow_table, + source_ip=self.vn1_vm1_fixture.vm_ip, + dest_ip=self.vn1_vm2_fixture.vm_ip, + proto='tcp', + source_port=sport, + dest_port=dport, + vrf_id=self.vn1_vm1_vrouter_fixture.get_vrf_id( + self.vn1_fixture.vn_fq_name) + ) + if not f_flow_index: + f_flow_index = flow_entry.index + r_flow_index = flow_entry.r_flow_index + + assert not flow_entry.is_flow_evicted( + ), ("TCP flow shown as 
evicted", + " on an existing TCP session: %s" % (flow_entry.items)) + assert f_flow_index == flow_entry.index, ("Flow table not same on", + " a new TCP session with same 5-tuple. Expected: %s, Got %s" % ( + flow_entry.index, f_flow_index)) + assert r_flow_index == flow_entry.r_flow_index, ( + "Rev flow not same", + " on new TCP session with same 5-tuple. Expected: %s, Got %s" % ( + flow_entry.index, f_flow_index)) + (sent, recv) = traffic_obj.stop() + # Wait for atleast 15 secs for agent to evict the flow + time.sleep(15) + self.check_flow_is_evicted( + self.vn1_vm1_vrouter_fixture, flow_entry) + assert flow_entry.packets > recv, ("Unexpected Flow pkt count", + "Expected: >%s, Seen: %s" % (recv, flow_entry.packets)) + # end for + # end test_flow_on_normal_tcp_close + + @preposttest_wrapper + def test_flow_eviction_on_tcp_rst(self): + ''' + Check TCP flow eviction TCP session gets closed due to a TCP RST + ''' + sport = 10000 + self.vn1_vm1_fixture.wait_till_vm_is_up() + self.vn1_vm2_fixture.wait_till_vm_is_up() + # Unassigned dest port. TCP session to this port should end with RST + dport = 26 + traffic_obj = BaseTraffic.factory(proto='tcp') + traffic_obj.start(self.vn1_vm1_fixture, self.vn1_vm2_fixture, 'tcp', + sport, dport) + + traffic_obj.stop() + time.sleep(15) + flow_table = self.vn1_vm1_vrouter_fixture.get_flow_table(show_evicted=True) + (flow_entry, junk) = self.vn1_vm1_vrouter_fixture.get_flow_entry( + flow_table=flow_table, + source_ip=self.vn1_vm1_fixture.vm_ip, + dest_ip=self.vn1_vm2_fixture.vm_ip, + proto='tcp', + source_port=sport, + dest_port=dport, + vrf_id=self.vn1_vm1_vrouter_fixture.get_vrf_id( + self.vn1_fixture.vn_fq_name)) + + assert flow_entry is None, ('Flow not evicted ater tcp close. 
Flow: ', + '%s' % (flow_entry.dump)) + self.logger.info('TCP flow is evicted after a TCP RST') + # end test_flow_eviction_on_tcp_rst + + @preposttest_wrapper + def test_hping3_tcp_traffic_for_eviction(self): + ''' + Between two VMs, have a large number of hping3-based tcp sessions + which are setup and teared down quickly. + Validate that hping3 does not report any loss and the corresponding + flows are evicted + Repeat this a few times so that the same flow indices are used + ''' + count = 1000 + # Set flow table size to 2M + self.vn1_vm1_vrouter_fixture.setup_vrouter_module_params( + {'vr_flow_entries': str(2 * 1024 * 1024)}) + self.vn1_vm1_fixture.wait_till_vm_is_up() + self.vn1_vm2_fixture.wait_till_vm_is_up() + + destport = '22' + baseport = '1000' + interval = 'u1000' + # Create flows using hping + hping_h = Hping3(self.vn1_vm1_fixture, + self.vn1_vm2_fixture.vm_ip, + syn=True, + destport=destport, + baseport=baseport, + count=count, + interval=interval) + for i in range(0, 5): + self.logger.info('Iteration : %s' % (i)) + hping_h.start(wait=True) + (stats, hping_log) = hping_h.stop() + self.logger.debug('Hping3 log : %s' % (hping_log)) + assert stats['loss'] == '0', ('Some loss seen in hping3 session' + 'Stats : %s, Check logs..' % (stats)) + self.logger.info('No packet loss seen with hping traffic') + # Agent does aggressive tcp flow eviction in 15s + time.sleep(30) + + # Check if these flows got evicted + flow_table = self.vn1_vm1_vrouter_fixture.get_flow_table() + (ff_count, rf_count) = self.vn1_vm1_vrouter_fixture.get_flow_count( + flow_table=flow_table, + source_ip=self.vn1_vm1_fixture.vm_ip, + dest_ip=self.vn1_vm2_fixture.vm_ip, + proto='tcp', + dest_port='22', + vrf_id=self.vn1_vm1_vrouter_fixture.get_vrf_id( + self.vn1_fixture.vn_fq_name) + ) + if ff_count or rf_count: + self.logger.debug('Flow table : %s' % (flow_table.get_as_table)) + assert ff_count == 0, 'One or more flows not evicted yet. 
Check logs' + assert rf_count == 0, 'One or more flows not evicted yet. Check logs' + self.logger.info('Validated that all hping flows got evicted') + # end for + # end test_hping3_tcp_traffic_for_eviction + + +class ExtendedFlowTests(ExtendedFlowTestsBase): + + @classmethod + def setUpClass(cls): + super(ExtendedFlowTests, cls).setUpClass() + cls.vnc_api_h = cls.vnc_lib + # end setUpClass + + @classmethod + def tearDownClass(cls): + super(ExtendedFlowTests, cls).tearDownClass() + # end tearDownClass + + @preposttest_wrapper + #@skip_because(bug='1530034') + def test_with_fuzz_bug_1504710(self): + ''' + This test makes sure that the vrouter doesnt misbehave + with various IP protocols + This testcase can fail due to bug 1530034 + But the test is enabled to make sure that vrouter does not crash + ''' + + # TODO + # Unable to figure out what scapy profile can fuzz + # packets. Currently use raw scapy code itself + python_code = Template(''' +from scapy.all import * +a=fuzz(IP(dst='$dest_ip')/Raw(RandString(size=300))) +send(a, count=10000, inter=0, iface='eth0') +''') + + vn_fixture = self.create_vn() + vm1_fixture = self.create_vm(vn_fixture) + vm2_fixture = self.create_vm(vn_fixture) + vm1_fixture.wait_till_vm_is_up() + vm2_fixture.wait_till_vm_is_up() + compute_ips = list(set([vm1_fixture.vm_node_ip, + vm2_fixture.vm_node_ip])) + python_code = python_code.substitute(dest_ip=vm2_fixture.vm_ip) + vm1_fixture.run_python_code(python_code) + # Below steps does not really make the ping work consistently + # Keep the code as is for now + #self.logger.info('Sometimes pkts from/to VM get stuck,' + # 'Restarting agents as a workaround(Bug 1530034)') + #self.inputs.restart_service('contrail-vrouter-agent', compute_ips) + #status = Constatuscheck(self.inputs) + #status.wait_till_contrail_cluster_stable(compute_ips) + + # Now validate that later pings between vms work + self.do_ping_test(vm1_fixture, vm1_fixture.vm_ip, vm2_fixture.vm_ip) + # end test_with_fuzz_bug_1504710 diff 
--git a/serial_scripts/webui/base.py b/serial_scripts/webui/base.py index 3c4e310bd..a28c62221 100644 --- a/serial_scripts/webui/base.py +++ b/serial_scripts/webui/base.py @@ -1,4 +1,4 @@ -import test +import test_v1 from common import isolated_creds from vn_test import * from vm_test import * @@ -21,18 +21,11 @@ import random -class WebuiBaseTest(test.BaseTestCase): +class WebuiBaseTest(test_v1.BaseTestCase_v1): @classmethod def setUpClass(cls): super(WebuiBaseTest, cls).setUpClass() - cls.isolated_creds = isolated_creds.IsolatedCreds( - cls.__name__, - cls.inputs, - ini_file=cls.ini_file, - logger=cls.logger) - cls.inputs = cls.isolated_creds.get_admin_inputs() - cls.connections = cls.isolated_creds.get_admin_connections() cls.quantum_h = cls.connections.quantum_h cls.nova_h = cls.connections.nova_h cls.vnc_lib = cls.connections.vnc_lib @@ -54,7 +47,6 @@ def setUpClass(cls): def tearDownClass(cls): cls.UicleanUp() cls.res.cleanUp() - # cls.isolated_creds.delete_tenant() super(WebuiBaseTest, cls).tearDownClass() # end tearDownClass diff --git a/serial_scripts/webui/test_webui.py b/serial_scripts/webui/test_webui.py index 8a5578596..3d7dbb8ad 100644 --- a/serial_scripts/webui/test_webui.py +++ b/serial_scripts/webui/test_webui.py @@ -13,7 +13,10 @@ import test from tcutils.wrappers import preposttest_wrapper import base - +from webui_topology import * +topo = sdn_webui_config() +global count +count = 1 class WebuiTestSanity(base.WebuiBaseTest): @@ -283,4 +286,958 @@ def test_verify_monitor_networking_instance_advance_details(self): return True # end test_instance_advance_details + @preposttest_wrapper + def test3_1_edit_net_without_change(self): + '''Test to edit the existing network without changing anything + 1. Go to Configure->Networking->Networks. Then select any of the vn and + click the edit button + 2. Click the save button without changing anything + 3. 
Check the UUID in UI page and API and OPS + + Pass Criteria: UUID shouldn't be changed after editing + ''' + result = True + opt_list = [] + self.webui.logger.debug("Step 1 : Get the uuid before editing") + uuid = self.webui_common.get_vn_detail_ui('UUID') + vn_name = self.webui_common.get_vn_detail_ui('Display Name') + self.webui.logger.debug("UUID before editing " + uuid) + self.webui.logger.debug("Step 2 : Verify WebUI before editing") + if not self.webui.verify_vn_after_edit_ui('UUID', uuid, opt_list): + self.webui.logger.debug("Virtual networks config data verification in WebUI failed") + result = result and False + self.webui.logger.debug("Step 3 : Verify API server before editing") + if not self.webui.verify_vn_after_edit_api('UUID', uuid, uuid, opt_list): + self.webui.logger.debug("Virtual networks config data verification in API failed") + result = result and False + self.webui.logger.debug("Step 4 : Verify OPS server before editing") + if not self.webui.verify_vn_after_edit_ops('UUID', vn_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + self.webui.logger.debug("Step 5 : Edit the VN without changing anything") + if not self.webui_common.edit_vn_without_change(): + self.webui.logger.debug('Editing Network failed') + result = result and False + self.webui.logger.debug("Step 6 : Verify WebUI server after editing") + if not self.webui.verify_vn_after_edit_ui('UUID', uuid, opt_list): + self.webui.logger.debug("Virtual networks config data verification in UI failed") + result = result and False + self.webui.logger.debug("Step 7 : Verify API server after editing") + if not self.webui.verify_vn_after_edit_api('UUID', uuid, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 8 : Verify OPS server after editing") + if not self.webui.verify_vn_after_edit_ops('UUID', 
vn_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + return result + #end test_edit_vn_witout_change + + @preposttest_wrapper + def test3_2_edit_net_disp_name_change(self): + ''' Test to edit the existing network by changing VN display name + 1. Go to Configure->Networking->Networks. Then select any of the vn and + click the edit button + 2. Change the Display name and click the save button + 3. Check that new display name got reflected in WebUI,API and OPS. + + Pass Criteria : Step 3 should pass + ''' + self.webui.logger.debug("Step 1 : Get the display name of the VN before editing") + uuid = self.webui_common.get_vn_detail_ui('UUID') + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name') + opt_list = [topo.vn_disp_name] + result = True + if self.vn_disp_name: + self.webui.logger.debug("Getting VN display name is successful and \ + the VN name is %s" %(self.vn_disp_name)) + self.webui.logger.debug("Step 2 : Editing the VN by the name") + if not self.webui_common.edit_vn_disp_name_change(topo.vn_disp_name): + self.webui.logger.debug("Editing network failed") + result = result and False + self.webui.logger.debug("Step 3 : Verify WebUI server after editing") + if not self.webui.verify_vn_after_edit_ui('Display Name', \ + topo.vn_disp_name, opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 4 : Verify API server after editing") + if not self.webui.verify_vn_after_edit_api('Display Name', topo.vn_disp_name, \ + uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 5 : Verify OPS server after editing") + if not self.webui.verify_vn_after_edit_ops('Display Name', \ + self.vn_disp_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data 
verification in OPS failed') + result = result and False + self.webui.logger.debug("Step 6 : Editing the VN with the previous vn name") + if not self.webui_common.edit_vn_disp_name_change(self.vn_disp_name): + self.webui.logger.debug('Editing Network failed') + result = result and False + opt_list = [self.vn_disp_name] + self.webui.logger.debug("Step 7 : Verify WebUI after editing with previous vn name") + if not self.webui.verify_vn_after_edit_ui('Display Name', self.vn_disp_name, opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 8 : Verifying the VN after editing \ + previous vn name in API") + if not self.webui.verify_vn_after_edit_api('Display Name', self.vn_disp_name, \ + uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 9 : Verify OPS server after editing with previous name") + if not self.webui.verify_vn_after_edit_ops('Display Name', \ + self.vn_disp_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + else: + self.webui.logger.error("Not able to get the display name. \ + So Editing Vn is not possible") + result = result and False + return result + #end test_edit_vn_witout_change + + @preposttest_wrapper + def test3_3_edit_net_disp_name_change_with_spl_char(self): + ''' Test to edit the existing network by changing VN display name with special character + 1. Go to Configure->Networking->Networks. Then select any of the vn and + click the edit button + 2. Change the Display name with special character and click the save button + 3. Check that new display name got reflected in WebUI,API and OPS. 
+ + Pass Criteria : Step 3 should pass + ''' + opt_list = [topo.vn_disp_name_spl_char_ops] + result = True + self.webui.logger.debug("Step 1 : Get the display name of the VN before editing") + uuid = self.webui_common.get_vn_detail_ui('UUID') + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name') + if self.vn_disp_name: + self.webui.logger.debug("Getting VN display name is successful \ + and the VN name is %s" %(self.vn_disp_name)) + self.webui.logger.debug("Step 2 : Editing the VN by the name with special characters") + if not self.webui_common.edit_vn_disp_name_change(topo.vn_disp_name_spl_char): + self.webui.logger.debug('Editing Network failed') + result = result and False + self.webui.logger.debug("Step 3 : Verify WebUI server after editing") + if not self.webui.verify_vn_after_edit_ui('Display Name', topo.vn_disp_name_spl_char, \ + opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 4 : Verify API server after editing") + if not self.webui.verify_vn_after_edit_api('Display Name', \ + topo.vn_disp_name_spl_char, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 5 : Verify OPS server after editing") + if not self.webui.verify_vn_after_edit_ops('Display Name', self.vn_disp_name, \ + uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + self.webui.logger.debug("Step 6 : Editing the VN with the previous vn name") + if not self.webui_common.edit_vn_disp_name_change(self.vn_disp_name): + self.webui.logger.debug('Editing Network failed') + result = result and False + self.webui.logger.debug("Step 7 : Verify WebUI after editing with previous vn name") + opt_list = [self.vn_disp_name] + if not self.webui.verify_vn_after_edit_ui('Display Name', self.vn_disp_name, 
opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 8 : Verifying the VN after editing previous \ + vn name in API") + if not self.webui.verify_vn_after_edit_api('Display Name', self.vn_disp_name, \ + uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + self.webui.logger.debug("Step 9 : Verify OPS server after editing") + if not self.webui.verify_vn_after_edit_ops('Display Name', \ + self.vn_disp_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + else: + self.webui.logger.error("Not able to get the display name. \ + So Editing Vn is not possible") + result = result and False + return result + #end test_edit_vn_witout_change + + @preposttest_wrapper + def test3_4_edit_net_by_add_policy(self): + ''' Test to edit the existing network by policy + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Attach one policy for the vn and save. + 3. Check that attached policy is there in WebUI,API and OPS. 
+ + Pass Criteria : Step 3 should pass + ''' + self.webui.logger.debug("Step 1 : Attach policy to the VN") + pol_name = "" + pol_name = self.webui_common.add_vn_with_policy(pol_name) + result = True + opt_list = [pol_name] + uuid = self.webui_common.get_vn_detail_ui('UUID') + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name') + self.vn_policy = str(self.webui_common.get_vn_detail_ui('Policy')) + self.webui.logger.debug("Step 2 : Verify the VN for the attached policy \ + through WebUI server") + if not self.webui.verify_vn_after_edit_ui('Policy', self.vn_policy, opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 3 : Verify the VN for the attached policy through API server") + if not self.webui.verify_vn_after_edit_api("Policy", "Policy", uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 4 : Verify the VN for the attached policy through OPS server") + if not self.webui.verify_vn_after_edit_ops('Policy', self.vn_disp_name, \ + self.vn_disp_name, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + self.webui.logger.debug("Step 5 : Remove the policy which is attached") + if not self.webui_common.del_vn_with_policy(pol_name): + self.webui.logger.debug('Editing network with policy failed') + result = result and False + return result + #end test3_4_edit_net_policy + + @preposttest_wrapper + def test3_5_edit_net_by_add_subnet(self): + ''' Test to edit the existing network by subnet + 1. Go to configure->Networking->Networks. Create a new VN + 2. Edit the created VN and add subnet with all options and save + 3. Check that subnet with all options got reflected in WebUI,API and OPS. + 4. Remove the subnet and and add it back with subnet-gate option. + 5. 
Check the same got updated in WebUI, API and OPs. + Similarly doing for subnet-dns and subnet-dhcp + + Pass Criteria : Step 3,4,5 should pass + ''' + opt_list = [topo.subnet_edit, topo.mask, topo.subnet_sip, topo.subnet_eip, + topo.subnet_dns_ip, topo.subnet_gate_ip, topo.subnet_default_gate_ip] + result = True + if self.webui_common.click_configure_networks(): + add = self.webui_common.find_element("//i[contains(@class,'icon-plus')]", 'xpath') + add.click() + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + self.webui_common.find_element("//input[contains(@name,'display_name')]", \ + 'xpath').send_keys(topo.vn_disp_name) + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + self.webui_common.click_element('configure-networkbtn1') + + self.webui_common.wait_till_ajax_done(self.browser, wait=5) + verify_list = ['Subnet', 'Subnet-gate', 'Subnet-dns', 'Subnet-dhcp'] + for subnet_type in verify_list: + if subnet_type == 'Subnet': + str1 = 'all' + else: + str1 = subnet_type + 'disabled' + self.webui.logger.debug("Step 1 - " + subnet_type + \ + ": Add subnet with " + str1 + "options") + ind = self.webui_common.edit_vn_with_subnet(subnet_type, topo.subnet_edit + \ + "/" + topo.mask, \ + topo.subnet_sip + "-" + \ + topo.subnet_eip, \ + topo.subnet_gate_ip, topo.vn_disp_name) + if not ind: + self.webui.logger.debug('Editing network with subnet failed') + result = result and False + uuid = self.webui_common.get_vn_detail_ui('UUID', index=ind) + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name', index=ind) + subnet = self.webui_common.get_vn_detail_ui('Subnet', index=ind) + self.webui.logger.debug("Step 2 - " + subnet_type + \ + ": Verify the VN for subnet in WebUI") + if not self.webui.verify_vn_after_edit_ui(subnet_type, subnet, opt_list, index=ind): + self.webui.logger.debug('Virtual networks config data \ + verification in UI failed') + result = result and False + self.webui.logger.debug("Step 3 - " + subnet_type + \ + ": Verify 
the VN for subnet in API server") + if not self.webui.verify_vn_after_edit_api(subnet_type, subnet, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification \ + in API failed') + result = result and False + self.webui.logger.debug("Step 4 - " + subnet_type + \ + ": Verify the VN for subnet in OPS server") + if not self.webui.verify_vn_after_edit_ops(subnet_type, self.vn_disp_name, \ + uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification \ + in OPS failed') + result = result and False + self.webui.logger.debug("Step 5 : Remove the subnet which is added") + if not self.webui_common.del_vn_with_subnet(topo.vn_disp_name): + self.webui.logger.debug('Editing network with subnet failed') + result = result and False + self.webui.logger.debug("Step 6 : Remove the VN which is added") + if not self.webui_common.edit_remove_option("Networks", 'remove', \ + vn_name=topo.vn_disp_name): + self.webui.logger.debug('Editing network with advanced options is failed') + result = result and False + return result + #end test3_5_edit_net_subnet + + @preposttest_wrapper + def test3_6_edit_net_host_opt(self): + ''' Test to edit the existing network by Host routes + 1. Go to Configure->Networking->Networks. + Then select any of the vn and click the edit button + 2. Add Host route with route prefix and next hop and save. + 3. Check that host route is added in WebUI,API and OPS. 
+ + Pass Criteria : Step 3 should pass + ''' + result = True + opt_list = [topo.host_prefix, topo.host_nexthop] + uuid = self.webui_common.get_vn_detail_ui('UUID') + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name') + self.webui.logger.debug("Step 1 : Add Host Route under VN") + if not self.webui_common.edit_vn_with_host_route('add', 'pos', topo.host_prefix, \ + topo.host_nexthop): + self.webui.logger.debug('Editing network with host routes failed') + result = result and False + host_route = self.webui_common.get_vn_detail_ui('Host Route') + self.webui.logger.debug("Step 2 : Verify the host route in WebUI") + if not self.webui.verify_vn_after_edit_ui('Host Route', host_route, opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 3 : Verify the host route in API server") + if not self.webui.verify_vn_after_edit_api('Host Route', host_route, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 4 : Verify the VN for host route in OPS server") + if not self.webui.verify_vn_after_edit_ops('Host Route', self.vn_disp_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + self.webui.logger.debug("Step 5 : Remove the Host Route which is added") + if not self.webui_common.edit_vn_with_host_route('remove', 'pos', \ + topo.host_prefix, topo.host_nexthop): + self.webui.logger.debug('Editing network with host routes failed') + result = result and False + return result + # end test3_6_edit_net_host_opt + + @preposttest_wrapper + def test5_1_edit_net_host_opt_neg(self): + ''' Test to edit the existing network by Invalid Host routes + 1. Go to Configure->Networking->Networks. + Then select any of the vn and click the edit button + 2. 
Add Host route with invalid route prefix and invalid next hop and save it. + 3. WebUI should throw an error message while saving. + + Pass Criteria : Step 3 should pass + ''' + result = True + uuid = self.webui_common.get_vn_detail_ui('UUID') + self.webui.logger.debug("Step 1 : Add Host Route under VN") + assert self.webui_common.edit_vn_with_host_route('add', 'neg', topo.dns_ip, \ + topo.host_nexthop), \ + 'Editing network with host routes failed \ + as expected for negative scenario' + + # end test5_1_edit_net_host_opt_neg + + @preposttest_wrapper + def test3_7_edit_net_adv_opt(self): + ''' Test to edit the existing network by Advanced Options + 1. Go to Configure->Networking->Networks. + Then select any of the vn and click the edit button + 2. Select all the options under advanced option and save. + 3. Check that all the options under advanced option got reflected in WebUI,API and OPS. + + Pass Criteria : Step 3 should pass + ''' + result = True + opt_list = [topo.vlan_id, topo.phy_net, topo.subnet_adv_option, topo.vn_disp_name] + self.webui.logger.debug("Step 1 : Add advanced options under VN") + index = self.webui_common.edit_vn_with_adv_option(1, 'pos-phy', opt_list) + if not index: + self.webui.logger.debug('Editing network with advanced options is failed') + result = result and False + uuid = self.webui_common.get_vn_detail_ui('UUID', index=index) + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name', index=index) + adv_option = self.webui_common.get_vn_detail_ui('Adv Option', index=index) + self.webui.logger.debug("Step 2 : Verify the advanced option in WebUI") + if not self.webui.verify_vn_after_edit_ui('Adv Option', adv_option, opt_list, index=index): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 3 : Verify advanced option in API server") + if not self.webui.verify_vn_after_edit_api('Adv Option', adv_option, uuid, opt_list): + 
self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 4 : Verify the VN for advancded option in OPS server") + if not self.webui.verify_vn_after_edit_ops('Adv Option', self.vn_disp_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + self.webui.logger.debug("Step 5 : Remove the VN which is added") + if not self.webui_common.edit_remove_option("Networks", 'remove', \ + vn_name=topo.vn_disp_name): + self.webui.logger.debug('Editing network with advanced options is failed') + result = result and False + return result + + # end test3_7_edit_net_adv_opt + + @preposttest_wrapper + def test5_2_edit_net_adv_opt_neg(self): + ''' Test to edit the existing network by Invalid physical network + and invalid vlan id under Advanced option + 1. Go to Configure->Networking->Networks. + Then select any of the vn and click the edit button + 2. Select all the options under advanced option and give + invalid physical network and invalid vlan and save it. + 3. WebUI should throw an error message while saving. 
+ + Pass Criteria : Step 3 should pass + ''' + result = True + self.webui.logger.debug("Step 1 : Add advanced options under VN") + opt_list = [topo.vlan_id, topo.phy_net, topo.subnet_adv_option, topo.vn_disp_name] + index = self.webui_common.edit_vn_with_adv_option(1, 'pos-phy', opt_list) + if not index: + self.webui.logger.debug('Editing network with advanced options is failed') + result = result and False + self.webui.logger.debug("Step 2 : Edit the vn using advanced options") + opt_list_invalid = [topo.invalid_vlan_id, topo.phy_net, topo.subnet_edit, topo.vn_disp_name] + if not self.webui_common.edit_vn_with_adv_option(0, 'neg-phy', opt_list_invalid): + self.webui.logger.debug('Editing network with advanced option is failed') + result = result and False + self.webui.logger.debug("Step 3 : Remove the VN which is added") + if not self.webui_common.edit_remove_option("Networks", 'remove', \ + vn_name=topo.vn_disp_name): + self.webui.logger.debug('Editing network with advanced options is failed') + result = result and False + return result + + # end test5_2_edit_net_adv_opt_neg + + @preposttest_wrapper + def test3_8_edit_net_dns(self): + ''' Test to edit the existing network by DNS + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add dns IP under DNS Server. + 3. Check that dns Ip got added in WebUI,API and OPS. 
+ + Pass Criteria : Step 3 should pass + ''' + result = True + opt_list = [topo.dns_ip] + self.webui.logger.debug("Step 1 : Add dns server IP under VN") + if not self.webui_common.edit_vn_with_dns('add', 'pos', topo.dns_ip): + self.webui.logger.debug('Editing network with dns is failed') + result = result and False + uuid = self.webui_common.get_vn_detail_ui('UUID') + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name') + dns = self.webui_common.get_vn_detail_ui('DNS') + self.webui.logger.debug("Step 2 : Verify the DNS server IP in WebUI") + if not self.webui.verify_vn_after_edit_ui('DNS', dns, opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 3 : Verify DNS server IP in API server") + if not self.webui.verify_vn_after_edit_api('DNS', dns, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 4 : Verify the VN for DNS server IP in OPS server") + if not self.webui.verify_vn_after_edit_ops('DNS', self.vn_disp_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + self.webui.logger.debug("Step 5 : Remove the VN which is added") + if not self.webui_common.edit_vn_with_dns('remove', 'pos', topo.dns_ip): + self.webui.logger.debug('Editing network with dns is failed') + result = result and False + return result + + # end test3_8_edit_net_dns + + @preposttest_wrapper + def test3_9_edit_net_dns_neg(self): + ''' Test to edit the existing network by DNS + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add Invalid dns IP under DNS Server. + 3. 
WebUI should thrown an error message while saving + + Pass Criteria : Step 3 should pass + ''' + self.webui.logger.debug("Step 1 : Add dns server IP under VN") + assert self.webui_common.edit_vn_with_dns('add', 'neg', topo.invalid_dns_ip), \ + 'Editing network with dns is failed' + + # end test3_9_edit_dns_neg + + @preposttest_wrapper + def test4_1_edit_net_fip(self): + ''' Test to edit the existing network by Floating IP + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add Pool name and project name under Floating IP. + 3. Check that pool and project name got added in WebUI,API and OPS. + + Pass Criteria : Step 3 should pass + ''' + result = True + opt_list = [topo.fpool] + self.webui.logger.debug("Step 1 : Add Floating server IP under VN") + if not self.webui_common.edit_vn_with_fpool('add', topo.fpool): + self.webui.logger.debug('Editing network with FIP is failed') + result = result and False + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + uuid = self.webui_common.get_vn_detail_ui('UUID') + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name') + fip = self.webui_common.get_vn_detail_ui('FIP') + self.webui.logger.debug("Step 2 : Verify the Floating IP in WebUI") + if not self.webui.verify_vn_after_edit_ui('FIP', fip, opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 3 : Verify Floating IP in API server") + if not self.webui.verify_vn_after_edit_api('FIP', fip, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 4 : Verify the VN for Floating IP in OPS server") + if not self.webui.verify_vn_after_edit_ops('FIP', self.vn_disp_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + 
self.webui.logger.debug("Step 5 : Remove the FIP which is added") + if not self.webui_common.edit_vn_with_fpool('remove', topo.fpool): + self.webui.logger.debug('Editing network with FIP is failed') + result = result and False + return result + + # end test4_1_edit_net_fip + + @preposttest_wrapper + def test4_2_edit_net_route_target_asn_num(self): + ''' Test to edit the existing network by Route Target + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add ASN number and Target number under Route Target. + 3. Check the asn and target number got added in WebUI,API and OPS. + + Pass Criteria : Step 3 should pass + ''' + result = True + opt_list = [topo.asn_num, topo.target_num, topo.asn_ip] + self.webui.logger.debug("Step 1 : Add Route Target under VN") + if not self.webui_common.edit_vn_with_route_target('add', 'pos', 'RT', \ + topo.asn_num, topo.target_num): + self.webui.logger.debug('Editing network with Route target failed') + result = result and False + self.webui_common.wait_till_ajax_done(self.browser) + uuid = self.webui_common.get_vn_detail_ui('UUID') + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name') + rt = self.webui_common.get_vn_detail_ui('RT') + self.webui.logger.debug("Step 2 : Verify the Route Target in WebUI") + if not self.webui.verify_vn_after_edit_ui('RT', rt, opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 3 : Verify advanced option in API server") + if not self.webui.verify_vn_after_edit_api('RT', rt, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 4 : Verify the VN for n in OPS server") + if not self.webui.verify_vn_after_edit_ops('RT', self.vn_disp_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') 
+ result = result and False + self.webui.logger.debug("Step 5 : Remove the Route Target which is added") + if not self.webui_common.edit_vn_with_route_target('remove', 'pos', 'RT', \ + topo.asn_num, topo.target_num): + self.webui.logger.debug('Editing network with Route Target is failed') + result = result and False + return result + + # end test4_2_edit_net_route_target_asn_num + + @preposttest_wrapper + def test4_3_edit_net_route_target_asn_ip(self): + ''' Test to edit the existing network by Route Target + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add IP as asn and Target number under Route Target. + 3. Check the asn ip and target number got added in WebUI,API and OPS. + + Pass Criteria : Step 3 should pass + ''' + result = True + opt_list = [topo.asn_num, topo.target_num, topo.asn_ip] + self.webui.logger.debug("Step 1 : Add Route Target under VN") + if not self.webui_common.edit_vn_with_route_target('add', 'pos', 'RT', \ + topo.asn_ip, topo.target_num): + self.webui.logger.debug('Editing network with Route target failed') + result = result and False + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + uuid = self.webui_common.get_vn_detail_ui('UUID') + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name') + rt = self.webui_common.get_vn_detail_ui('RT') + self.webui.logger.debug("Step 2 : Verify the advanced option in WebUI") + if not self.webui.verify_vn_after_edit_ui('RT', rt, opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 3 : Verify advanced option in API server") + if not self.webui.verify_vn_after_edit_api('RT', rt, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 4 : Verify the VN for advancded option in OPS server") + if not 
self.webui.verify_vn_after_edit_ops('RT', self.vn_disp_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + self.webui.logger.debug("Step 5 : Remove the Route Target which is added") + if not self.webui_common.edit_vn_with_route_target('remove', 'pos', 'RT', \ + topo.asn_ip, topo.target_num): + self.webui.logger.debug('Editing network with Route Target is failed') + result = result and False + return result + + # end test4_3_edit_net_route_target_asn_ip + + @preposttest_wrapper + def test5_3_edit_net_route_target_neg_asn_ip(self): + ''' Test to edit the existing network by Route Target + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add invalid IP as asn and invalid Target number under Route Target. + 3. WebUI should throw an error message while saving. + + Pass Criteria : Step 3 should pass + ''' + result = True + self.webui.logger.debug("Step 1 : Add Route Target under VN") + assert self.webui_common.edit_vn_with_route_target('add', 'neg', 'RT', \ + topo.invalid_asn_ip, \ + topo.invalid_target_num), \ + 'Editing network with Route \ + target failed' + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + + # end test5_3_edit_net_route_target_neg_asn_ip + + @preposttest_wrapper + def test5_4_edit_net_route_target_neg_asn_num(self): + ''' Test to edit the existing network by Route Target + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add invalid asn number and invalid Target number under Route Target. + 3. WebUI should throw an error message while saving. 
+ + Pass Criteria : Step 3 should pass + ''' + result = True + self.webui.logger.debug("Step 1 : Add Route Target under VN") + assert self.webui_common.edit_vn_with_route_target('add', 'neg', 'RT', \ + topo.invalid_asn_num, \ + topo.invalid_target_num), \ + 'Editing network with \ + Route target failed' + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + + # end test5_4_edit_net_route_target_neg_asn_num + + @preposttest_wrapper + def test5_5_edit_net_exp_route_target_asn_num(self): + ''' Test to edit the existing network by Export Route Target + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add asn number and Target number under Export Route Target. + 3. Check the asn number and target number got added in WebUI,API and OPS. + + Pass Criteria : Step 3 should pass + ''' + result = True + opt_list = [topo.asn_num, topo.target_num, topo.asn_ip] + self.webui.logger.debug("Step 1 : Add Export Route Target under VN") + if not self.webui_common.edit_vn_with_route_target('add', 'pos', 'ERT', \ + topo.asn_num, topo.target_num): + self.webui.logger.debug('Editing network with Export Route target failed') + result = result and False + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + uuid = self.webui_common.get_vn_detail_ui('UUID') + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name') + ert = self.webui_common.get_vn_detail_ui('ERT') + self.webui.logger.debug("Step 2 : Verify the advanced option in WebUI") + if not self.webui.verify_vn_after_edit_ui('ERT', ert, opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 3 : Verify advanced option in API server") + if not self.webui.verify_vn_after_edit_api('ERT', ert, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 4 : 
Verify the VN for advancded option in OPS server") + if not self.webui.verify_vn_after_edit_ops('ERT', self.vn_disp_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + self.webui.logger.debug("Step 5 : Remove the Route Target which is added") + if not self.webui_common.edit_vn_with_route_target('remove', 'pos', 'ERT', \ + topo.asn_num, topo.target_num): + self.webui.logger.debug('Editing network with Export Route Target is failed') + result = result and False + return result + + # end test5_5_edit_net_exp_route_target_asn_num + + @preposttest_wrapper + def test4_4_edit_net_exp_route_target_asn_ip(self): + ''' Test to edit the existing network by Export Route Target + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add IP as asn and Target number under Export Route Target. + 3. Check the asn ip and target number got added in WebUI,API and OPS. + + Pass Criteria : Step 3 should pass + ''' + result = True + opt_list = [topo.asn_num, topo.target_num, topo.asn_ip] + self.webui.logger.debug("Step 1 : Add Export Route Target under VN") + if not self.webui_common.edit_vn_with_route_target('add', 'pos', 'ERT', \ + topo.asn_ip, topo.target_num): + self.webui.logger.debug('Editing network with Export Route target failed') + result = result and False + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + uuid = self.webui_common.get_vn_detail_ui('UUID') + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name') + ert = self.webui_common.get_vn_detail_ui('ERT') + self.webui.logger.debug("Step 2 : Verify the advanced option in WebUI") + if not self.webui.verify_vn_after_edit_ui('ERT', ert, opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 3 : Verify advanced option in API server") + if not 
self.webui.verify_vn_after_edit_api('ERT', ert, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 4 : Verify the VN for advancded option in OPS server") + if not self.webui.verify_vn_after_edit_ops('ERT', self.vn_disp_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + self.webui.logger.debug("Step 5 : Remove the Export Route which is added") + if not self.webui_common.edit_vn_with_route_target('remove', 'pos', 'ERT', \ + topo.asn_ip, topo.target_num): + self.webui.logger.debug('Editing network with Export Route Target is failed') + result = result and False + return result + + # end test4_4_edit_net_exp_route_target_asn_ip + + @preposttest_wrapper + def test5_6_edit_net_exp_route_target_neg_asn_ip(self): + ''' Test to edit the existing network by Export Route Target + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add invalid IP as asn and invalid Target number under Export Route Target. + 3. WebUI should throw an error message while saving. + + Pass Criteria : Step 3 should pass + ''' + result = True + self.webui.logger.debug("Step 1 : Add Export Route Target under VN") + assert self.webui_common.edit_vn_with_route_target('add', 'neg', 'ERT', \ + topo.invalid_asn_ip, \ + topo.invalid_target_num), \ + 'Editing network with \ + Export Route target failed' + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + + # end test5_6_edit_net_exp_route_target_neg_asn_ip + + @preposttest_wrapper + def test5_8_edit_net_exp_route_target_neg_asn_num(self): + ''' Test to edit the existing network by Export Route Target + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add invalid asn number and invalid Export Target number under Route Target. + 3. 
WebUI should throw an error message while saving. + + Pass Criteria : Step 3 should pass + ''' + result = True + self.webui.logger.debug("Step 1 : Add Export Route Target under VN") + assert self.webui_common.edit_vn_with_route_target('add', 'neg', 'ERT', \ + topo.invalid_asn_num, \ + topo.invalid_target_num), \ + 'Editing network with \ + Export Route target failed' + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + + # end test5_8_edit_net_exp_route_target_neg_asn_num + + @preposttest_wrapper + def test4_5_edit_net_imp_route_target_asn_num(self): + ''' Test to edit the existing network by Import Route Target + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add asn and Target number under Import Route Target. + 3. Check the asn and target number got added in WebUI,API and OPS. + + Pass Criteria : Step 3 should pass + ''' + global count + count = count + 1 + result = True + opt_list = [topo.asn_num, topo.target_num, topo.asn_ip] + self.webui.logger.debug("Step 1 : Add Import Route Target under VN") + if not self.webui_common.edit_vn_with_route_target('add', 'pos', 'IRT', \ + topo.asn_num, topo.target_num, \ + count=count): + self.webui.logger.debug('Editing network with import Route target failed') + result = result and False + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + uuid = self.webui_common.get_vn_detail_ui('UUID') + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name') + irt = self.webui_common.get_vn_detail_ui('IRT') + self.webui.logger.debug("Step 2 : Verify the advanced option in WebUI") + if not self.webui.verify_vn_after_edit_ui('IRT', irt, opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 3 : Verify advanced option in API server") + if not self.webui.verify_vn_after_edit_api('IRT', irt, uuid, opt_list): + self.webui.logger.debug('Virtual networks 
config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 4 : Verify the VN for advancded option in OPS server") + if not self.webui.verify_vn_after_edit_ops('IRT', self.vn_disp_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + self.webui.logger.debug("Step 5 : Remove the Import Route Target which is added") + if not self.webui_common.edit_vn_with_route_target('remove', 'pos', \ + 'IRT', topo.asn_ip, topo.target_num): + self.webui.logger.debug('Editing network with Import Route Target is failed') + result = result and False + return result + + # end test4_5_edit_net_imp_route_target_asn_num + + @preposttest_wrapper + def test4_6_edit_net_imp_route_target_asn_ip(self): + ''' Test to edit the existing network by Import Route Target + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add IP as asn and Target number under Import Route Target. + 3. Check the asn ip and target number got added in WebUI,API and OPS. 
+ + Pass Criteria : Step 3 should pass + ''' + global count + count = count + 1 + result = True + opt_list = [topo.asn_num, topo.target_num, topo.asn_ip] + self.webui.logger.debug("Step 1 : Add Import Route Target under VN") + self.webui_common.wait_till_ajax_done(self.browser) + if not self.webui_common.edit_vn_with_route_target('add', 'pos', 'IRT', \ + topo.asn_ip, topo.target_num, \ + count=count): + self.webui.logger.debug('Editing network with Import Route target failed') + result = result and False + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + uuid = self.webui_common.get_vn_detail_ui('UUID') + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name') + irt = self.webui_common.get_vn_detail_ui('IRT') + self.webui.logger.debug("Step 2 : Verify the advanced option in WebUI") + if not self.webui.verify_vn_after_edit_ui('IRT', irt, opt_list): + self.webui.logger.debug('Virtual networks config data verification in UI failed') + result = result and False + self.webui.logger.debug("Step 3 : Verify advanced option in API server") + if not self.webui.verify_vn_after_edit_api('IRT', irt, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in API failed') + result = result and False + self.webui.logger.debug("Step 4 : Verify the VN for advancded option in OPS server") + if not self.webui.verify_vn_after_edit_ops('IRT', self.vn_disp_name, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification in OPS failed') + result = result and False + self.webui.logger.debug("Step 5 : Remove the Import Route Target which is added") + if not self.webui_common.edit_vn_with_route_target('remove', 'pos', 'IRT', \ + topo.asn_ip, topo.target_num): + self.webui.logger.debug('Editing network with Import Route Target is failed') + result = result and False + return result + + # end test4_6_edit_net_route_target_asn_ip + + @preposttest_wrapper + def 
test5_7_negative_case_edit_net_with_invalid_route_target_ip(self): + ''' Test to edit the existing network by Import Route Target + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add invalid IP as asn and invalid Import Target number under Import Route Target. + 3. WebUI should throw an error message while saving. + + Pass Criteria : Step 3 should pass + ''' + global count + count += 1 + self.webui.logger.debug("Step 1 : Add Import Route Target under VN") + assert self.webui_common.edit_vn_with_route_target('add', 'neg', 'IRT', \ + topo.invalid_asn_ip, \ + topo.invalid_target_num, \ + count=count), \ + 'Editing network with \ + Import Route target failed' + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + + # end test5_7_negative_case_edit_net_with_invalid_route_target_ip + + @preposttest_wrapper + def test5_9_negative_case_edit_net_with_invalid_route_target_num(self): + ''' Test to edit the existing network by Import Route Target + 1. Go to Configure->Networking->Networks. Then select any of the vn + and click the edit button + 2. Add invalid asn number and invalid Target number under Import Route Target. + 3. WebUI should throw an error message while saving. + + Pass Criteria : Step 3 should pass + ''' + global count + count += 1 + self.webui.logger.debug("Step 1 : Add Import Route Target under VN") + assert self.webui_common.edit_vn_with_route_target('add', 'neg', 'IRT', \ + topo.invalid_asn_num, \ + topo.invalid_target_num, \ + count=count), \ + 'Editing network with \ + Import Route target failed' + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + + # end test5_9_negative_case_edit_net_with_invalid_route_target_num + + @preposttest_wrapper + def test4_7_create_vn_with_spl_char(self): + ''' Test to create vn with combination of spl char and verify in all API, OPS and WebUI + 1. Go to Configure->Networking->Networks. Then create VN with all + combination of spl characters + 2. 
Verify the VN in WebUI, OPS and API server. + + Pass Criteria : Step 2 should pass + ''' + vn_list = [topo.vn_name_beg_spl_char, topo.vn_name_end_spl_char, topo.vn_name_mid_spl_char] + opt_list = [] + result = True + for vn in vn_list: + self.webui.logger.debug("Step 1 : Create vn %s " %(vn)) + if self.webui_common.click_configure_networks(): + add = self.webui_common.find_element("//i[contains(@class,'icon-plus')]", 'xpath') + add.click() + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + self.webui_common.find_element("//input[contains(@name,'display_name')]", \ + 'xpath').send_keys(vn) + self.webui_common.wait_till_ajax_done(self.browser, wait=3) + self.webui_common.click_element('configure-networkbtn1') + self.webui_common.wait_till_ajax_done(self.browser) + uuid = self.webui_common.get_vn_detail_ui('UUID', vn_name='vn1') + self.vn_disp_name = self.webui_common.get_vn_detail_ui('Display Name', \ + vn_name='vn1') + self.webui.logger.debug("Step 2 : Verify WebUI server after editing") + if not self.webui.verify_vn_after_edit_ui('Display Name', vn, opt_list): + self.webui.logger.debug('Virtual networks config data verification \ + in UI failed') + result = result and False + self.webui.logger.debug("Step 3 : Verify API server after editing") + if not self.webui.verify_vn_after_edit_api('Display Name', vn, uuid, opt_list): + self.webui.logger.debug('Virtual networks config data verification \ + in API failed') + result = result and False + self.webui.logger.debug("Step 4 : Verify OPS server after editing") + if not self.webui.verify_vn_after_edit_ops('Display Name', vn, vn, opt_list): + self.webui.logger.debug('Virtual networks config data verification \ + in OPS failed') + result = result and False + self.webui.logger.debug("Step 5 : Remove the VN which is added") + if not self.webui_common.edit_remove_option("Networks", 'remove', vn_name='vn1'): + self.webui.logger.debug('Editing network with advanced options is failed') + result = result and False + 
return result + # test3_25_create_vn_with_spl_char + # end WebuiTestSanity diff --git a/serial_scripts/webui/webui_common.py b/serial_scripts/webui/webui_common.py index 719f79320..98c2ff708 100644 --- a/serial_scripts/webui/webui_common.py +++ b/serial_scripts/webui/webui_common.py @@ -5,6 +5,8 @@ from selenium.common.exceptions import WebDriverException from selenium.webdriver.support.ui import Select from selenium.common.exceptions import NoSuchElementException +from selenium.common.exceptions import TimeoutException +from selenium.common.exceptions import StaleElementReferenceException import os import time import datetime @@ -66,7 +68,6 @@ def check_login(self, login_type='horizon'): def wait_till_ajax_done(self, browser, jquery=True, wait=5): jquery = False - wait = 5 if jquery: WebDriverWait( browser, @@ -261,9 +262,12 @@ def click_on_create( select_project=True, prj_name='admin'): browser = self.browser - if element_type == 'Security Group': - element = 'Edit ' + element_type + if element_type in ('Security Group', 'DNS Server', 'DNS Record'): + element = 'Create ' + element_type element_new = func_suffix[:-1] + elif element_type == 'Port': + element = 'Create ' + element_type + element_new = 'Ports' elif element_type == 'Floating IP': element = 'Allocate ' + element_type element_new = func_suffix @@ -284,23 +288,24 @@ def click_on_create( (click_func)) return False if select_project: - if element_type == 'DNSRecord': + if element_type == 'DNS Record': self.select_dns_server(prj_name) - elif not element_type in ['DNSServer']: + elif not element_type in ['DNS Server']: self.select_project(prj_name) self.logger.info("Creating %s %s using contrail-webui" % (element_type, name)) - try: - browser.find_element_by_xpath( - "//a[@class='widget-toolbar-icon' and @title='%s']" % - element).click() - except WebDriverException: + if not save: try: - self.click_element('close', 'class', screenshot=False) - except: - pass - raise - self.wait_till_ajax_done(self.browser) 
+ browser.find_element_by_xpath( + "//a[@class='widget-toolbar-icon' and @title='%s']" % + element).click() + except WebDriverException: + try: + self.click_element('close', 'class', screenshot=False) + except: + pass + raise + self.wait_till_ajax_done(self.browser) if save: self.click_element(elem) if not self.check_error_msg( @@ -416,16 +421,25 @@ def send_keys( element_name_list, element_by_list='id', browser=None, + clear=False, if_elements=[], elements=False): if not browser: browser = self.browser send_keys_to_element = self.find_element( element_name_list, element_by_list, browser, if_elements, elements) + if clear: + send_keys_to_element.clear() send_keys_to_element.send_keys(keys) time.sleep(2) # end send_keys + def click_on_caret_down(self, browser=None): + if not browser: + browser = self.browser + self.click_element('icon-caret-down', 'class', browser, wait=2) + # end click_on_caret_down + def find_element( self, element_name_list, @@ -676,6 +690,45 @@ def select_from_dropdown(self, element_text, browser=None, grep=False): return True # end select_from_dropdown_list + def find_select_from_dropdown( + self, element_text, + browser=None, index=0, + case=None): + flag = False + result = True + if not browser: + browser = self.browser + br = self.find_element( + 'ui-autocomplete', 'class', elements=True) + for index in range(len(br)): + if br[index].text: + break + ele_types = self.find_element( + 'ui-menu-item', 'class', elements=True, browser=br[index]) + if not ele_types: + self.logger.debug('Drop-down list not found') + return False + ele_dropdown = [element.find_element_by_tag_name('a') + for element in ele_types] + for ele in ele_dropdown: + if case == None: + comp_ele = ele.text + elif case == 'lower': + comp_ele = ele.text.lower() + elif case == 'upper': + comp_ele = ele.text.upper() + if comp_ele == element_text: + flag = True + ele.click() + break + if not flag: + self.logger.debug('%s not found in the dropdown' % element_text) + result = result 
and False + else: + result = result and False + return result + # end find_select_from_dropdown + def dropdown(self, id, element_name, element_type=None, browser_obj=None): if browser_obj: obj = browser_obj @@ -706,7 +759,8 @@ def click_select_multiple(self, element_type, element_list): self.click_element([element_type, 'input'], ['id', 'tag']) select_list = self.browser.find_elements_by_xpath( "//*[@class = 'select2-match']/..") - self._click_if_element_found(element, select_list) + if not self._click_if_element_found(element, select_list): + return False self.logger.info( 'All elements from %s successfully got selected' % (element_list)) @@ -722,7 +776,10 @@ def _click_if_element_found(self, element_name, elements_list): for element in elements_list: if element.text == element_name: element.click() - break + return True + self.find_element('select2-drop-mask').click() + self.logger.error('No matches found error') + return False # end _click_if_element_found def click_if_element_found(self, objs, element_text, grep=False): @@ -752,13 +809,24 @@ def select_project(self, project_name='admin'): # end select_project def select_dns_server(self, dns_server_name): - current_dns_server = self.find_element('s2id_ddDNSServers').text + current_dns_server = self.find_element( + 's2id_undefined').text if not current_dns_server == dns_server_name: - self.click_element('s2id_ddDNSServers') + self.click_element('s2id_undefined') elements_obj_list = self.find_select2_drop_elements(self.browser) self.click_if_element_found(elements_obj_list, dns_server_name) # end select_dns_server + def select_network(self, network_name='all networks'): + current_network = self.find_element( + ['s2id_networks\-breadcrumb\-dropdown', 'span'], ['id', 'tag']).text + if not current_network == network_name: + self.click_element( + ['s2id_networks\-breadcrumb\-dropdown', 'span'], ['id', 'tag'], jquery=False, wait=4) + elements_obj_list = self.find_select2_drop_elements(self.browser) + 
self.click_if_element_found(elements_obj_list, network_name) + # end select_network + def get_element(self, name, key_list): get_element = '' for key in key_list: @@ -775,10 +843,13 @@ def append_to_list(self, elements_list, key_value): elements_list.append({'key': k, 'value': v}) # end append_to_dict - def get_memory_string(self, dictn, unit='B'): + def get_memory_string(self, dictn, unit='B', control_flag=0): memory_list = [] if isinstance(dictn, dict): - memory = dictn.get('cpu_info').get('meminfo').get('res') + if not control_flag: + memory = dictn.get('cpu_info').get('meminfo').get('res') + else: + memory = dictn.get('cpu_info')[0].get('mem_res') else: memory = dictn memory = memory / 1024.0 @@ -825,7 +896,10 @@ def get_memory_string(self, dictn, unit='B'): def get_cpu_string(self, dictn): offset = 15 - cpu = float(dictn.get('cpu_info').get('cpu_share')) + if isinstance(dictn.get('cpu_info'), list): + cpu = float(dictn.get('cpu_info')[0].get('cpu_share')) + else: + cpu = float(dictn.get('cpu_info').get('cpu_share')) cpu_range = range(int(cpu * 100) - offset, int(cpu * 100) + offset) cpu_range = map(lambda x: x / 100.0, cpu_range) cpu_list = [str('%.2f' % cpu) + ' %' for cpu in cpu_range] @@ -843,7 +917,7 @@ def get_analytics_msg_count_string(self, dictn, size): int(analytics_msg_count) + offset) analytics_messages_string = [ - str(count) + + str(count) + ' [' + str(size) + ']' for count in analytics_msg_count_list for size in tx_socket_size] @@ -973,30 +1047,41 @@ def check_rows(self, length, obj): return rows # end check_rows - def click_icon_caret(self, row_index, obj=None, length=None, indx=0): - if not obj: - obj = self.find_element('grid-canvas', 'class') - rows = None - rows = self.get_rows(obj) - if length: - rows = self.check_rows(length, obj) - br = rows[row_index] + def click_icon_caret(self, row_index, obj=None, length=None, indx=0, net=0): element0 = ('slick-cell', indx) - element1 = ('div', 'i') - self.click_element( - [element0, element1], 
['class', 'tag'], br, if_elements=[0]) + if not net: + element1 = ('div', 'span') + else: + element1 = ('div', 'i') + try: + if not obj: + obj = self.find_element('grid-canvas', 'class') + rows = None + rows = self.get_rows(obj) + if length: + rows = self.check_rows(length, obj) + br = rows[row_index] + self.click_element( + [element0, element1], ['class', 'tag'], br, if_elements=[0], delay=25) + except StaleElementReferenceException: + rows = self.get_rows(obj) + if length: + rows = self.check_rows(length, obj) + br = rows[row_index] + self.click_element( + [element0, element1], ['class', 'tag'], br, if_elements=[0], delay=25) # end click_icon_caret def click_monitor_instances_basic(self, row_index, length=None): self.click_monitor_instances() self.wait_till_ajax_done(self.browser) - self.click_icon_caret(row_index, length=length) + self.click_icon_caret(row_index, length=length, net=1) # end click_monitor_instances_basic_in_webui def click_monitor_networks_basic(self, row_index): self.click_element('Networks', 'link_text', jquery=False) time.sleep(2) - self.click_icon_caret(row_index) + self.click_icon_caret(row_index, net=1) rows = self.get_rows() self.click_element('icon-list', 'class', browser=rows[row_index + 1]) self.wait_till_ajax_done(self.browser) @@ -1086,6 +1171,7 @@ def click_configure_service_instance(self): def delete_element(self, fixture=None, element_type=None): result = True delete_success = None + ver_flag = False if WebuiCommon.count_in == False: if not element_type == 'svc_template_delete': self.click_configure_networks() @@ -1095,14 +1181,14 @@ def delete_element(self, fixture=None, element_type=None): if not self.click_configure_service_instance(): result = result and False element_name = fixture.si_name - element_id = 'btnDeletesvcInstances' - popup_id = 'btnCnfDelSInstPopupOK' + element_id = 'btnActionDelSvcInst' + popup_id = 'configure-service_instancebtn1' elif element_type == 'vn_delete': if not self.click_configure_networks(): result = 
result and False element_name = fixture.vn_name - element_id = 'btnDeleteVN' - popup_id = 'btnCnfRemoveMainPopupOK' + element_id = 'linkVNDelete' + popup_id = 'configure-networkbtn1' elif element_type == 'svc_template_delete': if not self.click_configure_service_template(): result = result and False @@ -1119,45 +1205,45 @@ def delete_element(self, fixture=None, element_type=None): if not self.click_configure_fip(): result = result and False element_name = fixture.pool_name + ':' + fixture.vn_name - element_id = 'btnDeletefip' - popup_id = 'btnCnfReleasePopupOK' + element_id = 'linkFipRelease' + popup_id = 'configure-fipbtn1' elif element_type == 'policy_delete': if not self.click_configure_policies(): result = result and False element_name = fixture.policy_name - element_id = 'btnDeletePolicy' - popup_id = 'btnCnfRemoveMainPopupOK' + element_id = 'icon-trash' + popup_id = 'configure-policybtn1' elif element_type == 'disassociate_fip': if not self.click_configure_fip(): result = result and False element_name = fixture.vn_name + ':' + fixture.pool_name - element_id = 'btnDeletefip' - popup_id = 'btnCnfReleasePopupOK' + element_id = 'linkFipRelease' + popup_id = 'configure-fipbtn1' elif element_type == 'port_delete': if not self.click_configure_ports(): result = result and False element_name = fixture.vn_name - element_id = 'icon-trash' - id_port_delete = 'btnDeletePorts' - popup_id = 'btnCnfRemoveMainPopupOK' + element_id = 'btnDeletePort' + id_port_delete = 'icon-trash' + popup_id = 'configure-Portsbtn1' elif element_type == 'router_delete': if not self.click_configure_routers(): result = result and False element_name = 'all' - element_id = 'btnDeleteLogicalRouter' - popup_id = 'btnCnfDelLRPopupOK' + element_id = 'icon-trash' + popup_id = 'configure-logical_routerbtn1' elif element_type == 'dns_server_delete': if not self.click_configure_dns_servers(): result = result and False element_name = 'all' - element_id = 'btnDeleteDNSServer' - popup_id = 'btnCnfDelPopupOK' 
+ element_id = 'btnActionDelDNS' + popup_id = 'configure-dns_serverbtn1' elif element_type == 'dns_record_delete': if not self.click_configure_dns_records(): result = result and False element_name = 'all' - element_id = 'btnDeleteDNSRecord' - popup_id = 'btnCnfDelMainPopupOK' + element_id = 'btnActionDelDNS' + popup_id = 'configure-dns_recordbtn1' elif element_type == 'security_group_delete': if not self.click_configure_security_groups(): result = result and False @@ -1187,15 +1273,23 @@ def delete_element(self, fixture=None, element_type=None): element_text = element.find_elements_by_tag_name( 'div')[2].text div_obj = element.find_elements_by_tag_name('div')[1] + if not ver_flag: + if element_type == 'svc_template_delete': + version = re.match('\S+(\s.*)', element_text) + element_name+= version.group(1) + ver_flag = True if (element_text == element_name): div_obj.find_element_by_tag_name('input').click() if_select = True rows = self.get_rows(canvas=True) if if_select: - self.click_element(element_id) + if element_type in ['policy_delete', 'router_delete']: + self.click_element(element_id, 'class') + else: + self.click_element(element_id) if element_type == 'port_delete': - self.click_element(id_port_delete) + self.click_element("//a[@data-original-title='Delete']", 'xpath') self.click_element(popup_id, screenshot=False) delete_success = True if not self.check_error_msg( @@ -1224,7 +1318,8 @@ def click_configure_networks(self): self.click_element('btn-configure') time.sleep(2) self._click_on_config_dropdown(self.browser) - self.click_element(['config_net_vn', 'Networks'], ['id', 'link_text']) + self.click_element(['config_networking_networks', 'Networks'], [ + 'id', 'link_text']) time.sleep(1) return self.check_error_msg("configure networks") # end click_configure_networks_in_webui @@ -1237,7 +1332,7 @@ def __wait_for_networking_items(self, a): def click_configure_fip(self): self._click_on_config_dropdown(self.browser) - self.click_element(['config_net_fip', 'a'], 
['id', 'tag']) + self.click_element(['config_networking_fip', 'a'], ['id', 'tag']) self.wait_till_ajax_done(self.browser) time.sleep(1) return self.check_error_msg("configure fip") @@ -1280,6 +1375,7 @@ def click_monitor_debug(self): ['menu', 'item'], ['id', 'class'], if_elements=[1]) children[2].find_element_by_tag_name('span').click() self.wait_till_ajax_done(self.browser) + time.sleep(5) # end click_monitor_debug def click_monitor_packet_capture(self): @@ -1294,7 +1390,7 @@ def click_monitor_networking(self): children = self.find_element( ['menu', 'item'], ['id', 'class'], if_elements=[1]) children[1].find_element_by_tag_name('span').click() - time.sleep(2) + time.sleep(5) self.wait_till_ajax_done(self.browser) # end click_monitor_in_webui @@ -1365,22 +1461,26 @@ def click_monitor_analytics_nodes_advance(self, row_index): def click_monitor_common_advance(self, row_index): self.click_icon_caret(row_index) - self.click_element(['dashboard-box', 'icon-cog'], ['id', 'class']) - self.click_element(['dashboard-box', 'icon-code'], ['id', 'class']) + self.click_element(["div[class*='widget-box transparent']", \ + 'icon-cog'], ['css', 'class']) + self.click_element(["div[class*='widget-box transparent']", \ + 'icon-code'], ['css', 'class']) # end click_monitor_common_advance_in_webui def click_monitor_common_basic(self, row_index): self.wait_till_ajax_done(self.browser) time.sleep(3) self.click_icon_caret(row_index) - self.click_element(['dashboard-box', 'icon-cog'], ['id', 'class']) - self.click_element(['dashboard-box', 'icon-list'], ['id', 'class']) + self.click_element(["div[class*='widget-box transparent']", \ + 'icon-cog'], ['css', 'class']) + self.click_element(["div[class*='widget-box transparent']", \ + 'icon-list'], ['css', 'class']) # end click_monitor_common_basic_in_webui def click_monitor_networks_advance(self, row_index): self.click_element('Networks', 'link_text') self.check_error_msg("monitor networks") - self.click_icon_caret(row_index) + 
self.click_icon_caret(row_index, net=1) rows = self.get_rows() self.click_element('icon-code', 'class', browser=rows[row_index + 1]) self.wait_till_ajax_done(self.browser) @@ -1421,6 +1521,15 @@ def click_configure_ipam_basic(self, row_index): self.wait_till_ajax_done(self.browser) # end click_configure_ipam_basic_in_webui + def click_configure_fip_basic(self, row_index): + self.click_element('Floating IPs', 'link_text') + self.check_error_msg("configure fip") + rows = self.get_rows() + rows[row_index].find_elements_by_tag_name( + 'div')[0].find_element_by_tag_name('i').click() + self.wait_till_ajax_done(self.browser) + # end click_configure_fip_basic + def click_configure_project_quotas(self): self._click_on_config_dropdown(self.browser, index=0) self.click_element( @@ -1479,7 +1588,6 @@ def click_configure_interfaces(self): def click_configure_dns_servers(self): self.wait_till_ajax_done(self.browser) self._click_on_config_dropdown(self.browser, 4) - # self.click_element(['config_dns_dnsservers', 'a'], ['id', 'tag']) self.click_element(['config_dns_servers', 'a'], ['id', 'tag']) time.sleep(2) return self.check_error_msg("configure dns servers") @@ -1488,7 +1596,7 @@ def click_configure_dns_servers(self): def click_configure_dns_records(self): self.wait_till_ajax_done(self.browser) self._click_on_config_dropdown(self.browser, 4) - self.click_element(['config_dns_dnsrecords', 'a'], ['id', 'tag']) + self.click_element(['config_dns_records', 'a'], ['id', 'tag']) time.sleep(2) return self.check_error_msg("configure dns records") # end click_configure_dns_records @@ -1544,7 +1652,7 @@ def select_project_in_openstack( browser, jquery=False, wait=4) - if os_release != 'juno': + elif os_release == 'icehouse': ui_proj = self.find_element( ['tenant_switcher', 'h3'], ['id', 'css'], browser).get_attribute('innerHTML') if ui_proj != project_name: @@ -1553,9 +1661,13 @@ def select_project_in_openstack( tenants = self.find_element( ['tenant_list', 'a'], ['id', 'tag'], browser, 
[1]) self.click_if_element_found(tenants, project_name) - if os_release == 'juno': - self.click_element( - ['button', 'caret'], ['tag', 'class'], browser) + else: + if os_release in ('liberty', 'mitaka'): + self.click_element( + 'fa-caret-down', 'class', browser) + else: + self.click_element( + ['button', 'caret'], ['tag', 'class'], browser) prj_obj = self.find_element( ['dropdown-menu', 'a'], ['class', 'tag'], browser, [1]) for element in prj_obj: @@ -1758,7 +1870,7 @@ def get_process_status_string( def get_advanced_view_str(self): domArry = json.loads(self.browser.execute_script( - "var eleList = $('pre').find('span'), dataSet = []; for(var i = 0; i < eleList.length-4; i++){if(eleList[i].className == 'key' && eleList[i + 4].className == 'string'){ var j = i + 4 , itemArry = []; while(j < eleList.length && eleList[j].className == 'string' ){ itemArry.push(eleList[j].innerHTML); j++;} dataSet.push({key : eleList[i].innerHTML, value :itemArry});}} return JSON.stringify(dataSet);")) + "var eleList = $('pre').find('span'), dataSet = []; for(var i = 0; i < eleList.length-4; i++){if(eleList[i].className == 'key' && eleList[i + 4].className == 'value string'){ var j = i + 4 , itemArry = []; while(j < eleList.length && eleList[j].className == 'value string' ){ itemArry.push(eleList[j].innerHTML); j++;} dataSet.push({key : eleList[i].innerHTML, value :itemArry});}} return JSON.stringify(dataSet);")) domArry = self.trim_spl_char(domArry) return domArry # end get_advanced_view_str @@ -1772,7 +1884,7 @@ def get_advanced_view_str_special(self): def get_advanced_view_num(self): domArry = json.loads(self.browser.execute_script( - "var eleList = $('pre').find('span'), dataSet = []; for(i = 0; i < eleList.length-4; i++){if(eleList[i].className == 'key'){if(eleList[i + 1].className == 'preBlock' && eleList[i + 4].className == 'number'){dataSet.push({key : eleList[i+3].innerHTML, value : eleList[i + 4].innerHTML});}}} return JSON.stringify(dataSet);")) + "var eleList = 
$('pre').find('span'), dataSet = []; for(i = 0; i < eleList.length-4; i++){if(eleList[i+3].className == 'value'){if(eleList[i + 8].className == 'key' && eleList[i + 9].className == 'value number'){dataSet.push({key : eleList[i + 8].innerHTML, value : eleList[i + 9].innerHTML});}}} return JSON.stringify(dataSet);")) domArry = self.trim_spl_char(domArry) return domArry # end get_advanced_view_num @@ -1791,10 +1903,33 @@ def get_vm_basic_view(self): def get_basic_view_infra(self): domArry = json.loads(self.browser.execute_script( - "var eleList = $('[id^=detail-columns]').find('li').find('div'),dataSet = []; for(var i = 0; i < eleList.length-1; i++){if(eleList[i].className== 'key span5' && eleList[i + 1].className == 'value span7'){dataSet.push({key : eleList[i].innerHTML.replace(/( )*/g,''),value:eleList[i+1].innerHTML.replace(/^\s+|\s+$/g, '')});}} return JSON.stringify(dataSet);")) + "var eleList = $('[class^=item-list]').find('li').find('span'),dataSet = []; for(var i = 0; i < eleList.length-1; i++){if(eleList[i].classList.contains('key', 'span5') && eleList[i + 1].classList.contains('value', 'span7')){dataSet.push({key : eleList[i].innerHTML.replace(/( )*/g&&/^\s+|\s+$/g,''),value:eleList[i+1].innerHTML.replace(/\s+/g, ' ')});}} return JSON.stringify(dataSet);")) return domArry # end get_basic_view_infra + def get_advanced_view_list(self, name, key_val, index=3): + key_val_lst1 = self.find_element('pre', 'tag') + key_val_lst2 = self.find_element( + 'key-value', 'class', elements=True, browser=key_val_lst1) + for element in key_val_lst2: + if name in element.text: + keys_arry = self.find_element( + 'key', 'class', elements=True, browser=element) + # Find and click are separated here to avoid timeout issues and capture screenshot in case find fails + plus_element = self.find_element('icon-plus', 'class', elements=True, browser=element)[index] + plus_element.click() + vals_arry = self.find_element( + 'value', 'class', elements=True, browser=element) + for ind, ele 
in enumerate(keys_arry): + if key_val == ele.text: + key1 = key_val + val1 = [str(vals_arry[ind].text.strip('[ \n]'))][0].split('\n') + flag = 1 + break + break + return key1, val1, flag + # end get_advanced_view_list + def trim_spl_char(self, d): data = [] for item in d: @@ -1864,6 +1999,7 @@ def list_in_dict(self, dict_ele): def match_ui_values(self, complete_ops_data, webui_list): error = 0 match_count = 0 + count = 0 for ops_items in complete_ops_data: match_flag = 0 for webui_items in webui_list: @@ -1879,11 +2015,19 @@ def match_ui_values(self, complete_ops_data, webui_list): elif self.list_in_dict(ops_items['value']) and self.list_in_dict(webui_items['value']) and (ops_items['key'] == webui_items['key']): list_ops = ops_items['value'].split(', ') list_webui = webui_items['value'].split(', ') - - if set(list_ops) == set(list_webui): + for list_webui_index in range(len(list_webui)): + for list_ops_index in range(len(list_ops)): + if (list_webui[ + list_webui_index] == list_ops[list_ops_index]): + count += 1 + break + elif isinstance(list_webui[list_webui_index], (str, unicode)) and list_webui[list_webui_index].strip() == list_ops[list_ops_index]: + count += 1 + break + if(count == len(list_ops) or count == len(list_webui)): self.logger.info( - "Ops key '%s' with ops_value '%s' matched with webui_value '%s'" % - (ops_items['key'], ops_items['value'], webui_items['value'])) + "Ops key %s.0 : value %s matched" % + (ops_items['key'], list_ops)) match_flag = 1 match_count += 1 break @@ -1894,7 +2038,6 @@ def match_ui_values(self, complete_ops_data, webui_list): (len(list_ops), match_count)) error = 1 break - if not match_flag: self.logger.error( "Ops key %s ops_value %s not found/matched with %s" % @@ -1917,8 +2060,8 @@ def date_time_string(self): # end date_time_string def match_ui_kv(self, complete_ops_data, merged_arry): - # self.logger.info("opserver data to be matched : %s"% complete_ops_data) - # self.logger.info("webui data to be matched : %s"% merged_arry) 
+ self.logger.info("opserver data to be matched : %s"% complete_ops_data) + self.logger.info("webui data to be matched : %s"% merged_arry) self.logger.debug(self.dash) no_error_flag = True match_count = 0 @@ -1979,6 +2122,48 @@ def match_ui_kv(self, complete_ops_data, merged_arry): 'discards', 'ds_flow_action_drop', 'ds_flood', + 'ds_mcast_df_bit', + 'ds_flow_no_memory', + 'ds_push', + 'ds_invalid_if', + 'ds_pull', + 'ds_no_fmd', + 'ds_invalid_arp', + 'ds_trap_no_if', + 'ds_vlan_fwd_tx', + 'ds_invalid_mcast_source', + 'ds_invalid_source', + 'ds_flow_action_invalid', + 'ds_invalid_packet', + 'ds_flow_invalid_protocol', + 'ds_invalid_vnid', + 'ds_flow_table_full', + 'ds_invalid_label', + 'ds_garp_from_vm', + 'ds_frag_err', + 'ds_vlan_fwd_enq', + 'ds_clone_fail', + 'ds_arp_no_route', + 'ds_misc', + 'ds_interface_rx_discard', + 'ds_flow_unusable', + 'ds_mcast_clone_fail', + 'ds_invalid_protocol', + 'ds_head_space_reserve_fail', + 'ds_interface_tx_discard', + 'ds_nowhere_to_go', + 'ds_arp_no_where_to_go', + 'ds_l2_no_route', + 'ds_cksum_err', + 'ds_flow_queue_limit_exceeded', + 'ds_ttl_exceeded', + 'ds_flow_nat_no_rflow', + 'ds_invalid_nh', + 'ds_head_alloc_fail', + 'ds_pcow_fail', + 'ds_rewrite_fail', + 'primary', + 'no_config_intf_list', 'total_flows', 'active_flows', 'aged_flows', @@ -2005,7 +2190,17 @@ def match_ui_kv(self, complete_ops_data, merged_arry): 'where', 'select', 'disk_used_bytes', - 'mem_virt' + 'mem_virt', + 'average_blocked_duration', + 'admin_down', + 'sm_back_pressure', + 'log_local', + 'log_category', + 'error_intf_list', + 'max_sm_queue_count', + 'status', + 'control_node_list_cfg', + 'dns_servers', 'chunk_select_time'] key_list = ['exception_packets_dropped', 'l2_mcast_composites'] index_list = [] @@ -2044,7 +2239,7 @@ def match_ui_kv(self, complete_ops_data, merged_arry): item_webui_value, list) if (item_ops_key == item_webui_key and (item_ops_value == item_webui_value or ( - item_ops_value == 'None' and item_webui_value == 'null'))): + 
item_ops_value == 'None' and item_webui_value == 'null') or (item_ops_value == 'None Total' and item_webui_value == '0 Total'))): self.logger.info( "Ops/api key %s : value %s matched" % (item_ops_key, item_ops_value)) @@ -2080,24 +2275,30 @@ def match_ui_kv(self, complete_ops_data, merged_arry): matched_flag = 1 match_count += 1 break + elif item_ops_key == item_webui_key and not isinstance(item_ops_value, list) and isinstance(item_webui_value, list) and (item_ops_value in item_webui_value): + self.logger.info( + "Ops/api key %s : value %s matched in webui value range list %s " % + (item_ops_key, item_ops_value, item_webui_value)) + matched_flag = 1 + match_count += 1 + break elif item_ops_key == item_webui_key and isinstance(item_webui_value, list) and isinstance(item_ops_value, list): count = 0 - if len(item_webui_value) == len(item_ops_value): - for item_webui_index in range(len(item_webui_value)): - for item_ops_index in range(len(item_ops_value)): - if (item_webui_value[ - item_webui_index] == item_ops_value[item_ops_index]): - count += 1 - break - elif isinstance(item_webui_value[item_webui_index], (str, unicode)) and item_webui_value[item_webui_index].strip() == item_ops_value[item_ops_index]: - count += 1 - break - if(count == len(item_webui_value)): - self.logger.info( - "Ops key %s.0 : value %s matched" % - (item_ops_key, item_ops_value)) - matched_flag = 1 - match_count += 1 + for item_webui_index in range(len(item_webui_value)): + for item_ops_index in range(len(item_ops_value)): + if (item_webui_value[ + item_webui_index] == item_ops_value[item_ops_index]): + count += 1 + break + elif isinstance(item_webui_value[item_webui_index], (str, unicode)) and item_webui_value[item_webui_index].strip() == item_ops_value[item_ops_index]: + count += 1 + break + if(count == len(item_webui_value)): + self.logger.info( + "Ops key %s.0 : value %s matched" % + (item_ops_key, item_ops_value)) + matched_flag = 1 + match_count += 1 break elif item_ops_key == 
item_webui_key: webui_match_try_list.append( @@ -2131,6 +2332,12 @@ def match_ui_kv(self, complete_ops_data, merged_arry): str(not_matched_count)) self.logger.info("Total ops/api key-value match skipped count is %s" % str(skipped_count)) + if not_matched_count <= 3: + no_error_flag = True + if not_matched_count > 0: + self.logger.debug( + "Check the %s mismatched key-value pair(s)" % + str(not_matched_count)) return no_error_flag # end match_ui_kv @@ -2181,6 +2388,7 @@ def get_slick_cell_text(self, br=None, index=1): def click_on_cancel_if_failure(self, element_id): try: + element_id = 'cancelBtn' obj = self.find_element(element_id, screenshot=False) obj.click() except: @@ -2203,12 +2411,631 @@ def get_item_list(self, ui_list): return ui_list # end get_item_list - def expand_advance_details(self): - while True: + def expand_advance_details(self, count=20): + flag = 0 + while flag < count: + plus_objs = [] try: plus_objs = self.find_element("i[class*='icon-plus expander']",'css', elements=True,screenshot=False) + flag += 1 self.click(plus_objs) - except WebDriverException: + time.sleep(3) + except (WebDriverException, TimeoutException): break # end expand_advance_details + def get_api_detail(self, uuid, option): + self.vn_api_url = option + uuid + return self._get_list_api(self.vn_api_url) + # end get_api_detail + + def get_vn_detail_ops(self, domain, project_vn, vn_name): + self.vn_ops_url = 'virtual-network/' + domain + project_vn + ":" + \ + vn_name + "?flat" + return self._get_list_ops(self.vn_ops_url) + # end get_vn_detail_ops + + def click_icon_cog(self, index, browser, option, type): + self.click_element('icon-cog', 'class', index) + self.wait_till_ajax_done(index) + tool_tip_option = "//a[contains(@class,'tooltip-success')]" + tool_tip = self.find_element(tool_tip_option, 'xpath', index, elements=True) + if option == 'edit': + tool_tip[0].click() + else: + if type == 'Networks': + tool_tip[1].click() + self.click_element('configure-networkbtn1', 
browser=browser) + elif type =='Ports': + tool_tip[2].click() + self.click_element('configure-Portsbtn1', browser=browser) + self.wait_till_ajax_done(index) + # end click_icon_cog + + def get_vn_detail_ui(self, search_key, index=0, vn_name=None): + option = 'Networks' + if not self.click_configure_networks(): + self.dis_name = None + self.wait_till_ajax_done(self.browser) + if not index: + rows = self.get_rows(canvas=True) + if vn_name: + for row in rows: + out = re.search(vn_name, str(row.text)) + index += 1 + if out: + break + else: + index = len(rows) + toggle_icon = "//i[contains(@class,'toggleDetailIcon')]" + edit = self.find_element(toggle_icon, 'xpath', elements=True) + edit[index-1].click() + self.wait_till_ajax_done(self.browser) + item = self.find_element("//ul[contains(@class,'item-list')]", 'xpath') + out_split = re.split("\n",item.text) + join_res = "-".join(out_split) + if search_key == 'Display Name': + regexp = "Display Name\-(.*)\-UUID" + flag = True + elif search_key == 'UUID': + regexp = "UUID\-(.*)\-Admin" + flag = True + elif search_key == 'Policy': + regexp = "Policies\-(.*)\-Forwarding Mode" + flag = True + elif search_key == 'Subnet': + regexp = "Subnet(.*)Name" + flag = True + elif search_key == 'Host Route': + regexp = "Host Route\(s\)(.*)DNS" + flag = True + elif search_key == 'Adv Option': + regexp = "Shared.*Floating" + flag = False + elif search_key == 'DNS': + regexp = "DNS Server\(s\)(.*)Ecmp" + flag = False + elif search_key == 'FIP': + regexp = "Floating IP Pool\(s\)(.*)Route" + flag = False + elif search_key == 'RT': + regexp = "Route Target\(s\)(.*)Export" + flag = False + elif search_key == 'ERT': + regexp = "Export Route Target\(s\)(.*)Import" + flag = False + elif search_key == 'IRT': + regexp = "Import Route Target\(s\)(.*)" + flag = False + out = re.search(regexp,join_res) + if flag: + result = out.group(1) + else: + result = out.group(0) + return result + # get_vn_detail_ui + + def edit_remove_option(self, option, category, 
vn_name=None): + self.option = option + index = 0 + try: + if self.option == "Networks": + self.logger.info("Go to Configure->Networking->Networks page") + if not self.click_configure_networks(): + result = result and False + elif self.option == "Ports": + if not self.click_configure_ports(): + result = result and False + rows = self.get_rows(canvas=True) + if rows: + self.logger.info("%d rows are there under %s " % (len(rows),self.option)) + self.logger.info("%s are available to edit. Editing the %s" % (option,option)) + if vn_name: + for row in rows: + out = re.search(vn_name, str(row.text)) + index += 1 + if out: + break + else: + index = len(rows) + if len(rows): + self.wait_till_ajax_done(self.browser) + self.click_icon_cog(rows[index-1], self.browser, category, option) + else: + self.logger.error("No %s are available to edit" % (option)) + self.screenshot(option) + self.wait_till_ajax_done(self.browser) + result = index + + except WebDriverException: + self.logger.error("Error while trying to edit %s" % (option)) + self.screenshot(option) + result = False + self.click_on_cancel_if_failure('cancelBtn') + raise + return result + # edit_remove_option + + def edit_vn_without_change(self): + result = True + option = "Networks" + try: + self.edit_vn_result = self.edit_remove_option(option, 'edit') + if self.edit_vn_result: + try: + self.logger.info("Click on save button") + self.click_element('configure-networkbtn1') + except WebDriverException: + self.logger.error("Error while trying to save %s" %(option)) + result = result and False + self.screenshot(option) + self.click_on_cancel_if_failure('cancelBtn') + raise + else: + self.logger.error("Clicking the Edit Button is not working") + result = result and False + + except WebDriverException: + self.logger.error("Error while trying to edit %s" % (option)) + self.screenshot(option) + result = result and False + self.click_on_cancel_if_failure('cancelBtn') + raise + return result + # edit_vn_without_change + + def 
edit_vn_disp_name_change(self, vn_name): + result = True + option = "Networks" + try: + self.edit_vn_result = self.edit_remove_option(option, 'edit') + if self.edit_vn_result: + self.click_element('display_name') + self.send_keys(vn_name, 'span12', 'class', clear=True) + self.click_element('configure-networkbtn1') + else: + self.logger.error("Clicking the Edit Button is not working") + result = result and False + + except WebDriverException: + self.logger.error("Error while trying to edit %s" % (option)) + self.screenshot(option) + result = result and False + self.click_on_cancel_if_failure('cancelBtn') + raise + return result + # edit_vn_disp_name_change + + def add_vn_with_policy(self,pol_name): + result = True + option = "Networks" + try: + self.edit_vn_result = self.edit_remove_option(option, 'edit') + if self.edit_vn_result: + self.click_element('s2id_network_policy_refs_dropdown') + select_highlight = "//li[contains(@class,'select2-highlighted')]" + select = self.find_element(select_highlight, 'xpath') + pol_name = select.text + select.click() + self.click_element('configure-networkbtn1') + return pol_name + else: + self.logger.error("Clicking the Edit Button is not working") + result = result and False + + except WebDriverException: + self.logger.error("Error while trying to edit %s" % (option)) + self.screenshot(option) + result = result and False + self.click_on_cancel_if_failure('cancelBtn') + raise + return result + # add_vn_with_policy + + def del_vn_with_policy(self,pol_name): + result = True + option = "Networks" + try: + policy_ui = str(self.get_vn_detail_ui('Policy')) + policy = pol_name.split(":") + out = re.search(policy[-1],policy_ui) + if out: + index = 1 + self.edit_vn_result = self.edit_remove_option(option, 'edit') + if self.edit_vn_result: + del_row = self.find_element('s2id_network_policy_refs_dropdown') + count = 0 + if index > 0: + close_option = "//a[contains(@class,'select2-search-choice-close')]" + for element in 
self.find_element(close_option, 'xpath', elements=True): + count = count + 1 + if count == index: + element.click() + self.logger.info("Policy got removed successfully") + self.click_element('configure-networkbtn1') + self.wait_till_ajax_done(self.browser) + else: + self.logger.warn("There is no policy to edit") + else: + self.logger.error("Clicking the edit button is not working") + result = result and False + + except WebDriverException: + self.logger.error("Error while trying to edit %s" % (option)) + self.screenshot(option) + result = result and False + self.click_on_cancel_if_failure('cancelBtn') + raise + return result + # del_vn_with_policy + + def edit_vn_with_subnet(self, category, subnet, dfrange, dfgate, vn): + option = "Networks" + try: + self.edit_vn_result = self.edit_remove_option(option, 'edit', vn_name=vn) + if self.edit_vn_result: + self.wait_till_ajax_done(self.browser) + self.click_element('ui-accordion-subnets-header-0') + self.wait_till_ajax_done(self.browser) + self.click_element('icon-plus', 'class') + data_row = "//tr[contains(@class,'data-row')]" + data = self.find_element(data_row, 'xpath', elements=True) + data_new = [] + for item in data: + if item == '': + pass + else: + data_new.append(item) + data_len = len(data_new) + ipam = self.find_element('s2id_user_created_ipam_fqn_dropdown', elements=True) + if data_len> 3 : + index = data_len-3 + elif data_len>1 or data_len <=3: + index = data_len-1 + else: + index = 0 + cidr_option = "//input[contains(@name,'user_created_cidr')]" + self.send_keys(subnet, cidr_option, 'xpath', clear=True, if_elements=[index]) + allocation_pool = "//textarea[contains(@name,'allocation_pools')]" + self.send_keys(dfrange, allocation_pool, 'xpath', clear=True, if_elements=[index]) + if category == 'Subnet': + default_gateway = "//input[contains(@name,'default_gateway')]" + self.send_keys(dfgate, default_gateway, 'xpath', \ + clear=True, if_elements=[index]) + elif category == 'Subnet-gate': + gateway_option = 
"//input[contains(@name,'user_created_enable_gateway')]" + self.click_element(gateway_option, 'xpath', elements=True, index=index) + elif category == 'Subnet-dns': + dns_option = "//input[contains(@name,'user_created_enable_dns')]" + self.click_element(dns_option, 'xpath', elements=True, index=index) + elif category == 'Subnet-dhcp': + dhcp_option = "//input[contains(@name,'enable_dhcp')]" + self.click_element(dhcp_option, 'xpath', elements=True, index=index) + self.click_element('configure-networkbtn1') + self.wait_till_ajax_done(self.browser) + result = self.edit_vn_result + else: + self.logger.error("Clicking the Edit Button is not working") + result = False + + except WebDriverException: + self.logger.error("Error while trying to edit %s" % (option)) + self.screenshot(option) + result = result and False + self.click_on_cancel_if_failure('cancelBtn') + raise + return result + # edit_vn_with_subnet + + def del_vn_with_subnet(self, vn): + result = True + option = "Networks" + try: + self.edit_vn_result = self.edit_remove_option(option, 'edit', vn_name=vn) + if self.edit_vn_result: + self.click_element('ui-accordion-subnets-header-0') + self.wait_till_ajax_done(self.browser) + data_row = "//tr[contains(@class,'data-row')]" + data = self.find_element(data_row, 'xpath', elements=True) + ind = 0 + act_cell = self.find_element('action-cell', 'class') + minus_icon = "//i[contains(@class,'icon-minus')]" + self.click_element(minus_icon, 'xpath', elements=True, index=ind) + self.click_element('configure-networkbtn1') + self.wait_till_ajax_done(self.browser) + else: + self.logger.error("Clicking the Edit Button is not working") + result = result and False + + except WebDriverException: + self.logger.error("Error while trying to edit %s" % (option)) + self.screenshot(option) + result = result and False + self.click_on_cancel_if_failure('cancelBtn') + raise + return result + # del_vn_with_subnet + + def edit_vn_with_host_route(self, button, tc, hprefix, hnexthop): + result = 
True + option = "Networks" + try: + self.edit_vn_result = self.edit_remove_option(option, 'edit') + if self.edit_vn_result: + self.click_element('host_routes') + self.wait_till_ajax_done(self.browser) + if button == 'add': + edit_grid = "//a[contains(@class,'editable-grid-add-link')]" + add_link = self.find_element(edit_grid, 'xpath', elements=True) + add_link[1].click() + prefix = "//input[contains(@name,'prefix')]" + self.send_keys(hprefix, prefix, 'xpath') + next_hop = "//input[contains(@name,'next_hop')]" + self.send_keys(hnexthop, next_hop, 'xpath') + else: + minus_icon = "//i[contains(@class,'icon-minus')]" + minus = self.find_element(minus_icon, 'xpath', elements=True) + index = len(minus) + minus[index-1].click() + self.click_element('configure-networkbtn1') + self.wait_till_ajax_done(self.browser) + if tc == 'neg': + warn_button_host_route = "//span[contains(@data-bind,'hostRoutes')]" + warn_button = self.find_element(warn_button_host_route, 'xpath') + if warn_button.get_attribute('style') == "": + self.click_on_cancel_if_failure('cancelBtn') + self.wait_till_ajax_done(self.browser) + return result + else: + result = result and False + else: + self.logger.error("Clicking the Edit Button is not working") + result = result and False + + except WebDriverException: + self.logger.error("Error while trying to edit %s" % (option)) + self.screenshot(option) + result = result and False + self.click_on_cancel_if_failure('cancelBtn') + raise + return result + # edit_vn_with_host_route + + def edit_vn_with_adv_option(self, category, tc, var_list): + option = "Networks" + try: + self.wait_till_ajax_done(self.browser) + if not self.click_configure_networks(): + result = False + if category == 1: + add_icon = "//i[contains(@class,'icon-plus')]" + self.click_element(add_icon, 'xpath') + disp_name = "//input[contains(@name,'display_name')]" + self.send_keys(var_list[3], disp_name, 'xpath') + self.click_element('ui-accordion-subnets-header-0') + 
self.click_element("icon-plus", 'class') + cidr = "//input[contains(@name,'user_created_cidr')]" + self.send_keys(var_list[2], cidr, 'xpath') + self.wait_till_ajax_done(self.browser, wait=3) + self.click_element("configure-networkbtn1") + self.edit_vn_result = self.edit_remove_option(option, 'edit', vn_name=var_list[3]) + if self.edit_vn_result: + self.click_element('advanced_options') + is_shared = "//input[contains(@name,'is_shared')]" + check = self.click_element(is_shared, 'xpath') + router_external = "//input[contains(@name,'router_external')]" + self.click_element(router_external, 'xpath') + allow_transit = "//input[contains(@name,'allow_transit')]" + self.click_element(allow_transit, 'xpath') + unknown_unicast = "//input[contains(@name,'flood_unknown_unicast')]" + self.click_element(unknown_unicast, 'xpath') + service_chain = "//input[contains(@name,'multi_policy_service_chains_enabled')]" + self.click_element(service_chain, 'xpath') + ecmp_hash = "//div[contains(@id,'s2id_ecmp_hashing_include_fields_dropdown')]" + self.click_element(ecmp_hash, 'xpath') + select_highlight = "//li[contains(@class,'select2-highlighted')]" + self.click_element(select_highlight, 'xpath') + if tc == 'pos-phy': + self.click_element('s2id_route_table_refs_dropdown') + self.wait_till_ajax_done(self.browser, wait=3) + self.click_element(select_highlight, 'xpath') + sriov_option = "//input[contains(@name,'user_created_sriov_enabled')]" + self.click_element(sriov_option, 'xpath') + phy_network = "//input[contains(@name,'physical_network')]" + self.send_keys(var_list[1], phy_network, 'xpath') + seg_id = "//input[contains(@name,'segmentation_id')]" + self.send_keys(var_list[0], seg_id, 'xpath') + else: + phy_net = "//input[contains(@name,'physical_network')]" + self.send_keys(var_list[1], phy_net, 'xpath', clear=True) + seg_id = "//input[contains(@name,'segmentation_id')]" + self.send_keys(var_list[0], seg_id, 'xpath', clear=True) + self.click_element('configure-networkbtn1') + result = 
self.edit_vn_result + if tc == 'neg-phy': + warn_advance = "//span[contains(@data-bind,'advanced')]" + warn_button = self.find_element(warn_advance, 'xpath') + if warn_button.get_attribute('style') == "": + self.click_on_cancel_if_failure('cancelBtn') + self.wait_till_ajax_done(self.browser) + result = self.edit_vn_result + else: + result = False + else: + self.logger.error("Clicking the Edit Button is not working") + result = False + + except WebDriverException: + self.logger.error("Error while trying to edit %s" % (option)) + self.screenshot(option) + result = False + self.click_on_cancel_if_failure('cancelBtn') + raise + return result + # edit_vn_with_adv_option + + def edit_vn_with_dns(self, button, tc, dns_ip): + result = True + option = "Networks" + try: + self.edit_vn_result = self.edit_remove_option(option, 'edit') + if self.edit_vn_result: + self.click_element('ui-accordion-dns_servers-header-0') + self.wait_till_ajax_done(self.browser, wait=3) + if button == 'add': + add_link = self.find_element('editable-grid-add-link', 'class', elements=True) + add_link[2].click() + ip_address = "//input[contains(@name,'ip_address')]" + text = self.find_element(ip_address, 'xpath') + if tc == 'pos': + text.send_keys(dns_ip) + else: + text.send_keys(dns_ip) + else: + minus_icon = "//i[contains(@class,'icon-minus')]" + minus = self.find_element(minus_icon, 'xpath', elements=True) + index = len(minus) + minus[index-1].click() + self.click_element('configure-networkbtn1') + self.wait_till_ajax_done(self.browser) + if tc == 'neg': + dns_server = "//span[contains(@data-bind,'dnsServers')]" + warn_button = self.find_element(dns_server, 'xpath') + if warn_button.get_attribute('style') == "": + self.click_on_cancel_if_failure('cancelBtn') + self.wait_till_ajax_done(self.browser) + return result + else: + result = result and False + else: + self.logger.error("Clicking the Edit Button is not working") + result = result and False + + except WebDriverException: + 
self.logger.error("Error while trying to edit %s" % (option)) + self.screenshot(option) + result = result and False + self.click_on_cancel_if_failure('cancelBtn') + raise + return result + # edit_vn_with_dns + + def edit_vn_with_fpool(self, button, fpool): + result = True + option = "Networks" + try: + self.edit_vn_result = self.edit_remove_option(option, 'edit') + if self.edit_vn_result: + self.click_element('fip_pool_accordian') + self.wait_till_ajax_done(self.browser) + if button == 'add': + edit_grid = "//a[contains(@class,'editable-grid-add-link')]" + add_link = self.find_element(edit_grid, 'xpath', elements=True) + add_link[3].click() + self.wait_till_ajax_done(self.browser) + pool_name = "//input[contains(@placeholder,'Enter Pool Name')]" + self.send_keys(fpool, pool_name, 'xpath') + self.wait_till_ajax_done(self.browser) + self.click_element('s2id_projects_dropdown') + select_highlight = "//li[contains(@class,'select2-highlighted')]" + select = self.find_element(select_highlight, 'xpath') + self.project = select.text + select.click() + else: + minus_icon = "//i[contains(@class,'icon-minus')]" + minus = self.find_element(minus_icon, 'xpath', elements=True) + index = len(minus) + minus[index-1].click() + self.click_element('configure-networkbtn1') + self.wait_till_ajax_done(self.browser) + else: + self.logger.error("Clicking the Edit Button is not working") + result = result and False + + except WebDriverException: + self.logger.error("Error while trying to edit %s" % (option)) + self.screenshot(option) + result = result and False + self.click_on_cancel_if_failure('cancelBtn') + raise + return result + # edit_vn_with_fpool + + def edit_vn_with_route_target(self, button, tc, rt_type, asn_no_ip, target_no, count=0): + result = True + option = "Networks" + try: + self.edit_vn_result = self.edit_remove_option(option, 'edit') + self.wait_till_ajax_done(self.browser, wait=10) + if self.edit_vn_result: + if rt_type == 'RT': + 
self.click_element('route_target_accordian') + ind = 4 + elif rt_type == 'ERT': + self.click_element('export_route_target_accordian') + ind = 5 + elif rt_type == 'IRT': + if button == 'add': + self.wait_till_ajax_done(self.browser, wait=10) + imp_route_target = 'ui-accordion-import_route_target_accordian-header-0' + route = self.find_element(imp_route_target) + self.browser.execute_script( + "return arguments[0].scrollIntoView();", route) + route.click() + ind = 6 + self.wait_till_ajax_done(self.browser) + if count == 1: + route.click() + self.wait_till_ajax_done(self.browser) + if button == 'add': + add_link = self.find_element('editable-grid-add-link', 'class', \ + elements=True) + self.browser.execute_script( + "return arguments[0].scrollIntoView();", add_link[ind]) + add_link[ind].click() + self.wait_till_ajax_done(self.browser) + self.send_keys(asn_no_ip, 'asn', 'name') + self.send_keys(target_no, 'target', 'name') + else: + if rt_type == 'IRT': + self.click_element('import_route_target_accordian') + self.wait_till_ajax_done(self.browser) + imp_route_target_vcfg = "//div[contains(@id,'import_route_target_vcfg')]" + irt = self.find_element(imp_route_target_vcfg, 'xpath', elements=True) + user_imp_route_target = "//div[contains(@id, \ + 'user_created_import_route_targets')]" + user_irt = self.find_element(user_imp_route_target, 'xpath', elements=True) + minus_icon = "//i[contains(@class,'icon-minus')]" + minus = self.find_element(minus_icon, 'xpath', elements=True) + index = len(minus) - 1 + minus[index].click() + self.click_element('configure-networkbtn1') + self.wait_till_ajax_done(self.browser, wait=10) + if tc == 'neg': + if rt_type == 'RT': + warn_button_rt = "//span[contains(@data-bind,'route_target_vcfg')]" + warn_button = self.find_element(warn_button_rt, 'xpath') + elif rt_type == 'ERT': + warn_button_ert = "//span[contains(@data-bind,'export_route_target_vcfg')]" + warn_button = self.find_element(warn_button_ert, 'xpath') + elif rt_type == 'IRT': + 
warn_button_irt = "//span[contains(@data-bind,'import_route_target_vcfg')]" + warn_button = self.find_element(warn_button_irt, 'xpath') + if warn_button.get_attribute('style') == "": + self.click_on_cancel_if_failure('cancelBtn') + self.wait_till_ajax_done(self.browser) + return result + else: + result = result and False + else: + self.logger.error("Clicking the Edit Button is not working") + result = result and False + + except WebDriverException: + self.logger.error("Error while trying to edit %s" % (option)) + self.screenshot(option) + result = result and False + self.click_on_cancel_if_failure('cancelBtn') + raise + return result + # edit_vn_with_route_target diff --git a/serial_scripts/webui/webui_topology.py b/serial_scripts/webui/webui_topology.py index d58c33ef7..64d03eb0c 100644 --- a/serial_scripts/webui/webui_topology.py +++ b/serial_scripts/webui/webui_topology.py @@ -7,8 +7,8 @@ def __init__( self, domain='default-domain', project='admin', - username=None, - password=None): + username='admin', + password='contrail123'): # # Domain and project defaults: Do not change until support for # non-default is tested! 
@@ -342,6 +342,35 @@ def __init__( 'src_ports': [3, 4]}] + self.subnet_edit = "20.20.20.0" + self.subnet_adv_option = "20.20.20.0/24" + self.asn_ip = "20.20.20.3" + self.invalid_asn_ip = "20,20,20,3" + self.asn_num = "65534" + self.invalid_asn_num = "65534ab" + self.target_num = "4294967295" + self.invalid_target_num = "4294967295abc" + self.mask = "24" + self.subnet_sip = "20.20.20.5" + self.subnet_eip = "20.20.20.20" + self.subnet_dns_ip = "20.20.20.2" + self.subnet_gate_ip = "20.20.20.1" + self.subnet_default_gate_ip = "0.0.0.0" + self.host_prefix = "1.1.1.1/24" + self.host_nexthop = "2.2.2.2" + self.phy_net = "phy1" + self.vlan_id = "4094" + self.fpool = "pool1" + self.invalid_vlan_id = "4094abcd49494" + self.dns_ip = "1.1.1.1" + self.invalid_dns_ip = "1,1,1,1" + self.vn_disp_name = "vn-test" + self.vn_disp_name_spl_char = "vn1~`!@#$%^&*()_+}{|:\"?><,./;\'[]\=-" + self.vn_disp_name_spl_char_ops = "vn1~`!@#$%^&*()_+}{|:\\\"?><,./;\'[]\\\\=-" + self.vn_name_beg_spl_char = "~!@#$%^&*()_+|}{\"?><,./;\'[]\=-`vn1" + self.vn_name_end_spl_char = "vn1~!@#$%^&*()_+|}{\"?><,./;\'[]\=-`" + self.vn_name_mid_spl_char = "vn1~!@#$%^&*()_+|}{\"?><,./;\'[]\=-`vn1" + self.port_name = 'port1' # end __init__ if __name__ == '__main__': diff --git a/serial_scripts/xmpp/__init__.py b/serial_scripts/xmpp/__init__.py new file mode 100644 index 000000000..e869f55d0 --- /dev/null +++ b/serial_scripts/xmpp/__init__.py @@ -0,0 +1 @@ +"Test XMPP auth" diff --git a/serial_scripts/xmpp/base.py b/serial_scripts/xmpp/base.py new file mode 100644 index 000000000..616463596 --- /dev/null +++ b/serial_scripts/xmpp/base.py @@ -0,0 +1,259 @@ +import test_v1 +from vn_test import MultipleVNFixture +from vnc_api.vnc_api import * +from vm_test import MultipleVMFixture +from fabric.api import run, hide, settings +from vn_test import VNFixture +from vm_test import VMFixture +from policy_test import PolicyFixture +from policy_test import PolicyFixture +from common.policy.config import ConfigPolicy +import 
os +import re +from physical_router_fixture import PhysicalRouterFixture +from time import sleep +from tcutils.verification_util import * +from tcutils.contrail_status_check import * + + +class XmppBase(test_v1.BaseTestCase_v1, ConfigPolicy): + + @classmethod + def setUpClass(cls): + super(XmppBase, cls).setUpClass() + cls.quantum_h = cls.connections.quantum_h + cls.nova_h = cls.connections.nova_h + cls.vnc_lib = cls.connections.vnc_lib + cls.agent_inspect = cls.connections.agent_inspect + cls.cn_inspect = cls.connections.cn_inspect + cls.analytics_obj = cls.connections.analytics_obj + # end setUpClass + + @classmethod + def tearDownClass(cls): + super(XmppBase, cls).tearDownClass() + # end tearDownClass + + def setUp(self): + super(XmppBase, self).setUp() + + def tearDown(self): + super(XmppBase, self).tearDown() + + def config_basic(self): + vn61_name = "test_vnv6sr" + vn61_net = ['2001::101:0/120'] + vn61_fixture = self.useFixture(VNFixture( + project_name=self.inputs.project_name, connections=self.connections, + vn_name=vn61_name, inputs=self.inputs, subnets=vn61_net)) + vn62_name = "test_vnv6dn" + vn62_net = ['2001::201:0/120'] + vn62_fixture = self.useFixture(VNFixture( + project_name=self.inputs.project_name, connections=self.connections, + vn_name=vn62_name, inputs=self.inputs, subnets=vn62_net)) + vm61_name = 'source_vm' + vm62_name = 'dest_vm' + vm61_fixture = self.useFixture(VMFixture( + project_name=self.inputs.project_name, connections=self.connections, + vn_obj=vn61_fixture.obj, vm_name=vm61_name, node_name=None, + image_name='cirros', flavor='m1.tiny')) + + vm62_fixture = self.useFixture(VMFixture( + project_name=self.inputs.project_name, connections=self.connections, + vn_obj=vn62_fixture.obj, vm_name=vm62_name, node_name=None, + image_name='cirros', flavor='m1.tiny')) + vm61_fixture.wait_till_vm_is_up() + vm62_fixture.wait_till_vm_is_up() + + rule = [ + { + 'direction': '<>', + 'protocol': 'any', + 'source_network': vn61_name, + 'src_ports': [0, 
-1], + 'dest_network': vn62_name, + 'dst_ports': [0, -1], + 'simple_action': 'pass', + }, + ] + policy_name = 'allow_all' + policy_fixture = self.config_policy(policy_name, rule) + + vn61_policy_fix = self.attach_policy_to_vn( + policy_fixture, vn61_fixture) + vn62_policy_fix = self.attach_policy_to_vn( + policy_fixture, vn62_fixture) + + vn1 = "vn1" + vn2 = "vn2" + vn_s = {'vn1': '10.1.1.0/24', 'vn2': ['20.1.1.0/24']} + rules = [ + { + 'direction': '<>', + 'protocol': 'any', + 'source_network': vn1, + 'src_ports': [0, -1], + 'dest_network': vn2, + 'dst_ports': [0, -1], + 'simple_action': 'pass', + }, + ] + + self.logger.info("Configure the policy with allow any") + self.multi_vn_fixture = self.useFixture(MultipleVNFixture( + connections=self.connections, inputs=self.inputs, subnet_count=2, + vn_name_net=vn_s, project_name=self.inputs.project_name)) + vns = self.multi_vn_fixture.get_all_fixture_obj() + (self.vn1_name, self.vn1_fix) = self.multi_vn_fixture._vn_fixtures[0] + (self.vn2_name, self.vn2_fix) = self.multi_vn_fixture._vn_fixtures[1] + self.config_policy_and_attach_to_vn(rules) + + self.multi_vm_fixture = self.useFixture(MultipleVMFixture( + project_name=self.inputs.project_name, connections=self.connections, + vm_count_per_vn=1, vn_objs=vns, image_name='cirros', + flavor='m1.tiny')) + vms = self.multi_vm_fixture.get_all_fixture() + (self.vm1_name, self.vm1_fix) = vms[0] + (self.vm2_name, self.vm2_fix) = vms[1] + + def config_policy_and_attach_to_vn(self, rules): + randomname = get_random_name() + policy_name = "sec_grp_policy_" + randomname + policy_fix = self.config_policy(policy_name, rules) + policy_vn1_attach_fix = self.attach_policy_to_vn( + policy_fix, self.vn1_fix) + policy_vn2_attach_fix = self.attach_policy_to_vn( + policy_fix, self.vn2_fix) + + def enable_auth_on_cluster(self): + for node in self.inputs.bgp_control_ips: + self.update_contrail_conf( + conf_file='/etc/contrail/contrail-control.conf', + operation='set', + section='DEFAULT', + 
knob='xmpp_auth_enable', + value='true', + node=node, + service='supervisor-control') + for node in self.inputs.compute_ips: + self.update_contrail_conf( + conf_file='/etc/contrail/contrail-vrouter-agent.conf', + operation='set', + section='DEFAULT', + knob='xmpp_auth_enable', + value='true', + node=node, + service='supervisor-vrouter') + + def update_contrail_conf( + self, + conf_file, + operation, + section, + knob, + node, + service, + value=None): + + if operation == 'del': + cmd = 'openstack-config --del %s %s %s' % (conf_file, + section, knob) + xmpp_status = self.inputs.run_cmd_on_server(node, cmd) + if operation == 'set': + cmd = 'openstack-config --set %s %s %s %s' % (conf_file, + section, knob, value) + xmpp_status = self.inputs.run_cmd_on_server(node, cmd) + self.inputs.restart_service(service, [node]) + cluster_status, error_nodes = ContrailStatusChecker( + ).wait_till_contrail_cluster_stable() + assert cluster_status, 'Hash of error nodes and services : %s' % ( + error_nodes) + + def check_xmpp_status(self, node): + + result = True + self.cn_inspect = self.connections.cn_inspect + for index in range(6): + try: + xmpp_match = re.findall( + "XMPP", + str(self.cn_inspect[node].get_cn_bgp_neigh_entry())) + if len(xmpp_match) > len(self.inputs.compute_ips): + break + else: + sleep(5) + except: + sleep(5) + + table_list = self.cn_inspect[node].get_cn_bgp_neigh_entry() + if isinstance(table_list, dict): + dict_item = table_list + table_list = [] + table_list.append(dict_item) + for item in range(len(table_list)): + if table_list[item]['encoding'] == 'XMPP': + if table_list[item]['peer_address'] in self.inputs.compute_ips: + if not 'Established' in table_list[item]['state']: + self.logger.error( + "Node %s has a problem with XMPP status. 
Status is %s" % + (table_list[item]['peer_address'], table_list[item]['state'])) + result = False + return result + + def check_if_xmpp_connections_present(self, node): + + self.cn_inspect = self.connections.cn_inspect + for index in range(6): + try: + xmpp_match = re.findall( + "XMPP", + str(self.cn_inspect[node].get_cn_bgp_neigh_entry())) + if len(xmpp_match) > len(self.inputs.compute_ips): + break + else: + sleep(5) + except: + sleep(5) + + table_list = self.cn_inspect[node].get_cn_bgp_neigh_entry() + if isinstance(table_list, dict): + dict_item = table_list + table_list = [] + table_list.append(dict_item) + + for item in range(len(table_list)): + if "encoding" in table_list[item]: + if table_list[item]['encoding'] == 'XMPP': + if table_list[item]['peer_address'] in self.inputs.compute_ips: + return True + return False + + def check_if_xmpp_auth_enabled(self, node, status='TLS'): + result = True + self.cn_inspect = self.connections.cn_inspect + table_list = self.cn_inspect[node].get_cn_bgp_neigh_entry() + if isinstance(table_list, dict): + dict_item = table_list + table_list = [] + table_list.append(dict_item) + + for item in range(len(table_list)): + if table_list[item]['encoding'] == 'XMPP': + if table_list[item]['peer_address'] in self.inputs.compute_ips: + if not status in table_list[item]['auth_type']: + self.logger.error( + "Node %s has a problem with XMPP auth status. 
Auth status is %s" % + (table_list[item]['peer_address'], table_list[item]['auth_type'])) + result = False + + return result + + def check_if_cluster_has_xmpp(self): + + result = False + for node in self.inputs.bgp_control_ips: + if self.check_if_xmpp_connections_present(node): + result = True + return result + +# end class XmppBase diff --git a/serial_scripts/xmpp/test_xmpp_auth.py b/serial_scripts/xmpp/test_xmpp_auth.py new file mode 100644 index 000000000..dea22f5f6 --- /dev/null +++ b/serial_scripts/xmpp/test_xmpp_auth.py @@ -0,0 +1,200 @@ +from tcutils.wrappers import preposttest_wrapper +from vn_test import MultipleVNFixture +from vm_test import MultipleVMFixture +from base import XmppBase +from common.policy.config import ConfigPolicy +from vn_test import VNFixture +from vm_test import VMFixture +import os +import sys +import test +from tcutils.contrail_status_check import * + + +class TestXmpptests(XmppBase, ConfigPolicy): + + @classmethod + def setUpClass(cls): + super(TestXmpptests, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + super(TestXmpptests, cls).tearDownClass() + + def is_test_applicable(self): + try: + assert self.inputs.xmpp_auth_enable + assert self.inputs.xmpp_dns_auth_enable + if not (self.inputs.xmpp_auth_enable and self.inputs.xmpp_dns_auth_enable): + return (False, 'Xmpp auth should be set before running tests') + except: + return (False, 'Xmpp auth should be set before running tests') + + return (True, None) + + def setUp(self): + super(TestXmpptests, self).setUp() + result = self.is_test_applicable() + if result[0]: + self.config_basic() + else: + return + + @test.attr(type=['sm_sanity']) + @preposttest_wrapper + def test_precedence_xmpp_auth(self): + """ + Precedence in XMPP auth is that server auth takes precedence over agent auth. To check this, + undo xmpp auth on server side and check if XMPP is up and it shows no "TLS" string in introspect. 
+ """ + # Have to add cleanup here before entering for loop to disable auth as + # there are asserts in the loop + self.addCleanup(self.enable_auth_on_cluster) + for node in self.inputs.bgp_control_ips: + self.update_contrail_conf( + conf_file='/etc/contrail/contrail-control.conf', + operation='del', + section='DEFAULT', + knob='xmpp_auth_enable', + node=node, + service='supervisor-control') + assert (self.check_xmpp_status(node) + ), "XMPP between nodes not up after deleting xmpp auth" + assert (self.check_if_xmpp_auth_enabled(node, 'NIL') + ), "Xmpp auth still set after disabling it on server side" + assert (self.check_if_cluster_has_xmpp), "XMPP connections not found" + for node in self.inputs.bgp_control_ips: + self.update_contrail_conf( + conf_file='/etc/contrail/contrail-control.conf', + operation='set', + section='DEFAULT', + knob='xmpp_auth_enable', + value='true', + node=node, + service='supervisor-control') + assert (self.check_xmpp_status(node) + ), "XMPP between nodes not up after adding back xmpp auth" + assert (self.check_if_xmpp_auth_enabled(node) + ), "Xmpp auth not set after enabling it on server side" + assert (self.check_if_cluster_has_xmpp), "XMPP connections not found" + return True + + # end test_precedence_xmpp_auth + + @test.attr(type=['sm_sanity']) + @preposttest_wrapper + def test_undo_xmpp_auth(self): + """ + Check if undoing setting and deleting xmpp auth creates any issues. 
+ Also confirm if introspect reflects the changes as and when they are done + """ + for node in self.inputs.bgp_control_ips: + self.update_contrail_conf( + conf_file='/etc/contrail/contrail-control.conf', + operation='del', + section='DEFAULT', + knob='xmpp_auth_enable', + node=node, + service='supervisor-control') + # adding cleanup before assert + self.addCleanup(self.enable_auth_on_cluster) + assert (self.check_xmpp_status(node) + ), "XMPP between nodes not up after deleting xmpp auth" + assert (self.check_if_cluster_has_xmpp), "XMPP connections not found" + assert (self.check_if_xmpp_auth_enabled(node, 'NIL') + ), "Xmpp auth still set after disabling it on server side" + for node in self.inputs.bgp_control_ips: + self.update_contrail_conf( + conf_file='/etc/contrail/contrail-control.conf', + operation='set', + section='DEFAULT', + knob='xmpp_auth_enable', + value='true', + node=node, + service='supervisor-control') + assert (self.check_xmpp_status(node) + ), "XMPP between nodes not up after adding back xmpp auth" + assert (self.check_if_cluster_has_xmpp), "XMPP connections not found" + assert (self.check_if_xmpp_auth_enabled(node) + ), "Xmpp auth not set after enabling it on server side" + return True + # end test_undo_xmpp_auth + + @test.attr(type=['sm_sanity']) + @preposttest_wrapper + def test_compute_negative_xmpp_auth(self): + """ + Configure xmpp auth only on agent side. Because of server precedence over agent, XMPP should go down. + Reconfigure auth back and confirm if things go back to normal. 
+ """ + + for node in self.inputs.compute_ips: + self.update_contrail_conf( + conf_file='/etc/contrail/contrail-vrouter-agent.conf', + operation='del', + section='DEFAULT', + knob='xmpp_auth_enable', + node=node, + service='supervisor-vrouter') + # adding cleanup before assert + self.addCleanup(self.enable_auth_on_cluster) + for node in self.inputs.bgp_control_ips: + assert (not (self.check_if_xmpp_connections_present(node)) + ), "XMPP between nodes should not be up after deleting xmpp auth on agent side" + assert (not (self.check_if_cluster_has_xmpp()) + ), "XMPP connections should not be found" + + for node in self.inputs.compute_ips: + self.update_contrail_conf( + conf_file='/etc/contrail/contrail-vrouter-agent.conf', + operation='set', + section='DEFAULT', + knob='xmpp_auth_enable', + value='true', + node=node, + service='supervisor-vrouter') + for node in self.inputs.bgp_control_ips: + assert (self.check_xmpp_status(node) + ), "XMPP between nodes not up after adding back xmpp auth" + assert (self.check_if_xmpp_auth_enabled(node) + ), "Xmpp auth not set after enabling it on agent side" + assert (self.check_if_cluster_has_xmpp), "XMPP connections not found" + return True + # end test_compute_negative_xmpp_auth + + @preposttest_wrapper + def test_restart_services_xmpp_auth(self): + """ + Configure xmpp auth and restart control and vrouter services for 10 iterations. + Cluster status is checked after each restart iteration. XMPP status is check after + all iterations are done. 
+ """ + + # adding cleanup here before for loop as the loop has assert + self.addCleanup(self.enable_auth_on_cluster) + for i in range(1, 10): + for node in self.inputs.compute_ips: + self.inputs.restart_service('supervisor-vrouter', [node]) + cluster_status, error_nodes = ContrailStatusChecker( + ).wait_till_contrail_cluster_stable(nodes=[node]) + assert cluster_status, 'Hash of error nodes and services : %s' % ( + error_nodes) + for node in self.inputs.bgp_control_ips: + assert (self.check_xmpp_status(node)), "XMPP between nodes not up" + assert (self.check_if_xmpp_auth_enabled(node)), "Xmpp auth not set" + assert (self.check_if_cluster_has_xmpp), "XMPP connections not found" + for i in range(1, 10): + for node in self.inputs.bgp_control_ips: + self.inputs.restart_service('supervisor-control', [node]) + cluster_status, error_nodes = ContrailStatusChecker( + ).wait_till_contrail_cluster_stable(nodes=[node]) + assert cluster_status, 'Hash of error nodes and services : %s' % ( + error_nodes) + for node in self.inputs.bgp_control_ips: + assert (self.check_xmpp_status(node)), "XMPP between nodes not up" + assert (self.check_if_xmpp_auth_enabled(node)), "Xmpp auth not set" + assert (self.check_if_cluster_has_xmpp), "XMPP connections not found" + return True + # end test_restart_services_xmpp_auth + +# end class Xmpptests diff --git a/tcutils/Process.py b/tcutils/Process.py deleted file mode 100644 index 3860b8e8c..000000000 --- a/tcutils/Process.py +++ /dev/null @@ -1,130 +0,0 @@ -from multiprocessing import TimeoutError, Pool -from copy_reg import pickle -import threading -import marshal -import thread -import types -import sys -from common import log_orig as logging -LOG = logging.getLogger(__name__) - -def wrapper(func): - ''' Decorator to create n tasks - Optional: - :param max_process: No of concurrent processes to create to handle the tcount tasks (default 30) - :param tcount : No of tasks to create if less than 1 run the task in the current process context 
(default 1) - :param timeout : Max wait time in secs for the task to complete execution (default 600s) - :param args_list : list of args for each task (default: same args is passed to each task) - :param kwargs_list : list of kwargs for each task (default: same kwargs is passed to each task) - ''' - def inner(*args, **kwargs): - if kwargs.get('tcount', 1) > 1 and kwargs.get('max_process', 30) > 1: - return multi_process(func, *args, **kwargs) - else: - kwargs.pop('tcount', None) - kwargs.pop('max_process', None) - return func(*args, **kwargs) - return inner - -def _pickle_method(method): - func_name = method.im_func.__name__ - obj = method.im_self - cls = method.im_class - return _unpickle_method, (func_name, obj, cls) -def _unpickle_method(func_name, obj, cls): - for cls in cls.mro(): - try: - func = cls.__dict__[func_name] - except KeyError: - pass - else: - break - return func.__get__(obj, cls) -pickle(types.MethodType, _pickle_method, _unpickle_method) - -lock = dict() -def get_lock(key): - global lock - if key not in lock.keys(): - lock[key] = threading.Lock() - return lock[key] -def _pickle_lock(lock): - return _unpickle_lock, (lock.__hash__(),) -def _unpickle_lock(key): - return get_lock(key) -pickle(thread.LockType, _pickle_lock, _unpickle_lock) - -def _pickle_file(fobj): - return _unpickle_file, (fobj.name, fobj.mode) -def _unpickle_file(name, mode): - if '/' in name: - return open(name, mode) - if 'stdout' in name: - return sys.stdout - elif 'stderr' in name: - return sys.stderr - elif 'stdin' in name: - return sys.stdin -pickle(types.FileType, _pickle_file, _unpickle_file) - -def _pickle_func(func): - fn_glob = dict() - modules = dict() - supported_types = [v for k, v in types.__dict__.iteritems() - if k.endswith('Type')] - for k,v in func.func_globals.iteritems(): - if type(v) in supported_types: - fn_glob[k] = v - if type(v) == types.ModuleType: - modules.update({k: v.__name__}) - del fn_glob[k] - return _unpickle_func, (marshal.dumps(func.func_code), 
fn_glob, modules, - func.func_name, func.func_defaults, - func.func_closure, func.func_dict) - -def _unpickle_func(code_string, fn_glob, modules, func_name, - func_defaults, func_closure, func_dict): - code = marshal.loads(code_string) - for k,v in modules.iteritems(): - fn_glob.update({k: __import__(v)}) - fn = types.FunctionType(code, fn_glob, func_name, - func_defaults, func_closure) - fn.__dict__.update(func_dict) - return fn -pickle(types.FunctionType, _pickle_func, _unpickle_func) - - -def multi_process(target, *args, **kwargs): - count = kwargs.pop('tcount', 0) - timeout = kwargs.pop('timeout', 600) - max_process = kwargs.pop('max_process', 30) - kwargs_list = kwargs.pop('kwargs_list', None) - args_list = kwargs.pop('args_list', None) - - n_instances = int(count) if count else 1 - if not kwargs_list: - kwargs_list = [kwargs for i in range(n_instances)] - if not args_list: - args_list = [args for i in range(n_instances)] - - pool = Pool(int(max_process)) - results = [pool.apply_async(target, args_list[i], kwargs_list[i]) for i in range(n_instances)] - pool.close() # Close the pool so no more creation of new tasks - - res = list() - for result in results: - try: - res.append(result.get(timeout=timeout)) - except TimeoutError as e: - LOG.logger.error('Task overrun %d secs and timedout'%timeout) - print 'Task overrun %d secs and timedout'%timeout - except Exception as e: - LOG.logger.error('Exception in a task: %s %s'%(type(e).__name__, str(e))) - print 'Exception in a task:', type(e).__name__, str(e) - pool.terminate() # Terminate the pool to delete the task overrun processes - pool.join() - if len(res) != n_instances: - raise Exception('Exception observed in some of the processes') - elif int(count) == 0: - return res[0] - return res diff --git a/tcutils/__init__.py b/tcutils/__init__.py deleted file mode 100644 index 12c52538c..000000000 --- a/tcutils/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Module to hold the test case related utilites. 
-""" -import platform - -from fabric.api import local - -def get_release(pkg='contrail-install-packages'): - pkg_ver = None - dist = platform.dist()[0] - if dist in ['centos', 'fedora', 'redhat']: - cmd = "rpm -q --queryformat '%%{VERSION}' %s" %pkg - elif dist in ['Ubuntu']: - cmd = "dpkg -p %s | grep Version: | cut -d' ' -f2 | cut -d'-' -f1" %pkg - pkg_ver = local(cmd, capture=True) - if 'is not installed' in pkg_ver or 'is not available' in pkg_ver: - print "Package %s not installed." % pkg - return None - return pkg_ver diff --git a/tcutils/agent/__init__.py b/tcutils/agent/__init__.py deleted file mode 100644 index 2ee9e75fa..000000000 --- a/tcutils/agent/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Agent Utils""" diff --git a/tcutils/agent/vna_introspect_utils.py b/tcutils/agent/vna_introspect_utils.py deleted file mode 100755 index af590dce7..000000000 --- a/tcutils/agent/vna_introspect_utils.py +++ /dev/null @@ -1,685 +0,0 @@ -import cgitb -cgitb.enable(format='text') - -import logging as LOG - -from tcutils.verification_util import * -from vna_results import * -import re -from netaddr import * -from tcutils.util import is_v6 - -LOG.basicConfig(format='%(levelname)s: %(message)s', level=LOG.DEBUG) - - -class AgentInspect (VerificationUtilBase): - - def __init__(self, ip, logger=LOG): - super(AgentInspect, self).__init__(ip, 8085, XmlDrv, logger=logger) - - def get_vna_domain(self, domain='default-domain'): - pass - - def get_vna_project(self, domain='default-domain', project='admin'): - pass - - def get_vna_ipam(self, domain='default-domain', project='admin', ipam='default-network-ipam'): - pass - - def get_vna_policy(self, domain='default-domain', project='admin', policy='default-network-policy'): - pass - - def get_vna_vn_list(self, domain='default-domain', project='admin'): - ''' - method: get_vna_vn_list returns a list - returns None if not found, a dict w/ attrib. 
eg: - - ''' - vnl = self.dict_get('Snh_VnListReq?name=') - avn = vnl.xpath('./VnListResp/vn_list/list/VnSandeshData') or \ - vnl.xpath('./vn_list/list/VnSandeshData') - l = [] - for v in avn: - p = {} - for e in v: - p[e.tag] = e.text - l.append(p) - return VnaVnListResult({'VNs': l}) - - def get_vna_vm_list(self, domain='default-domain', project='admin'): - ''' - method: get_vna_vm_list returns a list - returns None if not found, a dict w/ attrib. eg: - - ''' - vnl = self.dict_get('Snh_VmListReq?uuid=') - avn = vnl.xpath('./VmListResp/vm_list/list/VmSandeshData/uuid') or \ - vnl.xpath('./vm_list/list/VmSandeshData/uuid') - l = [] - for v in avn: - l.append(v.text) - # return VnaVnListResult ({'VNs': l}) - return l - - def get_vna_vn(self, domain='default-domain', project='admin', - vn_name='default-virtual-network'): - ''' - method: get_vna_vn finds a vn - returns None if not found, a dict w/ attrib. eg: - Sample : {'name': 'default-domain:admin:vn222', 'mirror_acl_uuid': None, 'acl_uuid': None, 'vrf_name': 'default-domain:admin:vn222:vn222', 'mirror_cfg_acl_uuid': None, 'ipam_data': None, 'uuid': '43c92a36-89fa-4e3e-b89f-702cdddd33ea'} - - ''' - p = None - vn_fq_name = ':'.join((domain, project, vn_name)) - vnl = self.dict_get('Snh_VnListReq?name=%s' %vn_fq_name) - vns = vnl.xpath('./VnListResp/vn_list/list/VnSandeshData') or \ - vnl.xpath('./vn_list/list/VnSandeshData') - for vn in vns: - if vn.find('name').text in vn_fq_name: - p = VnaVnResult() - for e in vn: - p[e.tag] = e.text - return p - - def get_vna_acl_by_vn(self, - fq_vn_name='default-domain:admin:default-virtual-network'): - ''' - method: get_vna_acl_by_vn finds acl of the vn - returns None if not found, a dict w/ attrib. 
eg: - - ''' - p = None - vn = self.get_vna_vn(*fq_vn_name.split(':')) - if vn and vn.acl(): - dict_resp = self.dict_get('Snh_AclReq?x=' + vn.acl()) - vnl = dict_resp.xpath('./AclResp/acl_list/list/AclSandeshData') or \ - dict_resp.xpath('./acl_list/list/AclSandeshData') - if 1 == len(vnl): - p = VnaACLResult() - for e in vnl[0]: - if e.tag == 'entries': - p[e.tag] = [] - for ae in e.xpath('./list/AclEntrySandeshData'): - ace = {} - p[e.tag].append(ace) - for c in ae: - if c.tag in ('src_port_l', 'dst_port_l', 'proto_l'): - ace[c.tag] = {} - # Validate data before looking for list - # elements as port_list is Not Available if - # protocol is ICMP - cdata = c.xpath('./list/SandeshRange') - if cdata == []: - ace[c.tag] = 'NA' - else: - for pl in cdata[0]: - ace[c.tag][pl.tag] = pl.text - elif c.tag in ('action_l', ): - ace[c.tag] = map(lambda x: x.text, - c.xpath('./list/ActionStr/action')) - else: - ace[c.tag] = c.text - else: - p[e.tag] = e.text - return p - - def get_vna_flow_by_vn(self, - fq_vn_name='default-domain:admin:default-virtual-network'): - ''' - method: get_vna_flow_by_vn finds acl of the vn - returns None if not found, a dict w/ attrib. eg: - - ''' - p = None - vn = self.get_vna_vn(*fq_vn_name.split(':')) - if vn.acl() == None: - err_msg = "ERROR: VN under test has no policy associated in Agent - %s" % vn - return err_msg - if vn and vn.acl(): - vnl = self.dict_get('Snh_AclFlowReq?uuid=' + vn.acl()) - vnl = vnl.xpath('./AclFlowResp') or vnl - if vnl: - p = VnaFlowResult() - for e in vnl: - if e.tag == 'flow_entries': - p[e.tag] = [] - for ae in e.xpath('./list/FlowSandeshData'): - # Dump following info for debugging in case of - # failure - chk_keys = ['ace_l', 'src', 'dst', 'source_vn', - 'dest_vn', 'protocol', 'action_l', 'flow_uuid'] - for i in ae.getchildren(): - if i.tag == 'dest_vn' and i.text == '__UNKNOWN__': - err_msg = "ERROR: Route import issue seen in agent, check log for failed flow" - # print flow data for debugging.. 
- for k in chk_keys: - if k == i.tag: - self.log.info( - "flow: %s, -->, %s" % - (i.tag, i.text)) - break - return err_msg - ace = {} - p[e.tag].append(ace) - for c in ae: - if c.tag in ('ace_l', ): - ace[c.tag] = {} - ace_id_data = c.xpath('./list/AceId') - if ace_id_data == []: - ace[c.tag] = [] - else: - for pl in ace_id_data[0]: - ace[c.tag][pl.tag] = pl.text - elif c.tag in ('action_l', ): - ace[c.tag] = map(lambda x: x.text, - c.xpath('./list/ActionStr/action')) - else: - ace[c.tag] = c.text - elif e.tag == 'aceid_cnt_list': - p[e.tag] = [] - for ae in e.xpath('./list/AceIdFlowCnt'): - ace = {} - p[e.tag].append(ace) - for c in ae: - ace[c.tag] = c.text - else: - p[e.tag] = e.text - - return p - - def get_vna_pkt_agentstatsreq(self): - '''returns output of http://10.204.216.15:8085/Snh_AgentStatsReq? - {'XmppStatsResp': {'xmpp_out_msgs': '62', 'xmpp_reconnect': '4', 'xmpp_in_msgs': '20', 'more': 'true'}, 'PktTrapStatsResp': {'invalid_agent_hdr': '0', 'invalid_interface': '8', 'exceptions': '3937', 'pkt_dropped': '8', 'no_handler': '8', 'more': 'true'}, 'IpcStatsResp': {'ipc_in_msgs': '0', 'ipc_out_msgs': '0', 'more': 'true'}, 'FlowStatsResp': {'flow_aged': '44', 'flow_denied': '0', 'flow_duplicate': '0', 'flow_allowed': '48', 'flow_active': '4', 'more': 'true'}, 'SandeshStatsResp': {'sandesh_out_msgs': '0', 'sandesh_reconnects': '0', 'sandesh_http_sessions': '0', 'sandesh_in_msgs': '0', 'more': 'false'}}''' - - stat_dct = {} - stats = self.dict_get('Snh_AgentStatsReq?') - for elem in stats.getchildren(): - dct = {} - k = elem.tag - for e in elem: - k1 = e.tag - v1 = e.text - d = {k1: v1} - dct.update(d) - d1 = {k: dct} - stat_dct.update(d1) - return stat_dct - - def get_vna_fetchallflowrecords(self): - '''returns out from http://10.204.216.15:8085/Snh_FetchAllFlowRecords? 
- return a list of all the flow records as below: - [{'protocol': '1', 'stats_bytes': '222180', 'stats_packets': '2645', 'setup_time_utc': '1371254131073195', 'sip': '1.1.1.253', 'src_port': '0', 'uuid': '3a95eaa5-87e5-4b37-a49a-15a406db8356', 'nat': 'disabled', 'mirror_port': '0', 'direction': 'ingress', 'implicit_deny': 'no', 'refcount': '4', 'setup_time': '2013-Jun-14 23:55:31.073195', 'vrf': '1', 'dest_vrf': '0', 'interface_idx': '3', 'flow_handle': '54518', 'dst_port': '0', 'action': '32', 'short_flow': 'no', 'dip': '2.1.1.253', 'mirror_ip': '0.0.0.0'}, {'protocol': '1', 'stats_bytes': '222180', 'stats_packets': '2645', 'setup_time_utc': '1371254131065594', 'sip': '2.1.1.253', 'src_port': '0', 'uuid': '2ea64aa3-d716-407e-acf6-54c81027c042', 'nat': 'disabled', 'mirror_port': '0', 'direction': 'ingress', 'implicit_deny': 'no', 'refcount': '4', 'setup_time': '2013-Jun-14 23:55:31.065594', 'vrf': '2', 'dest_vrf': '0', 'interface_idx': '4', 'flow_handle': '25755', 'dst_port': '0', 'action': '32', 'short_flow': 'no', 'dip': '1.1.1.253', 'mirror_ip': '0.0.0.0'}] - -l[0]={'protocol': '1', 'stats_bytes': '222180', 'stats_packets': '2645', 'setup_time_utc': '1371254131073195', 'sip': '1.1.1.253', 'src_port': '0', 'uuid': '3a95eaa5-87e5-4b37-a49a-15a406db8356', 'nat': 'disabled', 'mirror_port': '0', 'direction': 'ingress', 'implicit_deny': 'no', 'refcount': '4', 'setup_time': '2013-Jun-14 23:55:31.073195', 'vrf': '1', 'dest_vrf': '0', 'interface_idx': '3', 'flow_handle': '54518', 'dst_port': '0', 'action': '32', 'short_flow': 'no', 'dip': '2.1.1.253', 'mirror_ip': '0.0.0.0'}''' - - r = self.dict_get('Snh_FetchAllFlowRecords?') - l = [] - xpath = './flow_list' - records = EtreeToDict(xpath).get_all_entry(r) - return records['flow_list'] - - def get_vna_fetchflowrecord(self, nh=None, sip=None, dip=None, sport=None, dport=None, protocol=None): - '''http://10.204.216.15:8085/Snh_FetchFlowRecord?vrf=1&sip=1.1.1.253&dip=2.1.1.253&src_port=0&dst_port=0&protocol=1 - 
usage:self.records=inspect_h.get_vna_fetchflowrecord(nh='1',sip='1.1.1.253',dip='2.1.1.253',sport='0',dport='0',protocol='1') - - return value:[{'nh': '1'}, {'sip': '1.1.1.253'}, {'dip': '2.1.1.253'}, {'src_port': '0'}, {'dst_port': '0'}, {'protocol': '1'}, {'dest_vrf': '0'}, {'action': '32'}, {'mirror_ip': '0.0.0.0'}, {'mirror_port': '0'}, {'direction': 'ingress'}, {'stats_bytes': '0'}, {'stats_packets': '0'}, {'uuid': 'aa010de9-5eec-48d8-884a-ccbc5de665bb'}, {'nat': 'disabled'}, {'flow_handle': '54518'}, {'interface_idx': '3'}, {'setup_time': '2013-Jun-17 11:28:08.708658'}, {'refcount': '4'}, {'implicit_deny': 'no'}, {'short_flow': 'no'}, {'setup_time_utc': '1371468488708658'}]''' - path = 'Snh_FetchFlowRecord?nh=' + nh + '&sip=' + sip + '&dip=' + dip + \ - '&src_port=' + sport + '&dst_port=' + \ - dport + '&protocol=' + protocol - rec = self.dict_get(path) - if (rec.getchildren()[0].text == 'No Flow Record for specified key '): - return None - rec = rec.getchildren()[0].xpath('./FlowRecordsResp/SandeshFlowData') or \ - rec.getchildren()[0].xpath('./SandeshFlowData') - if rec is None: - return None - record = rec[0].getchildren() - l = [] - for v in record: - p = {} - p[v.tag] = v.text - l.append(p) - return l - - def delete_all_flows(self): - '''Delete flows with following introspect url - http://10.204.216.15:8085/Snh_DeleteAllFlowRecords?. - ''' - resp = self.dict_get('Snh_DeleteAllFlowRecords?') - - def match_item_in_flowrecord(self, flow_rec, item, expected): - '''This proc typically work in pair with get_vna_fetchflowrecord. It parse the output of get_vna_fetchflowrecord and verify for the given item output is matching with the user expected one.''' - result = False - for itr in flow_rec: - if itr.keys() == [item]: - if expected in itr.values()[0]: - result = True - return result - - def _get_vna_kflowresp(self, record): - '''return list of kernel flow records for a given record.. 
- a record is an element with tag KFlowInfo and has flow_list''' - l = [] - record = record.getchildren()[0].xpath('./list/KFlowInfo') - for v in record: - p = {} - for e in v: - p[e.tag] = e.text - l.append(p) - return l - - def get_vna_kflowresp(self): - '''http://10.204.216.15:8085/Snh_KFlowReq?flow_idx= - introspect has 3 different return values - record_list, record and []''' - record_list = self.dict_get('Snh_KFlowReq?flow_idx=') - if ('KFlowResp' in record_list.getchildren()[0].tag): - l = [] - for record in record_list: - l = l + self._get_vna_kflowresp(record) - return l - elif ('flow_list' in record_list.getchildren()[0].tag): - return self._get_vna_kflowresp(record_list) - else: - self.log.error("Introspect output match failure, got as follows: ") - self.log.error(etree.tostring(record_list, pretty_print=True)) - return None - - def get_cs_alloc_fip_pool(self, domain='default-domain', project='admin', fip_pool='default-floating-ip-pool'): - pass - - def policy_update(self, domain='default-domain', *arg): - pass - - def dissassociate_ip(self, domain='default-domain', *arg): - pass - - def get_vna_vrf_objs(self, domain='default-domain', project='admin', vn_name='default-virtual-network'): - ''' - - Returns VRF objects list from VRF name in agent using : http://172.27.58.57:8085/Snh_VrfListReq?x=default-domain:admin:net10:net10 - Sample : List of {'mcindex': '1', 'name': 'default-domain:admin:vn222:vn222', 'ucindex': '1'} - ''' - p = None - vrflist = self.dict_get('Snh_VrfListReq?name=%s:%s:%s:%s' % (domain, - project, vn_name, vn_name)) - if len(vrflist.xpath('./VrfListResp')): - vrf = vrflist.xpath('./VrfListResp')[0] - else: - vrf = vrflist - avn = filter(lambda x: ':'.join((domain, project, - vn_name)) in x.xpath('./name')[0].text, vrf.xpath( - './vrf_list/list/VrfSandeshData')) - p = VnaVrfListResult({'vrf_list': []}) - for v in avn: - pp = VnaVrfRouteResult() - for e in v: - pp[e.tag] = e.text - p['vrf_list'].append(pp) - return p - # end 
get_vna_vrf_objs - - def get_vna_vrf_id(self, vn_fq_name): - domain = str(vn_fq_name.split(':')[0]) - project = str(vn_fq_name.split(':')[1]) - vn = str(vn_fq_name.split(':')[2]) - vrf = '%s:%s:%s:%s' % (domain, - project, vn, vn) - agent_vrf_objs = self.get_vna_vrf_objs(domain,project,vn) - return [x['ucindex'] for x in agent_vrf_objs['vrf_list'] if x['name'] == vrf] - - def get_vna_route(self, vrf_id='', ip=None, prefix=None): - if not ip or not is_v6(ip): - table = 'Snh_Inet4UcRouteReq' - plen = 32 - else: - table = 'Snh_Inet6UcRouteReq' - plen = 128 - table_resp = table.replace('Req', 'Resp') - table_resp = table_resp.replace('Snh_', '') - prefix = plen if prefix is None else prefix - routes = {'ip': ip, 'prefix': prefix} - path = '%s?x=%s' % (table, str(vrf_id)) - xpath = 'route_list/list/RouteUcSandeshData' - - p = self.dict_get(path) - - routelist = EtreeToDict('./%s/%s' %(table_resp, xpath)).get_all_entry(p) or \ - EtreeToDict('./%s' % (xpath)).get_all_entry(p) - if not ip: - routes.update({'routes': routelist}) - return routes - if type(routelist) is dict: - routelist1 = [routelist] - else: - routelist1 = routelist - for route in routelist1: - if (route['src_ip'] == ip and route['src_plen'] == str(prefix)): - routes.update({'routes': [route]}) - return routes - # end get_vna_route - - def get_vna_layer2_route(self, vrf_id='', mac=None): - routes = {'mac': mac} - path = 'Snh_Layer2RouteReq?x=%s' % str(vrf_id) - xpath = 'route_list/list/RouteL2SandeshData' - p = self.dict_get(path) - routelist = EtreeToDict('./Layer2RouteResp/%s' %(xpath)).get_all_entry(p) or \ - EtreeToDict('./%s' % (xpath)).get_all_entry(p) - if not mac: - routes.update({'routes': routelist}) - return routes - if type(routelist) is dict: - routelist1 = [routelist] - else: - routelist1 = routelist - for route in routelist1: - if (EUI(route['mac']) == EUI(mac)): - routes.update({'routes': [route]}) - return routes - # end get_vna_layer2_route - - def get_vna_route_in_mclist_by_key(self, 
vrf_id, key, ip, prefix): - route_list = self.get_vna_active_route( - vrf_id, ip, prefix)['path_list'][0]['nh']['mc_list'] - for entry in route_list: - if entry[key]: - return entry[key] - else: - return None - # end get_vna_route_in_mclist_by_key - - def get_vna_active_route(self, ip, prefix=None, vrf_id=None, vn_fq_name=None): - ''' - Returns the first path got from get_vna_route. We would later need to have API to search a path given a set of match-conditions like nh/label/peer etc. - ''' - if vrf_id is None: - assert vn_fq_name, "Either vrf_id or vn_fq_name has to be specified" - vrf_id = self.get_vna_vrf_id(vn_fq_name) - if not vrf_id: - return None - vrf_id = vrf_id[0] - route_list = self.get_vna_route(vrf_id, ip, prefix) - if route_list: - return route_list['routes'][0] - else: - return None - # end get_vna_active_route - - def _itf_fltr(self, x, _type, value): - if _type == 'vmi': - path = './uuid' - elif _type == 'tap': - path = './name' - elif _type == 'vm': - path = './vm_uuid' - elif _type == 'ip': - path = './ip_addr' - elif _type == 'type': - path = './type' - e = x.xpath(path) - if e: - return value == e[0].text - return False - - def get_vna_tap_interface_common(self, _type, value): - ''' - - Returns the tap-interface name for a VM as seen by agent - Note:: define elsewhere - def get_vna_tap_interface(vm_id): - cs = VNCApiInspect (ip) - vna = AgentInspect(ip) - return vna.get_vna_tap_interface (cs.get_cs_vmi_of_vm( - vm_id)['virtual-machine-interface']['uuid']) - ''' - ret_list = [] - p = None - vnl = self.dict_get('Snh_PageReq?x=begin:-1,end:-1,table:db.interface.0,') - intf_list = vnl.xpath('./ItfResp/itf_list/list/ItfSandeshData') or \ - vnl.xpath('./itf_list/list/ItfSandeshData') - avn = filter(lambda x: self._itf_fltr(x, _type, value), intf_list) -# if 1 == len (avn): - for intf in avn: - p = VnaItfResult() - for e in intf: # intf replaces avn[0] - if e.tag == 'fip_list': - p[e.tag] = [] - for fip in e.xpath('./list/FloatingIpSandeshList'): - 
pp = {} - for ee in fip: - pp[ee.tag] = ee.text - p[e.tag].append(pp) - else: - p[e.tag] = e.text - ret_list.append(p) - return ret_list - - def get_vna_tap_interface_by_vm(self, vm_id): - return self.get_vna_tap_interface_common('vm', vm_id) - - def get_vna_tap_interface_by_ip(self, ip_addr): - return self.get_vna_tap_interface_common('ip', ip_addr) - - def get_vna_interface_by_type(self, type): - """ - Returns interface name by type specified - Type can take 'eth'/'vhost'/'pkt'/'vport' - """ - intf_name = [] - intf_list = self.get_vna_tap_interface_common('type', type) - for intf in intf_list: - if intf['type'] == type: - intf_name.append(intf['name']) - return intf_name - - def get_vna_tap_interface_by_vmi(self, vmi_id): - ''' - - Returns the tap-interface name for a VM as seen by agent - Note:: define elsewhere - def get_vna_tap_interface(vm_id): - cs = VNCApiInspect (ip) - vna = AgentInspect(ip) - return vna.get_vna_tap_interface (cs.get_cs_vmi_of_vm( - vm_id)['virtual-machine-interface']['uuid']) - ''' - return self.get_vna_tap_interface_common('vmi', vmi_id) - # end get_vna_tap_interface - - def get_vna_intf_details(self, tap_intf_name): - ''' - - Returns the object got from http://172.27.58.57:8085/Snh_ItfReq?name= - ''' - return self.get_vna_tap_interface_common('tap', tap_intf_name) - # end get_vna_intf_details - - def get_vna_xmpp_connection_status(self): - ''' - method: get_vna_xmpp_connection_status returns a list - ''' - vnl = self.dict_get('Snh_AgentXmppConnectionStatusReq?') - elem = vnl.getchildren()[0] - var = elem.xpath('./list/AgentXmppData') - l = [] - for x in range(0, len(var)): - p = {} - p[elem.xpath('./list/AgentXmppData')[x].xpath('./controller_ip')[0] - .tag] = elem.xpath('./list/AgentXmppData')[x].xpath('./controller_ip')[0].text - p[elem.xpath('./list/AgentXmppData')[x].xpath('./cfg_controller')[0] - .tag] = elem.xpath('./list/AgentXmppData')[x].xpath('./cfg_controller')[0].text - 
p[elem.xpath('./list/AgentXmppData')[x].xpath('./state')[0] - .tag] = elem.xpath('./list/AgentXmppData')[x].xpath('./state')[0].text - l.append(p) - return l - # end get_vna_xmpp_connection_status - - def get_vna_diag_ping_res(self, src_ip='', src_port='', dst_ip='', dst_port='', proto='', vrf='', size='', count='', intv=''): - ''' - method: Get the ping response from diag introspect - ''' - ping_url = "Snh_PingReq?source_ip=%s&source_port=%s&dest_ip=%s&dest_port=%s&protocol=%s&vrf_name=%s&packet_size=%s&count=%s&interval=%s" % ( - src_ip, src_port, dst_ip, dst_port, proto, vrf, size, count, intv) - print ping_url - self.ping_out = self.dict_get(ping_url) - l = {} - i = 1 - # Get individual ping response - ping_resp = self.ping_out.xpath('/__PingResp_list/PingResp') - if ping_resp != []: - for x in ping_resp: - q = {} - for y in x.getchildren(): - q[y.tag] = y.text - l[i] = q - i = i + 1 - # Get ping response summary - ping_sum_resp = self.ping_out.xpath( - '/__PingResp_list/PingSummaryResp') - m = [] - for x in ping_sum_resp[0].getchildren(): - r = {} - r[x.tag] = x.text - m.append(r) - l['PingSummaryResp'] = m - return l - - def get_vna_verify_diag_ping(self, src_ip='', src_port='', dst_ip='', dst_port='', proto='', vrf='', size='', count='', intv=''): - ''' - method: This method verify the ping response from diag introspect - ''' - result = True, - req_sent = 0 - req_rcv = 0 - loss = 0 - ping_count = self.get_vna_diag_ping_res( - src_ip=src_ip, src_port=src_port, dst_ip=dst_ip, dst_port=dst_port, proto=proto, vrf=vrf, size=size, count=10, intv=intv) - if ping_count == {}: - result = False - else: - for i in range(0, len(ping_count['PingSummaryResp']) - 1): - if ping_count['PingSummaryResp'][i].keys()[0] == 'request_sent': - req_sent = int( - ping_count['PingSummaryResp'][i].values()[0]) - elif ping_count['PingSummaryResp'][i].keys()[0] == 'response_received': - req_rcv = int(ping_count['PingSummaryResp'][i].values()[0]) - elif 
ping_count['PingSummaryResp'][i].keys()[0] == 'pkt_loss': - loss = int(ping_count['PingSummaryResp'][i].values()[0]) - print "%s %s %s" % (req_sent, req_rcv, loss) - print "%s" % (count) - - if req_sent == req_rcv: - result = True - else: - result = False - return result - - def get_sg_list(self): - ''' - method: get_sg_list returns a list - returns None if not found, a dict w/ attrib. eg: - - ''' - l = [] - sg = self.dict_get('Snh_SgListReq?name=') - asg = sg.xpath('./SgListResp/sg_list/list/SgSandeshData') or \ - sg.xpath('./sg_list/list/SgSandeshData') - - for s in asg: - p = {} - for e in s: - p[e.tag] = e.text - l.append(p) - return l - - def get_sg(self, sg_uuid): - ''' - method: get_sg get sg sg_uuid from agent - returns None if not found, a dict w/ attrib. eg: - - ''' - query = 'Snh_SgListReq?sg_uuid=' + str(sg_uuid) - l = [] - sg = self.dict_get(query) - asg = sg.xpath('./SgListResp/sg_list/list/SgSandeshData') or \ - sg.xpath('./sg_list/list/SgSandeshData') - - for s in asg: - p = {} - for e in s: - p[e.tag] = e.text - l.append(p) - return l - - def get_sg_acls_list(self, sg_uuid): - ''' - method: get_sg_acls_list returns a list - returns None if not found, a dict w/ attrib. eg: - - ''' - - sg_info = self.get_sg(sg_uuid) - acl_id_list = [sg_info[0]['ingress_acl_uuid'], sg_info[0]['egress_acl_uuid']] - - l = [] - for acl_id in acl_id_list: - query = 'Snh_AclReq?uuid=' + str(acl_id) - acl = self.dict_get(query) - aacl = acl.xpath('./AclResp/acl_list/list/AclSandeshData') or \ - acl.xpath('./acl_list/list/AclSandeshData') - for a in aacl: - p = {} - for e in a: - if e.tag == 'entries': - entry = e.xpath('./list/AclEntrySandeshData') - enl = [] - for rule in entry: - en = {} - for x in rule: - en[x.tag] = x.text - enl.append(en) - p[e.tag] = enl - else: - p[e.tag] = e.text - l.append(p) - return l - - def get_acls_list(self): - ''' - method: get_acls_list returns a list - returns None if not found, a dict w/ attrib. 
eg: - - ''' - l = [] - acl = self.dict_get('Snh_AclReq?name=') - aacl = acl.xpath('./AclResp/acl_list/list/AclSandeshData') or \ - acl.xpath('./acl_list/list/AclSandeshData') - for a in aacl: - p = {} - for e in a: - if e.tag == 'entries': - entry = e.xpath('./list/AclEntrySandeshData') - enl = [] - for rule in entry: - en = {} - for x in rule: - en[x.tag] = x.text - enl.append(en) - p[e.tag] = enl - else: - p[e.tag] = e.text - l.append(p) - return l - -if __name__ == '__main__': - - vvnagnt = AgentInspect('10.204.217.12') - print vvnagnt.get_vna_vn('default-domain', 'admin', 'vn-1') - print vvnagnt.get_vna_vn_list('default-domain', 'demo') - print vvnagnt.get_vna_vrf_id('default-domain', 'demo', 'fe:fe') - print vvnagnt.get_vna_route(3, '172.168.10.254', 32) - print vvnagnt.get_vna_tap_interface_by_vmi('73caeeed-7cac-4ef4-8268-f16c1ba514a4') - print vvnagnt.get_vna_tap_interface_by_vm('ae57b6d0-f057-4ccc-95eb-e3932a265752') - print vvnagnt.get_vna_intf_details('tap8e3d0097-7b') - print vvnagnt.get_vna_acl_by_vn('default-domain:demfeo:fe') - print vvnagnt.get_vna_flow_by_vn('default-domain:demo:pub') - print vvnagnt.get_vna_tap_interface_by_vm('aec7cc6e-977a-4e2d-8650-e583c5f63241') diff --git a/tcutils/agent/vna_results.py b/tcutils/agent/vna_results.py deleted file mode 100644 index abf4618c8..000000000 --- a/tcutils/agent/vna_results.py +++ /dev/null @@ -1,128 +0,0 @@ -from tcutils.verification_util import * - - -class VnaVnListResult (Result): - - ''' - VnaVnListResult to provide access to vna_introspect_utils.get_vna_vn_list - dict contrains: - ''' - - def vn_list(self): - return self.xpath('VNs') - - def vn_items(self): - for v in self.vn_list(): - yield VnaVnResult(v) - - -class VnaVrfRouteEntryResult (Result): - pass - - -class VnaVrfIdResult (Result): - pass - - -class VnaVrfRouteResult (Result): - - ''' - VnaVrfRouteResult to provide access to vna_introspect_utils.get_vna_vrf_id - dict contrains: - - ''' - - def route_items(self): - for r in 
self['routes']: - yield VnaVrfRouteEntryResult(r) - - def first_route(self): - if self['routes']: - return VnaVrfRouteEntryResult(self['routes'][0]) - - def filter(self): - if self['ip'] and self['prefix']: - r = filter(lambda x: x['src_ip'] == self['ip'] and str( - self['prefix']) == x['src_plen'], self['routes']) - self['routes'] = r - - -class VnaVrfListResult (Result): - pass - - -class VnaItfResult (Result): - - ''' - VnaItfResult to provide access to vna_introspect_utils.get_vna_tap_interface_* - dict contrains: - - ''' - - def tapif(self): - return self['name'] - - def vm(self): - return self['vm_uuid'] - - def ip(self): - return self['ip_addr'] - - def vn(self): - return self['vn_name'] - - def vrf(self): - return self['vrf_name'] - - def floating_ip_list(self): - return map(lambda x: x['ip_addr'], self['fip_list']) - - def floating_ip_vrf_list(self): - return map(lambda x: x['vrf_name'], self['fip_list']) - - -class VnaVnResult (Result): - - ''' - VnaVnResult to provide access to vna_introspect_utils.get_vna_vn - dict contrains: - - {'acl_uuid': '00000000-0000-0000-0000-000000000000', - 'ipam_data': None, - 'mirror_acl_uuid': '00000000-0000-0000-0000-000000000000', - 'name': 'default-domain:admin:front-end', - 'uuid': '75b38b78-554e-40fe-96ca-e7137b8d9974', - 'vrf_name': 'default-domain:admin:front-end:front-end'} - ''' - - def vrf_name(self): - return self.xpath('vrf_name') - - def name(self): - return self.xpath('name') - - def mirror_acl_uuid(self): - return self.xpath('mirror_acl_uuid') - - def ipam_data(self): - return self.xpath('ipam_data') - - def acl(self): - return self.xpath('acl_uuid') - - def uuid(self): - return self.xpath('uuid') - - -class VnaACLResult (Result): - - ''' - VnaACLResult to provide access to vna_introspect_utils.get_vna_acl_by_vn - ''' - - -class VnaFlowResult (Result): - - ''' - VnaFlowResult to provide access to vna_introspect_utils.get_vna_flow_by_vn - ''' diff --git a/tcutils/cfgparser.py b/tcutils/cfgparser.py deleted 
file mode 100644 index 0f5a31081..000000000 --- a/tcutils/cfgparser.py +++ /dev/null @@ -1,43 +0,0 @@ -'''Parse config files which are ConfigParser complaint''' - -from ConfigParser import SafeConfigParser - - -def string_to_list(tstr, force=False): - '''Split a string with comma, If no comma is present - and if force=True, return a list with str element - ''' - - tstr = tstr.replace('\n', '') - tstr = tstr.split(' #')[0].strip() - tstr = tstr.split(' ;')[0].strip() - sstr = [sstr.strip() for sstr in tstr.split(',')] - if force: - return sstr - else: - return tstr if tstr.rfind(',') < 0 else sstr - - -def parse_cfg_file(cfg_files): - ''' parse given config files and return a dictionary - with sections as keys and its items as dictionary items - ''' - parsed_dict = {} - sections = [] - cfg_files = [cfg_files] if type(cfg_files) is str else cfg_files - for cfg_file in cfg_files: - parser = SafeConfigParser() - parsed_files = parser.read(cfg_file) - if cfg_file not in parsed_files: - raise RuntimeError('Unable to parse (%s), ' - 'No such file or invalid format' % cfg_file) - common_sections = list(set(parser.sections()) & set(sections)) - if len(common_sections) != 0: - raise RuntimeError('Duplication Section Error while parsing ' - '(%s): %s' % (cfg_file, "\n".join(common_sections))) - for sect in parser.sections(): - parsed_dict[sect] = dict((iname, string_to_list(ival)) - for iname, ival in parser.items(sect)) - sections.extend(parser.sections()) - del parser - return parsed_dict diff --git a/tcutils/collector/__init__.py b/tcutils/collector/__init__.py deleted file mode 100644 index b83d193bd..000000000 --- a/tcutils/collector/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Collector Utils""" diff --git a/tcutils/collector/analytics_performance_tests.py b/tcutils/collector/analytics_performance_tests.py deleted file mode 100644 index c13369fc2..000000000 --- a/tcutils/collector/analytics_performance_tests.py +++ /dev/null @@ -1,412 +0,0 @@ -# Need to import path to 
test/fixtures and test/scripts/ -# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/' -# -# To run tests, you can do 'python -m testtools.run tests'. To run specific tests, -# You can do 'python -m testtools.run -l tests' -# Set the env variable PARAMS_FILE to point to your ini file. Else it will try to pick params.ini in PWD -# -import time -import re -import socket -import unittest -import fixtures -import testtools -from common.contrail_test_init import ContrailTestInit -from vn_test import * -from vm_test import * -from common.connections import ContrailConnections -from multiple_vn_vm_test import * -from tcutils.wrappers import preposttest_wrapper -from common.servicechain.config import ConfigSvcChain -from common.servicechain.verify import VerifySvcChain -import threading -from subprocess import Popen, PIPE -import shlex -from netaddr import * - - -class AnalyticsTestPerformance(testtools.TestCase, ConfigSvcChain, VerifySvcChain): - - def setUp(self): - super(AnalyticsTestPerformance, self).setUp() - if 'PARAMS_FILE' in os.environ: - self.ini_file = os.environ.get('PARAMS_FILE') - else: - self.ini_file = 'params.ini' - self.inputs = self.useFixture(ContrailTestInit(self.ini_file)) - self.connections = ContrailConnections(self.inputs) - self.quantum_h = self.connections.quantum_h - self.nova_h = self.connections.nova_h - self.vnc_lib = self.connections.vnc_lib - self.logger = self.inputs.logger - self.agent_inspect = self.connections.agent_inspect - self.cn_inspect = self.connections.cn_inspect - self.analytics_obj = self.connections.analytics_obj - self.sender_list = [] - self.receiver_list = [] - - def cleanUp(self): - super(AnalyticsTestPerformance, self).cleanUp() - - def runTest(self): - pass - - def provision_static_route( - self, prefix='111.1.0.0/16', virtual_machine_id='', - tenant_name=None, api_server_ip='127.0.0.1', - api_server_port='8082', oper='add', - virtual_machine_interface_ip='11.1.1.252', 
route_table_name='my_route_table', - user='admin', password='contrail123'): - - if not tenant_name: - tenant_name = self.inputs.stack_tenant - cmd = "python /opt/contrail/utils/provision_static_route.py --prefix %s \ - --virtual_machine_id %s \ - --tenant_name %s \ - --api_server_ip %s \ - --api_server_port %s\ - --oper %s \ - --virtual_machine_interface_ip %s \ - --user %s\ - --password %s\ - --route_table_name %s" % (prefix, virtual_machine_id, tenant_name, api_server_ip, api_server_port, oper, - virtual_machine_interface_ip, user, password, route_table_name) - args = shlex.split(cmd) - process = Popen(args, stdout=PIPE) - stdout, stderr = process.communicate() - if stderr: - self.logger.warn("Route could not be created , err : \n %s" % - (stderr)) - else: - self.logger.info("%s" % (stdout)) - - def start_traffic(self, vm, src_min_ip='', src_mx_ip='', dest_ip='', dest_min_port='', dest_max_port=''): - - self.logger.info("Sending traffic...") - try: - cmd = '~/pktgen_new.sh %s %s %s %s %s' % (src_min_ip, - src_mx_ip, dest_ip, dest_min_port, dest_max_port) - vm.run_cmd_on_vm(cmds=[cmd]) - except Exception as e: - self.logger.exception("Got exception at start_traffic as %s" % (e)) - - def stop_traffic(self, vm): - self.logger.info("Stopping traffic...") - try: - cmd = 'killall ~/pktgen_new.sh' - vm.run_cmd_on_vm([cmd]) - except Exception as e: - self.logger.exception("Got exception at stop_traffic as %s" % (e)) - - def create_vms(self, vn_name='vn_analytics', vm_name='vm-analytics', vn_count=1, vm_count=1, flavor='contrail_flavor_small'): - - vm1_name = vm_name - vn_name = vn_name - vn_subnets = ['11.1.1.0/24'] - try: - self.setup_fixture = self.useFixture( - create_multiple_vn_and_multiple_vm_fixture( - connections=self.connections, - vn_name=vn_name, vm_name=vm1_name, inputs=self.inputs, project_name=self.inputs.project_name, - subnets=vn_subnets, vn_count=vn_count, vm_count=vm_count, subnet_count=1, image_name='ubuntu-traffic', - flavor='contrail_flavor_small')) 
- time.sleep(20) - except Exception as e: - self.logger.exception("Got exception as %s" % (e)) - - try: - assert self.setup_fixture.verify_vms_on_setup() - assert self.setup_fixture.verify_vns_on_setup() - except Exception as e: - self.logger.exception( - "Got exception in create_vms function as %s" % (e)) - - def build_query(self, src_vn, dst_vn): - - self.query = '(' + 'sourcevn=' + src_vn + \ - ') AND (destvn=' + dst_vn + ')' - - def run_query(self): - for ip in self.inputs.collector_ips: - try: - self.logger.info('setup_time= %s' % (self.start_time)) - # Quering flow sreies table - self.logger.info( - "Verifying flowSeriesTable through opserver %s" % (ip)) - res1 = self.analytics_obj.ops_inspect[ip].post_query( - 'FlowSeriesTable', start_time=self.start_time, end_time='now', select_fields=['sourcevn', 'sourceip', 'destvn', 'destip', 'sum(packets)', 'sport', 'dport', 'T=1'], - where_clause=self.query, sort=2, limit=5, sort_fields=['sum(packets)']) - self.logger.info("result: %s" % (res1)) - assert res1 - self.logger.info("Top 5 flows %s" % (res1)) - except Exception as e: - self.logger.exception("Got exception as %s" % (e)) - - def get_ip_list_from_prefix(self, prefix): - - ip_list = [] - ip = IPNetwork(prefix) - ip_netowrk = str(ip.network) - ip_broadcast = str(ip.broadcast) - ip_lst = list(ip) - for ip_addr in ip_lst: - if ((str(ip_addr) in ip_netowrk) or (str(ip_addr) in ip_broadcast)): - continue - ip_list.append(str(ip_addr)) - return ip_list - - def get_min_max_ip_from_prefix(self, prefix): - - ip_list = self.get_ip_list_from_prefix(prefix) - min_ip = ip_list[0] - max_ip = ip_list[-1] - return [min_ip, max_ip] - - def create_svc_chains(self, st_name, si_prefix, si_count, max_inst, - left_vn='', right_vn='', svc_mode='in-network', svc_scaling=False): - - self.action_list = [] - self.if_list = [['management', False], ['left', True], ['right', True]] - self.st_fixture, self.si_fixtures = self.config_st_si( - st_name, si_prefix, si_count, - svc_scaling, 
max_inst, left_vn=left_vn, - right_vn=right_vn, svc_mode=svc_mode) - self.action_list = self.chain_si(si_count, si_prefix) - - def create_policy(self, policy_name='policy_in_network', rules=[], src_vn_fixture=None, dest_vn_fixture=None): - - self.policy_fixture = self.config_policy(policy_name, rules) - self.vn1_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, src_vn_fixture) - self.vn2_policy_fix = self.attach_policy_to_vn( - self.policy_fixture, dest_vn_fixture) - self.validate_vn(src_vn_fixture.vn_name) - self.validate_vn(dest_vn_fixture.vn_name) - - def setup_vm(self, vn_count=2, vm_count=1): - - self.create_vms(vn_count=vn_count, vm_count=vm_count) - - def setup_service_instance( - self, st_name='in_net_svc_template_1', si_prefix='in_net_svc_instance_', - si_count=1, svc_scaling=False, max_inst=1, left_vn='', right_vn='', svc_mode='in-network'): - - self.create_svc_chains( - st_name, si_prefix, si_count, max_inst, svc_scaling=svc_scaling, - left_vn=left_vn, right_vn=right_vn, svc_mode=svc_mode) - - def setup_policy(self, policy_name='policy_in_network', policy_rules=[], src_vn_fixture=None, dest_vn_fixture=None): - - self.create_policy( - policy_name=policy_name, rules=policy_rules, src_vn_fixture=src_vn_fixture, - dest_vn_fixture=dest_vn_fixture) - - def restart_service(self, ip_list, service, command='restart'): - - for ip in ip_list: - cmd = 'service %s %s' % (service, command) - self.inputs.run_cmd_on_server( - ip, cmd, username='root', password='c0ntrail123') - - def reboot_node(self, ip_list): - - for ip in ip_list: - self.inputs.run_cmd_on_server( - ip, 'reboot', username='root', password='c0ntrail123') - - def reboot_vm(self, vm, cmd): - - vm.run_cmd_on_vm([cmd]) - - def triggers(self, preference='', ip=[], command='', service='', vm=None): - ''' - preference : agent restart - to restart vrouter service - control restart - collector restart - agent stop - control stop - collector stop - agent start - control start - collector start - agent 
reboot - control reboot - collector reboot - vm reboot - ''' - - if not preference: - if (ip and service): - self.restart_service(ip, service) - if vm: - self.reboot_vm(vm) - if ip: - self.reboot_node(ip) - return - if (preference in 'agent restart') or (preference in 'control restart') or (preference in 'collector restart'): - if (ip and service): - self.restart_service(ip, service) - if (preference in 'agent stop') or (preference in 'control stop') or (preference in 'collector stop'): - if (ip and service): - self.restart_service(ip, service) - if (preference in 'agent start') or (preference in 'control start') or (preference in 'collector start'): - if (ip and service): - self.restart_service(ip, service) - if (preference in 'agent reboot') or (preference in 'control reboot') or (preference in 'collector reboot'): - if ip: - self.reboot_node(ip) - if (preference in 'vm reboot'): - if vm: - self.reboot_vm(vm, command) - - def verifications(self, verify='uve'): - - if 'uve' in verify: - assert self.analytics_obj.verify_all_uves() - if 'tables' in verify: - start_time = self.analytics_obj.get_time_since_uptime( - self.inputs.cfgm_ip) - assert self.analytics_obj.verify_object_tables( - start_time=start_time, skip_tables=[ - 'FlowSeriesTable', 'FlowRecordTable', - 'ObjectQueryQid', - 'ServiceChain', 'ObjectSITable', 'ObjectModuleInfo', - 'StatTable.QueryPerfInfo.query_stats', 'StatTable.UveVirtualNetworkAgent.vn_stats', - 'StatTable.AnalyticsCpuState.cpu_info']) - if 'setup' in verify: - assert self.setup_fixture.verify_vms_on_setup() - assert self.setup_fixture.verify_vns_on_setup() - - @preposttest_wrapper - def test_verify_analytics_scale(self): - ''' Test to validate scale - - ''' - self.setup_vm() # Creating vns/vm - # Creating service instance - left_vn_fix = self.setup_fixture.vn_obj_dict.values()[0] - right_vn_fix = self.setup_fixture.vn_obj_dict.values()[1] - left_vn_fq_name = self.setup_fixture.vn_obj_dict.values()[0].vn_fq_name - right_vn_fq_name = 
self.setup_fixture.vn_obj_dict.values()[ - 1].vn_fq_name - self.setup_service_instance( - left_vn=left_vn_fq_name, right_vn=right_vn_fq_name) - - # Creating rules and policy - rules = [ - { - 'direction': '<>', - 'protocol': 'any', - 'source_network': left_vn_fq_name, - 'src_ports': [0, -1], - 'dest_network': right_vn_fq_name, - 'dst_ports': [0, -1], - 'simple_action': 'pass', - 'action_list': {'apply_service': self.action_list} - }, - ] - self.setup_policy(policy_rules=rules, - src_vn_fixture=left_vn_fix, dest_vn_fixture=right_vn_fix) - # Sending traffic - prefix = '111.1.0.0/16' - vm_uuid = self.setup_fixture.vm_valuelist[0].vm_obj.id - vm_ip = self.setup_fixture.vm_valuelist[0].vm_ip - self.provision_static_route( - prefix=prefix, virtual_machine_id=vm_uuid, - virtual_machine_interface_ip=vm_ip, route_table_name='my_route_table', - user='admin', password='contrail123') - - dest_min_port = 8000 - dest_max_port = 8005 - ips = self.get_min_max_ip_from_prefix(prefix) - - first_vm = self.setup_fixture.vm_valuelist[0] - vm_list = self.setup_fixture.vm_valuelist[1:] - self.tx_vm_node_ip = self.inputs.host_data[ - self.nova_h.get_nova_host_of_vm(first_vm.vm_obj)]['host_ip'] - self.start_time = self.analytics_obj.getstarttime(self.tx_vm_node_ip) - traffic_threads = [] - for vm in vm_list: - t = threading.Thread( - target=self.start_traffic, args=( - first_vm, ips[0], ips[-1], vm.vm_ip, - dest_min_port, dest_max_port,)) - traffic_threads.append(t) - for th in traffic_threads: - time.sleep(1) - th.start() - time.sleep(60) -# - # Analytics query to flow tables - self.logger.info("start time= %s" % (self.start_time)) - self.build_query(left_vn_fix.vn_fq_name, right_vn_fix.vn_fq_name) - self.run_query() -# print 'Waiting...' 
- - # Triggers - # restart agent with scenario up - self.logger.info("Verifying agent restart") - temp = self.inputs.compute_ips[:] - self.inputs.compute_ips.remove(self.tx_vm_node_ip) - self.triggers(preference='agent restart', ip=self.inputs.compute_ips, - command='restart', service='contrail-vrouter') - time.sleep(20) - self.verifications(verify='uve') - self.inputs.compute_ips = temp[:] - # switchover collector - self.logger.info("Verifying collector start/stop") - self.triggers(preference='collector stop', ip=[ - self.inputs.collector_ips[0]], command='stop', service='supervisor-analytics') - temp = self.inputs.collector_ips[:] - self.inputs.collector_ips.remove(self.inputs.collector_ips[0]) - time.sleep(10) - self.verifications(verify='uve') - self.inputs.collector_ips = temp[:] - self.triggers(preference='collector start', ip=[ - self.inputs.collector_ips[0]], command='start', service='supervisor-analytics') - time.sleep(10) - # collector reboot - self.logger.info("Verifying collector reboot") - self.triggers(preference='collector reboot', - ip=[self.inputs.collector_ips[1]]) - temp = self.inputs.collector_ips[:] - self.inputs.collector_ips.remove(self.inputs.collector_ips[1]) - time.sleep(10) - self.verifications(verify='uve') - self.inputs.collector_ips = temp[:] - # reboot dest vm - self.logger.info("Verifying vm reboot") - for vm in vm_list: - self.triggers(preference='vm reboot', command='reboot', vm=vm) - time.sleep(20) - # reboot dest compute - self.logger.info("Verifying agent reboot") - for vm in vm_list: - dest_vm_node_list = [] - dest_vm_node_ip = self.inputs.host_data[ - self.nova_h.get_nova_host_of_vm(vm.vm_obj)]['host_ip'] - dest_vm_node_list.append(dest_vm_node_ip) - self.triggers(preference='agent reboot', ip=dest_vm_node_list) - time.sleep(20) - # add new config-TO DO - # modify policy rules without affecting live flows - TO DO - # modify policy rules affecting live flows-TO DO - # force continuous aging of flows- TO DO - 
self.verifications(verify='uve') - # Stopping traffic - self.stop_traffic(first_vm) - - for th in traffic_threads: - th.join() - return True -# end AnalyticsTestPerformance - - -def main(): - obj = AnalyticsTestPerformance() -# obj.get_ip_list_from_prefix('192.0.2.16/29') - for ip in obj.get_ip_list_from_prefix('192.0.2.16/29'): - print ip - -if __name__ == "__main__": - main() diff --git a/tcutils/collector/analytics_scale_tests_with_setup.py b/tcutils/collector/analytics_scale_tests_with_setup.py deleted file mode 100644 index baaf8a37d..000000000 --- a/tcutils/collector/analytics_scale_tests_with_setup.py +++ /dev/null @@ -1,58 +0,0 @@ -# Need to import path to test/fixtures and test/scripts/ -# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/' -# -# To run tests, you can do 'python -m testtools.run tests'. To run specific tests, -# You can do 'python -m testtools.run -l tests' -# Set the env variable PARAMS_FILE to point to your ini file. Else it will try to pick params.ini in PWD -# - -import fixtures -import testtools -import unittest - -from common.contrail_test_init import ContrailTestInit -from common.connections import ContrailConnections -from contrail_fixtures import * -from tcutils.wrappers import preposttest_wrapper -from mock_generator import MockGeneratorFixture - - -class AnalyticsScaleTest(testtools.TestCase, fixtures.TestWithFixtures): - - def setUp(self): - super(AnalyticsScaleTest, self).setUp() - if 'PARAMS_FILE' in os.environ: - self.ini_file = os.environ.get('PARAMS_FILE') - else: - self.ini_file = 'params.ini' - self.inputs = self.useFixture(ContrailTestInit(self.ini_file)) - self.connections = ContrailConnections(self.inputs) - self.logger = self.inputs.logger - self.analytics_obj = self.connections.analytics_obj - self.ops_inspect = self.connections.ops_inspect - # end setUp - - def cleanUp(self): - super(AnalyticsScaleTest, self).cleanUp() - # end cleanUp - - def runTest(self): - pass - # end runTest - - 
@preposttest_wrapper - def test_generator_scale(self, num_generators=10, - num_instances_per_generator=10, num_networks=50, - num_flows_per_instance=10): - '''Test to validate collector scaling viz number of generators - ''' - mock_gen_fixture = self.useFixture( - MockGeneratorFixture(connections=self.connections, - inputs=self.inputs, num_generators=num_generators, - num_instances_per_generator=num_instances_per_generator, - num_networks=num_networks, - num_flows_per_instance=num_flows_per_instance)) - return True - # end test_generator_scale - -# end class AnalyticsScaleTest diff --git a/tcutils/collector/analytics_tests.py b/tcutils/collector/analytics_tests.py deleted file mode 100644 index 99b6dfd2d..000000000 --- a/tcutils/collector/analytics_tests.py +++ /dev/null @@ -1,3809 +0,0 @@ -# Need to import path to test/fixtures and test/scripts/ -# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/' -# -# To run tests, you can do 'python -m testtools.run tests'. To run specific tests, -# You can do 'python -m testtools.run -l tests' -# Set the env variable PARAMS_FILE to point to your ini file. 
Else it will try to pick params.ini in PWD -# -import fixtures -from tcutils.util import * -import logging as LOG -import re -import json -import urllib2 -import requests -import time -import datetime -import threading -import Queue -from subprocess import Popen, PIPE -import shlex -from netaddr import * -import random -from tcutils.collector.opserver_introspect_utils import VerificationOpsSrvIntrospect - -months = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': - 6, 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12} -months_number_to_name = { - '01': 'JAN', '02': 'FEB', '03': 'MAR', '04': 'APR', '05': 'MAY', - '06': 'JUN', '07': 'JUL', '08': 'AUG', '09': 'SEP', '10': 'OCT', '11': 'NOV', '12': 'DEC'} - -uve_dict = { - 'xmpp-peer/': ['state_info', 'peer_stats_info', 'event_info', - 'send_state', 'identifier'], - 'config-node/': ['module_cpu_info', 'module_id', 'cpu_info', - 'build_info', 'config_node_ip', 'process_info'], - 'control-node/': ['uptime', 'build_info', 'cpu_info', 'ifmap_info', 'process_info'], - 'analytics-node/': ['cpu_info', 'ModuleCpuState', 'module_cpu_info', - 'process_info', 'contrail-collector', 'contrail-query-engine', - 'contrail-analytics-nodemgr', 'contrail-analytics-api', 'build_info', - 'generator_infos'], - 'generator/': ['client_info', 'ModuleServerState', 'session_stats', 'generator_info'], - 'bgp-peer/': ['state_info', 'peer_stats_info', 'families', 'peer_type', 'local_asn', - 'configured_families', 'event_info', 'peer_address', 'peer_asn', 'send_state'], - 'vrouter/': ['exception_packets', 'cpu_info', 'uptime', - 'total_flows', 'drop_stats', 'xmpp_stats_list', - 'vhost_stats', 'process_info', - 'control_ip', 'dns_servers', - 'build_info', 'vhost_cfg', - 'tunnel_type', 'xmpp_peer_list', - 'self_ip_list','process_status', - 'exception_packets','drop_stats', - 'phy_if_stats_list', - 'vhost_stats'], - 'dns-node/': ['start_time', 'build_info', 'self_ip_list'], - 'virtual-machine/': [ - 'interface_list', - 'vm_name', - 
'uuid']} - -uve_list = ['xmpp-peer/', 'config-node/', 'control-node/','virtual-machine/', - 'analytics-node/', 'generator/', 'bgp-peer/', 'dns-node/', 'vrouter/'] - - -http_introspect_ports = {'HttpPortConfigNodemgr' : 8100, - 'HttpPortControlNodemgr' : 8101, - 'HttpPortVRouterNodemgr' : 8102, - 'HttpPortDatabaseNodemgr' : 8103, - 'HttpPortAnalyticsNodemgr' : 8104, - 'HttpPortStorageStatsmgr' : 8105, - 'HttpPortControl' : 8083, - 'HttpPortApiServer' : 8084, - 'HttpPortAgent' : 8085, - 'HttpPortSchemaTransformer' : 8087, - 'HttpPortSvcMonitor' : 8088, - 'HttpPortCollector' : 8089, - 'HttpPortOpserver' : 8090, - 'HttpPortQueryEngine' : 8091, - 'HttpPortDns' : 8092} - -GENERATORS = {'Compute' : ['contrail-vrouter-agent', - 'contrail-vrouter-nodemgr' - ], - 'Analytics' : ['contrail-snmp-collector', - 'contrail-query-engine', - 'contrail-analytics-nodemgr', - 'contrail-topology', - 'contrail-collector', - 'contrail-analytics-api' - ], - 'Database' : ['contrail-database-nodemgr'], - 'Config' : ['contrail-api', - 'contrail-discovery', - 'contrail-svc-monitor', - 'contrail-config-nodemgr', - 'contrail-schema', - 'DeviceManager'], - 'Control' : ['contrail-control', - 'contrail-control-nodemgr', - 'contrail-dns' - ] - } - -class AnalyticsVerification(fixtures.Fixture): - - def __init__(self, inputs, cn_inspect, agent_inspect, ops_inspect, logger=LOG): - - self.inputs = inputs - self.ops_inspect = ops_inspect - self.agent_inspect = agent_inspect - self.cn_inspect = cn_inspect - self.logger = logger - self.get_all_generators() - - def get_all_generators(self): - self.generator_hosts = [] - self.bgp_hosts = [] - self.compute_hosts = [] - self.collector_hosts = [] - -# self.cfgm_host = self.inputs.host_data[self.inputs.cfgm_ip]['name'] -# if (self.cfgm_host not in self.generator_hosts): -# self.generator_hosts.append(self.cfgm_host) -# # collector_ip=self.inputs.collector_ip -# # self.collector_host=self.inputs.host_data[collector_ip]['name'] - - vip_contrail = 
self.inputs.vip['contrail'] \ - if self.inputs.vip.has_key('contrail') else None - - for collector_ip in self.inputs.collector_ips: - if collector_ip == vip_contrail: - continue - - for collector_ip in self.inputs.collector_ips: - c_host = self.inputs.host_data[collector_ip]['name'] - self.collector_hosts.append(c_host) - if (c_host not in self.generator_hosts): - self.generator_hosts.append(c_host) - - for ip in self.inputs.bgp_ips: - if ip == vip_contrail: - continue - bgp_host = self.inputs.host_data[ip]['name'] - self.bgp_hosts.append(bgp_host) - if (bgp_host not in self.generator_hosts): - self.generator_hosts.append(bgp_host) - for ip in self.inputs.compute_ips: - compute_host = self.inputs.host_data[ip]['name'] - self.compute_hosts.append(compute_host) - if (compute_host not in self.generator_hosts): - self.generator_hosts.append(compute_host) - - def get_connection_status(self, collector, generator, moduleid, node_type, instanceid='0'): - '''Getting connection status with generator:node_type:moduleid:instanceid with collector - ''' - connobj = self.get_connection_dict( - collector, generator, moduleid, node_type, instanceid) - if connobj: - return connobj['status'] - else: - return None - - def get_primary_collector(self, opserver, generator, moduleid, node_type, instanceid='0'): - '''Get primary collector for a generator''' - - connobj = self.get_connection_dict( - opserver, generator, moduleid, node_type, instanceid) - if connobj: - return connobj['primary'] - else: - return None - - def get_secondary_collector(self, opserver, generator, moduleid, node_type, instanceid='0'): - '''Get secondary collector for a generator''' - - connobj = self.get_connection_dict( - opserver, generator, moduleid, node_type, instanceid) - if connobj: - return connobj['secondary'] - else: - return None - - def get_connection_dict(self, collector, generator, moduleid, node_type, instanceid): - '''Getting connection dict with generator:moduleid with collector - ''' - self.opsobj 
= self.ops_inspect[collector].get_ops_generator( - generator=generator, moduleid=moduleid, node_type=node_type, instanceid=instanceid) - if not self.opsobj: - self.logger.warn("query returned none") - st = self.ops_inspect[self.inputs.collector_ips[0]].send_trace_to_database( - node=self.inputs.collector_names[0], \ - module='Contrail-Analytics-Api', trace_buffer_name='DiscoveryMsg') - self.logger.info("status: %s" % (st)) - return None - self.conoutput = self.opsobj.get_attr('Client', 'client_info') - if not self.conoutput: - self.logger.info("query returned none") - st = self.ops_inspect[self.inputs.collector_ips[0]].send_trace_to_database( - node=self.inputs.collector_names[0], \ - module='Contrail-Analytics-Api', trace_buffer_name='DiscoveryMsg') - self.logger.info("status: %s" % (st)) - return None - return self.conoutput - - def verify_generator_connection_to_collector(self): - '''Verify the collector connection with different modules''' - - for k,v in GENERATORS.items(): - if (k == 'Compute'): - for name in self.inputs.compute_names: - for elem in v: - assert self.verify_connection_status( - name,elem,k) - if (k == 'Analytics'): - for name in self.inputs.collector_names: - for elem in v: - assert self.verify_connection_status( - name,elem,k) - if (k == 'Database'): - for name in self.inputs.database_names: - for elem in v: - assert self.verify_connection_status( - name,elem,k) - if (k == 'Config'): - - for name in self.inputs.cfgm_names: - result = False - for elem in v: - result = result or self.verify_connection_status( - name,elem,k) - assert result - - if (k == 'Control'): - for name in self.inputs.bgp_names: - for elem in v: - assert self.verify_connection_status( - name,elem,k) - - @retry(delay=5, tries=4) - def verify_connection_status(self, generator, moduleid, node_type, instanceid='0'): - '''Verify if connection status with collector and generator:node_type:moduleid:instance - is established - ''' - - self.g = generator - self.m = moduleid - result 
= True - for collector_ip in self.inputs.collector_ips: - self.logger.info("Verifying through opserver in %s" % - (collector_ip)) - status = self.get_connection_status( - collector_ip, self.g, self.m, node_type, instanceid) - if (status == 'Established'): - self.logger.info("%s:%s:%s:%s is connected to collector %s" % - (self.g, node_type, self.m, instanceid, collector_ip)) - result = result & True - else: - self.logger.warn( - "%s:%s:%s:%s is NOT connected to collector %s" % - (self.g, node_type, self.m, instanceid, collector_ip)) - result = result & False - return result - - def get_collector_of_gen(self, collector, gen, module, node_type, instance='0'): - '''Gets the collector node of a generator - ''' - connobj = self.get_connection_dict( - collector, gen, module, node_type, instance) - return connobj['collector_name'] - - def get_all_generator_links(self, module=None): - '''Get all links for a particular generator''' - - ret = [] - try: - links = self.ops_inspect[self.inputs.collector_ips[ - 0]].get_hrefs_to_all_UVEs_of_a_given_UVE_type(uveType='generators') - if links: - pattern = '%s(.*)' % module - compiled = re.compile(pattern) - for elem in links: - if compiled.search(str(elem)): - ret.append(elem) - except Exception as e: - self.logger.warn("Got exception as %s" % (e)) - finally: - return ret - - def get_module_instances(self, module): - '''Return the module instances from analytics/genarators url''' - ret = [] - try: - links = self.get_all_generator_links(module=module) - if links: - for elem in links: - inst = str(elem['name']).split(":")[-1] - ret.append(inst) - except Exception as e: - self.logger.warn("Got exception as %s" % (e)) - finally: - return ret - - def get_uve_key(self, uve=None): - '''{ - href: "http://10.204.216.14:8081/analytics/uves/virtual-machine/292c7779-c085-4079-91f6-440272bd2922?flat", - name: "292c7779-c085-4079-91f6-440272bd2922" - }''' - ret = [] - try: - links = self.ops_inspect[self.inputs.collector_ips[0] - 
].get_hrefs_to_all_UVEs_of_a_given_UVE_type(uveType=uve) - if links: - for elem in links: - ret.append(elem['name']) - except Exception as e: - self.logger.warn("Got exception as %s" % (e)) - finally: - return ret - - -# Collector uve functions# -# ------------------------# - - # @retry(delay=5, tries=1) - def verify_collector_uve(self): - '''Verify that all generators are connected to collector''' - result = True - - # Verify module-ids correctly shown in the collector uve for respective generators - # verify module-id for bgp node in collector uve - should be - # 'Contrail-Control' - for ip in self.inputs.bgp_ips: - assert self.verify_collector_connection_introspect(ip,http_introspect_ports['HttpPortControl']) - for ip in self.inputs.cfgm_ips: - assert self.verify_collector_connection_introspect(ip,http_introspect_ports['HttpPortApiServer']) - result = False - for ip in self.inputs.cfgm_ips: - result= result or self.verify_collector_connection_introspect(ip,http_introspect_ports['HttpPortSchemaTransformer']) - assert result - result = False - for ip in self.inputs.cfgm_ips: - result = result or self.verify_collector_connection_introspect(ip,http_introspect_ports['HttpPortSvcMonitor']) - assert result - for ip in self.inputs.collector_ips: - assert self.verify_collector_connection_introspect(ip,http_introspect_ports['HttpPortOpserver']) - for ip in self.inputs.collector_ips: - assert self.verify_collector_connection_introspect(ip,http_introspect_ports['HttpPortQueryEngine']) - for ip in self.inputs.collector_ips: - self.logger.info("Verifying through opserver in %s" % (ip)) - expected_module_id = ['contrail-control', 'contrail-dns'] - expected_node_type = 'Control' - expected_instance_id = '0' - for bgp_host in self.bgp_hosts: - for module in expected_module_id: - is_established = self.verify_connection_status( - bgp_host, module, expected_node_type, expected_instance_id) - # collector=self.output['collector_name'] - if is_established: - #self.logger.info("%s:%s 
connected to collector %s"%(bgp_host,module,collector)) - result = result and True - else: - result = result and False - - expected_module_id = 'contrail-vrouter-agent' - expected_node_type = 'Compute' - expected_instance_id = '0' - for compute_host in self.compute_hosts: - is_established = self.verify_connection_status( - compute_host, expected_module_id, expected_node_type, expected_instance_id) - # collector=self.output['collector_name'] - if is_established: - result = result and True - else: - result = result and False - # Verifying module_id from ApiServer - expected_cfgm_modules = 'contrail-schema' - expected_node_type = 'Config' - expected_instance_id = '0' - for cfgm_node in self.inputs.cfgm_names: - result1 = True - is_established = self.verify_connection_status( - cfgm_node, expected_cfgm_modules, expected_node_type, expected_instance_id) - if is_established: - # collector=self.output['collector_name'] - result1 = result1 and True - break - else: - result1 = result1 and False - st = self.ops_inspect[self.inputs.collector_ips[0]].send_trace_to_database( - node=self.inputs.collector_names[0], \ - module='Contrail-Analytics-Api', trace_buffer_name='DiscoveryMsg') - self.logger.info("status: %s" % (st)) - result = result and result1 - # Verifying module_id from DiscoveryService - expected_cfgm_modules = 'contrail-discovery' - expected_node_type = 'Config' - expected_instance_id = '0' - for cfgm_node in self.inputs.cfgm_names: - result1 = True - is_established = self.verify_connection_status( - cfgm_node, expected_cfgm_modules, expected_node_type, expected_instance_id) - if is_established: - # collector=self.output['collector_name'] - result1 = result1 and True - break - else: - result1 = result1 and False - st = self.ops_inspect[self.inputs.collector_ips[0]].send_trace_to_database( - node=self.inputs.collector_names[0], \ - module='Contrail-Analytics-Api', trace_buffer_name='DiscoveryMsg') - self.logger.info("status: %s" % (st)) - result = result and result1 
- #Verifying for ServiceMonitor - expected_cfgm_modules = 'contrail-svc-monitor' - expected_node_type = 'Config' - expected_instance_id = '0' - for cfgm_node in self.inputs.cfgm_names: - result1 = True - is_established = self.verify_connection_status( - cfgm_node, expected_cfgm_modules, expected_node_type, expected_instance_id) - if is_established: - # collector=self.output['collector_name'] - resulti1 = result1 and True - break - else: - result1 = result1 and False - st = self.ops_inspect[self.inputs.collector_ips[0]].send_trace_to_database( - node=self.inputs.collector_names[0], \ - module='Contrail-Analytics-Api', trace_buffer_name='DiscoveryMsg') - result = result and result1 - # Verifying module_id ApiServer - expected_apiserver_module = 'Contrail-Api' - expected_apiserver_instances = self.get_module_instances( - expected_apiserver_module) - expected_node_type = 'Config' - # expected_cfgm_modules=['Contrail-Schema','contrail-svc-monitor'] - for cfgm_node in self.inputs.cfgm_names: - for inst in expected_apiserver_instances: - result1 = True - is_established = self.verify_connection_status( - cfgm_node, expected_apiserver_module, expected_node_type, inst) - if is_established: - result1 = result1 and True - break - else: - result = result and False - st = self.ops_inspect[self.inputs.collector_ips[0]].send_trace_to_database( - node=self.inputs.collector_names[0], \ - module='Contrail-Analytics-Api', trace_buffer_name='DiscoveryMsg') - result = result1 and result - # Verifying module_id Contrail-Analytics-Api - expected_opserver_module = 'Contrail-Analytics-Api' - expected_opserver_instances = self.get_module_instances( - expected_opserver_module) - expected_node_type = 'Analytics' - for c_host in self.collector_hosts: - for inst in expected_opserver_instances: - is_established = self.verify_connection_status( - c_host, expected_opserver_module, expected_node_type, inst) - if is_established: - # collector=self.output['collector_name'] - result = result and True - 
else: - result = result and False - # Verifying collector:moduleid - expected_collector_module = ['contrail-collector', 'contrail-query-engine'] - expected_node_type = 'Analytics' - expected_instance_id = '0' - for c_host in self.collector_hosts: - for module in expected_collector_module: - is_established = self.verify_connection_status( - c_host, module, expected_node_type, expected_instance_id) - # collector=self.output['collector_name'] - if is_established: - result = result and True - else: - result = result and False - return result - - @retry(delay=3, tries=15) - def verify_hrefs_to_all_uves_of_a_given_uve_type(self): - '''Verify all analytics links - ''' - result = True - for ip in self.inputs.collector_ips: - self.logger.info( - "Verifying the bgp-routers links through opserver %s" % (ip)) - self.links = self.ops_inspect[ - ip].get_hrefs_to_all_UVEs_of_a_given_UVE_type(uveType='control-nodes') - gen_list = [] - for elem in self.links: - name = elem.get_attr('Name') - gen_list.append(name) - missing_nodes = set(gen_list) ^ set(self.inputs.bgp_names) - if not missing_nodes: - self.logger.info("%s is present in the link" % - (self.inputs.bgp_names)) - result = result and True - else: - self.logger.info( - "%s is not present in the in the bgp-routers" % - (missing_nodes)) - result = result and False - - self.logger.info( - "Verifying the vrouters links through opserver %s" % (ip)) - self.links = self.ops_inspect[ - ip].get_hrefs_to_all_UVEs_of_a_given_UVE_type(uveType='vrouters') - gen_list = [] - for elem in self.links: - name = elem.get_attr('Name') - gen_list.append(name) - for name in self.inputs.compute_names: - if (name in gen_list): - self.logger.info("%s is present in the link" % (name)) - result = result and True - else: - self.logger.info( - "%s is not present in the in the vrouters" % (name)) - result = result and False - - self.logger.info( - "Verifying the collector links through opserver %s" % (ip)) - self.links = self.ops_inspect[ - 
ip].get_hrefs_to_all_UVEs_of_a_given_UVE_type(uveType='analytics-nodes') - gen_list = [] - for elem in self.links: - name = elem.get_attr('Name') - gen_list.append(name) - missing_nodes = set(gen_list) ^ set(self.inputs.collector_names) - if not missing_nodes: - self.logger.info("%s is present in the link" % - (self.inputs.collector_names)) - result = result and True - else: - self.logger.info( - "%s is not present in the in the bgp-routers" % - (missing_nodes)) - result = result and False - - self.logger.info( - "Verifying the collector links through opserver %s" % (ip)) - self.links = self.ops_inspect[ - ip].get_hrefs_to_all_UVEs_of_a_given_UVE_type(uveType='config-nodes') - gen_list = [] - for elem in self.links: - name = elem.get_attr('Name') - gen_list.append(name) - missing_nodes = set(gen_list) ^ set(self.inputs.cfgm_names) - if not missing_nodes: - self.logger.info("%s is present in the link" % - (self.inputs.cfgm_names)) - result = result and True - else: - self.logger.info( - "%s is not present in the in the bgp-routers" % - (missing_nodes)) - result = result and False - return result -# end collector uve functions - -# vrouter uve functions -# ------------------------# - - # Vrouter xmpp connection verification - @retry(delay=3, tries=15) - def verify_vrouter_xmpp_connections(self): - '''Verify that vrouter is connected to the bgp router''' - result = False - for compute_host in self.compute_hosts: - peers = [] - collector = self.get_collector_of_gen( - self.inputs.collector_ips[0], compute_host, 'contrail-vrouter-agent', 'Compute') - collector_ip = self.inputs.host_data[collector]['host_ip'] - self.ops_compute_obj = self.ops_inspect[ - collector_ip].get_ops_vrouter(vrouter=compute_host) - xmpp_peer_list = self.ops_compute_obj.get_attr( - 'Agent', 'xmpp_peer_list') - for elem in xmpp_peer_list: - ip = elem['ip'] - peers.append(ip) - missing_peers = set(self.inputs.bgp_control_ips) - set(peers) - if not missing_peers: - self.logger.info( - "xmpp peer 
correctly displayed as %s for vrouter %s " % - (peers, compute_host)) - result = True - else: - self.logger.error("xmpp peer %s not displayed vrouter %s " % - (missing_peers, compute_host)) - return False - return result - - @retry(delay=3, tries=15) - def verify_vm_list_not_in_vrouter_uve(self, vm_uuid=None, vrouter='localhost', tap=None): - '''Verifies that vm not in the vrouter uve if the vm is deleted''' - - result = True - result1 = True - result2 = True - if not vm_uuid: - self.logger.warn("vm_uuid not resceived") - return False - collector = self.get_collector_of_gen( - self.inputs.collector_ips[0], vrouter, 'contrail-vrouter-agent', 'Compute') -# collector_ip = self.inputs.host_data[collector]['host_ip'] - collector_ip=self.inputs.get_host_ip(name=collector) - self.vrouter_ops_obj = self.ops_inspect[ - collector_ip].get_ops_vrouter(vrouter=vrouter) - # Verifying vm in vrouter uve - vrouter_ops_vms = self.vrouter_ops_obj.get_attr( - 'Agent', 'virtual_machine_list', match=vm_uuid) - if not vrouter_ops_vms: - result = result and True - self.logger.info("vm %s is not present in vrouter %s uve " % - (vm_uuid, vrouter)) - else: - result = result and False - self.logger.error("vm %s is still present in vrouter %s uve " % - (vm_uuid, vrouter)) - self.logger.info( - "Verifying if the vm interface deleted from vroter uve...") - vm_interface_list = self.vrouter_ops_obj.get_attr( - 'Agent', 'interface_list') - if vm_interface_list: - for elem in vm_interface_list: - if re.search(vm_uuid, elem): - self.logger.warn( - "%s interface NOT deleted from vrouter uve ..." % (elem)) - result1 = result1 and False - else: - result1 = result1 and True - else: - self.logger.info( - "interface for vm %s deleted from vrouter uve ..." % - (vm_uuid)) - result1 = result1 and True - if result1: - self.logger.info( - "interface for vm %s deleted from vrouter uve ..." 
% - (vm_uuid)) - result = result and True - # Verify that deleted interface not in error interface list - error_interface_list = self.vrouter_ops_obj.get_attr( - 'Agent', 'error_intf_list') - if error_interface_list: - for elem in error_interface_list: - if (re.search(vm_uuid, elem)): - self.logger.warn( - "%s deleted interface in error interface list ..." % (elem)) - result2 = result2 and False - else: - self.logger.info( - "deleted interface not in error interface list ...") - result2 = result2 and True - - return result and result1 and result2 - - @retry(delay=3, tries=15) - def verify_vm_list_in_vrouter_uve(self, vm_uuid=None, vn_fq_name=None, vrouter='localhost', tap=None): - '''Verify that vm exists in the vrouter - ,also verifies that network, in which vm is connected , gets downloaded in vrouter and tap interface of the vm is created''' - result = False - result1 = False - if not vm_uuid: - self.logger.warn("vm_uuid not resceived") - return False - collector = self.get_collector_of_gen( - self.inputs.collector_ips[0], vrouter, 'contrail-vrouter-agent', 'Compute') -# collector_ip = self.inputs.host_data[collector]['host_ip'] - collector_ip=self.inputs.get_host_ip(name=collector) - self.vrouter_ops_obj = self.ops_inspect[ - collector_ip].get_ops_vrouter(vrouter=vrouter) - # Verifying vm in vrouter uve - vrouter_ops_vms = self.vrouter_ops_obj.get_attr( - 'Agent', 'virtual_machine_list', match=vm_uuid) - if not vrouter_ops_vms: - result = False - self.logger.error("vm %s is not present in vrouter %s uve " % - (vm_uuid, vrouter)) - else: - result = True - self.logger.info("vm %s is present in vrouter %s uve " % - (vm_uuid, vrouter)) - # Verifying tap interfaces in vrouter uve - if tap: - # disabling for the time beeing.Vrouter tap interface name is - # chenaged.. 
- result = True - vm_tap_intf = self.vrouter_ops_obj.get_attr( - 'Agent', 'interface_list', match=tap) - if not vm_tap_intf: - result1 = False - self.logger.error( - "tap interface %s of vm %s is not present in vrouter %s uve " % - (tap, vm_uuid, vrouter)) - else: - result1 = True - self.logger.info( - "tap interface %s of vm %s is present in vrouter %s uve " % - (tap, vm_uuid, vrouter)) - else: - result1 = True - # Verify if network created - if vn_fq_name: - result2 = False - uve_vn = self.vrouter_ops_obj.get_attr( - 'Agent', 'connected_networks', match=vn_fq_name) - if not uve_vn: - result2 = False - self.logger.error( - "Connected network %s of vm %s is not present in vrouter %s uve " % - (vn_fq_name, vm_uuid, vrouter)) - else: - result2 = True - self.logger.info( - "Connected nwtwork %s of vm %s is present in vrouter %s uve " % - (vn_fq_name, vm_uuid, vrouter)) - else: - result2 = True - return (result and result1 and result2) -# return (result and result2) - - def get_flows_vrouter_uve(self, vrouter='localhost', flowType='active_flows'): - '''flowType=active_flows,aged_flows,total_flows''' - collector = self.get_collector_of_gen( - self.inputs.collector_ips[0], vrouter, 'contrail-vrouter-agent', 'Compute') - collector_ip = self.inputs.host_data[collector]['host_ip'] - self.vrouter_ops_obj = self.ops_inspect[ - collector_ip].get_ops_vrouter(vrouter=vrouter) - # self.vrouter_ops_obj=self.ops_inspect.get_ops_vrouter(vrouter=vrouter) - return self.vrouter_ops_obj.get_attr('Stats', flowType) - - def get_vrouter_mem_stats(self): - '''compute uve o/p: {u'nodef1': {u'sys_mem_info': - {u'total': 197934164, u'used': 4815188, u'free': 193118976, - u'buffers': 155812}, u'num_cpu': 32, u'cpu_share': 0.171875, - u'meminfo': {u'virt': 2462240, u'peakvirt': 2525360, - u'res': 109032}, - u'cpuload': {u'fifteen_min_avg': 0.05, u'five_min_avg': 0.03, - u'one_min_avg': 0.06}}} - return u'virt' as dict with node_name as key - ''' - all_vr_mem_stats = {} - for compute_host in 
self.compute_hosts: - collector = self.get_collector_of_gen( - self.inputs.collector_ips[0], compute_host, 'contrail-vrouter-agent', 'Compute') - collector_ip = self.inputs.host_data[collector]['host_ip'] - self.vrouter_ops_obj = self.ops_inspect[ - collector_ip].get_ops_vrouter(vrouter=compute_host) - if self.vrouter_ops_obj: - out = self.vrouter_ops_obj.get_attr('Stats', 'cpu_info') - else: - return all_vr_mem_stats - all_vr_mem_stats[compute_host] = out['meminfo']['virt'] - return all_vr_mem_stats - - def get_vrouter_drop_stats(self): - '''Get data from vrouter uve drop_stats data.. - sample: drop_stats: {ds_flow_no_memory: 0,ds_flow_queue_limit_exceeded: 55426,...} - ''' - all_vr_drop_stats = {} - for compute_host in self.compute_hosts: - collector = self.get_collector_of_gen( - self.inputs.collector_ips[0], compute_host, 'contrail-vrouter-agent', 'Compute') - collector_ip = self.inputs.host_data[collector]['host_ip'] - self.vrouter_ops_obj = self.ops_inspect[ - collector_ip].get_ops_vrouter(vrouter=compute_host) - out = self.vrouter_ops_obj.get_attr('Stats', 'drop_stats') - all_vr_drop_stats[compute_host] = out - return all_vr_drop_stats - - def get_agent_introspect_agentstatsreq(self, agent_ip=None): - inspect_h = self.agent_inspect[agent_ip] - return inspect_h.get_vna_pkt_agentstatsreq() - - def get_agent_introspect_fetchallflowrecords(self, agent_ip=None): - inspect_h = self.agent_inspect[agent_ip] - return inspect_h.get_vna_fetchallflowrecords() - # self.records=inspect_h.get_vna_fetchallflowrecords() - - def get_agent_introspect_fetchflowrecords(self, agent_ip=None, vrf=None, sip=None, dip=None, sport=None, dport=None, protocol=None): - inspect_h = self.agent_inspect[agent_ip] - return inspect_h.get_vna_fetchflowrecord(vrf=vrf, sip=sip, dip=dip, sport=sport, dport=dport, protocol=protocol) - - def get_agent_introspect_Kflowrecords(self, agent_ip=None): - #self.agent_inspect= self.connections.agent_inspect - inspect_h = self.agent_inspect[agent_ip] - 
return inspect_h.get_vna_kflowresp() - - def get_vrouter_active_xmpp_peer(self, vrouter=None): - '''Gets the the active xmpp connection from vrouter uve - [{u'status': u'true', u'ip': u'10.204.216.14', u'setup_time': - u'2013-Jun-25 08:43:46.726649'}, {u'status': u'true', - u'ip': u'10.204.216.25', u'primary': u'true', - u'setup_time': u'2013-Jun-25 08:43:46.725917'}] - ''' - collector = self.get_collector_of_gen( - self.inputs.collector_ips[0], vrouter, 'contrail-vrouter-agent', 'Compute') - collector_ip = self.inputs.host_data[collector]['host_ip'] - self.vrouter_ops_obj = self.ops_inspect[ - collector_ip].get_ops_vrouter(vrouter=vrouter) - # self.vrouter_ops_obj=self.ops_inspect.get_ops_vrouter(vrouter=vrouter) - if not self.vrouter_ops_obj: - self.logger.critical("%s vrouter uve returned none" % (vrouter)) - return None - xmpp_peer_list = self.vrouter_ops_obj.get_attr( - 'Agent', 'xmpp_peer_list', match=('primary', True)) - if xmpp_peer_list: - return xmpp_peer_list[0]['ip'] - else: - return None - - @retry(delay=5, tries=12) - def verify_active_xmpp_peer_in_vrouter_uve(self): - '''Verify active vrouter uve for active xmpp connections - - ''' - result = True - for agent in self.inputs.compute_names: - # getting active xmpp peer from vrouter uve - act_xmpp_uve = self.get_vrouter_active_xmpp_peer(vrouter=agent) - self.logger.info("Active xmpp peer in %s vrouter uve is %s" % - (agent, act_xmpp_uve)) - # self.inputs.host_data['nodea19']['host_ip'] - agent_ip = self.inputs.host_data[agent]['host_ip'] - inspect_h = self.agent_inspect[agent_ip] - xmpp_peer_from_agent_inspect = inspect_h.get_vna_xmpp_connection_status( - ) - for elem in xmpp_peer_from_agent_inspect: - if (elem['cfg_controller'] == 'Yes'): - active_xmpp = elem['controller_ip'] - self.logger.info( - "Active xmpp peer in %s agent introspect is %s" % - (agent, active_xmpp)) - if (act_xmpp_uve == active_xmpp): - result = result & True - else: - return False - return result - - def 
get_vrouter_interface_list(self, collector, vrouter): - '''Return the interface list from vrouter uve''' - self.vrouter_ops_obj = self.ops_inspect[ - collector].get_ops_vrouter(vrouter=vrouter) - return self.vrouter_ops_obj.get_attr('Agent', 'interface_list') - -# end vrouter uve functions -# virtual-network uve functions -# ------------------------# - def get_vn_uve(self, vn_fq_name): - '''This function returns entire vn uve.Need this to verify that vn uve does not exists if the vn is deleted''' - for ip in self.inputs.collector_ips: - self.opsobj = self.ops_inspect[ip] - if self.opsobj.get_ops_vn(vn_fq_name=vn_fq_name): - return self.opsobj.get_ops_vn(vn_fq_name=vn_fq_name) - return None - - def verify_vn_uve_tiers(self, vn_fq_name=None): - '''Verify that when vn is created , vn uve should show info from UveVirtualNetworkConfig and UveVirtualNetworkAgent''' - result = False - if not vn_fq_name: - vn_fq_name='default-domain:%s:default-virtual-network'%self.inputs.stack_tenant - for ip in self.inputs.collector_ips: - self.logger.info("Verifying through opserver in %s" % (ip)) - self.opsobj = self.ops_inspect[ip] - self.ops_vnoutput = self.opsobj.get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output " % - vn_fq_name) - return False - expected_tiers = ['UveVirtualNetworkAgent', - 'UveVirtualNetworkConfig'] - tiers = self.ops_vnoutput.keys() - missing_tier = set(expected_tiers) - set(tiers) - if not missing_tier: - self.logger.info( - "Tiers correctly shown in vn vue for %s in collector %s" % (vn_fq_name, ip)) - result = True - else: - self.logger.error( - "uve message did not come from %s for %s in collector %s" % - (missing_tier, vn_fq_name, ip)) - return False - return result - - @retry(delay=5, tries=6) - def verify_vn_uve_ri(self, vn_fq_name=None, ri_name=None): - '''Verify routing instance element when vn is created by apiserver''' - result = True - if not vn_fq_name: - 
vn_fq_name='default-domain:%s:default-virtual-network'%self.inputs.stack_tenant - for ip in self.inputs.collector_ips: - self.logger.info("Verifying through opserver in %s" % (ip)) - self.opsobj = self.ops_inspect[ip] - self.ops_vnoutput = self.opsobj.get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % - vn_fq_name) - return False - ri_list = self.ops_vnoutput.get_attr( - 'Config', 'routing_instance_list') - if (ri_list == None): - self.logger.error( - "%s uve did not return any routing instance" % vn_fq_name) - return False - - if not ri_name: - domain, use, vn_name = vn_fq_name.split(':') - else: - vn_name = ri_name - for elem in ri_list: - if vn_name in elem: - self.logger.info( - "routing instance %s correctly showed in vue for %s" % (vn_fq_name, vn_name)) - return True - else: - self.logger.error("Routing instance not shown in %s uve" % - (vn_fq_name)) - result = result and False - return result - - @retry(delay=5, tries=6) - def verify_ri_not_in_vn_uve(self, vn_fq_name=None, ri_name=None): - '''Verify routing instance element when vn is created by apiserver''' - result = True - if not vn_fq_name: - vn_fq_name='default-domain:%s:default-virtual-network'%self.inputs.stack_tenant - for ip in self.inputs.collector_ips: - self.logger.info("Verifying through opserver in %s" % (ip)) - self.opsobj = self.ops_inspect[ip] - self.ops_vnoutput = self.opsobj.get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % - vn_fq_name) - return False - ri_list = self.ops_vnoutput.get_attr( - 'Config', 'routing_instance_list') - if (ri_list == None): - self.logger.info("%s uve did not return any routing instance" % - vn_fq_name) - return True - - if not ri_name: - domain, use, vn_name = vn_fq_name.split(':') - else: - vn_name = ri_name - - for elem in ri_list: - if vn_name in elem: - self.logger.error( - "routing instance %s correctly showed in vue for 
%s" % (vn_fq_name, vn_name)) - return False - else: - self.logger.info("Routing instance not shown in %s uve" % - (vn_fq_name)) - result = result and True - return result - - @retry(delay=2, tries=30) - def verify_vn_uve_for_vm_not_in_vn(self, vn_fq_name=None, vm=None): - '''Verify vm not in vn uve''' - result = False - vm_intf_lst = [] - if not vn_fq_name: - self.logger.info("vn name not passed") - return False - if not vm: - self.logger.info("vm list name passed") - return False - for ip in self.inputs.collector_ips: - self.logger.info("Verifying through opserver in %s" % (ip)) - self.opsobj = self.ops_inspect[ip] - self.ops_vnoutput = self.opsobj.get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % - vn_fq_name) - return False - # Verifying vm list - vm_uuid_list = self.ops_vnoutput.get_attr( - 'Agent', 'virtualmachine_list', match=vm) - if not vm_uuid_list: - self.logger.info("%s vm not in %s uve " % (vm, vn_fq_name)) - return True - else: - self.logger.error("%s still in %s uve" % - (vm_uuid_list, vn_fq_name)) - self.logger.error("%s vm still in %s uve" % (vm, vn_fq_name)) - return False - # Verifying the vm interface deleted in the vn uve - vm_interface_list = self.ops_vnoutput.get_attr( - 'Agent', 'interface_list') - if vm_interface_list: - for elem in vm_interface_list: - if (re.search(vm, elem)): - self.logger.info("%s vm interface not in %s uve " % - (vm, vn_fq_name)) - result = result and True - else: - self.logger.error("%s interface still in %s uve" % - (elem, vn_fq_name)) - result = result and False - else: - self.logger.info("%s vm interface not in %s uve " % - (vm, vn_fq_name)) - result = result and True - return result - - @retry(delay=5, tries=10) - def verify_vn_uve_for_vm(self, vn_fq_name=None, vm=None): - '''Verify vm in vn uve''' - result = False - vm_intf_lst = [] - if not vn_fq_name: - self.logger.info("vn name not passed") - return False - if not vm: - self.logger.info("vm 
list name passed") - return False - for ip in self.inputs.collector_ips: - self.logger.info("Verifying through opserver in %s" % (ip)) - self.opsobj = self.ops_inspect[ip] - self.ops_vnoutput = self.opsobj.get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % - vn_fq_name) - return False - # Verifying vm list - vm_uuid_list = self.ops_vnoutput.get_attr( - 'Agent', 'virtualmachine_list', match=vm) - if (vm_uuid_list == None): - self.logger.error("%s uve did not return any output" % - vn_fq_name) - return False - else: - self.logger.info("expected vm list %s" % (vm)) - self.logger.info("Extracted vm list %s" % (vm_uuid_list)) - self.logger.info("%s is present in %s" % (vm, vn_fq_name)) - return True - - @retry(delay=3, tries=15) - def verify_vm_list_in_vn_uve(self, vn_fq_name=None, vm_uuid_lst=None): - '''Verify vm list for vn uve.''' - result = True - vm_intf_lst = [] - if not vn_fq_name: - self.logger.info("vn name not passed") - return False - if not vm_uuid_lst: - self.logger.info("vm list name passed") - return False - for ip in self.inputs.collector_ips: - self.logger.info("Verifying through opserver in %s" % (ip)) - self.opsobj = self.ops_inspect[ip] - self.ops_vnoutput = self.opsobj.get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % - vn_fq_name) - return False - # Verifying vm list - vm_uuid_list = self.ops_vnoutput.get_attr( - 'Agent', 'virtualmachine_list') - if (vm_uuid_list == None): - self.logger.error("%s uve did not return any output" % - vn_fq_name) - return False - for uuid in vm_uuid_lst: - if uuid in vm_uuid_list: - self.logger.info("%s vm is present in vn %s" % - (uuid, vn_fq_name)) - result = result and True - else: - self.logger.info("%s vm is NOT present in vn %s" % - (uuid, vn_fq_name)) - result = result and False - - return result - - def get_vn_uve_interface_list(self, collector, vn_fq_name=None): - '''Returns 
the list of vm interfaces in the vn''' - if not vn_fq_name: - self.logger.info("vn name not passed") - return False - if not vm_uuid: - self.logger.info("vm list name passed") - return False - self.ops_vnoutput = self.ops_inspect[ - collector].get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % vn_fq_name) - return False - vn_uve_intf_list = self.ops_vnoutput.get_attr( - 'Agent', 'interface_list') - if vn_uve_intf_list: - return vn_uve_intf_list - else: - self.logger.info("No interface shown in the vn uve of %s" % - (vn_fq_name)) - return None - - def get_vn_uve_vm_interface(self, collector, vn_fq_name=None, vm_uuid=None): - '''Returns the interface of the vm from vn uve''' - if not vn_fq_name: - self.logger.info("vn name not passed") - return False - if not vm_uuid: - self.logger.info("vm list name passed") - return False - self.ops_vnoutput = self.ops_inspect[ - collector].get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % vn_fq_name) - return False - vn_uve_intf_list = self.ops_vnoutput.get_attr( - 'Agent', 'interface_list') - result1 = False - for vm_intf in vn_uve_intf_list: - vm_uuid_extracted = str(vm_intf).split(':')[:1][0] - if (vm_uuid == vm_uuid_extracted): - self.logger.info( - "interface for vm %s is found in vn uve as %s" % - (vm_uuid, vm_intf)) - return vm_intf - self.logger.info("interface for vm %s is not created" % (vm_uuid)) - return None - - def get_vn_uve_vm_list(self, collector, vn_fq_name=None): - '''Returns the vm list from vn uve''' - if not vn: - self.logger.info("vn name not passed") - return False - self.ops_vnoutput = self.ops_inspect[ - collector].get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % vn) - return False - vn_uve_vm_list = self.ops_vnoutput.get_attr( - 'Agent', 'virtualmachine_list') - return vn_uve_vm_list - - def 
get_vn_uve_attched_policy(self, collector, vn_fq_name=None): - '''Get attached policy in vn uve - - ''' - if not vn_fq_name: - self.logger.info("vn name not passed") - return False - self.ops_vnoutput = self.ops_inspect[ - collector].get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % vn_fq_name) - return False - self.policy_list = self.ops_vnoutput.get_attr( - 'Config', 'attached_policies') - if not self.policy_list: - return None - self.policy_name_list = [] - for elem in self.policy_list: - if isinstance(elem, dict): - self.policy_name_list.append(elem['vnp_name']) - if isinstance(elem, list): - self.policy_name_list.append(elem[0][0]['vnp_name']) - return self.policy_name_list - - def get_vn_uve_num_of_rules_agent(self, collector, vn_fq_name=None): - '''Get number of rules in vn uve agent - - ''' - if not vn_fq_name: - self.logger.info("vn name not passed") - return False - self.ops_vnoutput = self.ops_inspect[ - collector].get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % vn_fq_name) - return False - self.num_of_rules = self.ops_vnoutput.get_attr( - 'Agent', 'total_acl_rules') - return self.num_of_rules - - def get_vn_uve_num_of_rules_config(self, collector, vn_fq_name=None): - '''Get number of rules in vn uve-config - - ''' - if not vn_fq_name: - self.logger.info("vn name not passed") - return False - self.ops_vnoutput = self.ops_inspect[ - collector].get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % vn_fq_name) - return False - self.num_of_rules = self.ops_vnoutput.get_attr( - 'Config', 'total_acl_rules') - return self.num_of_rules - - def get_vn_uve_connected_networks(self, collector, vn_fq_name=None): - '''Gets connected networks from vn uve when policy is attached - ''' - res = None - if not vn_fq_name: - self.logger.info("vn name not passed") - return 
False - self.ops_vnoutput = self.ops_inspect[ - collector].get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % vn_fq_name) - return False - try: - res = self.ops_vnoutput.get_attr('Config', 'connected_networks') - except Exception as e: - print e - finally: - return res - - def get_vn_uve_partially_connected_networks(self, collector, vn_fq_name=None): - '''Gets partially_connected_networks from vn uve when policy is attached - ''' - res = None - if not vn_fq_name: - self.logger.info("vn name not passed") - return False - self.ops_vnoutput = self.ops_inspect[ - collector].get_ops_vn(vn_fq_name=vn_fq_name) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % vn_fq_name) - return False - try: - res = self.ops_vnoutput.get_attr( - 'Config', 'partially_connected_networks') - except Exception as e: - print e - finally: - return res - - def get_inter_vn_stats(self, collector, src_vn, other_vn, direction='out'): - '''Returns the intervn stats''' - - res = None - if not src_vn: - self.logger.info("vn name not passed") - return False - if (direction == 'out'): - direction = 'out_stats' - else: - direction = 'in_stats' - try: - self.ops_vnoutput = self.ops_inspect[ - collector].get_ops_vn(vn_fq_name=src_vn) - if not self.ops_vnoutput: - self.logger.error("%s uve did not return any output" % src_vn) - res = self.ops_vnoutput.get_attr( - 'Agent', direction, match=('other_vn', other_vn)) - if res: - self.logger.info("Interven out stats in %s vn..." 
% (src_vn)) - self.logger.info("res = %s" % (res)) - res = res[0]['tpkts'] - except Exception as e: - print e - finally: - return res - - def verify_connected_networks_in_vn_uve(self, vn_fq_name, connected_vn_fq_name): - '''Verify connected networks and partially connected networks in vn uve based on policy - ''' - if not vn_fq_name: - self.logger.info("vn name not passed") - return False - result = True - for ip in self.inputs.collector_ips: - try: - c_net = self.get_vn_uve_connected_networks(ip, vn_fq_name) - if (connected_vn_fq_name in c_net): - self.logger.info( - "connected networks %s present in %s vn uve" % - (connected_vn_fq_name, vn_fq_name)) - result = result & True - else: - result = result & False - self.logger.warn("connected networks %s not in vn uve" % - (connected_vn_fq_name, vn_fq_name)) - pc_net = self.get_vn_uve_partially_connected_networks( - ip, vn_fq_name) - if pc_net: - if (connected_vn_fq_name in pc_net): - self.logger.warn( - "Wrong policy configuration: same vn should not be inconnected networks and partially connected networks") - result = result & False - except Exception as e: - print e - result = False - return result - - @retry(delay=3, tries=15) - def verify_vn_link(self, vn_fq_name): - '''Verifies that vn is listed in http://nodea18.englab.juniper.net:8081/analytics/virtual-networks when created''' - - # vn='default-domain:'+self.inputs.project_name+':'+vn - result = False - for ip in self.inputs.collector_ips: - self.logger.info( - "Verifying the %s virtual network link through opserver %s" % (vn_fq_name, ip)) - self.links = self.ops_inspect[ip].get_hrefs_to_all_UVEs_of_a_given_UVE_type( - uveType='virtual-networks') - gen_list = [] - for elem in self.links: - name = elem.get_attr('Name') - if name: - if (name in vn_fq_name): - self.logger.info("vn link and name as %s" % (elem)) - result = True - break - else: - result = False - else: - self.logger.warn("not links retuned") - return False - return result - - def 
get_acl(self,collector,vn_fq_name,tier = 'Agent'): - - res = None - try: - self.ops_vnoutput = self.ops_inspect[ - collector].get_ops_vn(vn_fq_name = vn_fq_name) - res = self.ops_vnoutput.get_attr( - tier , 'total_acl_rules') - except Exception as e: - self.logger.exception('Got exception as %s'%(e)) - finally: - return res - - def get_bandwidth_usage(self,collector,vn_fq_name,direction = 'out'): - - res = None - direction = '%s_bandwidth_usage'%direction - try: - self.ops_vnoutput = self.ops_inspect[ - collector].get_ops_vn(vn_fq_name = vn_fq_name) - res = self.ops_vnoutput.get_attr( - 'Agent' , direction) - except Exception as e: - self.logger.exception('Got exception as %s'%(e)) - finally: - return res - - def get_flow(self,collector,vn_fq_name,direction = 'egress'): - - res = None - direction = '%s_flow_count'%direction - try: - self.ops_vnoutput = self.ops_inspect[ - collector].get_ops_vn(vn_fq_name = vn_fq_name) - res = self.ops_vnoutput.get_attr( - 'Agent' , direction) - except Exception as e: - self.logger.exception('Got exception as %s'%(e)) - finally: - return res - - @retry_for_value(delay=4, tries=10) - def get_vn_stats(self,collector,vn_fq_name,other_vn ): - - res = None - try: - self.ops_vnoutput = self.ops_inspect[ - collector].get_ops_vn(vn_fq_name = vn_fq_name) - res = self.ops_vnoutput.get_attr( - 'Agent' , 'in_stats',match = ('other_vn',\ - other_vn)) - except Exception as e: - self.logger.exception('Got exception as %s'%(e)) - finally: - return res - - # virtual-machine uve functions -# -------------------------------------# - def get_vm_uve(self, collector, uuid): - '''Returns entire vm uve.Need this to verify vm uve does not return anything when vm is deleted''' - return self.ops_inspect[collector].get_ops_vm(vm=uuid) - - def verify_vm_not_in_opserver(self, uuid, compute, vn_fq_name): - '''Verify that vm not in opserver after the vm is deleted''' - -# for ip in self.inputs.collector_ips: -# output= self.get_vm_uve(ip,uuid) -# 
self.logger.info("vm uve after delete of vm %s is %s"%(uuid,output)) -# assert (not output) - assert self.verify_vm_list_not_in_vrouter_uve( - vrouter=compute, vm_uuid=uuid) - assert self.verify_vn_uve_for_vm_not_in_vn( - vn_fq_name=vn_fq_name, vm=uuid) - assert self.verify_vm_uve_not_in_opserver(vm=uuid) - - def get_ops_vm_uve_interface(self, collector, uuid): - '''Returns: [{u'virtual_network': u'default-domain:admin:vn1', u'ip_address': u'11.1.1.249', u'name': u'111e77ec-c392-4dbf-90bb-d1ab7e0bb476:14bc574b-56fe-4fcb-819b-5f038da34f1a'}] ''' - - self.ops_vm_output = self.ops_inspect[collector].get_ops_vm(vm=uuid) - if not self.ops_vm_output: - self.logger.warn("vm uve did not return anything") - return False - self.vm_intf_dct = self.ops_vm_output.get_attr( - 'Agent', 'interface_list') - return self.vm_intf_dct - - def get_ops_vm_uve_vm_host(self, collector, uuid): - '''Retruns vm uve view of vrouter ''' - - self.ops_vm_output = self.ops_inspect[collector].get_ops_vm(vm=uuid) - if not self.ops_vm_output: - self.logger.warn("vm uve did not return anything") - return False - self.uve_vm_host = self.ops_vm_output.get_attr('Agent', 'vrouter') - return self.uve_vm_host - - def verify_vm_uve_tiers(self, uuid=None): - '''Verify vm uve tiers as UveVirtualMachineConfig and UveVirtualMachineAgent ''' - - result = True - for ip in self.inputs.collector_ips: - self.logger.info("Verifying through opserver in %s" % (ip)) - self.ops_vm_output = self.ops_inspect[ip].get_ops_vm(vm=uuid) - key_list = self.ops_vm_output.keys() - # expect_lst=['UveVirtualMachineConfig','UveVirtualMachineAgent'] - expect_lst = ['UveVirtualMachineAgent'] - diff_key = set(expect_lst) ^ set(key_list) - for uve in expect_lst: - if uve not in key_list: - self.logger.error("%s uve not shown in vm uve %s" % - (uve, uuid)) - result = result and False - else: - self.logger.info("%s uve correctly shown in vm uve %s" % - (uve, uuid)) - result = result and True - return result - - @retry(delay=4, tries=10) - 
def verify_vm_link(self, vm): - '''Verifies that vm is listed in http://nodea18.englab.juniper.net:8081/analytics/virtual-machines when created''' - - # vn='default-domain:'+self.inputs.project_name+':'+vn - result = False - for ip in self.inputs.collector_ips: - self.logger.info( - "Verifying the %s virtual network link through opserver %s" % (vm, ip)) - self.links = self.ops_inspect[ip].get_hrefs_to_all_UVEs_of_a_given_UVE_type( - uveType='virtual-machines') - gen_list = [] - for elem in self.links: - name = elem.get_attr('Name') - if name: - if (name == vm): - self.logger.info("vm link and name as %s" % (elem)) - result = True - break - else: - result = False - else: - self.logger.warn("not links retuned") - return False - return result - - @retry(delay=4, tries=10) - def verify_vm_uve_not_in_opserver(self, vm): - '''Verify that vm uves deleted from opserver on vm delete''' - - result = True - try: - for ip in self.inputs.collector_ips: - self.logger.info( - "Verifying the %s virtual network link through opserver %s" % (vm, ip)) - links = self.ops_inspect[ip].get_hrefs_to_all_UVEs_of_a_given_UVE_type( - uveType='virtual-machines') - if links: - for elem in links: - name = elem.get_attr('Name') - if name: - if (name == vm): - self.logger.warn("vm link and name as %s" % - (elem)) - self.logger.warn("vm link still in opserver") - result = result and False - break - else: - result = result and True - else: - self.logger.info("no links retuned for %s" % (vm)) - result = result and True - if result: - self.logger.info("%s vm uve deleted from opserver" % (vm)) - result = result and True - except Exception as e: - self.logger.info("Got exception as %s" % (e)) - finally: - return result - - def get_intf_uve(self,intf): - try: - _intf = self.ops_inspect[self.inputs.collector_ips[0]].get_ops_vm_intf(intf) - return _intf.get_attr('Agent') - except Exception as e: - return None - - def get_vm_attr(self,intf,attr): - try: - ops_data = self.get_intf_uve(intf) - return 
ops_data[attr] - except Exception as e: - return None - -# BGP-ROUTER UEE -# -------------------# - def get_bgp_router_uve_count_xmpp_peer(self, collector): - '''Get count of xmpp peers from bgp-router uve - - ''' - self.bgp_uve_xmpp_count = [] - for bgp in self.bgp_hosts: - dct = {} - self.bgp_ops = self.ops_inspect[ - collector].get_ops_bgprouter(bgprouter=bgp) - dct[str(bgp)] = str( - self.bgp_ops.get_attr('Control', 'num_xmpp_peer')) - self.bgp_uve_xmpp_count.append(dct) - return self.bgp_uve_xmpp_count - - def get_bgp_router_uve_count_up_xmpp_peer(self, collector, bgp): - '''Get count of up xmpp peers from bgp-router uve - - ''' - self.bgp_ops = self.ops_inspect[ - collector].get_ops_bgprouter(bgprouter=bgp) - return str(self.bgp_ops.get_attr('Control', 'num_up_xmpp_peer')) - - def get_bgp_router_uve_count_bgp_peer(self, collector): - '''Get count of bgp peers from bgp-router uve - - ''' - self.bgp_uve_bgp_count = [] - for bgp in self.bgp_hosts: - dct = {} - self.bgp_ops = self.ops_inspect[ - collector].get_ops_bgprouter(bgprouter=bgp) - dct[str(bgp)] = str( - self.bgp_ops.get_attr('Control', 'num_bgp_peer')) - self.bgp_uve_bgp_count.append(dct) - return self.bgp_uve_bgp_count - - def get_bgp_router_uve_count_up_bgp_peer(self, collector, bgp): - '''Get count of up bgp peers from bgp-router uve - - ''' - self.bgp_ops = self.ops_inspect[ - collector].get_ops_bgprouter(bgprouter=bgp) - return str(self.bgp_ops.get_attr('Control', 'num_up_bgp_peer')) - - @retry(delay=4, tries=20) - def verify_bgp_router_uve_xmpp_and_bgp_count(self): - '''Verifies the xmpp and bgp peer count in bgp-router uve''' - - result = True - for ip in self.inputs.collector_ips: - self.logger.info("Verifying through opserver in %s" % (ip)) - count_agents_dct = self.get_bgp_router_uve_count_xmpp_peer(ip) - count_bgp_nodes_dct = self.get_bgp_router_uve_count_bgp_peer(ip) - for bgp_host in self.inputs.bgp_names: - self.logger.info("Verifying for %s bgp-router uve " % - (bgp_host)) - for elem in 
count_agents_dct: - if bgp_host in elem.keys(): - if (elem[bgp_host] == str(len(self.inputs.compute_ips))): - self.logger.info("xmpp peers = %s" % - (elem[bgp_host])) - result = result and True - else: - self.logger.warn("xmpp peers = %s" % - (elem[bgp_host])) - self.logger.warn("expected xmpp peers = %s " % - (len(self.inputs.compute_ips))) - result = result and False - break - for elem in count_bgp_nodes_dct: - expected_bgp_peers = str( - len(self.inputs.bgp_ips) + len(self.inputs.ext_routers) - 1) - if bgp_host in elem.keys(): - if (elem[bgp_host] == expected_bgp_peers): - self.logger.info("bgp peers = %s" % - (elem[bgp_host])) - result = result and True - else: - self.logger.warn("bgp peers = %s" % - (elem[bgp_host])) - self.logger.warn("expected bgp peers = %s " % - expected_bgp_peers) - result = result and False - break - return result - - @retry(delay=2, tries=14) - def verify_bgp_router_uve_up_xmpp_and_bgp_count(self): - '''Verifies the xmpp and bgp peer count in bgp-router uve''' - - result = True - for ip in self.inputs.collector_ips: - self.logger.info("Verifying through opserver in %s" % (ip)) - count_agents_dct = self.get_bgp_router_uve_count_xmpp_peer(ip) - count_bgp_nodes_dct = self.get_bgp_router_uve_count_bgp_peer(ip) - for bgp_host in self.inputs.bgp_names: - self.logger.info("Verifying for %s bgp-router uve " % - (bgp_host)) - for elem in count_agents_dct: - if bgp_host in elem.keys(): - if (elem[bgp_host] >= self.get_bgp_router_uve_count_up_xmpp_peer(ip, bgp_host)): - self.logger.info("xmpp peers = %s" % - (elem[bgp_host])) - result = result and True - else: - self.logger.warn("configured xmpp peers = %s" % - (elem[bgp_host])) - self.logger.warn("expected xmpp peers = %s " % - (len(self.inputs.compute_ips))) - result = result and False - break - expected_bgp_peers = str( - len(self.inputs.bgp_ips) + len(self.inputs.ext_routers) - 1) - for elem in count_bgp_nodes_dct: - if bgp_host in elem.keys(): - if (elem[bgp_host] >= 
self.get_bgp_router_uve_count_up_bgp_peer(ip, bgp_host)): - self.logger.info("bgp peers = %s" % - (elem[bgp_host])) - result = result and True - else: - self.logger.warn("configured bgp peers = %s" % - (elem[bgp_host])) - self.logger.warn("expected bgp peers = %s " % - expected_bgp_peers) - result = result and False - break - return result -# service instance uve functions - - def get_svc_instance(self, collector, project=None, instance=None): - '''get the svc insance uve our put''' - if not project: - project = self.inputs.stack_tenant - self.svc_obj = self.ops_inspect[ - collector].get_ops_svc_instance(svc_instance=instance) - return self.svc_obj.get_attr('Config') - - def get_svc_template(self, collector, left_vn=None, right_vn=None): - '''get the svc insance uve our put''' - self.svc_obj = self.ops_inspect[collector].get_ops_svc_template( - left_vn=left_vn, right_vn=right_vn) - return self.svc_obj.get_attr('Config') - - def get_service_chain_uve(self,collector): - sc_obj = self.ops_inspect[collector].get_ops_sc_uve() - return sc_obj.get_attr('Config') - - def get_specific_service_chain_uve(self,collector,left_vn, - right_vn, - services = [], - protocol = None, - direction = None, - src_port = None, - dst_port = None): - - sc_uve = self.get_service_chain_uve\ - (collector) - for elem in sc_uve: - if ((elem['value']['UveServiceChainData']['source_virtual_network']\ - == left_vn) and (elem['value']['UveServiceChainData']['destination_virtual_network']\ - == right_vn) and (set(elem['value']['UveServiceChainData']['services'])\ - == set(services))): - return elem - return None - - def get_service_chain_name(self,left_vn, - right_vn, - services = [], - protocol = None, - direction = None, - src_port = None, - dst_port = None): - svc_chain = None - svc_chain = self.get_specific_service_chain_uve(self.inputs.collector_ips[0], - left_vn, - right_vn, - services) - if svc_chain: - return svc_chain['name'] - else: - None - - - def verify_service_chain_uve(self,left_vn, - 
right_vn, - services = [], - protocol = None, - direction = None, - src_port = None, - dst_port = None): - if self.get_specific_service_chain_uve(self.inputs.collector_ips[0], - left_vn, - right_vn, - services): - return True - return False - - def verify_si_st_uve(self, instance=None, st_name=None, left_vn=None, right_vn=None): - - services_from_st_uve_lst = None - result = True - self.si_uve = self.get_svc_instance( - self.inputs.collector_ips[0], instance=instance) - if self.si_uve: - self.logger.info("Service instance uve shown as %s" % - (self.si_uve)) - result = result and True - if st_name in self.si_uve['st_name']: - result = result and True - else: - self.logger.warn( - 'template name not correctly shown in the si uve - should be %s' % (st_name)) - else: - self.logger.warn("Service instance uve not shown ") - result = result and False - # Verifying that internal routing instances, policy,connected_networks - # in vn uves - - self.st_uve = self.get_svc_template( - self.inputs.collector_ips[0], left_vn=left_vn, right_vn=right_vn) - if self.st_uve: - self.logger.info("Service template uve shown as %s" % - (self.st_uve)) - result = result and True - else: - self.logger.warn("Service template uve not shown ") - result = result and False - - if ((left_vn in self.st_uve['source_virtual_network']) and (right_vn in self.st_uve['destination_virtual_network'])): - self.logger.info( - "left and right vn correctly shown service template uve") - result = result and True - else: - self.logger.info( - "left and right vn NOT correctly shown service template uve") - result = result and False - - services_from_st_uve_lst = self.st_uve['services'] - if services_from_st_uve_lst: - for elem in services_from_st_uve_lst: - if (instance in elem): - self.logger.info( - "Correct services info shown in the st uve ") - result = result and True - else: - self.logger.warn( - "Correct services info Not shown in the st uve: %s " % (elem)) - result = result and True - return result - - def 
verify_si_uve_not_in_analytics(self, instance=None, st_name=None, left_vn=None, right_vn=None): - - try: - si_uve = self.get_svc_instance( - self.inputs.collector_ips[0], instance=instance) - if si_uve: - self.logger.info("service instance uve after deletion %s" % - (si_uve)) - return False - else: - self.logger.info("service instance uve deleted") - except Exception as e: - return True - - st_uve = None - st_uve = self.get_specific_service_chain_uve( - self.inputs.collector_ips[0], - left_vn=left_vn, - right_vn=right_vn, - services = [instance]) - if st_uve: - return False - else: - return True - - def verify_st_uve_not_in_analytics(self, instance=None, st_name=None, left_vn=None, right_vn=None): - - st_uve = None - try: - st_uve = self.get_specific_service_chain_uve( - self.inputs.collector_ips[0], - left_vn=left_vn, - right_vn=right_vn, - services = [instance]) - if st_uve: - return False - self.logger.info("Service chain NOT deleted from analytics...") - else: - return True - self.logger.info("Service chain deleted from analytics...") - except Exception as e: - self.logger.info("Service chain deleted from analytics...") - return True - -# bgp-peer uve functions - def get_bgp_peers(self, collector): - ''' - { - href: "http://nodea18:8081/analytics/uves/bgp-peer/default-domain:default-project:ip-fabric:__default__:nodea19: - default-domain:default-project:ip-fabric:__default__:nodea18?flat", - name: "default-domain:default-project:ip-fabric:__default__:nodea19:default-domain:default-project:ip-fabric:__default__:nodea18" - }, - ''' - - peer_touple = [] - try: - self.logger.info("Verifying through opserver %s" % (collector)) - self.links = self.ops_inspect[ - collector].get_hrefs_to_all_UVEs_of_a_given_UVE_type(uveType='bgp-peers') - for elem in self.links: - name = elem.get_attr('Name') - parsed_name = name.split(':') - bgp_node = parsed_name[4] - self.logger.info("bgp-node is %s" % (bgp_node)) - peer = parsed_name[-1] - self.logger.info("peer is %s" % (peer)) - 
touple = (bgp_node, peer) - peer_touple.append(touple) - except Exception as e: - print e - finally: - return peer_touple - - def get_bgp_peer_uve(self, collector, peering_toupe=None): - '''Return the bgp peer uve''' - res = None - try: - res = self.ops_inspect[collector].get_ops_bgp_peer(peering_toupe) - except Exception as e: - print e - finally: - return res - - def verify_bgp_peers_in_opserver(self, peering_toupe=None): - '''{ - href: http://10.204.216.25:8081/analytics/uves/bgp-peer/default-domain:default-project:ip-fabric:__default__:10.204.216.14:10.204.216.25?flat, - name: default-domain:default-project:ip-fabric:__default__:10.204.216.14:10.204.216.25 - }, - { - href: http://10.204.216.25:8081/analytics/uves/bgp-peer/default-domain:default-project:ip-fabric:__default__:10.204.216.25:10.204.216.14?flat, - name: default-domain:default-project:ip-fabric:__default__:10.204.216.25:10.204.216.14 - ''' - - result = True - try: - for ip in self.inputs.collector_ips: - self.logger.info("Verifying through opserver %s" % (ip)) - self.bgp_peers = self.get_bgp_peers(ip) - if (peering_toupe in self.bgp_peers): - self.logger.info(" peering uve could be found in opserver") - result = result and True - else: - self.logger.info( - "peering uve could not be found in opserver") - result = result and False - except Exception as e: - print e - finally: - return result - - def get_peer_stats_info_tx_proto_stats(self, collector, peer_toupe=None): - '''tx_proto_stats: { - notification: 0, - update: 33, - close: 0, - total: 2794, - open: 1, - keepalive: 2760 - ''' - stats = None - for i in range(20): - try: - self.logger.info( - "Trying to get the bgp stats from bgp peer uve %s" % (peer_toupe,)) - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - if self.peer_obj: - stats = self.peer_obj.get_attr( - 'Control', 'peer_stats_info') - except Exception as e: - print e - finally: - if stats: - return stats['tx_proto_stats'] - time.sleep(5) - return stats - - 
def get_peer_stats_info_tx_update_stats(self, collector, peer_toupe=None): - ''' - tx_update_stats: { - unreach: 13, - total: 33, - reach: 20 - ''' - stats = None - try: - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - stats = self.peer_obj.get_attr('Control', 'peer_stats_info') - except Exception as e: - print e - finally: - return stats['tx_update_stats'] - - def get_peer_stats_info_rx_proto_stats(self, collector, peer_toupe=None): - ''' - rx_proto_stats: { - notification: 0, - update: 33, - close: 0, - total: 2795, - open: 1, - keepalive: 2761 - }, - ''' - stats = None - try: - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - stats = self.peer_obj.get_attr('Control', 'peer_stats_info') - except Exception as e: - print e - finally: - return stats['rx_proto_stats'] - - def get_peer_stats_info_rx_update_stats(self, collector, peer_toupe=None): - ''' - rx_update_stats: { - unreach: 13, - total: 33, - reach: 20 - } - ''' - stats = None - try: - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - stats = self.peer_obj.get_attr('Control', 'peer_stats_info') - except Exception as e: - print e - finally: - return stats['rx_update_stats'] - - def get_peer_state_info(self, collector, peer_toupe=None): - ''' - state_info: { - last_state: "OpenConfirm", - state: "Established", - last_state_at: 1375774054038293 - }, - ''' - stats = None - try: - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - stats = self.peer_obj.get_attr('Control', 'state_info') - except Exception as e: - print e - finally: - return stats - - def get_peer_falp_info(self, collector, peer_toupe=None): - ''' - flap_info: { - flap_count: 1, - flap_time: 1375871293924163 - } - - - ''' - stats = None - try: - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - stats = self.peer_obj.get_attr('Control', 'flap_info') - except Exception as e: - print e - finally: - return stats 
- - def get_peer_families(self, collector, peer_toupe=None): - ''' - [ - "IPv4:Vpn" - ], - ''' - stats = None - try: - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - stats = self.peer_obj.get_attr('Control', 'families') - except Exception as e: - print e - finally: - return stats - - def get_peer_peer_type(self, collector, peer_toupe=None): - ''' - peer_type: "internal" - - ''' - stats = None - try: - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - stats = self.peer_obj.get_attr('Control', 'peer_type') - except Exception as e: - print e - finally: - return stats - - def get_peer_local_asn(self, collector, peer_toupe=None): - ''' - local_asn: 64512 - - ''' - stats = None - try: - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - stats = self.peer_obj.get_attr('Control', 'local_asn') - except Exception as e: - print e - finally: - return stats - - def get_peer_event_info(self, collector, peer_toupe=None): - ''' - event_info: { - last_event_at: 1375856854872047, - last_event: "fsm::EvBgpKeepalive" - }, - ''' - stats = None - try: - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - stats = self.peer_obj.get_attr('Control', 'event_info') - except Exception as e: - print e - finally: - return stats - - def get_peer_local_id(self, collector, peer_toupe=None): - ''' - local_id: 181196825 - - ''' - stats = None - try: - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - stats = self.peer_obj.get_attr('Control', 'local_id') - except Exception as e: - print e - finally: - return stats - - def get_peer_send_state(self, collector, peer_toupe=None): - ''' - send_state: "in sync" - - ''' - stats = None - try: - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - stats = self.peer_obj.get_attr('Control', 'send_state') - except Exception as e: - print e - finally: - return stats - - def get_peer_peer_id(self, 
collector, peer_toupe=None): - ''' - peer_id: 181196814 - - ''' - stats = None - try: - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - stats = self.peer_obj.get_attr('Control', 'peer_id') - except Exception as e: - print e - finally: - return stats - - def get_peer_peer_asn(self, collector, peer_toupe=None): - ''' - peer_asn: 64512 - - ''' - stats = None - try: - self.peer_obj = self.ops_inspect[ - collector].get_ops_bgp_peer(peer_toupe) - stats = self.peer_obj.get_attr('Control', 'peer_asn') - except Exception as e: - print e - finally: - return stats -# def get_bgp_peer_tx_proto_stats(self,bgp,peer): -# def get_bgp_peer_tx_proto_stats(self,bgp,peer): - -# xmpp-peer uve -# ---------------# - - def get_xmpp_peer_state_info(self, collector, peer_toupe=None): - ''' - state_info: { - last_state: "Active", - state: "Established", - last_state_at: 1375935156613195 - ''' - stats = None - try: - self.xmpp_peer_obj = self.ops_inspect[ - collector].get_ops_bgp_xmpp_peer(peer_toupe) - stats = self.xmpp_peer_obj.get_attr('Control', 'state_info') - except Exception as e: - print e - finally: - return stats - - def get_xmpp_peer_flap_info(self, collector, peer_toupe=None): - ''' - flap_info: { - flap_count: 1, - flap_time: 1375945111699895 - }, - ''' - stats = None - try: - self.xmpp_peer_obj = self.ops_inspect[ - collector].get_ops_bgp_xmpp_peer(peer_toupe) - stats = self.xmpp_peer_obj.get_attr('Control', 'flap_info') - except Exception as e: - print e - finally: - return stats - - def get_xmpp_peer_event_info(self, collector, peer_toupe=None): - ''' - flap_info: { - flap_count: 1, - flap_time: 1375945111699895 - }, - ''' - stats = None - try: - self.xmpp_peer_obj = self.ops_inspect[ - collector].get_ops_bgp_xmpp_peer(peer_toupe) - stats = self.xmpp_peer_obj.get_attr('Control', 'event_info') - except Exception as e: - print e - finally: - return stats - - # Collector uve verification - - def get_analytics_process_details(self, opserver, 
collector, process=None): - - res = None - try: - obj = self.ops_inspect[opserver].get_ops_collector( - collector=collector) - res = obj.get_attr('Node', 'process_info', - match=('process_name', process)) - except Exception as e: - self.logger.exception('Got exception as %s' % (e)) - finally: - return res - - def get_analytics_process_parameters(self, opserver, collector, process_parameters=None, process=None): - - info = self.get_analytics_process_details( - opserver, collector, process=process) - if info: - self.logger.info("process deatils : %s" % (info)) - return info[0][process_parameters] - else: - return None - - @retry(delay=3, tries=30) - def verify_collector_uve_module_state(self, opserver, collector, process, expected_process_state='RUNNING'): - '''Verify http://nodea18:8081/analytics/uves/collector/nodea29?flat''' - - result = True - try: - info = self.get_analytics_process_details( - opserver, collector, process=process) - if info: - if expected_process_state in info[0]['process_state']: - self.logger.info("%s process is %s" % - (process, expected_process_state)) - result = result and True - else: - self.logger.warn("%s process is NOT %s" % - (process, expected_process_state)) - result = result and False - else: - self.logger.warn("No output for %s" % (process)) - if 'RUNNING' in expected_process_state: - result = result and False - else: - result = result and True - - except Exception as e: - self.logger.info("Got exception as %s" % (e)) - finally: - return result - -# Config-node uve verification - - def get_cfgm_process_details(self, opserver, cfgm_name, process=None, instanceid='0'): - - res = None - - if ((process == 'contrail-discovery') or (process == 'contrail-api')): - process = '%s:%s' % (process, instanceid) - - try: - obj = self.ops_inspect[opserver].get_ops_config(config=cfgm_name) - res = obj.get_attr('Node', 'process_info', - match=('process_name', process)) - except Exception as e: - self.logger.exception('Got exception as %s' % (e)) - 
finally: - return res - - def get_cfgm_process_parameters(self, opserver, cfgm, process_parameters=None, process=None): - - info = self.get_cfgm_process_details(opserver, cfgm, process=process) - if info: - return info[0][process_parameters] - else: - return None - - @retry(delay=5, tries=15) - def verify_cfgm_uve_module_state(self, opserver, cfgm, process): - '''Verify http://nodea18:8081/analytics/uves/collector/nodea29?flat''' - - result = True - try: - info = self.get_cfgm_process_details( - opserver, cfgm, process=process) - if info: - if (info[0]['process_state'] == 'PROCESS_STATE_RUNNING'): - self.logger.info("%s is running" % (process)) - result = result and True - else: - self.logger.error("%s is NOT running" % (process)) - result = result and False - else: - self.logger.error("Not output for %s" % (process)) - result = result and False - - except Exception as e: - self.logger.info("Got exception as %s" % (e)) - result = result and False - finally: - return result - -# Sending query for FlowSreiesTable -# -------------------------------# - - def getstarttime(self, ip=None): - '''Getting start time from the system when the test is run''' - time = self.inputs.run_cmd_on_server(ip, 'date', - self.inputs.host_data[ - ip]['username'], - self.inputs.host_data[ip]['password']) - day, month, date, time, timezone, year = time.split() - time = time + '.' 
+ '0' - # formatting start_time as is needed for post_query - start_time = year + ' ' + month.upper() + ' ' + date + ' ' + time - return start_time - - def get_time_since_uptime(self, ip=None): - - uptime = self.inputs.run_cmd_on_server(ip, 'cat /proc/uptime', - self.inputs.host_data[ - ip]['username'], - self.inputs.host_data[ip]['password']) - utime = uptime.split() - utime = utime[0] - current_time = self.inputs.run_cmd_on_server(ip, 'date', - self.inputs.host_data[ - ip]['username'], - self.inputs.host_data[ip]['password']) - day, month, date, time, timezone, year = current_time.split() - month = months[month] - h, m, sec = time.split(":") - current_time_utc = datetime.datetime( - int(year), int(month), int(date), int(h), int(m), int(sec)) - s_time_utc = current_time_utc - \ - datetime.timedelta(seconds=float(utime)) - s_time_str = s_time_utc.strftime('%Y %m %d %H:%M:%S.0') - s_time_lst = s_time_str.split() - yr, mn, d, tm = s_time_lst - mnth = months_number_to_name[mn] - start_time = '%s %s %s %s' % (yr, mnth, d, tm) - return start_time - - @retry(delay=2, tries=50) - def verify_all_uves(self): - - ret = {} - self.uve_verification_flags = [] - ret = self.get_all_uves() - if ret: - result = self.dict_search_for_values(ret) - if 'False' in str(self.uve_verification_flags): - result = False - else: - result = True - return result - - def get_schema_from_table(self, lst): - - schema = None - for el in lst: - if 'schema' in el: - schema = el['schema'] - return schema - - def get_source_from_table(self, lst): - - source = None - for el in lst: - if 'Source' in el: - source = el['Source'] - return source - - def get_modules_from_table(self, lst): - - modules = None - for el in lst: - if 'ModuleId' in el: - modules = el['ModuleId'] - return modules - - def get_names_from_table(self, lst): - - names = None - for el in lst: - if 'name' in el: - names = el['name'] - return names - - def verify_message_table(self, start_time=None, end_time='now'): - - result = True - 
result1 = True - res2 = None - ret = None - objects = None - query_table_failed = [] - query_table_passed = [] - message_table = None - table_name = 'MessageTable' - source = None - if not start_time: - self.logger.warn("start_time must be passed...") - return - ret = self.get_all_tables(uve='tables') - tables = self.get_table_schema(ret) - for elem in tables: - for k, v in elem.items(): - if table_name in k: - schema = self.get_schema_from_table(v) - break - for elem in tables: - if 'MessageTable' in str(elem): - message_table = elem - break - if message_table: - mduleid = None - for k, v in message_table.items(): - for elem in v: - if 'Source' in elem.keys(): - source = elem['Source'] - if 'ModuleId' in elem.keys(): - moduleid = elem['ModuleId'] - - if source and moduleid: - for src in source: - if src in self.inputs.compute_names: - if 'contrail-vrouter-agent' in moduleid: - query = '(Source=%s AND ModuleId = contrail-vrouter-agent)' % ( - src) - res = self.ops_inspect[self.inputs.collector_ips[0]].post_query( - table_name, - start_time=start_time, end_time=end_time, select_fields=schema, where_clause=query, - sort=2, limit=5, sort_fields=["MessageTS"]) - for el in res: - if 'Source' not in str(el): - self.logger.warn( - "Logs from MessageTable not having source \n%" % (str(el))) - return False - - if src in self.inputs.collector_names: - if 'contrail-collector' in moduleid: - query = '(Source=%s AND ModuleId = contrail-collector)' % (src) - res = self.ops_inspect[self.inputs.collector_ips[0]].post_query( - table_name, - start_time=start_time, end_time=end_time, select_fields=schema, where_clause=query, - sort=2, limit=5, sort_fields=["MessageTS"]) - for el in res: - if 'Source' not in str(el): - self.logger.warn( - "Logs from MessageTable not having source \n%" % (str(el))) - return False - - if src in self.inputs.cfgm_names: - if 'contrail-api' in moduleid: - query = '(Source=%s AND ModuleId = contrail-api)' % (src) - res = 
self.ops_inspect[self.inputs.collector_ips[0]].post_query( - table_name, - start_time=start_time, end_time=end_time, select_fields=schema, where_clause=query, - sort=2, limit=5, sort_fields=["MessageTS"]) - for el in res: - if 'Source' not in str(el): - self.logger.warn( - "Logs from MessageTable not having source \n%" % (str(el))) - return False - return True - - def verify_object_tables(self, table_name=None, start_time=None, end_time='now', skip_tables=[]): - - result = True - result1 = True - res2 = None - ret = None - objects = None - query_table_failed = [] - query_table_passed = [] - if not start_time: - self.logger.warn("start_time must be passed...") - return - ret = self.get_all_tables(uve='tables') - tables = self.get_table_schema(ret) - - if table_name: - for elem in tables: - for k, v in elem.items(): - if table_name in k: - schema = self.get_schema_from_table(v) - break - #start_time = '2014 FEB 5 14:10:49.0' - if 'MessageTable' not in table_name: - objects = self.ops_inspect[self.inputs.collector_ips[0]].post_query( - table_name, - start_time=start_time, end_time=end_time, select_fields=['ObjectId']) - if not objects: - self.logger.warn( - "%s table object id could not be retrieved" % - (table_name)) - result = result and False - - else: - for obj in objects: - query = '(' + 'ObjectId=' + obj['ObjectId'] + ')' - try: - res2 = self.ops_inspect[self.inputs.collector_ips[0]].post_query( - table_name, - start_time=start_time, end_time=end_time, select_fields=schema, where_clause=query, - sort=2, limit=5, sort_fields=["MessageTS"]) - - if not res2: - result1 = result1 and False - self.logger.warn("query to table %s between %s and Now did not return any value with objectid %s" % ( - table_name, start_time, obj)) - else: - result1 = result1 and True - self.logger.info( - "%s table contains data with objectid %s" % (table_name, obj)) - except Exception as e: - self.logger.warn( - "Got exception as %s \n while querying %s table" % (e, table_name)) - else: - 
self.logger.info("Querying table %s" % (table_name)) - res2 = self.ops_inspect[self.inputs.collector_ips[0]].post_query( - table_name, - start_time=start_time, end_time=end_time, select_fields=schema, - sort=2, limit=5, sort_fields=["MessageTS"]) - if not res2: - result1 = result1 and False - self.logger.warn( - "query to table %s between %s and Now did not return any value" % - (table_name, start_time)) - else: - result1 = result1 and True - self.logger.info("%s table contains data \n%s" % - (table_name, res2)) - else: - for el1 in tables: - for k, v in el1.items(): - table_name = k.split('/')[-1] - if table_name not in skip_tables: - pass - continue - - if 'MessageTable' in table_name: - schema = self.get_schema_from_table(v) - self.logger.info("Querying table %s" % (table_name)) - res2 = self.ops_inspect[self.inputs.collector_ips[0]].post_query( - table_name, - start_time=start_time, end_time=end_time, select_fields=schema, - sort=2, limit=5, sort_fields=["MessageTS"]) - if not res2: - result1 = result1 and False - self.logger.warn( - "query to table %s between %s and Now did not return any value" % (table_name, start_time)) - query_table_failed.append(table_name) - else: - result1 = result1 and True - query_table_passed.append(table_name) - continue - - if 'MessageTable' not in table_name: - self.logger.info("Querying for object_id in table %s" % - (table_name)) - objects = self.ops_inspect[self.inputs.collector_ips[0]].post_query( - table_name, - start_time=start_time, end_time=end_time, select_fields=['ObjectId']) - if not objects: - self.logger.warn( - "%s table object id could not be retrieved" % (table_name)) - result = result and False - if table_name not in query_table_failed: - query_table_failed.append(table_name) - continue - else: - schema = self.get_schema_from_table(v) - - for obj in objects: - query = '(' + 'ObjectId=' + obj['ObjectId'] + ')' - try: - self.logger.info( - "Querying table %s with objectid as %s\n" % (table_name, obj)) - res2 = 
self.ops_inspect[self.inputs.collector_ips[0]].post_query( - table_name, - start_time=start_time, end_time=end_time, select_fields=schema, where_clause=query, - sort=2, limit=5, sort_fields=["MessageTS"]) - if not res2: - result1 = result1 and False - self.logger.warn("query to table %s between %s and Now did not return any value with objectid %s" % ( - table_name, start_time, obj)) - if table_name not in query_table_failed: - query_table_failed.append(table_name) - else: - result1 = result1 and True - self.logger.info( - "%s table contains data with objectid %s\n" % (table_name, obj)) - if table_name not in query_table_passed: - query_table_passed.append(table_name) - except Exception as e: - self.logger.warn( - "Got exception as %s \n while querying %s table" % (e, table_name)) - - q_failed = query_table_failed[:] - for item in q_failed: - if item in query_table_passed: - query_table_failed.remove(item) - - if query_table_failed: - result = False - else: - result = True - - self.logger.info("Query failed for the follwoing tables \n%s" % - (query_table_failed)) - self.logger.info("Query passed for the follwoing tables \n%s" % - (query_table_passed)) - return result - - def verify_stats_tables(self, table_name=None, start_time=None, end_time='now', skip_tables=[]): - - result = True - result1 = True - res2 = None - ret = None - objects = None - query_table_failed = [] - query_table_passed = [] - if not start_time: - self.logger.warn("start_time must be passed...") - return - ret = self.get_all_tables(uve='tables') - tables = self.get_table_schema(ret) - - if table_name: - for elem in tables: - for k, v in elem.items(): - if table_name in k: - schema = self.get_schema_from_table(v) - schema.remove('T=') - names = self.get_names_from_table(v) - break - #start_time = '2014 FEB 5 14:10:49.0' - for name in names: - query = '(name = %s)' % name - objects = self.ops_inspect[self.inputs.collector_ips[0]].post_query( - table_name, - start_time=start_time, end_time=end_time, 
select_fields=schema, where_clause=query, - limit=1500000) - if not objects: - self.logger.warn( - "%s table could not be retrieved with name %s" % - (table_name, name)) - result = result and False - else: - self.logger.info( - "%s table could be retrieved with name %s" % - (table_name, name)) - result = result and True - - else: - for el1 in tables: - for k, v in el1.items(): - table_name = k.split('/')[-1] - if 'StatTable' not in table_name: - continue - if table_name not in skip_tables: - pass - continue - else: - schema = self.get_schema_from_table(v) - schema.remove('T=') - names = self.get_names_from_table(v) - - for name in names: - query = '(name = %s)' % name - try: - self.logger.info( - "Querying table %s with name as %s\n" % (table_name, name)) - res2 = self.ops_inspect[self.inputs.collector_ips[0]].post_query( - table_name, - start_time=start_time, end_time=end_time, select_fields=schema, where_clause=query, - limit=1500000) - if not res2: - result1 = result1 and False - self.logger.warn("query to table %s between %s and Now did not return any value with name %s" % ( - table_name, start_time, name)) - if table_name not in query_table_failed: - query_table_failed.append(table_name) - else: - result1 = result1 and True - self.logger.info( - "%s table contains data with name %s\n" % (table_name, name)) - if table_name not in query_table_passed: - query_table_passed.append(table_name) - except Exception as e: - self.logger.warn( - "Got exception as %s \n while querying %s table" % (e, table_name)) - - q_failed = query_table_failed[:] - for item in q_failed: - if item in query_table_passed: - query_table_failed.remove(item) - - if query_table_failed: - result = False - else: - result = True - - self.logger.info("Query failed for the follwoing tables \n%s" % - (query_table_failed)) - self.logger.info("Query passed for the follwoing tables \n%s" % - (query_table_passed)) - return result - - def start_query_threads(self, thread_objects=[]): - for thread in 
thread_objects: - thread.start() - time.sleep(0.5) - - def join_threads(self, thread_objects=[]): - for thread in thread_objects: - thread.join(300) - - def get_value_from_query_threads(self): - while not self.que.empty(): - self.logger.info("******** Verifying resutlts *************") - try: - assert self.que.get() - except Exception as e: - print e - - def build_parallel_query_to_object_tables(self, table_name=None, start_time=None, end_time='now', skip_tables=[]): - - threads = [] - self.que = Queue.Queue() - if not start_time: - self.logger.warn("start_time must be passed...") - return - ret = self.get_all_tables(uve='tables') - tables = self.get_table_schema(ret) - try: - for el1 in tables: - objects = None - for k, v in el1.items(): - table_name = k.split('/')[-1] - print 'Table name %s' % table_name - if table_name in skip_tables: - pass - continue - - if 'MessageTable' not in table_name: - self.logger.info("Querying for object_id in table %s" % - (table_name)) - objects = self.ops_inspect[self.inputs.collector_ips[0]].post_query( - table_name, - start_time=start_time, end_time=end_time, select_fields=['ObjectId']) - else: - continue - - if not objects: - self.logger.warn( - "%s table object id could not be retrieved" % (table_name)) - result = result and False - else: - schema = self.get_schema_from_table(v) - - for obj in objects: - query = '(' + 'ObjectId=' + obj['ObjectId'] + ')' - self.logger.info( - "Querying table %s with objectid as %s\n" % (table_name, obj)) - foo = [0, 1] - num = random.choice(foo) - - t = threading.Thread( - target=lambda q, table, start_time, end_time, select_fields, where_clause, - sort_fields, sort, limit: - q.put(self.ops_inspect[self.inputs.collector_ips[num]].post_query( - table, start_time, end_time, select_fields, - where_clause, sort_fields, sort, limit)), - args=( - self.que, table_name, start_time, - end_time, schema, query, ["MessageTS"], 2, 5)) - threads.append(t) - - except Exception as e: - print e - finally: - 
return threads - - def get_table_schema(self, d): - - tables_lst = [] - for k, v in d.items(): - src_key = None - mod_key = None - schema_key = None - name_key = None - columns = None - table_dct = {} - table_schema_dct = {} - table_src_dct = {} - table_mod_dct = {} - table_name_dct = {} - column_names = [] - schema_key = '%s/schema' % k - columns = d[k][schema_key]['columns'] - for elem in columns: - column_names.append(elem['name']) - table_schema_dct.update({'schema': column_names}) - if not 'Flow' in k: - column_value_key = '%s/column-values' % k - else: - table_dct.update({k: [table_schema_dct]}) - tables_lst.append(table_dct) - continue - - if column_value_key: - try: - for elem in d[k][column_value_key].keys(): - if 'Source' in elem: - src_key = '%s/Source' % column_value_key - if 'ModuleId' in elem: - mod_key = '%s/ModuleId' % column_value_key - if 'name' in elem: - name_key = '%s/name' % column_value_key - except Exception as e: - self.logger.warn("Got exception as %s " % (e)) - - if src_key: - try: - table_src_dct.update( - {'Source': d[k][column_value_key][src_key]}) - except Exception as e: - self.logger.warn("Got exception as %s " % (e)) - if mod_key: - try: - table_mod_dct.update( - {'ModuleId': d[k][column_value_key][mod_key]}) - except Exception as e: - self.logger.warn("Got exception as %s " % (e)) - - if name_key: - try: - table_name_dct.update( - {'name': d[k][column_value_key][name_key]}) - except Exception as e: - self.logger.warn("Got exception as %s " % (e)) - table_dct.update( - {k: [table_schema_dct, table_src_dct, table_mod_dct, table_name_dct]}) - tables_lst.append(table_dct) - - return tables_lst - - def get_table_objects(self, d, table): - pass - - def get_table_module_ids(self, d, table): - pass - - def dict_search_for_values(self, d, key_list=uve_list, value_dct=uve_dict): - - result = True - if isinstance(d, dict): - for k, v in d.items(): - for uve in key_list: - if uve in k: - self.search_key_in_uve(uve, k, v, value_dct) - - if (v 
or isinstance(v, int) or isinstance(v, float)): - result = self.dict_search_for_values(v) - else: - pass - - elif isinstance(d, list): - for item in d: - result = self.dict_search_for_values(item) - else: - return result - - def search_key_in_uve(self, uve, k, dct, v_dct): - - if not dct: - self.uve_verification_flags.append('False') - self.logger.warn("Empty dict for %s uve" % (k)) - - self.logger.info("Verifying for %s uve" % (uve)) - for elem in v_dct[uve]: - if elem not in str(dct): - self.logger.warn("%s not in %s uve" % (elem, k)) - self.uve_verification_flags.append('False') - else: - pass - #self.logger.info("%s is in %s uve"%(elem,k)) - - def get_all_uves(self, uve=None): - ret = {} - try: - if not uve: - links = self.ops_inspect[self.inputs.collector_ips[0] - ].get_hrefs_to_all_UVEs_of_a_given_UVE_type(uveType=uve) - else: - links = self.ops_inspect[self.inputs.collector_ips[0] - ].get_hrefs_to_all_UVEs_of_a_given_UVE_type(uveType=uve) - if links: - ret = self.search_links(links) - except Exception as e: - self.uve_verification_flags.append('False') - print e - finally: - return ret - - def get_all_tables(self, uve='tables'): - ret = {} - try: - if not uve: - links = self.ops_inspect[self.inputs.collector_ips[0] - ].get_hrefs_to_all_tables(uveType=uve) - else: - links = self.ops_inspect[self.inputs.collector_ips[0] - ].get_hrefs_to_all_tables(uveType=uve) - if links: - ret = self.search_links(links) - except Exception as e: - self.uve_verification_flags.append('False') - print e - finally: - return ret - - def search_links(self, link, selected_uve=''): -# - result = True - links = self.parse_links(link) - dct = {} - for ln in links: - try: - response = urllib2.urlopen(str(ln)) - data = json.load(response) - if selected_uve: - if selected_uve in ln: - return data - dct.update({ln: self.search_links(data)}) - except Exception as e: - self.uve_verification_flags.append('False') - print 'not an url %s' % ln - if dct: - return dct - else: - return link - - def 
parse_links(self, links=None): - - try: - if isinstance(links, dict): - if 'href' in links: - yield links['href'] - if isinstance(links, list): - for elem in links: - for item in self.parse_links(elem): - yield item - if isinstance(links, str): - if 'http://' in links: - yield links - - except Exception as e: - print e - - def provision_static_route( - self, prefix='111.1.0.0/16', virtual_machine_id='', - tenant_name=None, api_server_ip='127.0.0.1', - api_server_port='8082', oper='add', - virtual_machine_interface_ip='11.1.1.252', route_table_name='my_route_table', - user='admin', password='contrail123'): - - if not tenant_name: - tenant_name = self.inputs.stack_tenant - cmd = "python /opt/contrail/utils/provision_static_route.py --prefix %s \ - --virtual_machine_id %s \ - --tenant_name %s \ - --api_server_ip %s \ - --api_server_port %s\ - --oper %s \ - --virtual_machine_interface_ip %s \ - --user %s\ - --password %s\ - --route_table_name %s" % (prefix, virtual_machine_id, tenant_name, api_server_ip, api_server_port, oper, - virtual_machine_interface_ip, user, password, route_table_name) - args = shlex.split(cmd) - process = Popen(args, stdout=PIPE) - stdout, stderr = process.communicate() - if stderr: - self.logger.warn("Route could not be created , err : \n %s" % - (stderr)) - else: - self.logger.info("%s" % (stdout)) - - def start_traffic(self, vm, src_min_ip='', src_mx_ip='', dest_ip='', dest_min_port='', dest_max_port=''): - - self.logger.info("Sending traffic...") - try: - cmd = 'sudo /home/ubuntu/pktgen_new.sh %s %s %s %s %s &' % (src_min_ip, - src_mx_ip, dest_ip, dest_min_port, dest_max_port) - vm.run_cmd_on_vm(cmds=[cmd]) - except Exception as e: - self.logger.exception("Got exception at start_traffic as %s" % (e)) - - def stop_traffic(self, vm): - self.logger.info("Stopping traffic...") - try: - cmd = 'killall ~/pktgen_new.sh' - vm.run_cmd_on_vm([cmd]) - except Exception as e: - self.logger.exception("Got exception at stop_traffic as %s" % (e)) - - def 
build_query(self, src_vn, dst_vn): - - self.query = '(' + 'sourcevn=' + src_vn + \ - ') AND (destvn=' + dst_vn + ')' - - def get_ip_list_from_prefix(self, prefix): - - ip_list = [] - ip = IPNetwork(prefix) - ip_netowrk = str(ip.network) - ip_broadcast = str(ip.broadcast) - ip_lst = list(ip) - for ip_addr in ip_lst: - if ((str(ip_addr) in ip_netowrk) or (str(ip_addr) in ip_broadcast)): - continue - ip_list.append(str(ip_addr)) - return ip_list - - def get_min_max_ip_from_prefix(self, prefix): - - ip_list = self.get_ip_list_from_prefix(prefix) - min_ip = ip_list[0] - max_ip = ip_list[-1] - return [min_ip, max_ip] - - def build_flow_query(self, src_vn, dst_vn): - - query = '(' + 'sourcevn=' + src_vn + ') AND (destvn=' + dst_vn + ')' - return query - - def run_flow_query(self, src_vn, dst_vn): - - result = True - - query = self.build_flow_query(src_vn, dst_vn) - for ip in self.inputs.collector_ips: - try: - self.logger.info('setup_time= %s' % (self.start_time)) - # Quering flow sreies table - self.logger.info( - "Verifying flowSeriesTable through opserver %s" % (ip)) - res1 = self.ops_inspect[ip].post_query( - 'FlowSeriesTable', start_time=self.start_time, - end_time='now', - select_fields=['sourcevn', \ - 'sourceip', 'destvn', \ - 'destip', 'sum(packets)', \ - 'sport', 'dport', 'T=1'], - where_clause=query, sort=2, - limit=5, sort_fields=['sum(packets)']) - assert res1 - self.logger.info("Top 5 flows %s" % (res1)) - except Exception as e: - self.logger.exception("Got exception as %s" % (e)) - result = result and False - return result - - @retry(delay=5, tries=4) - def verify_collector_connection_introspect(self,ip,port): - conn=None - ops_inspect= VerificationOpsSrvIntrospect(ip,port) - conn=ops_inspect.get_collector_connectivity() - try: - if (conn['status'] =='Established'): - self.logger.info("ip %s port %s connected to collector %s "%(ip,port,conn['ip'])) - return True - else: - self.logger.info("ip %s NOT connected to collector"%(ip)) - return False - except 
Exception as e: - return False - -#Common functions - def verify_process_status(self,obj,module,state = 'Functional'): - obj1 = None - try: - obj1 = obj.get_attr('Node','process_status' - ,match = ('module_id',module)) - - if (obj1 and isinstance(obj1,list)): - for elem in obj1: - if (elem['state'] == state): - return True - else: - return False - elif (obj1 and isinstance(obj1,dict)): - if (obj1['state'] == state): - return True - else: - return False - else: - self.logger.error ("No object found for module %s"%(module)) - return False - except Exception as e: - self.logger.exception("Got exception as %s"%(e)) - return False - - def verify_connection_infos(self,obj,module,server_addrs, - status='Up', - t_ype=None, - name=None, - description=None, - node = None): - result = True - try: - obj1 = obj.get_attr('Node','process_status' - ,match = ('module_id',module)) - if (obj1 and isinstance(obj1,list)): - for elem in obj1: - for el in elem['connection_infos']: - #if ((set(el['server_addrs']) == set(server_addrs)) \ - if (((server_addrs in el['server_addrs']) or \ - (server_addrs == el['server_addrs']))\ - and (el['status'] == status)): - self.logger.info("%s:%s module connection to \ - %s servers UP"%(node,module,str(server_addrs))) - return True - else: - continue - self.logger.error("%s:%s module connection to \ - %s servers NOT UP"%(node,module,str(server_addrs))) - return False - - elif (obj1 and isinstance(obj1,dict)): - for el in obj1['connection_infos']: - if ((set(el['server_addrs']) == set(server_addrs)) \ - and (el['status'] == status)): - self.logger.info("%s module connection to %s \ - servers UP"%(module,str(server_addrs))) - return True - else: - self.logger.info("%s module connection to %s \ - servers NOT UP"%(module,str(server_addrs))) - return False - except Exception as e: - self.logger.exception("Got exception as %s"%(e)) - - def verify_process_and_connection_infos_agent(self): - - port_dict = {'xmpp':'5269', - 'dns' :'53', - 'collector':'8086', - 
'disco':'5998' - } - server_list = [] - for vrouter in self.inputs.compute_names: - ops_inspect = self.ops_inspect[self.inputs.\ - collector_ips[0]].get_ops_vrouter(vrouter) - assert self.verify_process_status(ops_inspect,\ - 'contrail-vrouter-agent') - for ip in self.inputs.bgp_control_ips: - server = "%s:%s"%(ip,port_dict['xmpp']) - assert self.verify_connection_infos(ops_inspect,\ - 'contrail-vrouter-agent',\ - [server],node = vrouter) - for ip in self.inputs.bgp_control_ips: - server = "%s:%s"%(ip,port_dict['dns']) - assert self.verify_connection_infos(ops_inspect,\ - 'contrail-vrouter-agent',\ - [server],node = vrouter) - result = False - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%(ip,port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-vrouter-agent',\ - [server],node = vrouter) - assert result - - - def verify_process_and_connection_infos_config(self): - - port_dict = {'zookeeper':'2181', - 'rmq' :'5672', - 'collector':'8086', - 'disco':'5998', - 'cassandra':'9160', - 'api':'8082', - 'ifmap':'8443' - } - module_connection_dict = {'DeviceManager':['zookeeper',\ - 'rmq',\ - 'collector',\ - 'disco',\ - 'cassandra',\ - 'api'],\ - - 'contrail-schema':['zookeeper',\ - 'collector',\ - 'disco',\ - 'cassandra',\ - 'api'],\ - 'contrail-svc-monitor':['zookeeper',\ - 'collector',\ - 'disco',\ - 'cassandra',\ - 'api'],\ - 'contrail-api':['zookeeper',\ - 'collector',\ - 'disco',\ - 'cassandra',\ - 'api',\ - 'ifmap',\ - 'rmq'\ - ] - } - result1 = False - for cfgm in self.inputs.cfgm_names: - result1 = False - ops_inspect = self.ops_inspect[self.inputs.\ - collector_ips[0]].get_ops_config(cfgm) - for k,v in module_connection_dict.items(): - result1 = result1 or self.verify_process_status(ops_inspect,\ - k) - assert result1 - for cfgm in self.inputs.cfgm_names: - ops_inspect = self.ops_inspect[self.inputs.\ - collector_ips[0]].get_ops_config(cfgm) - - result = False - for ip in 
self.inputs.collector_control_ips: - server = "%s:%s"%(ip,port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-api',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['zookeeper']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-api',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['disco']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-api',\ - server,node = cfgm) - assert result - # result = False - # for ip in self.inputs.cfgm_ips: - # server = "%s:%s"%(ip,port_dict['api']) - # result = result or self.verify_connection_infos(ops_inspect,\ - # 'contrail-api',\ - # [server]) - # assert result - result = False - for ip in self.inputs.database_control_ips: - server = "%s:%s"%(ip,port_dict['cassandra']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-api',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['rmq']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-api',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['ifmap']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-api',\ - server,node = cfgm) - assert result - - for cfgm in self.inputs.cfgm_names: - result1 = False - try: - ops_inspect = self.ops_inspect[self.inputs.\ - collector_ips[0]].get_ops_config(cfgm) - result1 = result1 or self.verify_process_status(ops_inspect,\ - 'DeviceManager') - if not result1: - raise Exception("No DeviceManager found for node %s"%(cfgm)) - except Exception as e: - continue - - result = False - for ip in self.inputs.collector_control_ips: - server = 
"%s:%s"%(ip,port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'DeviceManager',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['zookeeper']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'DeviceManager',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['disco']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'DeviceManager',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['api']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'DeviceManager',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.database_control_ips: - server = "%s:%s"%(ip,port_dict['cassandra']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'DeviceManager',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['rmq']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'DeviceManager',\ - server,node = cfgm) - assert result - - for cfgm in self.inputs.cfgm_names: - result1 = False - try: - ops_inspect = self.ops_inspect[self.inputs.\ - collector_ips[0]].get_ops_config(cfgm) - result1 = result1 or self.verify_process_status(ops_inspect,\ - 'contrail-schema') - if not result1: - raise Exception("No contrail-schema found for node %s"%(cfgm)) - except Exception as e: - continue - - result = False - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%(ip,port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-schema',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['zookeeper']) - result = 
result or self.verify_connection_infos(ops_inspect,\ - 'contrail-schema',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['disco']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-schema',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['api']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-schema',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.database_control_ips: - server = "%s:%s"%(ip,port_dict['cassandra']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-schema',\ - server,node = cfgm) - assert result - - for cfgm in self.inputs.cfgm_names: - result1 = False - try: - ops_inspect = self.ops_inspect[self.inputs.\ - collector_ips[0]].get_ops_config(cfgm) - result1 = result1 or self.verify_process_status(ops_inspect,\ - 'contrail-svc-monitor') - if not result1: - raise Exception("No contrail-svc-monitor found for node %s"%(cfgm)) - except Exception as e: - continue - - result = False - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%(ip,port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-svc-monitor',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['zookeeper']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-svc-monitor',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['disco']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-svc-monitor',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['api']) - result = result or 
self.verify_connection_infos(ops_inspect,\ - 'contrail-svc-monitor',\ - server,node = cfgm) - assert result - result = False - for ip in self.inputs.database_control_ips: - server = "%s:%s"%(ip,port_dict['cassandra']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-svc-monitor',\ - server,node = cfgm) - assert result - - def verify_process_and_connection_infos_control_node(self): - - port_dict = {'ifmap':'8443', - 'collector':'8086', - 'disco':'5998' - } - server_list = [] - for bgp in self.inputs.bgp_names: - ops_inspect = self.ops_inspect[self.inputs.\ - collector_ips[0]].get_ops_bgprouter(bgp) - assert self.verify_process_status(ops_inspect,\ - 'contrail-control') - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['ifmap']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-control',\ - [server],node = bgp) - assert result - result = False - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['disco']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-control',\ - [server],node = bgp) - assert result - result = False - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%(ip,port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-control',\ - [server],node = bgp) - assert result - - def verify_process_and_connection_infos_analytics_node(self): - - port_dict = { - 'collector':'8086', - 'disco':'5998', - 'cassandra':'9160', - } - module_connection_dict = {'contrail-collector':[ - 'collector',\ - 'disco',\ - 'cassandra'\ - ],\ - - 'contrail-analytics-api':[\ - 'collector',\ - 'disco',\ - ],\ - 'contrail-query-engine':[\ - 'collector',\ - 'cassandra',\ - ]\ - - } - for collector in self.inputs.collector_names: - result1 = True - ops_inspect = self.ops_inspect[self.inputs.\ - collector_ips[0]].get_ops_collector(collector) - for k,v in module_connection_dict.items(): - 
result1 = result1 and self.verify_process_status(ops_inspect,\ - k) - assert result1 - for collector in self.inputs.collector_names: - ops_inspect = self.ops_inspect[self.inputs.\ - collector_ips[0]].get_ops_collector(collector) - - result = False - try: - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%(ip,port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-collector',\ - [server],node = collector) - assert result - except Exception as e: - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%('127.0.0.1',port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-collector',\ - [server],node = collector) - assert result - - result = False - try: - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%(ip,port_dict['redis']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-collector',\ - [server],node = collector) - assert result - except Exception as e: - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%('127.0.0.1',port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-collector',\ - [server],node = collector) - assert result - result = False - try: - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['disco']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-collector',\ - [server],node = collector) - assert result - except Exception as e: - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%('127.0.0.1',port_dict['disco']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-collector',\ - [server],node = collector) - assert result - - result = False - try: - for ip in self.inputs.database_control_ips: - server = "%s:%s"%(ip,port_dict['cassandra']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-collector',\ - [server],node = collector) - assert 
result - except Exception as e: - for ip in self.inputs.database_control_ips: - server = "%s:%s"%('127.0.0.1',port_dict['cassandra']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-collector',\ - [server],node = collector) - assert result - - - for collector in self.inputs.collector_names: - ops_inspect = self.ops_inspect[self.inputs.\ - collector_ips[0]].get_ops_collector(collector) - - result = False - try: - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%(ip,port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-analytics-api',\ - [server],node = collector) - assert result - except Exception as e: - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%('127.0.0.1',port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-analytics-api',\ - [server],node = collector) - assert result - - result = False - #To do : Verify Redis connection status once https://bugs.launchpad.net/juniperopenstack/+bug/1459973 - #fixed - # try: - # for ip in self.inputs.collector_control_ips: - # server = "%s:%s"%(ip,port_dict['redis']) - # result = result or self.verify_connection_infos(ops_inspect,\ - # 'contrail-analytics-api',\ - # [server],node = collector) - # assert result - # except Exception as e: - # for ip in self.inputs.collector_control_ips: - # server = "%s:%s"%('127.0.0.1',port_dict['redis']) - # result = result or self.verify_connection_infos(ops_inspect,\ - # 'contrail-analytics-api',\ - # [server],node = collector) - # assert result - - result = False - try: - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['disco']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-analytics-api',\ - [server],node = collector) - assert result - except Exception as e: - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%('127.0.0.1',port_dict['disco']) - result = result or 
self.verify_connection_infos(ops_inspect,\ - 'contrail-analytics-api',\ - [server],node = collector) - assert result - - for collector in self.inputs.collector_names: - ops_inspect = self.ops_inspect[self.inputs.\ - collector_ips[0]].get_ops_collector(collector) - - result = False - try: - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%(ip,port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-analytics-api',\ - [server],node = collector) - assert result - except Exception as e: - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%('127.0.0.1',port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-analytics-api',\ - [server],node = collector) - assert result - - result = False - #To do : Verify Redis connection status once https://bugs.launchpad.net/juniperopenstack/+bug/1459973 - #fixed - #try: - # for ip in self.inputs.collector_control_ips: - # server = "%s:%s"%(ip,port_dict['redis']) - # result = result or self.verify_connection_infos(ops_inspect,\ - # 'contrail-analytics-api',\ - # [server],node = collector) - # assert result - #except Exception as e: - # for ip in self.inputs.collector_control_ips: - # server = "%s:%s"%('127.0.0.1',port_dict['redis']) - # result = result or self.verify_connection_infos(ops_inspect,\ - # 'contrail-analytics-api',\ - # [server],node = collector) - # assert result - - result = False - try: - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%(ip,port_dict['disco']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-analytics-api',\ - [server],node = collector) - assert result - except Exception as e: - for ip in self.inputs.cfgm_control_ips: - server = "%s:%s"%('127.0.0.1',port_dict['disco']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-analytics-api',\ - [server],node = collector) - assert result - - for collector in self.inputs.collector_names: - 
ops_inspect = self.ops_inspect[self.inputs.\ - collector_ips[0]].get_ops_collector(collector) - - result = False - try: - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%(ip,port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-query-engine',\ - [server],node = collector) - assert result - except Exception as e: - for ip in self.inputs.collector_control_ips: - server = "%s:%s"%('127.0.0.1',port_dict['collector']) - result = result or self.verify_connection_infos(ops_inspect,\ - 'contrail-query-engine',\ - [server],node = collector) - assert result - result = False - #To do : Verify Redis connection status once https://bugs.launchpad.net/juniperopenstack/+bug/1459973 - #fixed - #try: - # for ip in self.inputs.collector_control_ips: - # server = "%s:%s"%(ip,port_dict['redis']) - # result = result or self.verify_connection_infos(ops_inspect,\ - # 'contrail-query-engine',\ - # [server],node = collector) - # assert result - #except Exception as e: - # for ip in self.inputs.collector_control_ips: - # server = "%s:%s"%('127.0.0.1',port_dict['redis']) - # result = result or self.verify_connection_infos(ops_inspect,\ - # 'contrail-query-engine',\ - # [server],node = collector) - # assert result - # - #result = False - #try: - # for ip in self.inputs.cfgm_control_ips: - # server = "%s:%s"%(ip,port_dict['redis']) - # result = result or self.verify_connection_infos(ops_inspect,\ - # 'contrail-query-engine',\ - # [server],node = collector) - # assert result - #except Exception as e: - # for ip in self.inputs.cfgm_control_ips: - # server = "%s:%s"%('127.0.0.1',port_dict['redis']) - # result = result or self.verify_connection_infos(ops_inspect,\ - # 'contrail-query-engine',\ - # [server],node = collector) - # assert result - -#Database relaed functions - def db_purge(self,purge_input): - resp = None - try: - resp = self.ops_inspect[self.inputs.collector_ips[0]].post_db_purge(purge_input) - except Exception as e: - 
self.logger.error("Got exception as : %s"%(e)) - finally: - return resp - - def get_purge_id(self,purge_input): - try: - resp = self.db_purge(purge_input) - return resp[0]['purge_id'] - except Exception as e: - return None - - def get_purge_satus(self,resp): - try: - resp = self.db_purge(purge_input) - return resp[0]['status'] - except Exception as e: - return None - - @retry(delay=3, tries=20) - def verify_database_process_running(self,process): - self.logger.info('Verifying if db node_mgr running...') - result = True - try: - for collector in self.inputs.collector_ips: - for db in self.inputs.database_names: - self.logger.info("Verifying through collector %s for db node %s"%(collector,db)) - dct = self.ops_inspect[collector].get_ops_db(db) - uve = dct.get_attr('Node','process_info',\ - match = ('process_name', process)) - if (uve[0]['process_state'] == "PROCESS_STATE_RUNNING"): - result = result and True - else: - result = result and False - except Exception as e: - result = result and False - finally: - return result - - @retry(delay=3, tries=20) - def verify_database_process_running_status(self,process): - self.logger.info('Verifying if db node_mgr is functional...') - result = True - try: - for collector in self.inputs.collector_ips: - for db in self.inputs.database_names: - self.logger.info("Verifying through collector %s for db node %s"%(collector,db)) - dct = self.ops_inspect[collector].get_ops_db(db) - uve = dct.get_attr('Node','process_status',\ - match = ('module_id', process)) - if (uve[0]['state'] == "Functional"): - result = result and True - else: - result = result and False - except Exception as e: - result = result and False - finally: - return result - - @retry_for_value(delay=3, tries=20) - def get_purge_info_in_database_uve(self,collector,db): - dct = self.ops_inspect[collector].get_ops_db(db) - try: - uve = dct.get_attr('DatabasePurge','stats') - return uve - except Exception as e: - return None - - def 
get_matched_purge_info(self,collector,db,purge_id): - try: - dct = self.get_purge_info_in_database_uve(collector,db) - for elem in dct: - if (elem['purge_id'] == purge_id): - return elem - return None - except Exception as e: - return None - - @retry(delay=5, tries=10) - def verify_purge_info_in_database_uve(self,purge_id): - for collector in self.inputs.collector_ips: - for db in self.inputs.database_names: - dct = self.get_matched_purge_info(collector,db,purge_id) - try: - if (dct['purge_status'] == 'success'): - return True - else: - return False - except Exception as e: - return False - -# @classmethod - def setUp(self): - super(AnalyticsVerification, self).setUp() - pass - # end setUpClass - - def cleanUp(self): - super(AnalyticsVerification, self).cleanUp() - # end cleanUp - -if __name__ == '__main__': - - print 'Need to add' - - # end runTest6 diff --git a/tcutils/collector/contrail_traces.py b/tcutils/collector/contrail_traces.py deleted file mode 100644 index 6203f9bc3..000000000 --- a/tcutils/collector/contrail_traces.py +++ /dev/null @@ -1,340 +0,0 @@ -# -# Traces Utils -# -# Utility functions for Operational State Server for VNC -# -# Created by Sandip Dey on 24/09/2013 -# -# Copyright (c) 2013, Contrail Systems, Inc. All rights reserved. 
-# - -import datetime -import time -import requests -import pkg_resources -import xmltodict -import json -import gevent -from lxml import etree -import socket -import sys -import argparse -import ConfigParser -import os - -try: - from pysandesh.gen_py.sandesh.ttypes import SandeshType -except: - class SandeshType(object): - SYSTEM = 1 - TRACE = 4 - - -def enum(**enums): - return type('Enum', (), enums) -# end enum - - -class TraceUtils(object): - - TIME_FORMAT_STR = '%Y %b %d %H:%M:%S.%f' - DEFAULT_TIME_DELTA = 10 * 60 * 1000000 # 10 minutes in microseconds - USECS_IN_SEC = 1000 * 1000 - OBJECT_ID = 'ObjectId' - -# POST_HEADERS = {'Content-type': 'application/json; charset="UTF-8"', 'Expect':'202-accepted'} - POST_HEADERS = {'Content-type': 'application/json'} - - def __init__(self, args_str=None): - self._args = None - if not args_str: - args_str = ' '.join(sys.argv[1:]) - self._parse_args(args_str) - TraceUtils.get_trace_buffer( - self._args.server_ip, self._args.server_port, self._args.buffer_name, - self._args.filename, self._args.opserver_ip, self._args.node_name, self._args.module) - - def _parse_args(self, args_str): - ''' - Eg. 
python trace_util.py - --server_ip 127.0.0.1 - --server_port 8083 - --buffer_name None/'DiscoveryClient' - --filename - --opserver_ip - ''' - - # Source any specified config/ini file - # Turn off help, so we print all options in response to -h - conf_parser = argparse.ArgumentParser(add_help=False) - - conf_parser.add_argument("-c", "--conf_file", - help="Specify config file", metavar="FILE") - args, remaining_argv = conf_parser.parse_known_args(args_str.split()) - - defaults = { - 'server_ip': '127.0.0.1', - 'server_port': '', - 'buffer_name': '', - 'filename': '', - 'opserver_ip': '', - 'node_name': '', - 'module': '', - } - - ksopts = { - 'admin_user': 'user1', - 'admin_password': 'password1', - 'admin_tenant_name': 'admin' - } - - if args.conf_file: - config = ConfigParser.SafeConfigParser() - config.read([args.conf_file]) - defaults.update(dict(config.items("DEFAULTS"))) - if 'KEYSTONE' in config.sections(): - ksopts.update(dict(config.items("KEYSTONE"))) - - # Override with CLI options - # Don't surpress add_help here so it will handle -h - parser = argparse.ArgumentParser( - # Inherit options from config_parser - parents=[conf_parser], - # print script description with -h/--help - description=__doc__, - # Don't mess with format of description - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - defaults.update(ksopts) - parser.set_defaults(**defaults) - - parser.add_argument( - "--server_ip", help="IP address of server for which traces needs to collected") - parser.add_argument("--server_port", - help="Port of server depending on the role") - parser.add_argument( - "--buffer_name", help="buffer name, if not given all the buffers to files") - parser.add_argument( - "--filename", help="file name to save buffers;location /var/log/contrail/traces") - parser.add_argument( - "--opserver_ip", help="opserver ip in case want to save the traces to database") - parser.add_argument( - "--node_name", help="Node name in case want to save the traces to database") 
- parser.add_argument( - "--module", help="module in case want to save the traces to database") - parser.add_argument( - "--admin_user", help="Name of keystone admin user") - parser.add_argument( - "--admin_password", help="Password of keystone admin user") - - self._args = parser.parse_args(remaining_argv) - - # end _parse_args - - @staticmethod - def get_url_http(url): - data = None - try: - if int(pkg_resources.get_distribution("requests").version[0]) == 1: - data = requests.get(url, stream=True) - else: - data = requests.get(url, prefetch=False) - except requests.exceptions.ConnectionError, e: - print "Connection to %s failed" % url - if data.status_code == 200: - try: - return etree.fromstring(data.text) - except Exception as e: - return json.loads(data.text) - else: - print "HTTP error code: %d" % response.status_code - return None - - # end get_url_http - - @staticmethod - def get_trace_buffer_names(ip, port): - url = 'http://%s:%s/Snh_SandeshTraceBufferListRequest?' % (ip, port) - resp = TraceUtils.get_url_http(url) - trace_buf_names = [] - xpath = '/SandeshTraceBufferListResponse' - - try: - tr = EtreeToDict(xpath).get_all_entry(resp) - records = tr['trace_buffer_list'] - for rec in records: - trace_buf_names.append(rec['trace_buf_name']) - except Exception as e: - self.logger.warn( - "Got exception in get_trace_buffer_list as : %s" % (e)) - finally: - return trace_buf_names - # end tace_buffer_names - - @staticmethod - def get_trace_buffer(ip, port, buffer_name=None, file_name=None, op_ip=None, nodename=None, module=None): - '''Get traces buffers from intropsect - ''' - buf_list = [] - txt = [] - host = socket.gethostbyaddr(ip)[0] - - if not (op_ip and nodename and module): - print 'If traces to be sent to the database all 3 arguments - opserver_ip,node_name and module must be provided' - return - - try: - os.makedirs('/var/log/contrail/traces') - except OSError: - pass - - if buffer_name: - buf_list.append(buffer_name) - else: - buf_list = 
TraceUtils.get_trace_buffer_names(ip, port) - - for elem in buf_list: - url = 'http://%s:%s/Snh_SandeshTraceRequest?x=%s' % (ip, - port, elem) - try: - resp = TraceUtils.get_url_http(url) - xpath = '/SandeshTraceTextResponse' - text = EtreeToDict(xpath).get_all_entry(resp) - if not file_name: - filename = '/var/log/contrail/traces/%s_%s_traces.log' % (host, elem) - else: - filename = '/var/log/contrail/traces/%s' % file_name - for el in text['traces']: - with open(filename, "a+") as f: - f.write(el + '\n') - print "Saved %s traces to %s" % (elem, filename) - except Exception as e: - print "While saving %s trace ,got exception from get_trace_buffer as %s" % (elem, e) - if op_ip: - try: - url1 = 'http://%s:8081/analytics/send-tracebuffer/%s/%s/%s' % (op_ip, - nodename, module, elem) - resp1 = TraceUtils.get_url_http(url1) - if (resp1['status'] == 'pass'): - print 'Traces saved to database' - else: - print 'Traces could not be saved to database' - - except Exception as e: - print 'Traces could not be saved to database' - print 'Got exception as %s' % e - - @staticmethod - def messages_xml_data_to_dict(messages_dict, msg_type): - if msg_type in messages_dict: - # convert xml value to dict - try: - messages_dict[msg_type] = xmltodict.parse( - messages_dict[msg_type]) - except: - pass - # end messages_xml_data_to_dict - - @staticmethod - def messages_data_dict_to_str(messages_dict, message_type, sandesh_type): - data_dict = messages_dict[message_type] - return DiscoveryServerUtils._data_dict_to_str(data_dict, sandesh_type) - # end messages_data_dict_to_str - - -class EtreeToDict(object): - - """Converts the xml etree to dictionary/list of dictionary.""" - - def __init__(self, xpath): - self.xpath = xpath - self.xml_list = ['policy-rule'] - - def _handle_list(self, elems): - """Handles the list object in etree.""" - a_list = [] - for elem in elems.getchildren(): - rval = self._get_one(elem, a_list) - if 'element' in rval.keys(): - a_list.append(rval['element']) - elif 
'list' in rval.keys(): - a_list.append(rval['list']) - else: - a_list.append(rval) - - if not a_list: - return None - return a_list - - def _get_one(self, xp, a_list=None): - """Recrusively looks for the entry in etree and converts to dictionary. - - Returns a dictionary. - """ - val = {} - - child = xp.getchildren() - if not child: - val.update({xp.tag: xp.text}) - return val - - for elem in child: - if elem.tag == 'list': - val.update({xp.tag: self._handle_list(elem)}) - - if elem.tag == 'data': - # Remove CDATA; if present - text = elem.text.replace("") - nxml = etree.fromstring(text) - rval = self._get_one(nxml, a_list) - else: - rval = self._get_one(elem, a_list) - - if elem.tag in self.xml_list: - val.update({xp.tag: self._handle_list(xp)}) - if elem.tag in rval.keys(): - val.update({elem.tag: rval[elem.tag]}) - elif 'SandeshData' in elem.tag: - val.update({xp.tag: rval}) - else: - val.update({elem.tag: rval}) - return val - - def find_entry(self, path, match): - """Looks for a particular entry in the etree. - - Returns the element looked for/None. - """ - xp = path.xpath(self.xpath) - f = filter(lambda x: x.text == match, xp) - if len(f): - return f[0].text - return None - - def get_all_entry(self, path): - """All entries in the etree is converted to the dictionary - - Returns the list of dictionary/didctionary. - """ - xps = path.xpath(self.xpath) - if not xps: - # sometime ./xpath dosen't work; work around - # should debug to find the root cause. 
- xps = path.xpath(self.xpath.strip('.')) - if type(xps) is not list: - return self._get_one(xps) - - val = [] - for xp in xps: - val.append(self._get_one(xp)) - if len(val) == 1: - return val[0] - return val - - -def main(args_str=None): - TraceUtils(args_str) -# end main - -if __name__ == "__main__": - main() diff --git a/tcutils/collector/opserver_introspect_utils.py b/tcutils/collector/opserver_introspect_utils.py deleted file mode 100755 index feba85fd4..000000000 --- a/tcutils/collector/opserver_introspect_utils.py +++ /dev/null @@ -1,355 +0,0 @@ -import sys -vizdtestdir = sys.path[0] -sys.path.insert(1, vizdtestdir + '/../../') - -import urllib2 -import xmltodict -import json -import requests -import socket -from lxml import etree -from tcutils.verification_util import * -from opserver_results import * -from opserver_util import OpServerUtils -from tcutils.util import * - - -class VerificationOpsSrv (VerificationUtilBase): - - def __init__(self, ip, port=8081, logger=LOG): - super(VerificationOpsSrv, self).__init__(ip, port, logger=logger) - - - def get_ops_generator(self, generator=None, - moduleid=None, node_type=None, - instanceid='0'): - '''http://nodea29:8081/analytics/uves/generator\ - /nodea18:Control:Contrail-Control:0?flat''' - if (generator == None): - generator = socket.gethostname() - if (moduleid == None): - self.logger.info("module id not passed") - return None - if instanceid == None: - instanceid = 0 - if node_type == None: - self.logger.info("node type is not passed") - return None - res = None - try: - generator_dict = self.dict_get( - 'analytics/uves/generator/' + generator + \ - ':' + node_type + ':' + moduleid + ':' \ - + instanceid + '?flat') - res = OpGeneratorResult(generator_dict) - except Exception as e: - print e - finally: - return res - - def get_ops_vrouter(self, vrouter=None): - if (vrouter == None): - vrouter = socket.gethostname() - res = None - try: - vrouter_dict = self.dict_get( - 'analytics/uves/vrouter/' + vrouter + 
'?flat') - res = OpVRouterResult(vrouter_dict) - except Exception as e: - print e - finally: - return res - - def get_ops_bgprouter(self, bgprouter=None): - if (bgprouter == None): - bgprouter = socket.gethostname() - res = None - try: - bgprouter_dict = self.dict_get( - 'analytics/uves/control-node/' + bgprouter + '?flat') - res = OpBGPRouterResult(bgprouter_dict) - except Exception as e: - print e - finally: - return res - - def get_ops_vn(self, vn_fq_name='default-virtual-network'): - res = None - try: - vn_dict = self.dict_get( - 'analytics/uves/virtual-network/' + vn_fq_name + '?flat') - res = OpVNResult(vn_dict) - except Exception as e: - print e - finally: - return res - - def get_ops_vm(self, vm='default-virtual-machine'): - res = None - try: - vm_dict = self.dict_get( - 'analytics/uves/virtual-machine/' + vm + '?flat') - res = OpVMResult(vm_dict) - except Exception as e: - print e - finally: - return res - - def get_ops_svc_instance(self, project='admin', - svc_instance=None): - '''analytics/uves/service-instance/default-domain:\ - admin:svc-instance1?flat''' - res = None - try: - si_dict = self.dict_get( - 'analytics/uves/service-instance/' + svc_instance + '?flat') - res = OpSIResult(si_dict) - except Exception as e: - print e - finally: - return res - - def get_ops_svc_template(self, - left_vn=None, - right_vn=None): - '''analytics/uves/service-chain/\ - sc:default-domain:admin:vn1:\ - default-domain:admin:fip_vn?flat''' - res = None - try: - st_dict = self.dict_get( - 'analytics/uves/service-chain/sc:' + left_vn + \ - ':' + right_vn + '?flat') - res = OpSTResult(st_dict) - except Exception as e: - print e - finally: - return res - - def get_hrefs_to_all_UVEs_of_a_given_UVE_type(self, uveType=None): - '''Get all hrefs for a uve type''' - if uveType: - dct = self.dict_get('analytics/uves/' + uveType) - else: - dct = self.dict_get('analytics/uves') - - ret_value = [] - for elem in dct: - self.ame = OpHrefResult(elem) - ret_value.append(self.ame) - return 
ret_value - - def get_hrefs_to_all_tables(self, uveType=None): - '''Get all hrefs for a uve type''' - dct = self.dict_get('analytics/uves/' + uveType) - ret_value = [] - for elem in dct: - self.tme = OpHrefTableResult(elem) - ret_value.append(self.tme) - return ret_value - - def send_trace_to_database(self, node=None, - module=None, instance_id='0', - trace_buffer_name=None): - '''http://:8081/analytics/\ - send-tracebuffer/nodeb8/Contrail-Vrouter-Agent/UveTrace''' - res = None - try: - res = self.dict_get('analytics/send-tracebuffer/' + node + - '/' + module + '/' + - instance_id + '/' + trace_buffer_name) - except Exception as e: - print e - finally: - return res - - def get_ops_bgp_peer(self, peer_toupe=None): - '''http://nodea18:8081/analytics/uves/bgp-peer/\ - default-domain:default-project:ip-fabric:__default\ - __:nodea19:default-domain:default-project:ip-fabric:\ - __default__:nodea18?flat''' - res = None - - try: - bgp_node = peer_toupe[0] - peer = peer_toupe[1] - link = 'analytics/uves/bgp-peer\ - /default-domain:default-project:\ - ip-fabric:__default__:' +\ - bgp_node + ':' + 'default-domain:\ - default-project:ip-fabric:__default__:' \ - + peer + '?flat' - - dct = self.dict_get("".join(link.split())) - res = OpBGPPeerResult(dct) - except Exception as e: - print e - finally: - return res - - def get_ops_bgp_xmpp_peer(self, peer_toupe=None): - '''http://nodea29.englab.juniper.net:8081\ - /analytics/uves/xmpp-peer/nodea29:10.204.216.15?flat''' - res = None - - try: - bgp_node = peer_toupe[0] - peer = peer_toupe[1] - dct = self.dict_get( - 'analytics/uves/xmpp-peer/' + bgp_node + ':' + peer + '?flat') - res = OpBGPXmppPeerResult(dct) - except Exception as e: - print e - finally: - return res - - def get_ops_collector(self, collector=None): - '''http://nodea18:8081/analytics/uves/analytics-node/nodea29?flat''' - res = None - try: - c_dict = self.dict_get( - 'analytics/uves/analytics-node/' + collector + '?flat') - res = OpCollectorResult(c_dict) - except 
Exception as e: - print e - finally: - return res - - def get_ops_alarms(self): - '''http://nodea18:8081/analytics/alarms''' - res = None - try: - c_dict = self.dict_get( - 'analytics/alarms') - res = OpCollectorResult(c_dict) - except Exception as e: - print e - finally: - return res - - def get_ops_config(self, config=None): - '''http://nodea18:8081/analytics/uves/config-node/nodea11?flat''' - res = None - try: - c_dict = self.dict_get( - 'analytics/uves/config-node/' + config + '?flat') - res = OpConfigResult(c_dict) - except Exception as e: - print e - finally: - return res - - def get_ops_db(self, db=None): - '''http://10.204.216.7:8081/analytics/uves/database/nodea11?flat''' - res = None - try: - c_dict = self.dict_get( - 'analytics/uves/database-node/' + db + '?flat') - res = OpDbResult(c_dict) - except Exception as e: - print e - finally: - return res - - def get_ops_vm_intf(self, intf): - '''http://nodea24:8081/analytics/uves/ - virtual-machine-interface/ - default-domain:admin:0e207bb1-5811-4595-a8b6-18e890838f60?flat''' - res = None - try: - c_dict = self.dict_get( - 'analytics/uves/virtual-machine-interface/' + intf + '?flat') - res = OpVmIntfResult(c_dict) - except Exception as e: - print e - finally: - return res - - def get_ops_sc_uve(self): - '''http://nodea18:8081/analytics/uves/service-chain/*''' - res = None - try: - c_dict = self.dict_get( - 'analytics/uves/service-chain/*') - res = OpServiceChainResult(c_dict) - except Exception as e: - print e - finally: - return res - -# @timeout(600, os.strerror(errno.ETIMEDOUT)) - def post_query(self, table, start_time=None, end_time=None, - select_fields=None, - where_clause='', - sort_fields=None, sort=None, limit=None, filter=None, dir=None): - res = None - try: - flows_url = OpServerUtils.opserver_query_url( - self._ip, str(self._port)) - print flows_url - query_dict = OpServerUtils.get_query_dict( - table, start_time, end_time, - select_fields, - where_clause, - sort_fields, sort, limit, filter, dir) - 
- print json.dumps(query_dict) - res = [] - resp = OpServerUtils.post_url_http( - flows_url, json.dumps(query_dict)) - if resp is not None: - resp = json.loads(resp) - qid = resp['href'].rsplit('/', 1)[1] - result = OpServerUtils.get_query_result( - self._ip, str(self._port), qid) - for item in result: - res.append(item) - except Exception as e: - print str(e) - finally: - return res - - def post_db_purge(self,purge_input): - - res = [] - json_body = OpServerUtils.get_json_body(purge_input = purge_input) - print json.dumps(json_body) - try: - purge_url = OpServerUtils.opserver_db_purge_url( - self._ip, str(self._port)) - print purge_url - resp = OpServerUtils.post_url_http( - purge_url, json.dumps(json_body)) - if resp is not None: - resp = json.loads(resp) - res.append(resp) - except Exception as e: - print str(e) - finally: - return res - -class VerificationOpsSrvIntrospect (VerificationUtilBase): - - def __init__(self, ip, port, logger=LOG): - super(VerificationOpsSrvIntrospect, self).__init__(ip, port,drv=XmlDrv, logger=logger) - - def get_collector_connectivity(self): - connaction_status = dict() - try: - c_dict = self.dict_get( - 'Snh_CollectorInfoRequest?') - ip = c_dict.xpath('ip')[0].text - port = c_dict.xpath('port')[0].text - status = c_dict.xpath('status')[0].text - connaction_status['ip']= ip - connaction_status['port']= port - connaction_status['status']= status - except Exception as e: - print e - finally: - return connaction_status - -if __name__ == '__main__': - vns = VerificationOpsSrvIntrospect('127.0.0.1',8090) - intr = vns.get_collector_connectivity() - diff --git a/tcutils/collector/opserver_results.py b/tcutils/collector/opserver_results.py deleted file mode 100644 index 3648faa80..000000000 --- a/tcutils/collector/opserver_results.py +++ /dev/null @@ -1,535 +0,0 @@ -import re -from tcutils.verification_util import * - - -def _OpResult_get_list_name(lst): - sname = "" - for sattr in lst.keys(): - if sattr[0] not in ['@']: - sname = sattr - 
return sname - - -def _OpResultFlatten(inp): - #import pdb; pdb.set_trace() - sname = "" - if (inp['@type'] == 'struct'): - sname = _OpResult_get_list_name(inp) - if (sname == ""): - return Exception('Struct Parse Error') - ret = {} - ret[sname] = {} - for k, v in inp[sname].items(): - ret[sname][k] = _OpResultFlatten(v) - return ret - elif (inp['@type'] == 'list'): - sname = _OpResult_get_list_name(inp['list']) - ret = {} - if (sname == ""): - return ret - items = inp['list'][sname] - if not isinstance(items, list): - items = [items] - lst = [] - for elem in items: - if not isinstance(elem, dict): - lst.append(elem) - else: - lst_elem = {} - for k, v in elem.items(): - lst_elem[k] = _OpResultFlatten(v) - lst.append(lst_elem) - ret[sname] = lst - return ret - else: - return inp['#text'] - - -def _OpResultListParse(dct, match): - ret = [] - sname = _OpResult_get_list_name(dct) - if (sname == ""): - return ret - - #import pdb; pdb.set_trace() - if not isinstance(dct[sname], list): - lst = [dct[sname]] - else: - lst = dct[sname] - - for elem in lst: - if (match == None): - isMatch = True - else: - isMatch = False - - if sname == 'element': - if elem == match: - isMatch = True - if isMatch: - ret.append(elem) - else: - dret = {} - isMatcher = True - for k, v in elem.items(): - if v.has_key('#text'): - dret[k] = v["#text"] - if v.has_key('@aggtype'): - if v['@aggtype'] == 'listkey': - if v['#text'] == match: - isMatch = True - if isinstance(match, list): - #import pdb; pdb.set_trace() - for matcher in match: - if not isinstance(matcher, tuple): - raise Exception('Incorrect matcher') - mk, mv = matcher - if (k == mk): - if (v['#text'] != mv): - isMatcher = False - else: - dret[k] = _OpResultFlatten(v) - - if isinstance(match, list): - if isMatcher: - ret.append(dret) - else: - if isMatch: - ret.append(dret) - return ret - -# def _OpResultGet(dct, p1, p2, match = None): -# ret = None -# try: -# res = dct.xpath(p1,p2) -# -# import pdb; pdb.set_trace() -# if isinstance(res, 
list): -# if len(res) != 1: -# raise Exception('Inconsistency') -# res = res[0][0] -# -# if res['@type'] in ["list"]: -# ret = _OpResultListParse(res['list'], match) -# elif res['@type'] in ["struct"]: -# sname = _OpResult_get_list_name(res) -# ret = _OpResultFlatten(res) -# ret = res[sname] -# else: -# if (match != None): -# raise Exception('Match is invalid for non-list') -# ret = res['#text'] -# except Exception as e: -# print e -# finally: -# return ret - - -def _OpResultGet(dct, p1, p2, match=None): - ret = None - try: - if p2: - res = dct.xpath(p1, p2) - else: - res = dct.xpath(p1) -# if isinstance(res, list): -# if len(res) != 1: -# raise Exception('Inconsistency') -# ret1 = res[0] -# else: - ret1 = res - if match: - ret2 = [] - if isinstance(ret1, list): - for elem in ret1: - if isinstance(elem, dict): - for k, v in elem.items(): - if isinstance(match, tuple): - if ((match[0] == k)and (match[1] == v)): - ret2.append(elem) - break - elif (isinstance(v, dict)): - if (match[0] in v.keys() and (match[1] in v.values()or (int(match[1]) in v.values()))): - ret2.append(elem) - break - elif (isinstance(v,list)): - for vl in v: - if ((match[0] in vl.keys()) and (match[1] in vl.values())): - ret2.append(vl) - break - else: - if(match in v): - ret2.append(elem) - break - elif (isinstance(v, dict)): - if(match in v.values()or int(match) in v.values()): - ret2.append(elem) - break - else: - if (match == elem): - ret2.append(elem) - else: - for k, v in ret1.items(): - if isinstance(match, tuple): - if (match[0] == k and match[1] == v): - ret2.append(ret1) - else: - if(match == v): - ret2.append(ret1) - ret = ret2 - else: - ret = ret1 - - except Exception as e: - print e - finally: - return ret - -# class OpVNResult (Result): -# ''' -# This class returns a VN UVE object -# ''' -# def get_attr(self, tier, attr, match = None): -# import pdb; pdb.set_trace () -# if tier == "Config": -# typ = 'UveVirtualNetworkConfig' -# elif tier == "Agent": -# typ = 'UveVirtualNetworkAgent' 
-# else: -# raise Exception("Invalid Arguments - bad tier") -# -# return _OpResultGet(self, typ, attr, match) -# -# class OpVMResult (Result): -# ''' -# This class returns a VM UVE object -# ''' -# def get_attr(self, tier, attr, match = None): -# import pdb; pdb.set_trace () -# if tier == "Config": -# typ = 'UveVirtualMachineConfig' -# elif tier == "Agent": -# typ = 'UveVirtualMachineAgent' -# else: -# raise Exception("Invalid Arguments - bad tier") -# -# return _OpResultGet(self, typ, attr, match) -# -# class OpVRouterResult (Result): -# ''' -# This class returns a VROUTER UVE object -# ''' -# def get_attr(self, tier, attr, match = None): -# if tier == "Stats": -# typ = 'VrouterStatsAgent' -# elif tier == "Agent": -# typ = 'VrouterAgent' -# else: -# raise Exception("Invalid Arguments - bad tier") -# return _OpResultGet(self, typ, attr, match) -# -# class OpBGPRouterResult (Result): -# ''' -# This class returns a BGP-ROUTER UVE object -# ''' -# def get_attr(self, tier, attr, match = None): -# if tier == "Control": -# typ = 'BgpRouterState' -# elif tier == "Agent": -# typ = 'VrouterAgent' -# else: -# raise Exception("Invalid Arguments - bad tier") -# return _OpResultGet(self, typ, attr, match) -# -# -# class OpCollectorResult (Result): -# ''' -# This class returns a CollectorInfo object -# ''' -# def get_attr(self, tier, attr, match = None): -# if tier == "Analytics": -# typ = 'CollectorState' -# else: -# raise Exception("Invalid Arguments - bad tier") -# -# return _OpResultGet(self, typ, attr, match) -# -# -# def get_all_generator_nodes(self, tier, attr, match = None): -# self.src_list=[] -# self.dct_list=self.get_attr(tier,attr) -# for item in self.dct_list: -# source=item['source'] -# if source not in self.src_list: -# self.src_list.append(source) -# return self.src_list -# -# def get_all_moduleid_for_a_generator(self, tier, attr, match = None,generator=None): -# self.module_id_list=[] -# self.dct_list=self.get_attr(tier,attr,[('source',generator)]) -# for item 
in self.dct_list: -# module_id=item['module_id'] -# self.module_id_list.append(module_id) -# return self.module_id_list - - -class OpGeneratorResult (Result): - - ''' - This class returns a generator flat results - ''' - - def get_attr(self, tier, attr, match=None): - if tier == "Client": - typ = 'ModuleClientState' - elif tier == "Server": - typ = 'ModuleServerState' - else: - raise Exception("Invalid Arguments - bad tier") - return _OpResultGet(self, typ, attr, match) - - -class OpVRouterResult (Result): - - ''' - This class returns a vrouter flat results - ''' - - def get_attr(self, tier, attr, match=None): - if tier == "Stats": - typ = 'VrouterStatsAgent' - elif tier == "Agent": - typ = 'VrouterAgent' - elif tier == "Node": - typ = 'NodeStatus' - else: - raise Exception("Invalid Arguments - bad tier") - return _OpResultGet(self, typ, attr, match) - - -class OpBGPRouterResult (Result): - - ''' - This class returns a BGP-ROUTER UVE object - ''' - - def get_attr(self, tier, attr, match=None): - if tier == "Control": - typ = 'BgpRouterState' - elif tier == "Node": - typ = 'NodeStatus' - else: - raise Exception("Invalid Arguments - bad tier") - return _OpResultGet(self, typ, attr, match) - - -class OpVNResult (Result): - - ''' - This class returns a VN UVE object - ''' - - def get_attr(self, tier, attr, match=None): - #import pdb; pdb.set_trace () - if tier == "Config": - typ = 'UveVirtualNetworkConfig' - elif tier == "Agent": - typ = 'UveVirtualNetworkAgent' - else: - raise Exception("Invalid Arguments - bad tier") - - return _OpResultGet(self, typ, attr, match) - - -class OpVMResult (Result): - - ''' - This class returns a VM UVE object - ''' - - def get_attr(self, tier, attr, match=None): - #import pdb; pdb.set_trace () - if tier == "Config": - typ = 'UveVirtualMachineConfig' - elif tier == "Agent": - typ = 'UveVirtualMachineAgent' - else: - raise Exception("Invalid Arguments - bad tier") - - return _OpResultGet(self, typ, attr, match) - - -class 
OpHrefResult(Result): - - '''Get all hrefs for a uve type - ''' - - def get_attr(self, tier, attr=None, match=None): - - if tier == "Href": - typ = 'href' - elif tier == "Name": - typ = 'name' - else: - raise Exception("Invalid Arguments - bad tier") - - return _OpResultGet(self, typ, attr, match) - -class OpHrefTableResult(Result): - - '''Get all hrefs for a uve type - ''' - - def get_attr(self, tier, attr=None, match=None): - - if tier == "Href": - typ = 'href' - elif tier == "Name": - typ = 'name' - else: - raise Exception("Invalid Arguments - bad tier") - - return _OpResultGet(self, typ, attr, match) - -class OpBGPPeerResult (Result): - - ''' - This class returns a bgp-peer UVE object - ''' - - def get_attr(self, tier, attr, match=None): - if tier == "Control": - typ = 'BgpPeerInfoData' - else: - raise Exception("Invalid Arguments - bad tier") - - return _OpResultGet(self, typ, attr, match) - - -class OpBGPXmppPeerResult (Result): - - ''' - This class returns a bgp_xmpp_peer UVE object - ''' - - def get_attr(self, tier, attr, match=None): - if tier == "Control": - typ = 'XmppPeerInfoData' - else: - raise Exception("Invalid Arguments - bad tier") - - return _OpResultGet(self, typ, attr, match) - - -class OpSIResult (Result): - - ''' - This class returns a service instance UVE object - ''' - - def get_attr(self, tier, attr=None, match=None): - if tier == "Config": - typ = 'UveSvcInstanceConfig' - else: - raise Exception("Invalid Arguments - bad tier") - - return _OpResultGet(self, typ, attr, match) - - -class OpSTResult (Result): - - ''' - This class returns a service template UVE object - ''' - - def get_attr(self, tier, attr=None, match=None): - if tier == "Config": - typ = 'UveServiceChainData' - else: - raise Exception("Invalid Arguments - bad tier") - - return _OpResultGet(self, typ, attr, match) - - -class OpCollectorResult (Result): - - ''' - This class returns a collector UVE object - ''' - - def get_attr(self, tier, attr, match=None): - #import pdb; 
pdb.set_trace () - if tier == "Node": - typ = 'NodeStatus' - elif tier == "Collector": - typ = 'CollectorState' - elif tier == "Module": - typ = 'ModuleCpuState' - else: - raise Exception("Invalid Arguments - bad tier") - - return _OpResultGet(self, typ, attr, match) - - -class OpConfigResult (Result): - - ''' - This class returns a config node UVE object - ''' - - def get_attr(self, tier, attr, match=None): - #import pdb; pdb.set_trace () - if tier == "Node": - typ = 'NodeStatus' -# elif tier == "Collector": -# typ = 'CollectorState' - else: - raise Exception("Invalid Arguments - bad tier") - - return _OpResultGet(self, typ, attr, match) - -class OpServiceChainResult (Result): - - ''' - This class returns a service chain node UVE object - ''' - - def get_attr(self, tier, attr=None, match=None): - if tier == "Config": - typ = 'value' - else: - raise Exception("Invalid Arguments - bad tier") - - return _OpResultGet(self, typ, attr, match) - -class OpDbResult(Result): - - ''' - This class returns a database node UVE object - ''' - - def get_attr(self, tier, attr=None, match=None): - if tier == "Node": - typ = 'NodeStatus' - elif tier == 'DatabasePurge': - typ = 'DatabasePurgeInfo' - elif tier == 'DatabaseUsage': - typ = 'DatabaseUsageInfo' - else: - raise Exception("Invalid Arguments - bad tier") - - return _OpResultGet(self, typ, attr, match) - -class OpVmIntfResult(Result): - - ''' - This class returns a database node UVE object - ''' - - def get_attr(self, tier, attr=None, match=None): - if tier == "Agent": - typ = 'UveVMInterfaceAgent' - else: - raise Exception("Invalid Arguments - bad tier") - - return _OpResultGet(self, typ, attr, match) diff --git a/tcutils/collector/opserver_util.py b/tcutils/collector/opserver_util.py deleted file mode 100644 index 0b7d33251..000000000 --- a/tcutils/collector/opserver_util.py +++ /dev/null @@ -1,526 +0,0 @@ -# -# OpServer Utils -# -# Utility functions for Operational State Server for VNC -# -# Created by Megh Bhatt on 
03/04/2013 -# -# Copyright (c) 2013, Contrail Systems, Inc. All rights reserved. -# - -import datetime -import time -import requests -import pkg_resources -import xmltodict -import json -import gevent -try: - from pysandesh.gen_py.sandesh.ttypes import SandeshType -except: - class SandeshType(object): - SYSTEM = 1 - TRACE = 4 - - -def enum(**enums): - return type('Enum', (), enums) -# end enum - - -class OpServerUtils(object): - - TIME_FORMAT_STR = '%Y %b %d %H:%M:%S.%f' - DEFAULT_TIME_DELTA = 10 * 60 * 1000000 # 10 minutes in microseconds - USECS_IN_SEC = 1000 * 1000 - OBJECT_ID = 'ObjectId' - - POST_HEADERS = {'Content-type': - 'application/json; charset="UTF-8"', 'Expect': '202-accepted'} - - @staticmethod - def utc_timestamp_usec(): - epoch = datetime.datetime.utcfromtimestamp(0) - now = datetime.datetime.utcnow() - delta = now - epoch - return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) - # end utc_timestamp_usec - - @staticmethod - def get_start_end_time(start_time, end_time): - if start_time == None and end_time == None: - end_time = OpServerUtils.utc_timestamp_usec() - start_time = end_time - OpServerUtils.DEFAULT_TIME_DELTA - elif start_time == None and end_time != None: - start_time = end_time - OpServerUtils.DEFAULT_TIME_DELTA - elif start_time != None and end_time == None: - end_time = start_time + OpServerUtils.DEFAULT_TIME_DELTA - return start_time, end_time - # end get_start_end_time - - @staticmethod - def post_url_http(url, params): - try: - print 'request version : %s'%(pkg_resources.get_distribution("requests").version[0]) - if int(pkg_resources.get_distribution("requests").version[0]) >= 1: - response = requests.post(url, stream=True, - data=params, - headers=OpServerUtils.POST_HEADERS) - else: - response = requests.post(url, prefetch=False, - data=params, - headers=OpServerUtils.POST_HEADERS) - except requests.exceptions.ConnectionError, e: - print "Connection to %s failed" % url - return None - if 
response.status_code in [202 , 200]: - return response.text - else: - print "HTTP error code: %d" % response.status_code - return None - # end post_url_http - - @staticmethod - def get_url_http(url): - data = {} - try: - if int(pkg_resources.get_distribution("requests").version[0]) >= 1: - data = requests.get(url, stream=True) - else: - data = requests.get(url, prefetch=False) - except requests.exceptions.ConnectionError, e: - print "Connection to %s failed" % url - - return data - # end get_url_http - - @staticmethod - def parse_query_result(result): - done = False - resit = result.iter_lines() - while not done: - try: - ln = resit.next() - if ln == '{"value": [': - continue - if ln == ']}': - done = True - continue - if ln[0] == ',': - out_line = '[ {} ' + ln + ' ]' - else: - out_line = '[ {} , ' + ln + ' ]' - - out_list = json.loads(out_line) - out_list.pop(0) - for i in out_list: - yield i - except Exception as e: - print "Error parsing %s results: %s" % (ln, str(e)) - return - # end parse_query_result - - @staticmethod - def get_query_result(opserver_ip, opserver_port, qid): - while True: - url = OpServerUtils.opserver_query_url( - opserver_ip, opserver_port) + '/' + qid - resp = OpServerUtils.get_url_http(url) - if resp.status_code != 200: - yield {} - return - status = json.loads(resp.text) - if status['progress'] != 100: - gevent.sleep(0.5) - continue - else: - for chunk in status['chunks']: - url = OpServerUtils.opserver_url( - opserver_ip, opserver_port) + chunk['href'] - resp = OpServerUtils.get_url_http(url) - if resp.status_code != 200: - yield {} - else: - for result in OpServerUtils.parse_query_result(resp): - yield result - return - # end get_query_result - - @staticmethod - def convert_to_time_delta(time_str): - num = int(time_str[:-1]) - if time_str.endswith('s'): - return datetime.timedelta(seconds=num) - elif time_str.endswith('m'): - return datetime.timedelta(minutes=num) - elif time_str.endswith('h'): - return datetime.timedelta(hours=num) - 
elif time_str.endswith('d'): - return datetime.timedelta(days=num) - # end convert_to_time_delta - - @staticmethod - def convert_to_utc_timestamp_usec(time_str): - # First try datetime.datetime.strptime format - try: - dt = datetime.datetime.strptime( - time_str, OpServerUtils.TIME_FORMAT_STR) - except ValueError: - # Try now-+ format - if time_str == 'now': - return OpServerUtils.utc_timestamp_usec() - else: - # Handle now-/+1h format - if time_str.startswith('now'): - td = OpServerUtils.convert_to_time_delta( - time_str[len('now'):]) - else: - # Handle -/+1h format - td = OpServerUtils.convert_to_time_delta(time_str) - - utc_tstamp_usec = OpServerUtils.utc_timestamp_usec() - return utc_tstamp_usec + ((td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6)) - else: - return int(time.mktime(dt.timetuple()) * 10 ** 6) - # end convert_to_utc_timestamp_usec - - @staticmethod - def opserver_url(ip, port): - return "http://" + ip + ":" + port - # end opserver_url - - @staticmethod - def opserver_query_url(opserver_ip, opserver_port): - return "http://" + opserver_ip + ":" + opserver_port + "/analytics/query" - # end opserver_query_url - - @staticmethod - def opserver_operation_url(opserver_ip, opserver_port): - return "http://" + opserver_ip + ":" + opserver_port + "/analytics/operation" - # end opserver_operation_url - - @staticmethod - def opserver_db_purge_url(opserver_ip, opserver_port): - operation_url = OpServerUtils.opserver_operation_url(opserver_ip, opserver_port) - return "%s/database-purge"%operation_url - # end opserver_db_purge_url - - @staticmethod - def messages_xml_data_to_dict(messages_dict, msg_type): - if msg_type in messages_dict: - # convert xml value to dict - try: - messages_dict[msg_type] = xmltodict.parse( - messages_dict[msg_type]) - except: - pass - # end messages_xml_data_to_dict - - @staticmethod - def messages_data_dict_to_str(messages_dict, message_type, sandesh_type): - data_dict = messages_dict[message_type] - return 
OpServerUtils._data_dict_to_str(data_dict, sandesh_type) - # end messages_data_dict_to_str - - @staticmethod - def _data_dict_to_str(data_dict, sandesh_type): - data_str = None - for key, value in data_dict.iteritems(): - # Ignore if type is sandesh - if '@type' == key and value == 'sandesh': - continue - # Do not print 'file' and 'line' - if 'file' == key or 'line' == key: - continue - # Confirm value is dict - if isinstance(value, dict): - value_dict = value - else: - continue - - # Handle struct, list - if '@type' in value_dict: - if value_dict['@type'] == 'struct': - for vdict_key, vdict_value in value_dict.iteritems(): - if isinstance(vdict_value, dict): - if data_str == None: - data_str = '' - else: - data_str += ', ' - data_str += '[' + vdict_key + ': ' + \ - OpServerUtils._data_dict_to_str( - vdict_value, sandesh_type) + ']' - continue - if value_dict['@type'] == 'list': - if data_str == None: - data_str = '' - else: - data_str += ', ' - vlist_dict = value_dict['list'] - # Handle list of basic types - if 'element' in vlist_dict: - if not isinstance(vlist_dict['element'], list): - velem_list = [vlist_dict['element']] - else: - velem_list = vlist_dict['element'] - data_str += '[' + key + ':' - for velem in velem_list: - data_str += ' ' + velem - data_str += ']' - # Handle list of complex types - else: - data_str += '[' + key + ':' - for vlist_key, vlist_value in vlist_dict.iteritems(): - if isinstance(vlist_value, dict): - vlist_value_list = [vlist_value] - elif isinstance(vlist_value, list): - vlist_value_list = vlist_value - else: - continue - for vdict in vlist_value_list: - data_str += ' [' + \ - OpServerUtils._data_dict_to_str( - vdict, sandesh_type) + ']' - data_str += ']' - continue - else: - if data_str == None: - data_str = '' - else: - data_str += ', ' - data_str += '[' + \ - OpServerUtils._data_dict_to_str( - value_dict, sandesh_type) + ']' - continue - - if sandesh_type == SandeshType.SYSTEM or sandesh_type == SandeshType.TRACE: - if data_str == 
None: - data_str = '' - else: - data_str += ' ' - if '#text' in value_dict: - data_str += value_dict['#text'] - if 'element' in value_dict: - data_str += value_dict['element'] - else: - if data_str == None: - data_str = '' - else: - data_str += ', ' - if '#text' in value_dict: - data_str += key + ' = ' + value_dict['#text'] - elif 'element' in value_dict: - data_str += key + ' = ' + value_dict['element'] - else: - data_str += key + ' = ' - - if data_str == None: - data_str = '' - return data_str - # end _data_dict_to_str - - @staticmethod - def get_query_dict(table, start_time=None, end_time=None, - select_fields=None, - where_clause="", - sort_fields=None, sort=None, limit=None, filter=None, dir=None): -# @staticmethod -# def get_query_dict(table, start_time = None, end_time = None, -# select_fields = None, -# where_clause = "", -# sort_fields = None, sort = None, limit = None, filter = None): - """ - This function takes in the query parameters, format appropriately and calls - ReST API to the :mod:`opserver` to get data - - :param table: table to do the query on - :type table: str - :param start_time: start_time of the query's timeperiod - :type start_time: int - :param end_time: end_time of the query's timeperiod - :type end_time: int - :param select_fields: list of columns to be returned in the final result - :type select_fields: list of str - :param where_clause: list of match conditions for the query - :type where_clause: list of match, which is a pair of str ANDed - :dir:int - :returns: str -- dict of query request - :raises: Error - - """ - - try: - if start_time.isdigit() and end_time.isdigit(): - start_time = int(start_time) - end_time = int(end_time) - else: - start_time = OpServerUtils.convert_to_utc_timestamp_usec( - start_time) - end_time = OpServerUtils.convert_to_utc_timestamp_usec( - end_time) - except: - print 'Incorrect start-time (%s) or end-time (%s) format' % (start_time, - end_time) - return None - - sf = select_fields - lstart_time, 
lend_time = OpServerUtils.get_start_end_time(start_time, - end_time) - where = [] - for term in where_clause.split('OR'): - term_elem = [] - for match in term.split('AND'): - if match == '': - continue - match_s = match.strip(' ()') - match_e = match_s.split('=') - match_e[0] = match_e[0].strip(' ()') - match_e[1] = match_e[1].strip(' ()') - match_v = match_e[1].split("<") - - if len(match_v) is 1: - if match_v[0][-1] is '*': - match_prefix = match_v[0][:(len(match_v[0]) - 1)] - print match_prefix - match_elem = OpServerUtils.Match(name=match_e[0], - value=match_prefix, - op=OpServerUtils.MatchOp.PREFIX) - else: - match_elem = OpServerUtils.Match(name=match_e[0], - value=match_v[0], - op=OpServerUtils.MatchOp.EQUAL) - else: - match_elem = OpServerUtils.Match(name=match_e[0], - value=match_v[0], - op=OpServerUtils.MatchOp.IN_RANGE, value2=match_v[1]) - term_elem.append(match_elem.__dict__) - - if len(term_elem) == 0: - where = None - else: - where.append(term_elem) - - filter_terms = [] - if filter is not None: - for match in filter.split(','): - match_s = match.strip(' ()') - match_e = match_s.split('=') - match_op = ["", ""] - if (len(match_e) == 2): - match_op[0] = match_e[0].strip(' ()') - match_op[1] = match_e[1].strip(' ()') - op = OpServerUtils.MatchOp.REGEX_MATCH - - match_e = match_s.split('<') - if (len(match_e) == 2): - match_op[0] = match_e[0].strip(' ()') - match_op[1] = match_e[1].strip(' ()') - op = OpServerUtils.MatchOp.LEQ - - match_e = match_s.split('>') - if (len(match_e) == 2): - match_op[0] = match_e[0].strip(' ()') - match_op[1] = match_e[1].strip(' ()') - op = OpServerUtils.MatchOp.GEQ - - match_elem = OpServerUtils.Match(name=match_op[0], - value=match_op[1], - op=op) - filter_terms.append(match_elem.__dict__) - - if len(filter_terms) == 0: - filter_terms = None - - flowtable_query = OpServerUtils.Query(table, - start_time=lstart_time, - end_time=lend_time, - select_fields=sf, - where=where, - sort_fields=sort_fields, - sort=sort, - 
limit=limit, - filter=filter_terms, - dir=dir) - -# flowtable_query = OpServerUtils.Query(table, -# start_time = lstart_time, -# end_time = lend_time, -# select_fields = sf, -# where = where, -# sort_fields = sort_fields, -# sort = sort, -# limit = limit, -# filter = filter_terms -# ) - - return flowtable_query.__dict__ - - @staticmethod - def get_json_body(*args,**kwargs): - json = OpServerUtils.Json_Body(*args,**kwargs) - return json.__dict__ - #end get_json_body - - class Json_Body(object): - def __init__(self,*args,**kwargs): - self.purge_input = kwargs['purge_input'] - #end Json_Body - - class Query(object): - table = None - start_time = None - end_time = None - select_fields = None - where = None - sort = None - sort_fields = None - limit = None - filter = None - dir = None - - def __init__( - self, table, start_time, end_time, select_fields, where=None, - sort_fields=None, sort=None, limit=None, filter=None, dir=None): -# def __init__(self, table, start_time, end_time, select_fields, where = None, -# sort_fields = None, sort = None, limit = None, filter = None): - self.table = table - self.start_time = start_time - self.end_time = end_time - self.select_fields = select_fields - if where is not None: - self.where = where - if sort_fields is not None: - self.sort_fields = sort_fields - if sort is not None: - self.sort = sort - if limit is not None: - self.limit = limit - if filter is not None: - self.filter = filter - if dir is not None: - self.dir = dir - # end __init__ - - # end class Query - - MatchOp = enum(EQUAL=1, NOT_EQUAL=2, IN_RANGE=3, - NOT_IN_RANGE=4, LEQ=5, GEQ=6, PREFIX=7, REGEX_MATCH=8) - - SortOp = enum(ASCENDING=1, DESCENDING=2) - - class Match(object): - name = None - value = None - op = None - value2 = None - - def __init__(self, name, value, op, value2=None): - self.name = name - self.value = value - self.op = op - self.value2 = value2 - # end __init__ - - # end class Match - -# end class OpServerUtils diff --git a/tcutils/commands.py 
b/tcutils/commands.py deleted file mode 100644 index d30ae167c..000000000 --- a/tcutils/commands.py +++ /dev/null @@ -1,86 +0,0 @@ -"""Module to launch any local/remote command.""" - -import os -import signal -import tempfile -import logging as LOG -from subprocess import Popen - -import paramiko - -LOG.basicConfig(format='%(levelname)s: %(message)s', level=LOG.DEBUG) - - -class Command(object): - - """Launch local command.""" - - def __init__(self, cmd): - self.cmd = cmd - self.stdout = None - self.stderr = None - self.fstdout = tempfile.NamedTemporaryFile(mode='w', - prefix='CMD_OUT_') - self.fstderr = tempfile.NamedTemporaryFile(mode='w', - prefix='CMD_ERR_') - - def start(self): - """Launches a local command as background process.""" - try: - self.execprocess = Popen([self.cmd], - stdout=self.fstdout, - stderr=self.fstderr, - shell=True) - except KeyboardInterrupt: - self.stop() - - def stop(self): - """Stops the background process and exits. - - Return tuple of (RC, stdout, stderr).""" - rc = self.execprocess.poll() - if rc is None: - # process still runs, kill it. - os.kill(self.execprocess.pid, signal.SIGTERM) - process_id, rc = os.waitpid(self.execprocess.pid, 0) - - with file(self.fstdout.name, 'r') as fh: - self.stdout = fh.read() - with file(self.fstderr.name, 'r') as fh: - self.stderr = fh.read() - - return (rc, self.stdout, self.stderr) - - -def ssh(host, user, passwd, log=LOG): - """SSH to any host. - """ - ssh = paramiko.SSHClient() - ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - ssh.connect(host, username=user, password=passwd) - return ssh - - -def execute_cmd(session, cmd, log=LOG): - """Executing long running commands in background through fabric has issues - So implemeted this to execute the command. 
- """ - log.debug("Executing command: %s" % cmd) - stdin, stdout, stderr = session.exec_command(cmd) - - -def execute_cmd_out(session, cmd, log=LOG): - """Executing long running commands in background through fabric has issues - So implemeted this to execute the command. - """ - log.debug("Executing command: %s" % cmd) - stdin, stdout, stderr = session.exec_command(cmd) - out = None - err = None - out = stdout.read() - err = stderr.read() - if out: - log.debug("STDOUT: %s", out) - if err: - log.debug("STDERR: %s", err) - return (out, err) diff --git a/tcutils/config/__init__.py b/tcutils/config/__init__.py deleted file mode 100644 index 9e3593535..000000000 --- a/tcutils/config/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Config Utils""" diff --git a/tcutils/config/discovery_tests.py b/tcutils/config/discovery_tests.py deleted file mode 100644 index f54dc6bf9..000000000 --- a/tcutils/config/discovery_tests.py +++ /dev/null @@ -1,1366 +0,0 @@ -# Need to import path to test/fixtures and test/scripts/ -# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/' -# -# To run tests, you can do 'python -m testtools.run tests'. To run specific tests, -# You can do 'python -m testtools.run -l tests' -# Set the env variable PARAMS_FILE to point to your ini file. 
Else it will try to pick params.ini in PWD -# -from netaddr import IPNetwork - -import fixtures -from tcutils.util import * -from netaddr import * -from time import sleep -import logging as LOG -import re -import socket -from discovery_util import DiscoveryServerUtils -import json - -class DiscoveryVerification(fixtures.Fixture): - - def __init__(self, inputs, cn_inspect, agent_inspect, ops_inspect, ds_inspect, logger=LOG): - - self.inputs = inputs - self.ops_inspect = ops_inspect - self.agent_inspect = agent_inspect - self.cn_inspect = cn_inspect - self.ds_inspect = ds_inspect - self.logger = logger - self.ds_port = 5998 -# self.get_all_publishers_by_topology() - - def get_all_control_services_by_topology(self): - - publisher_tuple = [] - services = ['xmpp-server'] - for service in services: - # for ip in self.inputs.bgp_ips: - for host in self.inputs.bgp_names: - control_ip = self.inputs.host_data[host]['host_control_ip'] - # t=(ip,service) - t = (control_ip, service) - publisher_tuple.append(t) - self.logger.info( - "Calculated control services as per the testbed file..%s" % - (publisher_tuple)) - return publisher_tuple - - def get_all_dns_services_by_topology(self): - - publisher_tuple = [] - services = ['dns-server'] - for service in services: - for host in self.inputs.bgp_names: - control_ip = self.inputs.host_data[host]['host_control_ip'] - # t=(ip,service) - t = (control_ip, service) - publisher_tuple.append(t) - self.logger.info( - "Calculated dns services as per the testbed file..%s" % - (publisher_tuple)) - return publisher_tuple - - def get_all_api_services_by_topology(self): - - publisher_tuple = [] - self.logger.info("Calculating api services as per the testbed file..") - services = ['ApiServer'] - for service in services: - for host in self.inputs.cfgm_names: - control_ip = self.inputs.host_data[host]['host_control_ip'] - # t=(ip,service) - t = (control_ip, service) - publisher_tuple.append(t) - self.logger.info( - "Calculated api services as per the 
testbed file..%s" % - (publisher_tuple)) - return publisher_tuple - - def get_all_ifmap_services_by_topology(self): - - publisher_tuple = [] - self.logger.info( - "Calculating ifmap services as per the testbed file..") - services = ['IfmapServer'] - for service in services: - for host in self.inputs.cfgm_names: - control_ip = self.inputs.host_data[host]['host_control_ip'] - # t=(ip,service) - t = (control_ip, service) - publisher_tuple.append(t) - self.logger.info( - "Calculated ifmap services as per the testbed file..%s" % - (publisher_tuple)) - return publisher_tuple - - def get_all_collector_services_by_topology(self): - - publisher_tuple = [] - self.logger.info( - "Calculating collector services as per the testbed file..") - services = ['Collector'] - for service in services: - for host in self.inputs.collector_names: - control_ip = self.inputs.host_data[host]['host_control_ip'] - # t=(ip,service) - t = (control_ip, service) - publisher_tuple.append(t) - self.logger.info( - "Calculated collector services as per the testbed file..%s" % - (publisher_tuple)) - return publisher_tuple - - def get_all_opserver_by_topology(self): - - publisher_tuple = [] - self.logger.info( - "Calculating opserver services as per the testbed file..") - services = ['OpServer'] - for service in services: - for host in self.inputs.collector_names: - control_ip = self.inputs.host_data[host]['host_control_ip'] - # t=(ip,service) - t = (control_ip, service) - publisher_tuple.append(t) - self.logger.info("Calculated opserver as per the testbed file..%s" % - (publisher_tuple)) - return publisher_tuple - - @retry_for_value(delay=5, tries=5) - def get_all_control_services(self, ds_ip): - '''http://10.204.216.7:5998/services.json''' - - lst_ip_service_tuple = [] - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr( - 'Service', match=('service_type', 'xmpp-server')) - for elem in dct: - ip = elem['info']['ip-address'] - t = (ip, 'xmpp-server') - 
lst_ip_service_tuple.append(t) - except Exception as e: - raise - finally: - self.logger.info("Registered control services in discovery %s %s" % - (ds_ip, lst_ip_service_tuple)) - return lst_ip_service_tuple - - @retry_for_value(delay=5, tries=5) - def get_all_collector_services(self, ds_ip): - '''http://10.204.216.7:5998/services.json''' - - lst_ip_service_tuple = [] - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr('Service', match=('service_type', 'Collector')) - for elem in dct: - ip = elem['info']['ip-address'] - t = (ip, 'Collector') - lst_ip_service_tuple.append(t) - except Exception as e: - print e - raise - finally: - self.logger.info( - "Registered collector services in discovery %s %s" % - (ds_ip, lst_ip_service_tuple)) - return lst_ip_service_tuple - - @retry_for_value(delay=5, tries=5) - def get_all_api_services(self, ds_ip): - '''http://10.204.216.7:5998/services.json''' - - lst_ip_service_tuple = [] - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr('Service', match=('service_type', 'ApiServer')) - for elem in dct: - ip = elem['info']['ip-address'] - t = (ip, 'ApiServer') - lst_ip_service_tuple.append(t) - except Exception as e: - print e - finally: - self.logger.info("Registered api services %s %s" % - (ds_ip, lst_ip_service_tuple)) - return lst_ip_service_tuple - - @retry_for_value(delay=5, tries=5) - def get_all_ifmap_services(self, ds_ip): - '''http://10.204.216.7:5998/services.json''' - - lst_ip_service_tuple = [] - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr( - 'Service', match=('service_type', 'IfmapServer')) - for elem in dct: - ip = elem['info']['ip-address'] - t = (ip, 'IfmapServer') - lst_ip_service_tuple.append(t) - except Exception as e: - print e - raise - finally: - self.logger.info("Registered ifmap services in discovery %s %s" % - (ds_ip, lst_ip_service_tuple)) - return lst_ip_service_tuple - - @retry_for_value(delay=5, tries=5) - def 
get_all_dns_services(self, ds_ip): - '''http://10.204.216.7:5998/services.json''' - - lst_ip_service_tuple = [] - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr('Service', match=('service_type', 'dns-server')) - for elem in dct: - ip = elem['info']['ip-address'] - t = (ip, 'dns-server') - lst_ip_service_tuple.append(t) - except Exception as e: - print e - raise - finally: - self.logger.info("Registered dns services in discovery %s %s" % - (ds_ip, lst_ip_service_tuple)) - return lst_ip_service_tuple - - @retry_for_value(delay=5, tries=5) - def get_all_opserver(self, ds_ip): - '''http://10.204.216.7:5998/services.json''' - - lst_ip_service_tuple = [] - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr('Service', match=('service_type', 'OpServer')) - for elem in dct: - ip = elem['info']['ip-address'] - t = (ip, 'OpServer') - lst_ip_service_tuple.append(t) - except Exception as e: - print e - raise - finally: - self.logger.info("Registered contrail-analytics-apis in discovery %s %s" % - (ds_ip, lst_ip_service_tuple)) - return lst_ip_service_tuple - - def get_all_services_by_service_name(self, ds_ip, service=None): - '''http://10.204.216.7:5998/services.json''' - - lst_ip_service_tuple = [] - dct = [] - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr('Service', match=('service_type', service)) - for elem in dct: - ip = elem['info']['ip-address'] - t = (ip, service) - lst_ip_service_tuple.append(t) - except Exception as e: - print e - raise - finally: - self.logger.info("Registered in discovery %s %s..%s" % - (ds_ip, service, lst_ip_service_tuple)) - return dct - - def publish_service_to_discovery(self, ds_ip, service=None, ip=None, port=20003, admin_state=None): - '''http://discovery-server-ip:5998/publish''' - - obj = None - try: - if not admin_state: - obj = self.ds_inspect[ds_ip].publish_service( - service=service, ip=ip, port=port) - else: - obj = self.ds_inspect[ds_ip].publish_service( 
- service=service, ip=ip, port=port, admin_state=admin_state) - except Exception as e: - print e - raise - finally: - return obj - - def update_service( - self, - ds_ip, - service=None, - ip=None, - admin_state=None, - oper_state=None, - oper_state_reason=None): - try: - data = { - "service-type": service, - } - if oper_state: - data['oper-state'] = oper_state - if oper_state_reason: - data['oper-state-reason'] = oper_state_reason - if admin_state: - data['admin-state'] = admin_state - headers = { - 'Content-type': 'application/json', - } - service_id = self.get_service_id(ds_ip, (ip, service)) - service_id = service_id.split(':')[0] - url = "http://%s:%s/service/%s" % (ds_ip, str(self.ds_port), service_id) - json_body = json.dumps(data) - resp = DiscoveryServerUtils.put_url_http(url, json_body) - except Exception as e: - print str(e) - finally: - if resp: - print 'resp: %s' % (resp) - return resp - # end update_service - - def subscribe_service_from_discovery(self, ds_ip, service=None, instances=None, client_id=None): - '''http://discovery-server-ip:5998/subscribe''' - - obj = None - try: - obj = self.ds_inspect[ds_ip].subscribe_service( - service=service, instances=instances, client_id=client_id) - except Exception as e: - print e - raise - finally: - return obj - - def cleanup_service_from_discovery(self, ds_ip): - '''http://discovery-server-ip:5998/cleanup''' - - obj = None - try: - obj = self.ds_inspect[ds_ip].cleanup_service() - except Exception as e: - print e - raise - finally: - return obj - - def get_service_status(self, ds_ip, service_tuple=()): - - ip = service_tuple[0] - svc = service_tuple[1] - status = None - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr('Service', match=('service_type', svc)) - for elem in dct: - if ip in elem['info']['ip-address']: - status = elem['status'] - self.logger.info("dct:%s" % (elem)) - except Exception as e: - raise - finally: - return status - - def get_service_admin_state(self, ds_ip, 
service_tuple=()): - - ip = service_tuple[0] - svc = service_tuple[1] - status = None - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr('Service', match=('service_type', svc)) - for elem in dct: - if ip in elem['info']['ip-address']: - status = elem['admin_state'] - except Exception as e: - raise - finally: - return status - - def get_service_id(self, ds_ip, service_tuple=(), service_status='up'): - - ip = service_tuple[0] - svc = service_tuple[1] - status = None - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr('Service', match=('service_type', svc)) - for elem in dct: - if ip == elem['info']['ip-address'] and elem['status'] == service_status: - status = elem['service_id'] - except Exception as e: - raise - finally: - return status - - def get_service_in_use(self, ds_ip, service_tuple=()): - - ip = service_tuple[0] - svc = service_tuple[1] - status = None - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr('Service', match=('service_type', svc)) - for elem in dct: - if ip in elem['info']['ip-address']: - status = elem['in_use'] - except Exception as e: - print e - finally: - return status - - def get_service_prov_state(self, ds_ip, service_tuple=()): - - ip = service_tuple[0] - svc = service_tuple[1] - status = None - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr('Service', match=('service_type', svc)) - for elem in dct: - if ip in elem['info']['ip-address']: - status = elem['prov_state'] - except Exception as e: - print e - finally: - return status - - def get_service_endpoint_by_service_id(self, ds_ip, service_id=None): - - t2 = () - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr('Service', match=('service_id', service_id)) - for elem in dct: - t1 = (elem['info']['ip-address'], elem['info']['port']) - t2 = (t1, elem['service_type']) - except Exception as e: - print e - finally: - return t2 - - def 
get_service_id_by_service_end_point(self, ds_ip, service_tuple=()): - '''Returns service id of a (service type,ip)''' - - ip = service_tuple[0] - service = service_tuple[1] - t2 = None - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr('Service', match=('service_type', service)) - for elem in dct: - if (ip == elem['info']['ip-address']): - t2 = elem['service_id'] - except Exception as e: - print e - finally: - return t2 - - def get_service_status_by_service_id(self, ds_ip, service_id=None): - - t2 = {} - try: - obj = self.ds_inspect[ds_ip].get_ds_services() - dct = obj.get_attr('Service', match=('service_id', service_id)) - for elem in dct: - t1 = {} - t1['prov_state'] = elem['prov_state'] - t2.update(t1) - t1['admin_state'] = elem['admin_state'] - t2.update(t1) - t1['status'] = elem['status'] - t2.update(t1) - t1['in_use'] = elem['in_use'] - t2.update(t1) - t1['ts_use'] = elem['ts_use'] - t2.update(t1) - except Exception as e: - print e - finally: - return t2 - - def get_hostname_from_hostdata_by_ip(self, hostip): - - for elem in self.inputs.host_data.values(): - if ((elem['host_control_ip'] == hostip) | (elem['host_data_ip'] == hostip) | (elem['host_ip'] == hostip)): - return elem['name'] - return None - - @retry_for_value(delay=5, tries=5) - def get_subscribed_service_id(self, ds_ip, client=(), service=None , - instance = ''): - '''Returns service id subscribed by a client''' - - client_ip = client[0] - client_svc = client[1] - service_id = [] - host_name = self.get_hostname_from_hostdata_by_ip(client_ip) -# host_name = socket.gethostbyaddr(client_ip)[0] - try: - obj = self.ds_inspect[ds_ip].get_ds_clients() - d_name = socket.gethostname().split('.') - d_name = '.'.join(d_name[1:]) - host = host_name.split('.')[0] - if instance: - client_id = '%s:%s:%s' % (host, client_svc,instance) - else: - client_id = '%s:%s' % (host, client_svc) - host_with_dname = host + '.' 
+ d_name - dct = obj.get_attr('Clients', match=('client_id', client_id)) - - if not dct: - client_id = '%s:%s:%s' % (host_with_dname, client_svc,instance) - dct = obj.get_attr('Clients', match=('client_id', client_id)) - if not dct: - client_id = '%s:%s' % (host, client_svc) - dct = obj.get_attr('Clients', match=('client_id', client_id)) - if not dct: - client_id = '%s:%s' % (host_with_dname, client_svc) - dct = obj.get_attr('Clients', match=('client_id', client_id)) - if not dct: - host_name = socket.gethostbyaddr(client_ip)[0] - # nodea18.englab.juniper.net:contrail-api - client_id = '%s:%s' % (host_name, client_svc) - dct = obj.get_attr('Clients', match=('client_id', client_id)) - - for elem in dct: - if service in elem['service_type']: - client_type = elem['client_type'] - if re.search(client_svc, client_type): - service_id.append(elem['service_id']) - except Exception as e: - print e - finally: - return service_id - - @retry_for_value(delay=5, tries=5) - def get_xmpp_server_of_agent(self, ds_ip, agent_ip=None): - - control_nodes = [] - try: - lst_service_id = self.get_subscribed_service_id( - ds_ip, client=(agent_ip, 'contrail-vrouter-agent'), service='xmpp-server',instance = '0') - for id in lst_service_id: - node = self.get_service_endpoint_by_service_id( - ds_ip, service_id=id) - control_nodes.append(node) - except Exception as e: - print e - finally: - return control_nodes - - def get_client_names_subscribed_to_a_service(self, ds_ip, service_tuple=()): - return self._get_clients_subscribed_to_a_service(ds_ip, service_tuple, - return_type='name') - - def get_all_clients_subscribed_to_a_service(self, ds_ip, service_tuple=()): - return self._get_clients_subscribed_to_a_service(ds_ip, service_tuple, - return_type='ip') - - @retry_for_value(delay=5, tries=5) - def _get_clients_subscribed_to_a_service(self, ds_ip, service_tuple=(), - return_type='ip'): - - clients = [] - client_names = [] - ip = service_tuple[0] - service = service_tuple[1] - try: - service_id = 
self.get_service_id( - ds_ip, service_tuple=service_tuple) - obj = self.ds_inspect[ds_ip].get_ds_clients() - dct = obj.get_attr('Clients', match=('service_id', service_id)) - for elem in dct: - client = elem['client_id'] - cl = client.split(':') - hostname = cl[0] - client_names.append(client) - client_ip = self.inputs.host_data[hostname]['host_control_ip'] - clients.append(client_ip) - except Exception as e: - self.logger.exception(e) - finally: - if return_type == 'ip': - return clients - else: - return client_names - # end - - @retry_for_value(delay=5, tries=5) - def get_all_client_dict_by_service_subscribed_to_a_service(self, ds_ip, subscriber_service, subscribed_service): - - ret = [] - try: - obj = self.ds_inspect[ds_ip].get_ds_clients() - dct = obj.get_attr( - 'Clients', match=('client_type', subscriber_service)) - for elem in dct: - if (elem['service_type'] == subscribed_service): - ret.append(elem) - except Exception as e: - print e - finally: - return ret - - def dict_match(self, args_dict={}): - - for k, v in args_dict.items(): - tmp = v - tmp_key = k - break - - result = True - try: - for k, v in args_dict.items(): - if (tmp == v): - result = result and True - else: - result = result and False - for elem in tmp: - for el in v: - if (elem == el): - tmp.remove(elem) - v.remove(el) - break - else: - self.logger.warn("Mismatch : \n%s\n\n\n %s" % - (tmp, v)) - except Exception as e: - self.logger.warn("Got exception as %s" % (e)) - result = result and False - finally: - return result - - def verify_registered_services_to_discovery_service(self, ds_ip=None): - - result = True - if not ds_ip: - ds_ip = self.inputs.cfgm_ip - - expected_control_services = self.get_all_control_services_by_topology() - expected_collector_services = self.get_all_collector_services_by_topology( - ) - expected_api_services = self.get_all_api_services_by_topology() - expected_ifmap_services = self.get_all_ifmap_services_by_topology() - expected_opserver = 
self.get_all_opserver_by_topology() - expected_dns_services = self.get_all_dns_services_by_topology() - registered_control_services = self.get_all_control_services(ds_ip) - registered_api_services = self.get_all_api_services(ds_ip) - registered_ifmap_services = self.get_all_ifmap_services(ds_ip) - registered_collector_services = self.get_all_collector_services(ds_ip) - registered_opserver = self.get_all_opserver(ds_ip) - registered_dns_services = self.get_all_dns_services(ds_ip) - # checking for missing registered service - diff = set(expected_control_services) ^ set( - registered_control_services) - if diff: - self.logger.warn("Inconsistency in registerd services %s" % (diff)) - result = result and False - else: - self.logger.info("%s registered to discover service" % - (expected_control_services)) - result = result and True - # checking for missing registered service - diff = set(expected_collector_services) ^ set( - registered_collector_services) - if diff: - self.logger.warn("Inconsistency in registerd services %s" % (diff)) - result = result and False - else: - self.logger.info("%s registered to discover service" % - (expected_collector_services)) - result = result and True - # checking for missing registered service - diff = set(expected_api_services) ^ set(registered_api_services) - if diff: - self.logger.warn("Inconsistency in registerd services %s" % (diff)) - result = result and False - else: - self.logger.info("%s registered to discover service" % - (expected_api_services)) - result = result and True - # checking for missing registered service - diff = set(expected_ifmap_services) ^ set(registered_ifmap_services) - if diff: - self.logger.warn("Inconsistency in registerd services %s" % (diff)) - result = result and False - else: - self.logger.info("%s registered to discover service" % - (expected_ifmap_services)) - result = result and True - - # checking for missing registered service - diff = set(expected_opserver) ^ set(registered_opserver) - if diff: - 
self.logger.warn("Inconsistency in registerd services %s" % (diff)) - result = result and False - else: - self.logger.info("%s registered to discover service" % - (registered_opserver)) - result = result and True - - # checking for missing registered service - diff = set(expected_dns_services) ^ set(registered_dns_services) - if diff: - self.logger.warn("Inconsistency in registerd services %s" % (diff)) - result = result and False - else: - self.logger.info("%s registered to discover service" % - (registered_dns_services)) - result = result and True - - # Verifying the service provision state/status/admin state - self.logger.info("Checking for control node service") - for service in registered_control_services: - t = {} - service_id = self.get_service_id_by_service_end_point( - ds_ip, service_tuple=service) - t = self.get_service_status_by_service_id( - ds_ip, service_id=service_id) - self.logger.info("Service health: %s" % (t)) - if (t['admin_state'] == 'up'and t['status'] == 'up'): - self.logger.info("%s service is up" % (service,)) - result = result and True - else: - self.logger.warn("%s service not up" % (service,)) - result = result and False - - self.logger.info("Checking for api service") - for service in registered_api_services: - t = {} - service_id = self.get_service_id_by_service_end_point( - ds_ip, service_tuple=service) - t = self.get_service_status_by_service_id( - ds_ip, service_id=service_id) - self.logger.info("Service health: %s" % (t)) - if (t['admin_state'] == 'up'and t['status'] == 'up'): - self.logger.info("%s service is up" % (service,)) - result = result and True - else: - self.logger.warn("%s service not up" % (service,)) - result = result and False - - self.logger.info("Checking for ifmap service") - for service in registered_ifmap_services: - t = {} - service_id = self.get_service_id_by_service_end_point( - ds_ip, service_tuple=service) - t = self.get_service_status_by_service_id( - ds_ip, service_id=service_id) - 
self.logger.info("Service health: %s" % (t)) - if (t['admin_state'] == 'up'and t['status'] == 'up'): - self.logger.info("%s service is up" % (service,)) - result = result and True - else: - self.logger.warn("%s service not up" % (service,)) - result = result and False - - self.logger.info("Checking for collector service") - for service in registered_collector_services: - t = {} - service_id = self.get_service_id_by_service_end_point( - ds_ip, service_tuple=service) - t = self.get_service_status_by_service_id( - ds_ip, service_id=service_id) - self.logger.info("Service health: %s" % (t)) - if (t['admin_state'] == 'up'and t['status'] == 'up'): - self.logger.info("%s service is up" % (service,)) - result = result and True - else: - self.logger.warn("%s service not up" % (service,)) - result = result and False - - self.logger.info("Checking for dns service") - for service in registered_dns_services: - t = {} - service_id = self.get_service_id_by_service_end_point( - ds_ip, service_tuple=service) - t = self.get_service_status_by_service_id( - ds_ip, service_id=service_id) - self.logger.info("Service health: %s" % (t)) - if (t['admin_state'] == 'up'and t['status'] == 'up'): - self.logger.info("%s service is up" % (service,)) - result = result and True - else: - self.logger.warn("%s service not up" % (service,)) - result = result and False - - self.logger.info("Checking for opserver") - for service in registered_opserver: - t = {} - service_id = self.get_service_id_by_service_end_point( - ds_ip, service_tuple=service) - t = self.get_service_status_by_service_id( - ds_ip, service_id=service_id) - self.logger.info("Service health: %s" % (t)) - if (t['admin_state'] == 'up'and t['status'] == 'up'): - self.logger.info("%s service is up" % (service,)) - result = result and True - else: - self.logger.warn("%s service not up" % (service,)) - result = result and False - - return result - - @retry(delay=1, tries=10) - def verify_bgp_connection(self, ds_ip=None): - - result = True - 
if not ds_ip: - ds_ip = self.inputs.cfgm_ip - for host in self.inputs.host_names: - control_ip = self.inputs.host_data[host]['host_control_ip'] - username = self.inputs.host_data[host]['username'] - password = self.inputs.host_data[host]['password'] - if host in self.inputs.compute_names: - host_ip = self.inputs.host_data[host]['host_ip'] - # Verify the connection between compute to all control nodes - inspect_h = self.agent_inspect[host_ip] - agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status() - - # Calculating the the expected list of bgp peer - expected_bgp_peer = [] - if (len(self.inputs.bgp_control_ips) <= 2): - expected_bgp_peer = self.inputs.bgp_control_ips[:] - else: - bgp_peer_tuple_from_discovery = self.get_xmpp_server_of_agent( - ds_ip, agent_ip=control_ip) - for t in bgp_peer_tuple_from_discovery: - ip = t[0][0] - expected_bgp_peer.append(ip) - self.logger.info("%s compute is subscribed to %s bgp nodes" % - (host, expected_bgp_peer)) - expected_bgp_peer_by_addr = [] - actual_bgp_peer = [] -# - # Get the actual list of controller IP - for i in xrange(len(agent_xmpp_status)): - actual_bgp_peer.append( - agent_xmpp_status[i]['controller_ip']) - - # Matching the expected and actual bgp contreoller - # sort the value for list match - actual_bgp_peer.sort() - expected_bgp_peer.sort() - self.logger.info("Actual XmppServer for %s : %s" % - (host, actual_bgp_peer)) - self.logger.info("Expected XmppServer for %s : %s" % - (host, expected_bgp_peer)) - - if actual_bgp_peer != expected_bgp_peer: - result = result and False - self.logger.error( - 'All the required BGP controller has not found in agent introspect for %s' % (host)) - for entry in agent_xmpp_status: - if entry['state'] != 'Established': - result = result and False - self.logger.error( - 'From agent %s connection to control node %s is not Established' % - (host, entry['controller_ip'])) - if host in self.inputs.bgp_names: - host_ip = self.inputs.host_data[host]['host_ip'] - # Verify the 
connection between all control nodes - cn_bgp_entry = self.cn_inspect[ - host_ip].get_cn_bgp_neigh_entry() - control_node_bgp_peer_list = [] - control_node_bgp_xmpp_peer_list = [] - if type(cn_bgp_entry) == type(dict()): - if cn_bgp_entry['peer'] in self.inputs.bgp_names: - if cn_bgp_entry['state'] != 'Established': - self.logger.error('For control node %s, with peer %s peering is not Established. Current State %s ' % ( - host, cn_bgp_entry['peer'], cn_bgp_entry['state'])) - if cn_bgp_entry['encoding'] == 'BGP': - control_node_bgp_peer_list = [ - cn_bgp_entry['peer']] - else: - control_node_bgp_xmpp_peer_list = [ - cn_bgp_entry['peer']] - else: - for entry in cn_bgp_entry: - if entry['peer'] in self.inputs.bgp_names: - if entry['state'] != 'Established': - result = result and False - self.logger.error('For control node %s, with peer %s peering is not Established. Current State %s ' % ( - host, entry['peer'], entry['state'])) - if entry['encoding'] == 'BGP': - control_node_bgp_peer_list.append( - entry['peer']) - else: - control_node_bgp_xmpp_peer_list.append( - entry['peer']) - - # Verify all required xmpp entry is present in control node - # Get computes subscribed to this control node - computes = self.get_client_names_subscribed_to_a_service( - ds_ip, service_tuple=(control_ip, 'xmpp-server')) - computes = self._get_short_client_names(computes) - self.logger.info("%s bgp node subscribed by %s xmpp-clients" % - (control_ip, computes)) - self.logger.info( - "From control node introspect, xmpp-clients: %s" % - (control_node_bgp_xmpp_peer_list)) - - if computes != control_node_bgp_xmpp_peer_list: - result = result and False - self.logger.error( - 'The required XMPP entry not present in control node introspect for %s' % (host)) - self.logger.error('Xmpp clients in discovery but not in '\ - 'control node : %s' % ( - set(computes)-set(control_node_bgp_xmpp_peer_list))) - self.logger.error('Xmpp clients in Control node but not '\ - 'in discovery clients list : %s' % ( - 
set(control_node_bgp_xmpp_peer_list)-set(computes))) - # Verify all required BGP entry is present in control node - control_node_bgp_peer_list.append(host) - - # sort the value for list match - control_node_bgp_peer_list.sort() - self.inputs.bgp_names.sort() - if not set(self.inputs.bgp_names).issubset(control_node_bgp_peer_list): - result = result and False - self.logger.error( - 'Expected BGP peers for %s:(%s), Got : (%s)' % (host, - self.inputs.bgp_names, control_node_bgp_peer_list)) - if not result: - self.logger.error( - 'One or more process-states are not correct on nodes') - return result - # end verify_control_connection - - def _get_short_client_names(self, client_names_list): - disc_list = [] - for item in client_names_list: - # Remove 'contrail-tor-agent' or 'contrail-vrouter-agent' - # client id for vrouter is of format nodek2:contrail-vrouter-agent:0 - item = item.split(':') - if 'contrail-tor-agent' in item: - val = '%s-%s' % (item[0], item[2]) - else: - val = '%s' % (item[0]) - - disc_list.append(val) - disc_list.sort() - return disc_list - # end _get_short_client_names - - - def verify_agents_connected_to_dns_service(self, ds_ip=None): - '''Verifies that agents connected to dns service''' - - result = True - if not ds_ip: - ds_ip = self.inputs.cfgm_ip - for ip in self.inputs.compute_ips: - dns_nodes = [] - try: - lst_service_id = self.get_subscribed_service_id( - ds_ip, client=(ip, 'contrail-vrouter-agent'), service='dns-server',instance = '0') - for id in lst_service_id: - node = self.get_service_endpoint_by_service_id( - ds_ip, service_id=id) - dns_nodes.append(node) - except Exception as e: - print e - if dns_nodes: - self.logger.info("Agent %s connected to dns-service %s" % - (ip, dns_nodes)) - result = result and True - else: - self.logger.warn( - "Agent %s not connected to any dns-service" % (ip)) - return False - self.logger.info( - "Verifying that dns-servers belongs to this test bed") - dns_ips = [] - for t in dns_nodes: - dns_ip = t[0][0] 
- dns_ips.append(dns_ip) - dns_ips.sort() - self.inputs.bgp_ips.sort() - if (set(dns_ips).issubset(self.inputs.bgp_control_ips)): - self.logger.info( - "Agent %s is connected to proper dns-servers %s" % - (ip, dns_ips)) - result = result and True - else: - self.logger.warn( - "Agent %s is not connected to proper dns-servers %s" % (ip, dns_ips)) - self.logger.info("Proper dns servers should be %s" % - (self.inputs.bgp_ips)) - result = result and False - return result - - def verify_agents_connected_to_collector_service(self, ds_ip=None): - '''Verifies that agents connected to collector service''' - - result = True - if not ds_ip: - ds_ip = self.inputs.cfgm_ip - for ip in self.inputs.compute_control_ips: - collector_nodes = [] - try: - lst_service_id = self.get_subscribed_service_id( - ds_ip, client=(ip, 'contrail-vrouter-agent'), service='Collector',instance = '0') - for id in lst_service_id: - node = self.get_service_endpoint_by_service_id( - ds_ip, service_id=id) - collector_nodes.append(node) - except Exception as e: - print e - if collector_nodes: - self.logger.info("Agent %s connected to collector-service %s" % - (ip, collector_nodes)) - result = result and True - else: - self.logger.warn( - "Agent %s not connected to any collector-service" % (ip)) - return False - self.logger.info( - "Verifying that collectors belongs to this test bed") - collector_ips = [] - for t in collector_nodes: - collector_ip = t[0][0] - collector_ips.append(collector_ip) - collector_ips.sort() - self.inputs.collector_control_ips.sort() - if (set(collector_ips).issubset(self.inputs.collector_control_ips)): - self.logger.info( - "Agent %s is connected to proper collectors %s" % - (ip, collector_ips)) - result = result and True - else: - self.logger.warn( - "Agent %s is not connected to proper collectors %s" % - (ip, collector_ips)) - self.logger.info("Proper collectors should be %s" % - (self.inputs.collector_ips)) - result = result and False - return result - - def 
verify_dns_agent_connected_to_collector_service(self, ds_ip=None): - '''Verifies that dns agents connected to collector service''' - - result = True - if not ds_ip: - ds_ip = self.inputs.cfgm_ip - for ip in self.inputs.bgp_control_ips: - collector_nodes = [] - try: - lst_service_id = self.get_subscribed_service_id( - ds_ip, client=(ip, 'contrail-dns'), service='Collector') - for id in lst_service_id: - node = self.get_service_endpoint_by_service_id( - ds_ip, service_id=id) - collector_nodes.append(node) - except Exception as e: - print e - if collector_nodes: - self.logger.info( - "contrail-dns %s connected to collector-service %s" % - (ip, collector_nodes)) - result = result and True - else: - self.logger.warn( - "contrail-dns %s not connected to any collector-service" % (ip)) - return False - self.logger.info( - "Verifying that collectors belongs to this test bed") - collector_ips = [] - for t in collector_nodes: - collector_ip = t[0][0] - collector_ips.append(collector_ip) - collector_ips.sort() - self.inputs.collector_control_ips.sort() - if (set(collector_ips).issubset(self.inputs.collector_control_ips)): - self.logger.info( - "contrail-dns %s is connected to proper collectors %s" % - (ip, collector_ips)) - result = result and True - else: - self.logger.warn( - "contrail-dns %s is not connected to proper collectors %s" % - (ip, collector_ips)) - self.logger.info("Proper collectors should be %s" % - (self.inputs.collector_ips)) - result = result and False - return result - - def verify_control_nodes_connected_to_collector_service(self, ds_ip=None): - '''Verifies that dns agents connected to collector service''' - - result = True - if not ds_ip: - ds_ip = self.inputs.cfgm_ip - for ip in self.inputs.bgp_control_ips: - collector_nodes = [] - try: - lst_service_id = self.get_subscribed_service_id( - ds_ip, client=(ip, 'contrail-control'), service='Collector') - for id in lst_service_id: - node = self.get_service_endpoint_by_service_id( - ds_ip, service_id=id) - 
collector_nodes.append(node) - except Exception as e: - print e - if collector_nodes: - self.logger.info( - "contrail-control %s connected to collector-service %s" % - (ip, collector_nodes)) - result = result and True - else: - self.logger.warn( - "contrail-control %s not connected to any collector-servicet" % (ip)) - return False - self.logger.info( - "Verifying that collectors belongs to this test bed") - collector_ips = [] - for t in collector_nodes: - collector_ip = t[0][0] - collector_ips.append(collector_ip) - collector_ips.sort() - self.inputs.collector_control_ips.sort() - if (set(collector_ips).issubset(self.inputs.collector_control_ips)): - self.logger.info( - "contrail-control %s is connected to proper collectors %s" % - (ip, collector_ips)) - result = result and True - else: - self.logger.warn( - "contrail-control %s is not connected to proper collectors %s" % (ip, collector_ips)) - self.logger.info("Proper collectors should be %s" % - (self.inputs.collector_ips)) - result = result and False - return result - - def verify_control_nodes_subscribed_to_ifmap_service(self, ds_ip=None): - '''Verifies that control nodes subscribed to ifmap service''' - - result = True - if not ds_ip: - ds_ip = self.inputs.cfgm_ip - for host in self.inputs.bgp_names: - host_ip = self.inputs.host_data[host]['host_ip'] - control_ip = self.inputs.host_data[host]['host_control_ip'] - subscribed_ifmap_nodes_from_discovery = [] - subscribed_ifmap_nodes_from_cn_introspect = [] - try: - lst_service_id = self.get_subscribed_service_id( - ds_ip, client=(control_ip, 'contrail-control'), service='IfmapServer') - for id in lst_service_id: -# uid = (id,'IfmapServer') - endpoint = self.get_service_endpoint_by_service_id( - ds_ip, service_id=id) - node = endpoint - subscribed_ifmap_nodes_from_discovery.append(node) - l = self.cn_inspect[host_ip].get_if_map_peer_server_info( - match='ds_peer_info') - for elem in subscribed_ifmap_nodes_from_discovery: - result1 = True - for elem1 in 
l['IFMapDSPeerInfo']['ds_peer_list']: - if (elem[0][0] == elem1['host'] and elem[0][1] == elem1['port']): - self.logger.info( - "contrail-control %s connected to ifmapservice %s" % (control_ip, elem1)) - result = result and True - result1 = True - break - else: - result1 = False - continue - if not result1: - self.logger.warn( - "contrail-control %s not connected to any ifmapservice" % (control_ip)) - result = result and False - except Exception as e: - result = result and False - self.logger.warn("Got exception as %s" % e) - return result - - def verify_dns_agent_subscribed_to_ifmap_service(self, ds_ip=None): - '''Verifies that dns agent subscribed to ifmap service''' - - if not ds_ip: - ds_ip = self.inputs.cfgm_ip - result = True - result1 = True - for ip in self.inputs.bgp_control_ips: - subscribed_ifmap_nodes_from_discovery = [] - subscribed_ifmap_nodes_from_cn_introspect = [] - try: - lst_service_id = self.get_subscribed_service_id( - ds_ip, client=(ip, 'contrail-dns'), service='IfmapServer') - for id in lst_service_id: - node = self.get_service_endpoint_by_service_id( - ds_ip, service_id=id) - subscribed_ifmap_nodes_from_discovery.append(node) - for elem in subscribed_ifmap_nodes_from_discovery: - # if (self.inputs.cfgm_control_ip in elem[0][0]): - if (elem[0][0] in self.inputs.cfgm_control_ips): - self.logger.info( - "Dns agent %s connected to ifmapservice %s" % - (ip, subscribed_ifmap_nodes_from_discovery)) - result = result and True - result1 = True - break - else: - result1 = False - if not result1: - self.logger.warn( - "Dns agent %s not connected to any ifmapservice" % (ip)) - result = result and False - - except Exception as e: - self.logger.warn("Got exception as %s" % (e)) - result = result and False - return result - - def verify_ApiServer_subscribed_to_collector_service(self, ds_ip=None): - '''Verifies that ApiServer subscribed to collector service''' - - result = True - if not ds_ip: - ds_ip = self.inputs.cfgm_ip - ip = self.inputs.cfgm_control_ip 
- collector_nodes = [] - try: - lst_service_id = self.get_subscribed_service_id( - ds_ip, client=(ip, 'contrail-api'), service='Collector') - for id in lst_service_id: - node = self.get_service_endpoint_by_service_id( - ds_ip, service_id=id) - collector_nodes.append(node) - except Exception as e: - print e - if collector_nodes: - self.logger.info("contrail-api %s connected to collector-service %s" % - (ip, collector_nodes)) - result = result and True - else: - self.logger.warn( - "contrail-api %s not connected to any collector-servicet" % (ip)) - return False - self.logger.info("Verifying that collectors belongs to this test bed") - collector_ips = [] - for t in collector_nodes: - collector_ip = t[0][0] - collector_ips.append(collector_ip) - collector_ips.sort() - self.inputs.collector_control_ips.sort() - if (set(collector_ips).issubset(self.inputs.collector_control_ips)): - self.logger.info( - "contrail-api %s is connected to proper collectors %s" % - (ip, collector_ips)) - result = result and True - else: - self.logger.warn( - "contrail-api %s is not connected to proper collectors %s" % - (ip, collector_ips)) - self.logger.info("Proper collectors should be %s" % - (self.inputs.collector_ips)) - result = result and False - return result - - - def verify_daemon_subscribed_to_discovery_service(self, daemon_name, disc_svc_name): - '''Verifies that daemon is subscribed to discovery service''' - - result = True - dct = [] - for ip in self.inputs.cfgm_ips: - try: - dct = self.get_all_client_dict_by_service_subscribed_to_a_service( - ip, daemon_name, disc_svc_name) - if not dct: - self.logger.error( - "No %s connected to %s as per discovery %s" % (daemon_name, disc_svc_name, ip)) - result = result and False - else: - for elem in dct: - svc_id = elem['service_id'] - node = self.get_service_endpoint_by_service_id( - ip, service_id=svc_id) - self.logger.info( - "%s is connected to %s as per discovery %s" % - (elem['client_id'], node, ip)) - result = result and True - 
except Exception as e: - self.logger.warn( - "Got exception in verify_daemon_subscribed_to_discovery_service (%s, %s, %s) as %s" % (daemon_name, disc_svc_name, ip, e)) - return result - - def verify_Schema_subscribed_to_collector_service(self): - '''Verifies that Schema subscribed to collector service''' - - return self.verify_daemon_subscribed_to_discovery_service('contrail-schema', 'Collector') - - def verify_ServiceMonitor_subscribed_to_collector_service(self): - '''Verifies that ServiceMonitor subscribed to collector service''' - - return self.verify_daemon_subscribed_to_discovery_service('contrail-svc-monitor', 'Collector') - - def verify_webui_subscribed_to_opserver_service(self): - '''Verifies that WebUI subscribed to OpServer service''' - - return self.verify_daemon_subscribed_to_discovery_service('ContrailWebUI', 'OpServer') - - def verify_webui_subscribed_to_apiserver_service(self): - '''Verifies that WebUI subscribed to ApiServer service''' - - return self.verify_daemon_subscribed_to_discovery_service('ContrailWebUI', 'ApiServer') - - def cross_verification_objects_in_all_discovery(self): - - result = True - svc_obj_lst = [] - obj = {} - service_list = ['OpServer', 'dns-server', 'IfmapServer', - 'ApiServer', 'xmpp-server', 'Collector'] - for svc in service_list: - for ip in self.inputs.cfgm_ips: - client_obj_lst = [] - dct = self.get_all_services_by_service_name(ip, service=svc) - svc_obj_lst.append(dct) - obj[ip] = dct - try: - assert self.dict_match(obj) - except Exception as e: - result = result and False - return result - - def get_zookeeper_status(self, ip=None): - - zoo_keeper_status = {} - ips = [] - if not ip: - ips = self.inputs.cfgm_ips[:] - else: - ips = [ip] - for ds_ip in ips: - command = self.inputs.run_cmd_on_server( - ds_ip, '/usr/lib/zookeeper/bin/zkServer.sh status', password='c0ntrail123') - status = command.split(":")[-1] - zoo_keeper_status[ds_ip] = status - return zoo_keeper_status - - def modify_conf_file(self, service, section, 
option, value, username, password): - cmd_set = 'openstack-config --set ' - conf_file = '/etc/contrail/' + service + '.conf ' - cmd = cmd_set + conf_file + section + ' ' + option + ' ' + str(value) - for ip in self.inputs.cfgm_ips: - self.inputs.run_cmd_on_server(ip, cmd, username, password) - # end modify_conf_file - - def change_ttl_short_and_hc_max_miss(self, ttl_short=2, hc_max_miss=3000): - # Changing the hc_max_miss=3000 and verifying that the services are - # down after 25 mins - username = self.inputs.host_data[self.inputs.cfgm_ip]['username'] - password = self.inputs.host_data[self.inputs.cfgm_ip]['password'] - self.modify_conf_file('contrail-discovery', 'DEFAULTS', 'hc_max_miss', hc_max_miss, username, password) - self.modify_conf_file('contrail-discovery', 'DEFAULTS', 'ttl_short', ttl_short, username, password) - conf_file = '/etc/contrail/contrail-discovery.conf ' - cmd = 'cat ' + conf_file - for ip in self.inputs.cfgm_ips: - out_put = self.inputs.run_cmd_on_server(ip, cmd, username, password) - self.logger.info("%s" % (out_put)) - self.inputs.restart_service('contrail-discovery', [ip]) - time.sleep(10) - # end change_ttl_short_and_hc_max_miss diff --git a/tcutils/config/discovery_util.py b/tcutils/config/discovery_util.py deleted file mode 100644 index 356540184..000000000 --- a/tcutils/config/discovery_util.py +++ /dev/null @@ -1,144 +0,0 @@ -# -# OpServer Utils -# -# Utility functions for Operational State Server for VNC -# -# Created by Sandip Dey on 24/09/2013 -# -# Copyright (c) 2013, Contrail Systems, Inc. All rights reserved. 
-# - -import datetime -import time -import requests -import pkg_resources -import xmltodict -import json -import gevent -import logging as LOG - -LOG.basicConfig(format='%(levelname)s: %(message)s', level=LOG.INFO) - -try: - from pysandesh.gen_py.sandesh.ttypes import SandeshType -except: - class SandeshType(object): - SYSTEM = 1 - TRACE = 4 - - -def enum(**enums): - return type('Enum', (), enums) -# end enum - - -class DiscoveryServerUtils(object): - - TIME_FORMAT_STR = '%Y %b %d %H:%M:%S.%f' - DEFAULT_TIME_DELTA = 10 * 60 * 1000000 # 10 minutes in microseconds - USECS_IN_SEC = 1000 * 1000 - OBJECT_ID = 'ObjectId' - -# POST_HEADERS = {'Content-type': 'application/json; charset="UTF-8"', 'Expect':'202-accepted'} - POST_HEADERS = {'Content-type': 'application/json'} - - @staticmethod - def post_url_http(url, params): - try: - if int(pkg_resources.get_distribution("requests").version[0]) == 1: - response = requests.post(url, stream=True, - data=params, - headers=DiscoveryServerUtils.POST_HEADERS) - else: - response = requests.post(url, - data=params, - headers=DiscoveryServerUtils.POST_HEADERS) - except requests.exceptions.ConnectionError, e: - print "Connection to %s failed" % url - return None - print 'response: %s' % (response) - if response.status_code == 200: - return response.text - else: - print "HTTP error code: %d" % response.status_code - return None - # end post_url_http - - @staticmethod - def put_url_http(url, params): - try: - if int(pkg_resources.get_distribution("requests").version[0]) == 1: - response = requests.put(url, stream=True, - data=params, - headers=DiscoveryServerUtils.POST_HEADERS) - else: - response = requests.put(url, - data=params, - headers=DiscoveryServerUtils.POST_HEADERS) - except requests.exceptions.ConnectionError, e: - LOG.error("Connection to %s failed", url) - return None - LOG.info("response: %s" % response) - if response.status_code == 200: - return response.text - else: - LOG.error("HTTP error code: %d" % 
response.status_code) - return None - # end put_url_http - - @staticmethod - def get_url_http(url): - data = None - try: - if int(pkg_resources.get_distribution("requests").version[0]) == 1: - data = requests.get(url, stream=True) - else: - data = requests.get(url) - except requests.exceptions.ConnectionError, e: - print "Connection to %s failed" % url - if data.status_code == 200: - return data.text - else: - print "HTTP error code: %d" % response.status_code - return None - - # end get_url_http - - @staticmethod - def discovery_url(ip, port): - return "http://" + ip + ":" + port - # end discovery_url - - @staticmethod - def discovery_publish_service_url(discovery_ip, discovery_port): - return "http://" + discovery_ip + ":" + discovery_port + "/publish" - # end discovery_query_url - - @staticmethod - def discovery_subscribe_service_url(discovery_ip, discovery_port): - return "http://" + discovery_ip + ":" + discovery_port + "/subscribe" - # end discovery_query_url - - @staticmethod - def discovery_cleanup_service_url(discovery_ip, discovery_port): - return "http://" + discovery_ip + ":" + discovery_port + "/cleanup" - # end discovery_query_url - - @staticmethod - def messages_xml_data_to_dict(messages_dict, msg_type): - if msg_type in messages_dict: - # convert xml value to dict - try: - messages_dict[msg_type] = xmltodict.parse( - messages_dict[msg_type]) - except: - pass - # end messages_xml_data_to_dict - - @staticmethod - def messages_data_dict_to_str(messages_dict, message_type, sandesh_type): - data_dict = messages_dict[message_type] - return DiscoveryServerUtils._data_dict_to_str(data_dict, sandesh_type) - # end messages_data_dict_to_str - -# end class DiscoveryServerUtils diff --git a/tcutils/config/ds_introspect_utils.py b/tcutils/config/ds_introspect_utils.py deleted file mode 100755 index 6f40f88fd..000000000 --- a/tcutils/config/ds_introspect_utils.py +++ /dev/null @@ -1,137 +0,0 @@ -import sys -import urllib2 -import xmltodict -import json -import 
requests -import socket -from lxml import etree -from tcutils.verification_util import * -from ds_results import * -from discovery_util import DiscoveryServerUtils - - -class VerificationDsSrv (VerificationUtilBase): - - def __init__(self, ip, port=5998, logger=LOG): - super(VerificationDsSrv, self).__init__(ip, port, logger=logger) - - def get_ds_services(self): - '''http://10.204.216.7:5998/services''' - res = None - try: - services_dict = self.dict_get('services.json') - res = DsServicesResult(services_dict) - except Exception as e: - print e - finally: - return res - - def get_ds_clients(self): - '''http://10.204.216.7:5998/clients''' - res = None - try: - #import pdb; pdb.set_trace() - clients_dict = self.dict_get('clients.json') - res = DsClientsResult(clients_dict) - except Exception as e: - print e - finally: - return res - - def get_ds_stats(self): - '''http://10.204.216.7:5998/stats''' - res = None - try: - #import pdb; pdb.set_trace() - stats_dict = self.dict_get('stats') - res = DsStatsResult(stats_dict) - except Exception as e: - print e - finally: - return res - - def get_ds_config(self): - '''http://10.204.216.7:5998/config''' - res = None - try: - config_dict = self.dict_get('config') - res = DsConfigResult(config_dict) - except Exception as e: - print e - finally: - return res - - def publish_service(self, service='foo', ip=None, port=None, admin_state=None): - '''Used to publish service from test { "control-node": {"ip_addr": "192.168.2.0", "port":1682 }}''' - resp = None - try: - service_url = DiscoveryServerUtils.discovery_publish_service_url( - self._ip, str(self._port)) - print 'url: %s' % service_url - if not admin_state: - json_body = '{' + '"' + service + '"' + \ - ': {' + '"ip-address":' + '"' + \ - ip + '"' + ',"port":' + str(port) + '}}' - else: - json_body = '{' + '"' + service + '"' + \ - ': {' + '"ip-address":' + '"' + \ - ip + '"' + ',"port":' + str(port) + \ - '}, "service-type":' + '"' + service + \ - '",' + '"admin-state":' + '"' 
+ \ - admin_state + '"' + '}' - print 'json_body: %s' % json_body - resp = DiscoveryServerUtils.post_url_http(service_url, json_body) - if resp: - resp = json.loads(resp) - except Exception as e: - print str(e) - finally: - print 'resp: %s' % (resp) - return resp - - def subscribe_service(self, service='foo', instances=None, client_id=None): - '''POST http://discovery-server-ip:5998/subscribe - Content-Type: application/json or application/xml - Body: Service type, instance count, client ID - JSON: { "service": "control-node", "instances": 1, "client": "6c3f48bf-1098-46e8-8117-5cc745b45983"} - XML: 1UUID - - Response: TTL, List of - JSON: {"Apiservice": [{"ip_addr": "10.84.13.34", "port": "8082"}], "ttl": 357} - XML: 300192.168.2.01682''' - resp = None - try: - service_url = DiscoveryServerUtils.discovery_subscribe_service_url( - self._ip, str(self._port)) - print 'url: %s' % service_url - json_body = '{' + '"service": ' + '"' + service + '"' + \ - ', "instances": ' + \ - str(instances) + ', ' + '"client": "' + client_id + '"}' - print 'json_body: %s' % json_body - resp = DiscoveryServerUtils.post_url_http(service_url, json_body) - if resp: - resp = json.loads(resp) - except Exception as e: - print str(e) - finally: - print 'resp: %s' % (resp) - return resp - - def cleanup_service(self): - '''GET http://discovery-server-ip:5998/cleanup''' - resp = None - try: - service_url = DiscoveryServerUtils.discovery_cleanup_service_url( - self._ip, str(self._port)) - print 'url: %s' % service_url - resp = DiscoveryServerUtils.get_url_http(service_url) - if resp: - resp = json.loads(resp) - except Exception as e: - print str(e) - finally: - print 'resp: %s' % (resp) - return resp - -if __name__ == '__main__': - vns = VerificationDsSrv('127.0.0.1') diff --git a/tcutils/config/ds_results.py b/tcutils/config/ds_results.py deleted file mode 100644 index 7f5e0b7ad..000000000 --- a/tcutils/config/ds_results.py +++ /dev/null @@ -1,128 +0,0 @@ -import re -from 
tcutils.verification_util import * -from collections import defaultdict - - -def _dsResultGet(dct, p1, p2, match=None): - ret = None - try: -# if p2: -# res = dct.xpath(p1,p2) -# else: - res = dct.xpath(p1) - ret1 = res - if match: - ret2 = [] - if isinstance(ret1, list): - for elem in ret1: - if isinstance(elem, dict): - for k, v in elem.items(): - if isinstance(match, tuple): - if ((match[0] == k)and (match[1] == v)): - ret2.append(elem) - break - elif (isinstance(v, dict)): - if (match[0] in v.keys() and (match[1] in v.values()or (int(match[1]) in v.values()))): - ret2.append(elem) - break - else: - if(match in v): - ret2.append(elem) - break - elif (isinstance(v, dict)): - if(match in v.values()or int(match) in v.values()): - ret2.append(elem) - break - else: - if (match == elem): - ret2.append(elem) - else: - for k, v in ret1.items(): - if isinstance(match, tuple): - if (match[0] == k and match[1] == v): - ret2.append(ret1) - else: - if(match == v): - ret2.append(ret1) - ret = ret2 - else: - ret = ret1 - - except Exception as e: - print e - finally: - return ret - - -class DsServicesResult (Result): - - ''' - This class returns a generator flat results - ''' - - def get_attr(self, tier, attr=None, match=None): - if tier == "Service": - typ = 'services' - else: - raise Exception("Invalid Arguments - bad tier") - return _dsResultGet(self, typ, attr, match) - - @property - def info(self): - services = {'xmpp-server': 'control-node', - 'OpServer': 'analytics', - 'ApiServer': 'config'} - service_dict = defaultdict(list) - for service in self.xpath('services'): - svc_type = services.get(service['service_type'], None) - if not svc_type: - continue - service_dict[svc_type].append(service['info']['ip-address']) - return service_dict - -class DsClientsResult (Result): - - ''' - This class returns a vrouter flat results - ''' - - def get_attr(self, tier, attr=None, match=None): - if tier == "Clients": - typ = 'services' - else: - raise Exception("Invalid Arguments - bad 
tier") - return _dsResultGet(self, typ, attr, match) - - -class DsStatsResult (Result): - - ''' - This class returns a BGP-ROUTER UVE object - ''' - - def get_attr(self, tier, attr, match=None): - if tier == "Control": - typ = 'BgpRouterState' - # elif tier == "Agent": - # typ = 'VrouterAgent' - else: - raise Exception("Invalid Arguments - bad tier") - return _dsResultGet(self, typ, attr, match) - - -class DsConfigResult (Result): - - ''' - This class returns a VN UVE object - ''' - - def get_attr(self, tier, attr, match=None): - #import pdb; pdb.set_trace () - if tier == "Config": - typ = 'UveVirtualNetworkConfig' - elif tier == "Agent": - typ = 'UveVirtualNetworkAgent' - else: - raise Exception("Invalid Arguments - bad tier") - - return _dsResultGet(self, typ, attr, match) diff --git a/tcutils/config/svc_mon_introspect_utils.py b/tcutils/config/svc_mon_introspect_utils.py deleted file mode 100755 index 0e22f6664..000000000 --- a/tcutils/config/svc_mon_introspect_utils.py +++ /dev/null @@ -1,45 +0,0 @@ -import logging as LOG - -from tcutils.verification_util import * -from svc_mon_results import * - -LOG.basicConfig(format='%(levelname)s: %(message)s', level=LOG.DEBUG) - - -class SvcMonInspect(VerificationUtilBase): - - def __init__(self, ip, logger=LOG, args=None): - super(SvcMonInspect, self).__init__( - ip, 8088, XmlDrv, logger=logger, args=args) - self._cache = { - 'si': {}, - } - - def update_cache(self, otype, fq_path, d): - self._cache[otype]['::'.join(fq_path)] = d - - def try_cache(self, otype, fq_path, refresh): - p = None - try: - if not (refresh or self.get_force_refresh()): - p = self._cache[otype]['::'.join(fq_path)] - except KeyError: - pass - return p - - def get_service_instance(self, name, refresh=False): - ''' - method: get_service_instance find a service instance by name - returns CsSvcInstance object, None if not found - - ''' - obj = self.try_cache('si', [name], refresh) - if not obj: - # cache miss - xml_data = 
self.dict_get('Snh_ServiceInstanceList?si_name=%s'%name) - instances = xml_data.xpath('./si_names/list/ServiceInstance') - if instances: - obj = CsSvcInstance(instances[0]) - self.update_cache('si', [name], obj) - return obj - diff --git a/tcutils/config/svc_mon_results.py b/tcutils/config/svc_mon_results.py deleted file mode 100644 index bfbb95d14..000000000 --- a/tcutils/config/svc_mon_results.py +++ /dev/null @@ -1,25 +0,0 @@ -import re -from tcutils.verification_util import * - -class CsSvcInstance (Result): - - def fqname(self): - return self.xpath('name') - - def get_vrouter_name(self, ha_state): - vrouter = 'none' - for vm in self.xpath('vm_list', 'list') or []: - if ha_state in vm['ha']: - vrouter = vm['vr_name'] - break - return None if vrouter.lower() == 'none' else vrouter - - def active_vrouter(self): - return self.get_vrouter_name('active') - - def standby_vrouter(self): - return self.get_vrouter_name('standby') - - def is_launched(self): - return self.xpath('si_state') == 'active' - diff --git a/tcutils/config/vcenter_verification.py b/tcutils/config/vcenter_verification.py deleted file mode 100644 index 2035752db..000000000 --- a/tcutils/config/vcenter_verification.py +++ /dev/null @@ -1,69 +0,0 @@ -from tcutils.util import retry -from tcutils.config import vmware_introspect_utils - -class VMWareVerificationLib: - '''Clas to hold verification helper functions for vcenter plugin introspect''' - def __init__(self,inputs): - self.inputs = inputs - self.vcntr_introspect = None - self.logger = self.inputs.logger - - def get_introspect(self): - try: - for ip in self.inputs.cfgm_ips: - vc_inspect = vmware_introspect_utils.\ - get_vcenter_plugin_introspect_elements(\ - vmware_introspect_utils.VMWareInspect(ip)) - if (vc_inspect['master'][0] == 'true'): - self.vcntr_introspect = vmware_introspect_utils.VMWareInspect(ip) - break - except Exception as e: - self.logger.exception(e) - - @retry(delay=10, tries=10) - def verify_vm_in_vcenter(self, 
vrouter_ip,vm_name, *args): - - #everytime verify_vm_in_vcenter should be called with introspect refreshed - self.get_introspect() - vrouter_details = vmware_introspect_utils.get_vrouter_details(self.vcntr_introspect, vrouter_ip) - for virtual_machine in vrouter_details.virtual_machines: - if virtual_machine.name == vm_name: - self.logger.info("Vcenter plugin verification:%s launched in vorouter %s in virtual network %s"\ - %(vm_name,vrouter_ip,virtual_machine.virtual_network)) - return True - self.logger.error("Vcenter plugin verification:%s NOT launched in vorouter %s "\ - %(vm_name,vrouter_ip)) - return False - - @retry(delay=10, tries=10) - def verify_vm_not_in_vcenter(self, vrouter_ip,vm_name, *args): - #everytime verify_vm_in_vcenter should be called with introspect refreshe - self.get_introspect() - vrouter_details = vmware_introspect_utils.get_vrouter_details(self.vcntr_introspect, vrouter_ip) - try: - for virtual_machine in vrouter_details.virtual_machines: - if virtual_machine.name == vm_name: - self.logger.error("Vcenter plugin verification:%s STILL in vorouter %s in virtual network %s"\ - %(vm_name,vrouter_ip,virtual_machine.virtual_network)) - return False - except Exception as e: - self.logger.info("Vcenter plugin verification:%s deleted in vorouter %s "\ - %(vm_name,vrouter_ip)) - return True - - self.logger.info("Vcenter plugin verification:%s deleted in vorouter %s "\ - %(vm_name,vrouter_ip)) - return True - -if __name__ == '__main__': - va = vmware_introspect_utils.VMWareInspect('10.204.216.14') - class Inputs: - def __init__(self): - self.cfgm_ips = ['10.204.216.7','10.204.216.14','10.204.216.15'] - r = vmware_introspect_utils.vrouter_details(va,'10.204.217.27') - import pprint - pprint.pprint(r) - inputs = Inputs() - vcenter = VMWareVerificationLib(inputs) - vcenter.verify_vm_in_vcenter('10.204.217.27','test_vm2') - diff --git a/tcutils/config/vmware_introspect_utils.py b/tcutils/config/vmware_introspect_utils.py deleted file mode 100755 index 
477d0451a..000000000 --- a/tcutils/config/vmware_introspect_utils.py +++ /dev/null @@ -1,231 +0,0 @@ -import logging as LOG - -from tcutils.verification_util import * -from vnc_api_results import * -from tcutils.util import retry - -LOG.basicConfig(format='%(levelname)s: %(message)s', level=LOG.DEBUG) - -def elem2dict(node, alist=False): - d = list() if alist else dict() - for e in node.iterchildren(): - #key = e.tag.split('}')[1] if '}' in e.tag else e.tag - if e.tag == 'list': - value = elem2dict(e, alist=True) - else: - value = e.text if e.text else elem2dict(e) - if type(d) == type(list()): - d.append(value) - else: - d[e.tag] = value - return d - -def get_vcenter_plugin_introspect_elements(vcenterclient): - - vcenterplugin = {} - inspect = vcenterclient.get_vcenter_plugin_struct() - children = inspect[0].getchildren() - for child in children: - if (child.tag == 'master'): - master = Master(child) - vcenterplugin['master'] = master.master - elif (child.tag == 'pluginSessions'): - session = PluginSessions(child) - vcenterplugin['pluginSessions'] = session.pluginsessions - elif (child.tag == 'vRouterStats'): - vrouter = VRouterStats(child) - vcenterplugin['vRouterStats'] = vrouter.vrouterstats - elif (child.tag == 'ApiServerInfo'): - api_info = ApiServerInfo(child) - vcenterplugin['ApiServerInfo'] = api_info.apiserverinfo - elif (child.tag == 'VCenterServerInfo'): - vcntr_info = VCenterServerInfo(child) - vcenterplugin['VCenterServerInfo'] = vcntr_info.vcenterserverinfo - else: - print 'Invalid element' - continue - return vcenterplugin - -def get_esxi_to_vrouter_mapping(vcenterclient,query_value): - vrouter = [] - inspect = vcenterclient.get_vcenter_plugin_vrouter_up(query_value) - try: - for elem in inspect[0].getchildren(): - vrouter.append(elem2dict(elem)) - except Exception as e: - LOG.exception(e) - finally: - return vrouter - -def get_vrouter_details(vcenterclient,query_value): - try: - inspect = 
vcenterclient.get_vcenter_plugin_vrouter_details(query_value) - return VRouterDetails(elem2dict(inspect[0])) - except Exception as e: - LOG.exception(e) - return None - -class VRouterDetails(Result): - '''Vrouter details objects''' - def __init__(self,d={}): - super(VRouterDetails, self).__init__(d) - self.virtual_machines = [] - self.state = self['state'] - self.esxiHost = self['EsxiHost'] - self.ip = self['ipAddr'] - self.virtual_networks = [VirtualNetworks(vn['name']) for vn in self['VirtualNetworks']['list']] - for element in self['VirtualNetworks']['list']: - net = element['name'] - for vm in element['VirtualMachineInterfaces']['list']: - self.virtual_machines.append(VirtualMachinesInVcenter(net,vm)) - -class VirtualNetworks(): - '''Represents one virtual network in the vcenter''' - def __init__(self,vn): - self.name = vn - -class VirtualMachinesInVcenter(): - '''Represents one vm in the vcenter introspect page''' - def __init__(self,vn,vm): - self.vm = vm - self.virtual_network = vn - self.macAddr = self.vm['macAddress'] - self.powerState = self.vm['poweredOn'] - self.name = self.vm['virtualMachine'] - self.ip_addr = self.vm['ipAddress'] - -class Master(): - '''Represent vcenter plugin master''' - def __init__(self,element): - self.return_list = [] - d ={} - d[element.tag] = element.text - self.return_list.append(VMWarePluginResult(d)) - - @property - def master(self): - return [ele.master() for ele in self.return_list] - -class PluginSessions(): - '''Represent vcenter plugin pluginsessions''' - def __init__(self,element): - self.return_list = [] - d ={} - d[element.tag] = element.text - self.return_list.append(VMWarePluginResult(d)) - - @property - def pluginsessions(self): - return [ele.pluginsessions() for ele in self.return_list] - -class VRouterStats(): - '''Represent vcenter plugin vRouterStats''' - def __init__(self,element): - self.return_list = [] - vstats ={} - stats = element.getchildren() - for stat in stats: - ele = stat.getchildren() - for ele1 
in ele: - d = {} - d[ele1.tag] = ele1.text - vstats.update(d) - vrouterStats={} - vrouterStats['vRouterStats'] = vstats - self.return_list.append(VMWarePluginResult(vrouterStats)) - - @property - def vrouterstats(self): - return [ele.vrouterstats() for ele in self.return_list] - -class ApiServerInfo(): - '''Represent vcenter plugin ApiServerInfo''' - def __init__(self,element): - self.return_list = [] - cfgm_info ={} - stats = element.getchildren() - for stat in stats: - ele = stat.getchildren() - for ele1 in ele: - d = {} - d[ele1.tag] = ele1.text - cfgm_info.update(d) - api_info={} - api_info['ApiServerInfo'] = cfgm_info - self.return_list.append(VMWarePluginResult(api_info)) - - @property - def apiserverinfo(self): - return [ele.apiserverinfo() for ele in self.return_list] - -class VCenterServerInfo(): - '''Represent vcenter plugin VCenterServerInfo''' - def __init__(self,element): - self.return_list = [] - cfgm_info ={} - stats = element.getchildren() - for stat in stats: - ele = stat.getchildren() - for ele1 in ele: - d = {} - d[ele1.tag] = ele1.text - cfgm_info.update(d) - api_info={} - api_info['VCenterServerInfo'] = cfgm_info - self.return_list.append(VMWarePluginResult(api_info)) - - @property - def vcenterserverinfo(self): - return [ele.vcenterserverinfo() for ele in self.return_list] - -class VMWareInspect (VerificationUtilBase): - - def __init__(self, ip, logger=LOG, args=None): - super(VMWareInspect, self).__init__( - ip, 8234,XmlDrv, logger=logger, args=args) - self.ip = ip - - def get_vcenter_plugin_struct(self): - doms = self.dict_get('Snh_VCenterPluginInfo') - plugin_structs = doms.xpath('./VCenterPlugin/VCenterPluginStruct') - return plugin_structs - - def get_vcenter_plugin_vrouter_up(self,query_val,*args): - path = 'Snh_vRoutersTotal?x=%s' %query_val - val = self.dict_get(path) - return val.xpath('./VirtualRouters/list') - - def get_vcenter_plugin_vrouter_details(self,query_val,*args): - path = 'Snh_vRouterDetail?x=%s' %query_val - val = 
self.dict_get(path) - return val.xpath('./VRouterInfo/VRouterInfoStruct') - -class VMWarePluginResult(Result): - ''' - Returns value from the below link - http://:8777/Snh_VCenterPluginInfo - ''' - - def master(self): - return self['master'] - - def pluginsessions(self): - return self['pluginSessions'] - - def vrouterstats(self): - return self['vRouterStats'] - - def apiserverinfo(self): - return self['ApiServerInfo'] - - def vcenterserverinfo(self): - return self['VCenterServerInfo'] - -if __name__ == '__main__': - va = VMWareInspect('10.204.216.61') - class Inputs: - def __init__(self): - self.cfgm_ips = ['10.204.216.61','10.204.216.62','10.204.216.63'] - r = get_vrouter_details(va,'10.204.216.183') - import pprint - pprint.pprint(r) diff --git a/tcutils/config/vnc_api_results.py b/tcutils/config/vnc_api_results.py deleted file mode 100644 index d2fd03fb2..000000000 --- a/tcutils/config/vnc_api_results.py +++ /dev/null @@ -1,787 +0,0 @@ -import re -from tcutils.verification_util import * - -class CsDomainResult (Result): - - ''' - CsDomainResult to provide access to vnc_introspect_utils.get_cs_domain - dict contrains: - - {u'domain': { - u'fq_name': [u'ted-domain'], - u'id_perms': {u'created': None, - u'enable': True, - u'last_modified': None, - u'permissions': {u'group': u'cloud-admin-group', - u'group_access': 7, - u'other_access': 7, - u'owner': u'cloud-admin', - u'owner_access': 7}, - u'uuid': {u'uuid_lslong': 13068984139654137108L, - u'uuid_mslong': 9504116366942620127L}}, - u'namespaces': [{u'attr': {}, - u'href': u'http://10.84.7.4:8082/namespace/c0552b1f-588e-4507-8962-b1837c8f883a', - u'to': [u'ted-domain', u'default-namespace'], - u'uuid': u'c0552b1f-588e-4507-8962-b1837c8f883a'}], - u'projects': [{u'attr': {}, - u'href': u'http://10.84.7.4:8082/project/0d779509-7d54-4842-9b34-f85557898b67', - u'to': [u'ted-domain', u'ted-eng'], - u'uuid': u'0d779509-7d54-4842-9b34-f85557898b67'}, - {u'attr': {}, - u'href': 
u'http://10.84.7.4:8082/project/1fcf3244-d4d9-407d-8637-54bb2522020e', - u'to': [u'ted-domain', u'default-project'], - u'uuid': u'1fcf3244-d4d9-407d-8637-54bb2522020e'}], - u'_type': u'domain', - u'href': u'http://10.84.7.4:8082/domain/83e5677b-1397-49df-b55e-5bd5234c8514', - u'name': u'ted-domain', - u'uuid': u'83e5677b-1397-49df-b55e-5bd5234c8514'}} - - ''' - - def fq_name(self): - return ':'.join(self.xpath('domain', 'fq_name')) - - def name(self): - return self.xpath('domain', 'name') - - def uuid(self): - return self.xpath('domain', 'uuid') - - def project_list(self): - return map(lambda x: ':'.join(x['to']), - self.xpath('domain', 'projects')) - - def project(self, name): - return filter(lambda x: x['to'] == [self.name(), name], - self.xpath('domain', 'projects')) - - def st_list(self): - return self.xpath('domain', 'service_templates') - - def st(self, st): - return filter(lambda x: x['to'][-1] == st, self.st_list()) - - def vdns_list(self): - return self.xpath('domain', 'virtual_DNSs') - - def vdns(self, vdns_name): - vdns_li = self.vdns_list() - if vdns_li: - return filter(lambda x: x['to'][-1] == vdns_name, vdns_li) - - -class CsProjectResult (Result): - - ''' - CsDomainResult to provide access to vnc_introspect_utils.get_cs_project - dict contrains: - - {u'project': {u'fq_name': [u'ted-domain', u'ted-eng'], - u'id_perms': {u'created': None, - u'enable': True, - u'last_modified': None, - u'permissions': {u'group': u'cloud-admin-group', - u'group_access': 7, - u'other_access': 7, - u'owner': u'cloud-admin', - u'owner_access': 7}, - u'uuid': {u'uuid_lslong': 11183836820092324711L, - u'uuid_mslong': 970408112711551042}}, - u'network_ipams': [{u'attr': {}, - u'href': u'http://10.84.7.4:8082/network-ipam/52310151-ec68-4052-9114-14ae1a47f2fb', - u'to': [u'ted-domain', - u'ted-eng', - u'default-network-ipam'], - u'uuid': u'52310151-ec68-4052-9114-14ae1a47f2fb'}], - u'network_policys': [{u'attr': {}, - u'href': 
u'http://10.84.7.4:8082/network-policy/c30461ae-e72a-44a6-845b-7510c7ae3897', - u'to': [u'ted-domain', - u'ted-eng', - u'default-network-policy'], - u'uuid': u'c30461ae-e72a-44a6-845b-7510c7ae3897'}], - u'security_groups': [{u'attr': {}, - u'href': u'http://10.84.7.4:8082/security-group/32dc02af-1b3c-4baa-a6eb-3c97cbdd2941', - u'to': [u'ted-domain', - u'ted-eng', - u'default-security-group'], - u'uuid': u'32dc02af-1b3c-4baa-a6eb-3c97cbdd2941'}], - u'service_templates': [{u'attr': {}, - u'href': u'http://10.84.7.4:8082/service-template/4264dd1e-d312-4e03-a60e-35b40da39e95', - u'to': [u'ted-domain', - u'ted-eng', - u'default-service-template'], - u'uuid': u'4264dd1e-d312-4e03-a60e-35b40da39e95'}], - u'_type': u'project', - u'virtual_networks': [{u'attr': {}, - u'href': u'http://10.84.7.4:8082/virtual-network/6a5c5c29-cfe6-4fea-9768-b0dea3b217bc', - u'to': [u'ted-domain', - u'ted-eng', - u'ted-back'], - u'uuid': u'6a5c5c29-cfe6-4fea-9768-b0dea3b217bc'}, - {u'attr': {}, - u'href': u'http://10.84.7.4:8082/virtual-network/926c8dcc-0b8b-444f-9f59-9ab67a8f9f48', - u'to': [u'ted-domain', - u'ted-eng', - u'ted-front'], - u'uuid': u'926c8dcc-0b8b-444f-9f59-9ab67a8f9f48'}, - {u'attr': {}, - u'href': u'http://10.84.7.4:8082/virtual-network/b312647f-0921-4ddf-9d59-0667a887989f', - u'to': [u'ted-domain', - u'ted-eng', - u'default-virtual-network'], - u'uuid': u'b312647f-0921-4ddf-9d59-0667a887989f'}], - u'href': u'http://10.84.7.4:8082/project/0d779509-7d54-4842-9b34-f85557898b67', - u'name': u'ted-eng', - u'parent_name': u'ted-domain', - u'uuid': u'0d779509-7d54-4842-9b34-f85557898b67'}} - ''' - - def fq_name(self): - return ':'.join(self.xpath('project', 'fq_name')) - - def policy_list(self): - return self.xpath('project', 'network_policys') - - def policy(self, policy): - return filter(lambda x: x['to'][-1] == policy, self.policy_list()) - - def vn_list(self): - return self.xpath('project', 'virtual_networks') - - def vn(self, vn): - if self.vn_list(): - return filter(lambda 
x: x['to'][-1] == vn, self.vn_list()) - return [] - - def fip_list(self): - if self.has_key('floating_ip_pool_refs'): - p = self.xpath('project', 'floating_ip_pool_refs') - else: - p = [] - return p - - def fip(self, fip_fq_name=[]): - return filter(lambda x: x['to'] == fip_fq_name, self.fip_list()) - - def secgrp_list(self): - return self.xpath('project', 'security_groups') - - def secgrp(self, secgrp): - secgrp_list = self.secgrp_list() - if secgrp_list: - return filter(lambda x: x['to'][-1] == secgrp, secgrp_list) - - def si_list(self): - return self.xpath('project', 'service_instances') - - def si(self, si): - si_list = self.si_list() - if si_list: - return filter(lambda x: x['to'][-1] == si, si_list) - - -class CsVdnsResult(Result): - - def fq_name(self): - return ':'.join(self.xpath('virtual-DNS', 'fq_name')) - - def vdns_data(self): - return ':'.join(self.xpath('virtual-DNS', 'virtual_DNS_data')) - - def vdns_records(self): - return ':'.join(self.xpath('virtual-DNS', 'virtual_DNS_records')) -# end of CsVdnsResult - - -class CsUseFipResult (Result): - - ''' - CsUseFipResult to provide access to vnc_introspect_utils.get_cs_use_fip_pool - dict contrains: - -{u'floating-ip-pool': {u'fq_name': [u'ted-domain', - u'ted-eng', - u'ted-front', - u'ted_fip_pool'], - u'id_perms': {u'created': None, - u'enable': True, - u'last_modified': None, - u'permissions': {u'group': u'cloud-admin-group', - u'group_access': 7, - u'other_access': 7, - u'owner': u'cloud-admin', - u'owner_access': 7}, - u'uuid': {u'uuid_lslong': 13214437371555268939L, - u'uuid_mslong': 18023639221065174839L}}, - u'project_back_refs': [{u'attr': {}, - u'href': u'http://10.84.7.4:8082/project/1fcf3244-d4d9-407d-8637-54bb2522020e', - u'to': [u'ted-domain', - u'default-project'], - u'uuid': u'1fcf3244-d4d9-407d-8637-54bb2522020e'}], - u'_type': u'floating-ip-pool', - u'href': u'http://10.84.7.4:8082/floating-ip-pool/fa20d460-d363-4f37-b763-1cc6be32c94b', - u'name': u'ted_fip_pool', - u'parent_name': 
u'ted-front', - u'uuid': u'fa20d460-d363-4f37-b763-1cc6be32c94b'}} - ''' - - -class CsAllocFipResult (Result): - - ''' - CsAllocFipResult to provide access to vnc_introspect_utils.get_cs_alloc_fip_pool - dict contrains: - -{u'floating-ip-pool': {u'fq_name': [u'ted-domain', - u'ted-eng', - u'ted-front', - u'ted_fip_pool'], - u'id_perms': {u'created': None, - u'enable': True, - u'last_modified': None, - u'permissions': {u'group': u'cloud-admin-group', - u'group_access': 7, - u'other_access': 7, - u'owner': u'cloud-admin', - u'owner_access': 7}, - u'uuid': {u'uuid_lslong': 13214437371555268939L, - u'uuid_mslong': 18023639221065174839L}}, - u'project_back_refs': [{u'attr': {}, - u'href': u'http://10.84.7.4:8082/project/1fcf3244-d4d9-407d-8637-54bb2522020e', - u'to': [u'ted-domain', - u'default-project'], - u'uuid': u'1fcf3244-d4d9-407d-8637-54bb2522020e'}], - u'_type': u'floating-ip-pool', - u'href': u'http://10.84.7.4:8082/floating-ip-pool/fa20d460-d363-4f37-b763-1cc6be32c94b', - u'name': u'ted_fip_pool', - u'parent_name': u'ted-front', - u'uuid': u'fa20d460-d363-4f37-b763-1cc6be32c94b'}} - ''' - pass - - -class CsIPAMResult (Result): - - ''' - CsIPAMResult to provide access to vnc_introspect_utils.get_cs_ipam - dict contrains: - - {u'network-ipam': {u'fq_name': [u'ted-domain', - u'ted-eng', - u'default-network-ipam'], - u'id_perms': {u'created': None, - u'enable': True, - u'last_modified': None, - u'permissions': {u'group': u'cloud-admin-group', - u'group_access': 7, - u'other_access': 7, - u'owner': u'cloud-admin', - u'owner_access': 7}, - u'uuid': {u'uuid_lslong': 10454003373031551739L, - u'uuid_mslong': 5922516436339146834}}, - u'network_ipam_mgmt': {u'dhcp_option_list': None, - u'ipam_method': u'dhcp'}, - u'_type': u'network-ipam', - u'virtual_network_back_refs': [{u'attr': {u'ipam_subnets': [{u'default_gateway': None, - u'subnet': {u'ip_prefix': u'192.168.1.0', - u'ip_prefix_len': 24}}]}, - u'href': 
u'http://10.84.7.4:8082/virtual-network/6a5c5c29-cfe6-4fea-9768-b0dea3b217bc', - u'to': [u'ted-domain', - u'ted-eng', - u'ted-back'], - u'uuid': u'6a5c5c29-cfe6-4fea-9768-b0dea3b217bc'}], - u'href': u'http://10.84.7.4:8082/network-ipam/52310151-ec68-4052-9114-14ae1a47f2fb', - u'name': u'default-network-ipam', - u'parent_name': u'ted-eng', - u'uuid': u'52310151-ec68-4052-9114-14ae1a47f2fb'}} - ''' - - def fq_name(self): - return ':'.join(self.xpath('network-ipam', 'fq_name')) - - -class CsPolicyResult (Result): - - ''' - CsPolicyResult to provide access to vnc_introspect_utils.get_cs_policy - dict contrains: - - {u'network-policy': {u'fq_name': [u'ted-domain', - u'ted-eng', - u'default-network-policy'], - u'id_perms': {u'created': None, - u'enable': True, - u'last_modified': None, - u'permissions': {u'group': u'cloud-admin-group', - u'group_access': 7, - u'other_access': 7, - u'owner': u'cloud-admin', - u'owner_access': 7}, - u'uuid': {u'uuid_lslong': 9537345350817167511L, - u'uuid_mslong': 14052464141133300902L}}, - u'_type': u'network-policy', - u'href': u'http://10.84.7.4:8082/network-policy/c30461ae-e72a-44a6-845b-7510c7ae3897', - u'name': u'default-network-policy', - u'parent_name': u'ted-eng', - u'uuid': u'c30461ae-e72a-44a6-845b-7510c7ae3897'}} - ''' - - def fq_name(self): - return ':'.join(self.xpath('network-policy', 'fq_name')) - - -class CsVNResult (Result): - - ''' - CsVNResult to provide access to vnc_introspect_utils.get_cs_vn - dict contrains: - -{u'virtual-network': {u'fq_name': [u'ted-domain', u'ted-eng', u'ted-back'], - u'id_perms': {u'created': None, - u'enable': True, - u'last_modified': None, - u'permissions': {u'group': u'cloud-admin-group', - u'group_access': 7, - u'other_access': 7, - u'owner': u'cloud-admin', - u'owner_access': 7}, - u'uuid': {u'uuid_lslong': 10910164567580612540L, - u'uuid_mslong': 7664102000529133546}}, - u'instance_ip_back_refs': [{u'attr': {}, - u'href': 
u'http://10.84.7.4:8082/instance-ip/9d4cbfbc-da80-4732-a98e-77607bd78704', - u'to': [u'9d4cbfbc-da80-4732-a98e-77607bd78704'], - u'uuid': u'9d4cbfbc-da80-4732-a98e-77607bd78704'}], - u'network_ipam_refs': [{u'attr': {u'ipam_subnets': [{u'default_gateway': None, - u'subnet': {u'ip_prefix': u'192.168.1.0', - u'ip_prefix_len': 24}}]}, - u'href': u'http://10.84.7.4:8082/network-ipam/52310151-ec68-4052-9114-14ae1a47f2fb', - u'to': [u'ted-domain', - u'ted-eng', - u'default-network-ipam'], - u'uuid': u'52310151-ec68-4052-9114-14ae1a47f2fb'}], - u'routing_instances': [{u'attr': {}, - u'href': u'http://10.84.7.4:8082/routing-instance/a68948af-46be-4f26-b73e-9ec725f57437', - u'to': [u'ted-domain', - u'ted-eng', - u'ted-back', - u'ted-back'], - u'uuid': u'a68948af-46be-4f26-b73e-9ec725f57437'}], - u'_type': u'virtual-network', - u'virtual_machine_interface_back_refs': [{u'attr': {}, - u'href': u'http://10.84.7.4:8082/virtual-machine-interface/864ecd37-cf1f-43d5-9f63-4f24831859eb', - u'to': [u'c707f91f-68e9-427a-a0ba-92563c0d067f', - u'864ecd37-cf1f-43d5-9f63-4f24831859eb'], - u'uuid': u'864ecd37-cf1f-43d5-9f63-4f24831859eb'}], - u'href': u'http://10.84.7.4:8082/virtual-network/6a5c5c29-cfe6-4fea-9768-b0dea3b217bc', - u'name': u'ted-back', - u'parent_name': u'ted-eng', - u'uuid': u'6a5c5c29-cfe6-4fea-9768-b0dea3b217bc'}} - ''' - _pat = None - - def _rpat(self): - if self._pat is None: - self._pat = re.compile('-interface/.*$') - return self._pat - - def sub(self, st, _id): - return self._rpat().sub('/%s' % _id, st) - - def fq_name(self): - return ':'.join(self.xpath('virtual-network', 'fq_name')) - - def fip_list(self): - return self.xpath('virtual-network', 'floating_ip_pools') - - def fip(self, fip): - return filter(lambda x: x['to'][-1] == fip, self.fip_list()) - - def vm_link_list(self): - return map(lambda x: self.sub(x['href'], x['to'][0]), - self.xpath('virtual-network', - 'virtual_machine_interface_back_refs')) - - def rts(self): - if 
self.xpath('virtual-network').has_key('route_target_list'): - for rt in self.xpath('virtual-network', 'route_target_list', - 'route_target'): - yield rt - - def ri_links(self): - if self.xpath('virtual-network').has_key('routing_instances'): - for ri in self.xpath('virtual-network', 'routing_instances'): - yield ri['href'] - - def ri_refs(self): - if self.xpath('virtual-network').has_key('routing_instances'): - for ri in self.xpath('virtual-network', 'routing_instances'): - yield ri['to'] - - def uuid(self): - return self.xpath('virtual-network', 'uuid') - - def route_table(self): - return self.xpath('virtual-network', 'route_table_refs', 0) - -class CsRtResult (Result): - - ''' - CsRtResult to provide access to vnc_introspect_utils.get_cs_route_targets - dict contrains: - - ''' - def fq_name(self): - return ':'.join(self.xpath('route-table', 'fq_name')) - - -class CsRiResult (Result): - - ''' - CsRiResult to provide access to vnc_introspect_utils.get_cs_routing_instances - dict contrains: - - ''' - - def rt_links(self): - if self.xpath('routing-instance').has_key('route_target_refs'): - for rt in self.xpath('routing-instance', 'route_target_refs'): - yield rt['href'] - - def get_rt(self): - target = list() - if self.xpath('routing-instance').has_key('route_target_refs'): - for rt in self.xpath('routing-instance', 'route_target_refs'): - target.append(rt['to'][0]) - return target - - -class CsAllocFipPoolResult (Result): - - ''' - CsVMResult to provide access to vnc_introspect_utils.get_cs_vm - dict contrains: - - ''' - pass - - -class CsVMResult (Result): - - ''' - CsVMResult to provide access to vnc_introspect_utils.get_cs_vm - dict contrains: - - ''' - - def fq_name(self): - return ':'.join(self.xpath('virtual-network', 'fq_name')) - - def vr_link(self): - return self.xpath('virtual-machine', 'virtual_router_back_refs', - 0, 'href') - - def vmi_links(self): - vmi_list = (self.xpath('virtual-machine', 'virtual_machine_interfaces') or - 
self.xpath('virtual-machine', 'virtual_machine_interface_back_refs')) - links = [] - for vmi in vmi_list: - links.append(vmi['href']) - return links -# return self.xpath ('virtual-machine', 'virtual_machine_interfaces', -# 0, 'href') - - -class CsVrOfVmResult (Result): - - def name(self): - return self.xpath('name') - - -class CsVmiOfVmResult (Result): - - def ip_link(self): - links = [] - instance_ips = self.xpath('virtual-machine-interface', - 'instance_ip_back_refs') - for iip in instance_ips: - links.append(iip['href']) - return links - - def fip_link(self): - if self.xpath('virtual-machine-interface').has_key( - 'floating_ip_back_refs'): - return self.xpath('virtual-machine-interface', - 'floating_ip_back_refs', 0, 'href') - - def properties(self, property=None): - if self.xpath('virtual-machine-interface').has_key( - 'virtual_machine_interface_properties'): - if property: - return self.xpath('virtual-machine-interface', - 'virtual_machine_interface_properties', property) - else: - return self.xpath('virtual-machine-interface', - 'virtual_machine_interface_properties') - - @property - def uuid(self): - return self.xpath('virtual-machine-interface', 'uuid') - - @property - def vn_fq_name(self): - return ':'.join(self.xpath('virtual-machine-interface', - 'virtual_network_refs', 0, 'to')) - - @property - def vn_uuid(self): - return self.xpath('virtual-machine-interface', - 'virtual_network_refs', 0, 'uuid') - - @property - def mac_addr(self): - return self.xpath('virtual-machine-interface', - 'virtual_machine_interface_mac_addresses', - 'mac_address', 0) - -class CsIipOfVmResult (Result): - - @property - def ip(self): - return self.xpath('instance-ip', 'instance_ip_address') - - @property - def vn_uuid(self): - return self.xpath('instance-ip', 'virtual_network_refs', 0, 'uuid') - - @property - def vn_fq_name(self): - return ':'.join(self.xpath('instance-ip', - 'virtual_network_refs', 0, 'to')) - -class CsFipOfVmResult (Result): - - def ip(self): - return 
self.xpath('floating-ip', 'floating_ip_address') - - -class CsFipIdResult (Result): - - ''' - CsFipIdResult to provide access to vnc_introspect_utils.get_cs_fip - dict contrains: - - ''' - - def fip(self): - return self.xpath('floating-ip', 'floating_ip_address') - - def vmi(self): - return [vmi['uuid'] for vmi in self.xpath('floating-ip', - 'virtual_machine_interface_refs')] - - -class CsSecurityGroupResult (Result): - - ''' - CsSecurityGroupResult to provide access to vnc_introspect_utils.get_cs_secgrp - ''' - - def fq_name(self): - return ':'.join(self.xpath('security-group', 'fq_name')) - - -class CsServiceInstanceResult (Result): - - ''' - CsServiceInstanceResult to provide access to vnc_introspect_utils.get_cs_si - ''' - - def fq_name(self): - return ':'.join(self.xpath('service-instance', 'fq_name')) - - def get_vms(self): - vms = list() - if self.xpath('service-instance').has_key('virtual_machine_back_refs'): - for vm in self.xpath('service-instance', 'virtual_machine_back_refs'): - vms.append(vm['uuid']) - return vms - - -class CsServiceTemplateResult (Result): - - ''' - CsServiceTemplateResult to provide access to vnc_introspect_utils.get_cs_st - ''' - - def fq_name(self): - return ':'.join(self.xpath('service-template', 'fq_name')) - - -class CsGlobalVrouterConfigResult (Result): - - ''' - CsGlobalVrouterConfigResult to provide access to vnc_introspect_utils.get_global_vrouter_config - ''' - - def get_link_local_service(self, name='metadata'): - link_local_service = {} - try: - p = self.xpath('global-vrouter-config', 'linklocal_services') - for elem in p['linklocal_service_entry']: - if (elem['linklocal_service_name'] == name): - link_local_service['name'] = elem['linklocal_service_name'] - link_local_service['service_ip'] = elem[ - 'linklocal_service_ip'] - link_local_service['service_port'] = elem[ - 'linklocal_service_port'] - link_local_service['fabric_service_ip'] = elem[ - 'ip_fabric_service_ip'] - link_local_service['fabric_DNS_service_name'] = 
elem[ - 'ip_fabric_DNS_service_name'] - link_local_service['ip_fabric_service_port'] = elem[ - 'ip_fabric_service_port'] - except Exception as e: - print e - finally: - return link_local_service - -class CsLogicalRouterResult(Result): - ''' - CsLogicalRouterResult access logical router dict - ''' - def get_rt(self): - target = list() - if self.xpath('logical-router').has_key('route_target_refs'): - for rt in self.xpath('logical-router', 'route_target_refs'): - target.append(rt['to'][0]) - return target - - def fq_name(self): - return ':'.join(self.xpath('logical-router', 'fq_name')) - - def uuid(self): - return self.xpath('logical-router', 'uuid') - -class CsTableResult(Result): - ''' - CsTableResult access Route table dict - ''' - def get_route(self): - if self.xpath('route-table').has_key('routes'): - return self.xpath('route-table', 'routes', 'route') - - def fq_name(self): - return ':'.join(self.xpath('route-table', 'fq_name')) - - def uuid(self): - return self.xpath('route-table', 'uuid') - -class CsLbPool(Result): - ''' - CsLbPool access Load Balancer Pool dict - ''' - def fq_name(self): - return ':'.join(self.xpath('loadbalancer-pool', 'fq_name')) - - def uuid(self): - return self.xpath('loadbalancer-pool', 'uuid') - - def name(self): - return self.xpath('loadbalancer-pool', 'name') - - def members(self): - members = list() - for member in self.xpath('loadbalancer-pool', 'loadbalancer_members'): - members.append(member['uuid']) - return members - - def hmons(self): - hmons = list() - for hmon in self.xpath('loadbalancer-pool', - 'loadbalancer_healthmonitor_refs'): - hmons.append(hmon['uuid']) - return hmons - - def vip(self): - return self.xpath('loadbalancer-pool', 'virtual_ip_back_refs', 0,'uuid') - - def si(self): - return self.xpath('loadbalancer-pool', 'service_instance_refs',0,'uuid') - - def properties(self): - return self.xpath('loadbalancer-pool', 'loadbalancer_pool_properties') - - def custom_attrs(self): - custom_attr = dict() - kvpairs = 
self.xpath('loadbalancer-pool', - 'loadbalancer_pool_custom_attributes', - 'key_value_pair') or [] - for dct in kvpairs: - custom_attr[dct['key']] = dct['value'] - return custom_attr - -class CsLbMember(Result): - ''' - CsLbMember access Load Balancer Member dict - ''' - def fq_name(self): - return ':'.join(self.xpath('loadbalancer-member', 'fq_name')) - - def uuid(self): - return self.xpath('loadbalancer-member', 'uuid') - - def ip(self): - return self.xpath('loadbalancer-member', - 'loadbalancer_member_properties', - 'address') - -class CsLbVip(Result): - ''' - CsLbVip access Load Balancer Vip dict - ''' - def fq_name(self): - return ':'.join(self.xpath('virtual-ip', 'fq_name')) - - def uuid(self): - return self.xpath('virtual-ip', 'uuid') - - def ip(self): - return self.xpath('virtual-ip', 'virtual_ip_properties', 'address') - - def vmi(self): - return self.xpath('virtual-ip', - 'virtual_machine_interface_refs', - 0, 'uuid') - -class CsLbHealthMonitor(Result): - ''' - CsLbHealthMonitor access Load Balancer Health Monitor dict - ''' - def fq_name(self): - return ':'.join(self.xpath('loadbalancer-healthmonitor', 'fq_name')) - - def uuid(self): - return self.xpath('loadbalancer-healthmonitor', 'uuid') - - def properties(self): - return self.xpath('loadbalancer-healthmonitor', 'loadbalancer_healthmonitor_properties') - -class CsVrouters(Result): - def __iter__(self): - for vrouter in self.xpath('virtual-routers'): - yield vrouter - -class CsVrouter(Result): - def is_tor_agent(self): - if 'tor-agent' in self.xpath('virtual-router', 'virtual_router_type'): - return True - return False - - def is_tsn(self): - if 'tor-service-node' in self.xpath('virtual-router', 'virtual_router_type'): - return True - return False - - @property - def ip(self): - return self.xpath('virtual-router', 'virtual_router_ip_address') diff --git a/tcutils/config/vnc_introspect_utils.py b/tcutils/config/vnc_introspect_utils.py deleted file mode 100755 index 72a284675..000000000 --- 
a/tcutils/config/vnc_introspect_utils.py +++ /dev/null @@ -1,740 +0,0 @@ -import logging as LOG - -from tcutils.verification_util import * -from vnc_api_results import * - -LOG.basicConfig(format='%(levelname)s: %(message)s', level=LOG.DEBUG) - - -class VNCApiInspect (VerificationUtilBase): - - def __init__(self, ip, logger=LOG, args=None): - super(VNCApiInspect, self).__init__( - ip, 8082, logger=logger, args=args) - self._cache = { - 'domain': {}, - 'project': {}, - 'ipam': {}, - 'policy': {}, - 'vn': {}, - 'fip_alloc_pool': {}, - 'fip_use_pool': {}, - 'vm': {}, - 'vr': {}, - 'vmi': {}, - 'iip': {}, - 'fip': {}, - 'ri': {}, - 'rt': {}, - 'secgrp': {}, - 'si': {}, - 'st': {}, - 'dns': {}, - 'dns_rec': {}, - 'lb_pool': {}, - 'lb_vip': {}, - 'lb_member': {}, - 'lb_healthmonitor': {}, - 'lr': {}, - 'table': {}, - } - - def update_cache(self, otype, fq_path, d): - self._cache[otype]['::'.join(fq_path)] = d - - def try_cache(self, otype, fq_path, refresh): - p = None - try: - if not (refresh or self.get_force_refresh()): - p = self._cache[otype]['::'.join(fq_path)] - except KeyError: - pass - return p - - def try_cache_by_id(self, otype, uuid, refresh): - if not (refresh or self.get_force_refresh()): - for p in self._cache[otype].values(): - if p.uuid() == uuid: - return p - return None - - def get_cs_domain(self, domain='default-domain', refresh=False): - ''' - method: get_cs_domain find a domain by domin name - returns CsDomainResult object, None if not found - - ''' - d = self.try_cache('domain', [domain], refresh) - if not d: - # cache miss - doms = self.dict_get('domains') - mydom = filter(lambda x: x['fq_name'][-1] == domain, - doms['domains']) - if mydom: - dd = self.dict_get(mydom[-1]['href']) - # cache set - if dd: - d = CsDomainResult(dd) - self.update_cache('domain', [domain], d) - return d - - def get_cs_project(self, domain='default-domain', project='admin', - refresh=True): - ''' - method: get_cs_project find a project by domin & project name - returns 
None if not found, a dict w/ project attrib. eg: - ''' - p = self.try_cache('project', [domain, project], refresh) - pp = None - if not p: - # cache miss - dom = self.get_cs_domain(domain, refresh=True) - if dom: - myproj = dom.project(project) - # myproj = filter (lambda x: x['to'] == [domain, project], - # dom['domain']['_projects']) - if 1 == len(myproj): - pp = self.dict_get(myproj[0]['href']) - if pp: - p = CsProjectResult(pp) - self.update_cache('project', [domain, project], p) - return p - - def get_cs_ipam(self, domain='default-domain', project='admin', - ipam='default-network-ipam', refresh=False): - ''' - method: get_cs_ipam find an ipam - returns None if not found, a dict w/ attrib. eg: - - ''' - p = self.try_cache('ipam', [domain, project, ipam], refresh) - pp = None - if not p: - # cache miss - proj = self.get_cs_project(domain, project, refresh) - if proj: - myipam = filter(lambda x: x['to'] == [domain, project, ipam], - proj['project']['network_ipams']) - if 1 == len(myipam): - pp = self.dict_get(myipam[0]['href']) - if pp: - p = CsIPAMResult(pp) - self.update_cache('ipam', [domain, project, ipam], p) - return p - - def get_cs_policy(self, domain='default-domain', project='admin', - policy='default-network-policy', refresh=False): - ''' - method: get_cs_ipam find an ipam - returns None if not found, a dict w/ attrib. eg: - - ''' - p = self.try_cache('policy', [domain, project, policy], refresh) - if not p: - # cache miss - proj = self.get_cs_project(domain, project, refresh) - if proj: - mypolicy = proj.policy(policy) - if 1 == len(mypolicy): - pp = self.dict_get(mypolicy[0]['href']) - if pp: - p = CsPolicyResult(pp) - self.update_cache('policy', [domain, project, policy], p) - return p - - def get_cs_ri_by_id(self, ri_id=''): - ''' - method: get_cs_ri find a ri - returns None if not found, a dict w/ attrib. 
eg: - - ''' - p = None - if ri_id: - pp = None - try: - pp = self.dict_get('routing-instance/' + ri_id) - except: - self.log.debug("Rounting instance ID: % not found", ri_id) - - if pp: - p = CsRiResult(pp) - return p - - def get_cs_vn_by_id(self, vn_id='', refresh=False): - ''' - method: get_cs_vn find a vn - returns None if not found, a dict w/ attrib. eg: - - ''' - p = self.try_cache_by_id('vn', vn_id, refresh) - if not p and vn_id: - # cache miss - pp = None - try: - pp = self.dict_get('virtual-network/' + vn_id) - except: - self.log.debug("Virtual Network ID: %s not found", vn_id) - - if pp: - p = CsVNResult(pp) - self.update_cache('vn', p.fq_name().split(':'), p) - return p - - def get_cs_vn(self, domain='default-domain', project='admin', - vn='default-virtual-network', refresh=False): - ''' - method: get_cs_vn find a vn - returns None if not found, a dict w/ attrib. eg: - - ''' - p = self.try_cache('vn', [domain, project, vn], refresh) - if not p: - # cache miss - pp = None - proj = self.get_cs_project(domain, project, refresh) - if proj: - myvn = proj.vn(vn) - if 1 == len(myvn): - pp = self.dict_get(myvn[0]['href']) - if pp: - p = CsVNResult(pp) - self.update_cache('vn', [domain, project, vn], p) - return p - - # TODO - def get_cs_vn_policys(self, project='admin', domain='default-domain', vn='default-virtual-network', refresh=False): - ''' - method: get_cs_vn_policys find a vn associated policys - returns None if not found,or a list of virtual network associated policys - - ''' - vn_final_policy_list = [] - vn_attach_policy_list = {} - vn_obj = self.get_cs_vn(domain='default-domain', - project=project, vn=vn, refresh=True) - if 'network_policy_refs' in vn_obj['virtual-network']: - vn_pol = vn_obj['virtual-network']['network_policy_refs'] - else: - return vn_final_policy_list - for i in range(len(vn_pol)): - vn_attach_policy_list[vn_pol[i]['attr']['sequence'] - ['major']] = (str(vn_pol[i]['to'][-1])) - policy_major_no_list = vn_attach_policy_list.keys() - 
order_policys = sorted(policy_major_no_list) - for policy in order_policys: - vn_final_policy_list.append(vn_attach_policy_list[policy]) - return vn_final_policy_list - - def get_cs_dns(self, vdns_name, domain='default-domain', refresh=False): - p = self.try_cache('dns', [domain, vdns_name], refresh) - if not p: - pp = None - dom = self.get_cs_domain(domain, refresh) - if dom: - #myvdns =dom.vdns_list() - if dom.vdns_list() is None: - self.log.debug('VDNS information not found in API server') - return None - myvdns = dom.vdns(vdns_name) - if myvdns: - pp = self.dict_get(myvdns[0]['href']) - if pp: - p = CsVdnsResult(pp) - self.update_cache('dns', [domain, vdns_name], p) - return p - - def get_cs_dns_rec(self, rec_name, vdns_name, domain='default-domain', refresh=False): - p = self.try_cache('dns_rec', [domain, rec_name], refresh) - if not p: - pp = None - rec_ref = None - dom = self.get_cs_domain(domain, refresh) - if dom: - mydns = dom.vdns(vdns_name) - if mydns: - pp = self.dict_get(mydns[0]['href']) - if pp: - dns_recs = pp['virtual-DNS']['virtual_DNS_records'] - for rec in dns_recs: - if rec['to'][2] == rec_name: - rec_ref = rec['href'] - break - rec_data = self.dict_get(rec_ref) - p = CsVdnsResult(rec_data) - self.update_cache('dns_rec', [domain, rec_name], p) - return p - - def get_cs_fip_list(self, domain='default-domain', project='admin'): - ''' - Returns the floating-ips currently used in the project - ''' - - # end get_cs_fip_list - - def get_cs_fip(self, fip_id, refresh): - ''' - Given a Floating IP ID, return the floating-ip dict for it. 
- ''' - p = self.try_cache('fip', fip_id, refresh) - pp = None - if not p: - # cache miss - try: - pp = self.dict_get('floating-ip/%s' % fip_id) - if pp: - p = CsFipIdResult(pp) - self.update_cache('fip', fip_id, p) - except Exception as e: - self.log.error(e) - return p - # end get_cs_fip - - def get_cs_alloc_fip_pool(self, domain='default-domain', project='admin', - vn_name='default-virtual-network', fip_pool_name='default-floating-ip-pool', - refresh=False): - ''' - method: get_cs_alloc_fip_pool finds a fip pool allocated in vn - returns None if not found, a dict w/ attrib. eg: - - ''' - p = self.try_cache('fip_alloc_pool', [domain, project, vn_name, - fip_pool_name], refresh) - pp = None - if not p: - # cache miss - _vn = self.get_cs_vn(domain, project, vn_name, refresh) - if _vn: - myfip_alloc_pool = _vn.fip(fip_pool_name) - if 1 == len(myfip_alloc_pool): - pp = self.dict_get(myfip_alloc_pool[0]['href']) - if pp: - p = CsAllocFipPoolResult(pp) - self.update_cache('fip_alloc_pool', - [domain, project, vn_name, fip_pool_name], p) - return p - - def get_cs_use_fip_pool(self, domain='default-domain', project='admin', - fip_pool_name='default-floating-ip-pool', vn_name='default-virtual-network', - refresh=False): - ''' - method: get_cs_use_fip_pool finds a fip pool used by a project - returns None if not found, a dict w/ attrib. 
eg: - - ''' - p = self.try_cache('fip_use_pool', [domain, project, - '::'.join(fip_pool_name)], refresh) - if p: - return p - # cache miss - pp = None - proj = self.get_cs_project(domain, project, refresh) - if proj and proj['project'].has_key('floating_ip_pool_refs'): - myfip = proj.fip_list(fip_pool_name) - if 1 == len(myfip): - pp = self.dict_get(myfip[0]['href']) - if pp: - p = CsUseFipResult(pp) - self.update_cache('fip_use_pool', [domain, project, - '::'.join(fip_pool_name)], p) - return p - - def get_cs_vm(self, vm_id, refresh=False): - ''' - - Returns VM object in API Server as in http://172.27.58.57:8082/virtual-machine/ - ''' - p = self.try_cache('vm', vm_id, refresh) - if not p: - # cache miss - pp = self.dict_get('virtual-machine/%s' % vm_id) - if pp: - p = CsVMResult(pp) - self.update_cache('vm', vm_id, p) - return p - # end get_cs_vm - - def get_cs_vr_of_vm(self, vm_id, refresh=False): - ''' - - Returns the Virtual Router object using the virtual_router_back_refs of a link : http://172.27.58.57:8082/virtual-machine/ - ''' - p = self.try_cache('vr', vm_id, refresh) - pp = None - if not p: - # cache miss - vm = self.get_cs_vm(vm_id, refresh) - if vm: - pp = self.dict_get(vm.vr_link()) - if pp: - p = CsVrOfVmResult(pp) - self.update_cache('vr', vm_id, p) - return p - # end get_vr_of_vm - - def get_cs_vmi_of_vm(self, vm_id, refresh=False): - ''' - - Returns the Virtual Machine Interface using virtual_machine_interfaces link in http://host/virtual-machine/ - ''' - p = self.try_cache('vmi', vm_id, refresh) - pp = [] - if not p: - # cache miss - vm = self.get_cs_vm(vm_id, refresh) - if vm: - links = vm.vmi_links() - for link in links: - pp.append(self.dict_get(link)) - if pp: - p = [] - for vmi in pp: - p.append(CsVmiOfVmResult(vmi)) - self.update_cache('vmi', vm_id, p) - return p - # end get_cs_vmi_of_vm - - def get_cs_instance_ips_of_vm(self, vm_id, refresh=False): - ''' - - Returns the Instance-IP objects using virtual_machine_interfaces link of a VM - 
''' - p = self.try_cache('iip', vm_id, refresh) - pp = [] - if not p: - # cache miss - vmi_objs = self.get_cs_vmi_of_vm(vm_id, refresh) - if vmi_objs: - for vmi_obj in vmi_objs: - for link in vmi_obj.ip_link(): - pp.append(self.dict_get(link)) - if pp: - p = [] - for ip_obj in pp: - p.append(CsIipOfVmResult(ip_obj)) - self.update_cache('iip', vm_id, p) - return p - # end get_instance_ips_of_vm - - def get_cs_floating_ips_of_vm(self, vm_id, refresh=False): - ''' - - Returns the Floating-IP objects using virtual_machine_interfaces link of a VM - ''' - p = self.try_cache('fip', vm_id, refresh) - pp = [] - if not p: - # cache miss - vmi_objs = self.get_cs_vmi_of_vm(vm_id, refresh) - if vmi_obj: - try: - #import pdb; pdb.set_trace () - for vmi_obj in vmi_objs: - pp.append(self.dict_get(vmi_obj.fip_link())) - if pp: - p = [] - for fip_obj in pp: - p.append(CsFipOfVmResult(fip_obj)) - self.update_cache('fip', vm_id, p) - except Exception as e: - self.log.error(e) - return p - # end get_cs_floating_ips_of_vm - - def get_cs_routing_instances(self, vn_id='', refresh=True): - ''' Get a list of Routing instances mapped to the VN - ''' - p = self.try_cache_by_id('ri', vn_id, refresh) - if not p: - pp = CsRiResult({'routing_instances': []}) - for rl in self.get_cs_vn_by_id(vn_id, refresh).ri_links(): - pp['routing_instances'].append(self.dict_get(rl)) - if pp['routing_instances']: - p = pp - self.update_cache('ri', vn_id, p) - return p - - # end get_cs_routing_instances - - def get_cs_route_targets(self, vn_id=''): - ''' Get a list of Route targets mapped to the VN - ''' - p = self.get_cs_routing_instances(vn_id) - if p: - pp = CsRtResult({'route_target_list': []}) - for ri in p['routing_instances']: - ri_uuid = ri['routing-instance']['uuid'] - rt = self.get_cs_route_targets_of_ri(ri_uuid) - pp['route_target_list'].append(rt) - if pp['route_target_list']: - p = pp - self.update_cache('rt', vn_id, p) - return p - - # end get_cs_route_targets - - def get_cs_rt_names(self, rt_obj): 
- ''' From the result of get_cs_route_targets(), return the list of Route-target names - Input will be of the form : -{'route_target_list': [{'route_target_list': [{u'route-target': {u'_type': u'route-target', u'fq_name': [u'target:64512:914'], u'uuid': u'b1c0ef9a-cab8-4554-bfcb-74bf963c6a80', u'routing_instance_back_refs': [{u'to': [u'default-domain', u'admin', u'vn222', u'vn222'], u'href': u'http://10.204.216.38:8082/routing-instance/61983e45-dcdf-46d7-87f4-c434f874597f', u'attr': None, u'uuid': u'61983e45-dcdf-46d7-87f4-c434f874597f'}], u'href': u'http://10.204.216.38:8082/route-target/b1c0ef9a-cab8-4554-bfcb-74bf963c6a80', u'id_perms': {u'enable': True, u'uuid': {u'uuid_mslong': 12808500788346766676L, u'uuid_lslong': 13820268247724616320L}, u'created': None, u'description': None, u'last_modified': None, u'permissions': {u'owner': u'cloud-admin', u'owner_access': 7, u'other_access': 7, u'group': u'cloud-admin-group', u'group_access': 7}}, u'name': u'target:64512:914'}}]}]} - ''' - rt_list = rt_obj['route_target_list'][0].get('route_target_list', []) - rt_names = [] - for rt in rt_list: - rt_names.append(str(rt['route-target']['name'])) - self.log.debug("Route Targets: %s", rt_names) - return rt_names - # end get_cs_rt_names - - def get_cs_route_targets_of_ri(self, ri_id='', refresh=False): - ''' Get a list of Route targets mapped to the VN - ''' - p = self.get_cs_ri_by_id(ri_id) - if p: - pp = CsRtResult({'route_target_list': []}) - for rt in p.rt_links(): - pp['route_target_list'].append(self.dict_get(rt)) - if pp['route_target_list']: - p = pp - return p - # end get_cs_route_targets - - def policy_update(self, domain='default-domain', *arg): - pass - - def dissassociate_ip(self, domain='default-domain', *arg): - pass - - def get_cs_secgrp(self, domain='default-domain', project='admin', - secgrp='default-security-group', refresh=False): - ''' - method: get_cs_secgrp find an security group - returns None if not found, a dict w/ attrib. 
eg: - - ''' - p = self.try_cache('secgrp', [domain, project, secgrp], refresh) - if not p: - # cache miss - proj = self.get_cs_project(domain, project, refresh) - pp = None - if proj: - mysecgrp = proj.secgrp(secgrp) - if mysecgrp: - pp = self.dict_get(mysecgrp[0]['href']) - if pp: - p = CsSecurityGroupResult(pp) - self.update_cache('secgrp', [domain, project, secgrp], p) - return p - - def get_cs_si(self, domain='default-domain', project='admin', - si=None, refresh=False): - ''' - method: get_cs_si find an service instance - returns None if not found, a dict w/ attrib. eg: - - ''' - p = self.try_cache('si', [domain, project, si], refresh) - if not p: - # cache miss - proj = self.get_cs_project(domain, project, refresh) - pp = None - if proj: - mysi = proj.si(si) - if mysi: - pp = self.dict_get(mysi[0]['href']) - if pp: - p = CsServiceInstanceResult(pp) - self.update_cache('si', [domain, project, si], p) - return p - - def get_cs_si_by_id(self, si_id, refresh=False): - ''' - method: get_cs_si_by_id find a service instance by id - returns None if not found, a dict w/ attrib. eg: - ''' - p = self.try_cache_by_id('si', si_id, refresh) - if not p: - # cache miss - pp = self.dict_get('service-instance/%s' %si_id) - if pp: - p = CsServiceInstanceResult(pp) - self.update_cache('si', p.fq_name().split(':'), p) - return p - - def get_cs_st(self, domain='default-domain', project='admin', - st='nat-template', refresh=False): - ''' - method: get_cs_st find an service template - returns None if not found, a dict w/ attrib. 
eg: - - ''' - p = self.try_cache('st', [domain, project, st], refresh) - if not p: - # cache miss - dom = self.get_cs_domain(domain) - pp = None - if dom: - myst = dom.st(st) - if myst: - pp = self.dict_get(myst[0]['href']) - if pp: - p = CsServiceTemplateResult(pp) - self.update_cache('secgrp', [domain, project, st], p) - return p - - def get_global_vrouter_config(self): - '''Gets global vrouter configs''' - doms = self.dict_get('global-vrouter-configs') - gvr_config = self.dict_get(doms['global-vrouter-configs'][0]['href']) - if gvr_config: - pp = CsGlobalVrouterConfigResult(gvr_config) - return pp - - def get_computes(self): - '''Get list of vrouter-agents''' - vrouters = CsVrouters(self.dict_get('virtual-routers?detail=True')) - vr_list = list() - for vrouter in vrouters: - vr = CsVrouter(vrouter) -# if vr.is_tor_agent() or vr.is_tsn(): -# continue - vr_list.append(vr.ip) - return vr_list - - def get_secgrp_acls_href(self, domain='default-domain', project='admin', - secgrp='default-security-group', refresh=False): - ''' - method: get_secgrp_acls find acls href for a security group - returns empty list if not found, a list of href - ''' - p = [] - secgrp_fq_name = ':'.join([domain,project,secgrp]) - proj = self.get_cs_project(domain, project, refresh) - pp = None - if proj: - pp = self.dict_get('access-control-lists') - for acl in pp['access-control-lists']: - if ':'.join([acl['fq_name'][0],acl['fq_name'][1],acl['fq_name'][2]]) == secgrp_fq_name: - p.append(acl['href']) - - return p - - def get_lb_pool(self, pool_id, refresh=False): - ''' - method: get_lb_pool find a lb pool - returns None if not found, a dict w/ attrib. 
eg: - - ''' - p = self.try_cache_by_id('lb_pool', pool_id, refresh) - if not p: - # cache miss - pp = self.dict_get('loadbalancer-pool/%s' % pool_id) - if pp: - p = CsLbPool(pp) - self.update_cache('lb_pool', p.fq_name().split(':'), p) - return p - - def get_lb_vip(self, vip_id, refresh=False): - ''' - method: get_lb_vip find a lb vip - returns None if not found, a dict w/ attrib. eg: - - ''' - p = self.try_cache('lb_vip', vip_id, refresh) - if not p: - # cache miss - pp = self.dict_get('virtual-ip/%s' % vip_id) - if pp: - p = CsLbVip(pp) - self.update_cache('lb_vip', p.fq_name().split(':'), p) - return p - - def get_lb_member(self, member_id, refresh=False): - ''' - method: get_lb_member find a lb member - returns None if not found, a dict w/ attrib. eg: - - ''' - p = self.try_cache('lb_member', member_id, refresh) - if not p: - # cache miss - pp = self.dict_get('loadbalancer-member/%s' % member_id) - if pp: - p = CsLbMember(pp) - self.update_cache('lb_member', p.fq_name().split(':'), p) - return p - - def get_lb_healthmonitor(self, healthmonitor_id, refresh=False): - ''' - method: get_lb_healthmonitor find a lb healthmonitor - returns None if not found, a dict w/ attrib. 
eg: - - ''' - p = self.try_cache('lb_healthmonitor', healthmonitor_id, refresh) - if not p: - # cache miss - pp = self.dict_get('loadbalancer-healthmonitor/%s'%healthmonitor_id) - if pp: - p = CsLbHealthMonitor(pp) - self.update_cache('lb_healthmonitor', p.fq_name().split(':'), p) - return p - - def get_lr(self, uuid, refresh=False): - p = self.try_cache_by_id('lr', uuid, refresh) - if not p: - # cache miss - pp = self.dict_get('logical-router/%s' % uuid) - if pp: - p = CsLogicalRouterResult(pp) - self.update_cache('lr', p.fq_name().split(':'), p) - return p - - def get_route_table(self, uuid, refresh=False): - p = self.try_cache_by_id('table', uuid, refresh) - if not p: - # cache miss - pp = self.dict_get('route-table/%s' % uuid) - if pp: - p = CsTableResult(pp) - self.update_cache('table', p.fq_name().split(':'), p) - return p - -if __name__ == '__main__': - va = VNCApiInspect('10.84.7.2') - r = va.get_cs_domain() - pr = va.get_cs_project() - ir = va.get_cs_ipam() - polr = va.get_cs_policy() - vnr = va.get_cs_vn(project='demo', vn='fe') - vmvr = va.get_cs_instance_ip_of_vm("becd5f61-c446-4963-af5a-886138ce026f") - print r.project_list(), r.uuid(), r.name(), pr.fq_name(), ir.fq_name() - print polr.fq_name(), vnr.fq_name() - import pprint - pprint.pprint(vmvr) - if vmvr: - print polr.fq_name(), vnr.vm_link_list(), vmvr.ip() - fipr = va.get_cs_floating_ip_of_vm("bae09ef8-fcad-4aae-a36e-0969410daf8e") - if fipr: - print fipr.ip() - print va.get_cs_routing_instances('3236c96e-38cf-40d9-94dd-7ec5495192f1') - print va.get_cs_route_targets_of_ri('97e53a4e-0d10-4c88-aec9-0ebb3e4471f6') - va = VNCApiInspect('10.84.11.2') - print va.get_cs_route_targets('6a454d59-aadb-4140-907f-1bd8b378a7ce') - # print va.get_cs_domain ('red-domain'), va.get_cs_domain ('ted-domain') - # print va.get_cs_project ('ted-domain', 'ted-eng') - # print va.get_cs_ipam ('ted-domain', 'ted-eng', 'default-network-ipam') - # print va.get_cs_policy ('ted-domain', 'ted-eng', 'default-network-policy') - # 
print va.get_cs_vn ('ted-domain', 'ted-eng', 'ted-back') - # print va.get_cs_alloc_fip_pool ('ted-domain', 'ted-eng', 'ted-front', 'ted_fip_pool') - # print va.get_cs_use_fip_pool ('ted-domain', 'default-project', ['ted-domain', 'ted-eng', 'ted-front', 'ted_fip_pool']) - #va = VNCApiInspect ('10.84.7.4') - # print va.get_cs_domain ('red-domain'), va.get_cs_domain ('ted-domain') - # print va.get_cs_project ('ted-domain', 'ted-eng') - # print va.get_cs_vr_of_vm('e87b5000-722f-420f-9b7b-7dd74f0b87ef') - # print va.get_cs_vmi_of_vm('e87b5000-722f-420f-9b7b-7dd74f0b87ef') - # print va.get_cs_instance_ip_of_vm('e87b5000-722f-420f-9b7b-7dd74f0b87ef') - # print va.get_cs_ipam ('default-domain', 'demo', 'default-network-ipam'), va.get_cs_ipam ('default-domain', 'demo', 'default-network-ipam2') - # print va.get_cs_policy ('default-domain', 'default-project', 'default-network-policy'), va.get_cs_policy ('default-domain', 'default-project', 'default-network-policy2') - # print va.get_cs_vn ('default-domain', 'default-project', 'ip-fabric'), va.get_cs_vn ('my-domain', 'my-proj', 'my-fe') - # print va.get_cs_fip_pool ('ted-domain', 'ted-eng', 'ted-front', - # 'ted_fip_pool'), cn.get_cs_fip_pool ('ted-domain', 'ted-eng', - # 'ted-front', 'ted-fip-pool2') diff --git a/tcutils/contrail_status_check.py b/tcutils/contrail_status_check.py deleted file mode 100644 index c367b17f5..000000000 --- a/tcutils/contrail_status_check.py +++ /dev/null @@ -1,272 +0,0 @@ -# Tool to check contrail status on a bunch of nodes. -# The script has 2 functions. -# 1.Using get_status, you can get the 'contrail-status' output -# for the nodes that you pass. -# --If no nodes are passed, the list of nodes specified from -# the testbed.py are taken by default. 
-# --If you want status only for a specific bunch of services, -# include these as a dict as below: -# includeservice = -# {'10.204.216.72': 'supervisor-vrouter', -# '10.204.217.7': 'supervisor-control,contrail-named'} -# It will return only these service's status. -# 2. Using check_status, you can get all the above mentioned features -# along with a boolean mentioning whether the contrail-status is -# clean with no non-active or duplicate actives or not -# Usage 1: -# nodes = ['10.204.217.7', '10.204.216.72'] -# includeservice = -# {'10.204.216.72': 'supervisor-vrouter,contrail-vrouter-agent', -# '10.204.217.7': 'supervisor-control,contrail-control'} -# (boolval, ret) = self.stat.check_status(nodes, includeservice) -# The return value in boolval will be False if error present, True -# otherwise and in ret will be a list of dict -# specifying node, service and error for that node and service -# if present: -# boolval = False -# ret = -# [{'Error': 'contrail-svc-monitor inactive \r', -# 'Node': '10.204.216.72', -# 'Service': 'contrail-svc-monitor'}, -# {'Error': 'contrail-schema inactive \r', -# 'Node': '10.204.217.11', -# 'Service': 'contrail-schema'}] -# Usage 2: -# nodes = ['10.204.217.7', '10.204.216.72'] -# ret = self.stat.get_status(nodes) -# The return value in ret will be a list of dict specifying node, -# service and error for that node and service if present: -# ret = -# [{'Error': 'contrail-svc-monitor inactive \r', -# 'Node': '10.204.216.72', -# 'Service': 'contrail-svc-monitor'}, -# {'Error': 'contrail-schema inactive \r', -# 'Node': '10.204.217.11', -# 'Service': 'contrail-schema'}] -# 3. 
import os
import re
import time


class Constatuscheck:

    '''Check 'contrail-status' output across a set of nodes.

    get_status() returns a list of {'Node', 'Service', 'Error'} dicts for
    every service whose status is initializing/inactive/failed/timeout,
    plus one entry per single-active service (schema, svc-monitor,
    device-manager) that reports more than one active instance.
    check_status() wraps that with an ok/not-ok boolean and
    wait_till_contrail_cluster_stable() polls until the cluster is clean.

    Optional:
    :inputs : a pre-built ContrailTestInit object; when omitted, one is
              created from TEST_CONFIG_FILE (default 'sanity_params.ini').
    '''

    def __init__(self, inputs=None):
        if inputs:
            # BUG FIX: a caller-supplied inputs object used to be ignored,
            # leaving self.inputs unset and every later call failing with
            # AttributeError.
            self.inputs = inputs
        else:
            # Imported lazily so this module can be loaded without the
            # full test framework on the path.
            from common.contrail_test_init import ContrailTestInit
            sanity_params = os.environ.get(
                'TEST_CONFIG_FILE') or 'sanity_params.ini'
            self.inputs = ContrailTestInit(sanity_params)
            self.inputs.read_prov_file()

    def get_status(self, nodes=[], includeservice={}):
        '''Run 'contrail-status -x' on each node and collect errors.

        :param nodes          : node IPs to check; defaults to
                                self.inputs.host_ips
        :param includeservice : {node_ip: 'svc1,svc2'} — when given, only
                                these services are reported per node
        :return list of {'Node':.., 'Service':.., 'Error':..} dicts
        '''
        cmd = 'contrail-status -x'
        self.keys = ['Node', 'Service', 'Error']
        errlist = []
        skip_status = ['initializing', 'inactive', 'failed', 'timeout']
        # Services that must have exactly one active instance cluster-wide.
        single_active_services = {'contrail-schema': None,
                                  'contrail-svc-monitor': None,
                                  'contrail-device-manager': None}
        if not nodes:
            nodes = self.inputs.host_ips
        for node in nodes:
            self.inputs.logger.debug(
                'Executing %s command on node %s to check for contrail-status' % (cmd, node))
            output = self.inputs.run_cmd_on_server(node, cmd)
            for line in output.split("\n"):
                fields = line.split()
                service = fields[0] if fields else None
                status = fields[1].strip() if len(fields) == 2 else None
                if not status:
                    continue
                if status in ('active', 'backup'):
                    if service in single_active_services:
                        self.add_node_to_all_active_servers(
                            includeservice, node, single_active_services,
                            service, status)
                # One of the four "erroneous" statuses: record it.
                if status in skip_status:
                    if includeservice:
                        self.update_error_if_includeservice_present(
                            node, includeservice, service, errlist, line,
                            output)
                    else:
                        self.update_error_if_includeservice_not_present(
                            node, includeservice, service, errlist, line,
                            output)

        # Flag single-active services seen active on more than one node.
        for service, seen in single_active_services.items():
            # BUG FIX: 'seen' stays None for a service that was never
            # reported active; None.count() used to raise AttributeError.
            if seen and seen.count('active') > 1:
                active_nodes = re.findall('([0-9.]+)-active', seen)
                errlist.append(dict(zip(self.keys, [
                    active_nodes, service,
                    'multiple actives found for this service'])))
        return errlist

    def check_status(self, nodes=[], includeservice={}):
        '''Like get_status(), but returns (ok, errlist) where ok is True
        when no errors were found.'''
        errlist = self.get_status(nodes=nodes, includeservice=includeservice)
        return (not errlist, errlist)

    def wait_till_contrail_cluster_stable(self, nodes=[], includeservice={},
                                          delay=10, tries=30):
        '''Poll get_status() up to 'tries' times, sleeping 'delay' seconds
        between attempts; return (ok, errlist) of the last attempt.'''
        errlist = []
        for attempt in range(tries):
            errlist = self.get_status(nodes=nodes,
                                      includeservice=includeservice)
            if not errlist:
                self.inputs.logger.info('Contrail cluster seems stable')
                return (True, errlist)
            self.inputs.logger.debug(
                'Not all services up. Sleeping for %s seconds. Present iteration number : %s' % (delay, attempt))
            time.sleep(delay)
        self.inputs.logger.error('Not all services up , Gave up!')
        return (not errlist, errlist)

    def add_node_to_all_active_servers(self, includeservice, node,
                                       single_active_services, service,
                                       status):
        '''Record '<node>-<status>' for a single-active service so that
        duplicate actives can be detected after all nodes are scanned.'''
        if includeservice:
            # Only track node/service pairs the caller asked about.
            if node not in includeservice or \
                    service not in includeservice[node]:
                return
        seen = single_active_services[service]
        if seen and node in seen:
            return  # this node is already recorded for this service
        single_active_services[service] = '-'.join(
            x for x in (seen, node, status) if x)

    def update_error_if_includeservice_present(self, node, includeservice,
                                               service, errlist, line,
                                               output):
        '''Record an erroneous status, but only for node/service pairs
        listed in includeservice.'''
        if node in includeservice and service in includeservice[node]:
            self._record_error(node, service, errlist, line, output)

    def update_error_if_includeservice_not_present(self, node,
                                                   includeservice, service,
                                                   errlist, line, output):
        '''Record an erroneous status for any node/service pair.'''
        self._record_error(node, service, errlist, line, output)

    def _record_error(self, node, service, errlist, line, output):
        '''Append a {'Node','Service','Error'} entry unless one already
        exists for this node/service pair.

        Replaces the original json.dumps()-substring de-duplication with a
        direct field comparison.
        '''
        for err in errlist:
            if err['Node'] == node and err['Service'] == service:
                return
        lines = output.split("\n")
        idx = lines.index(line) + 1
        # 'contrail-status -x' puts the failure detail on the following
        # line; guard against the status line being the last one.
        errorline = lines[idx] if idx < len(lines) else line
        if self.check_if_erroneous_status_present(errorline):
            # The next line is already the next service's status, so
            # 'line' itself carries the error.
            errorline = line
        errlist.append(dict(zip(self.keys, [node, service, errorline])))

    def check_if_erroneous_status_present(self, errorline):
        '''True when errorline looks like another status/service line
        rather than free-form failure detail.'''
        markers = ('initializing', 'inactive', 'failed', 'timeout',
                   'active', 'backup', 'contrail-', 'supervisor-',
                   'rabbitmq-', 'ifmap')
        return any(marker in errorline for marker in markers)
- """ - - def __init__(self, verbosity=1): - super(ContrailTestResult, self).__init__(verbosity) - self.core_count = 0 - self.crash_count = 0 - self.core_list = [] - - def addFailure(self, test, err): - self.addCores(err) - self.addCrashes(err) - super(ContrailTestResult, self).addFailure(test, err) - - def addError(self, test, err): - self.addCores(err) - self.addCrashes(err) - super(ContrailTestResult, self).addError(test, err) - - def addCores(self, err): - """Add core count when addFailure or addError callbacks is called by - unittest.Result. - """ - # Change this pattern if output format in tcutils.wrapper.py is - # changed. - match = re.search("Cores found\(([0-9]+)\): \{([\S ]+)\}", str(err[1])) - if match: - self.core_count += int(match.group(1)) - core_list = match.group(2) - core_list = re.findall(r'core.\S+.[0-9]+.\S+.[0-9]+', core_list) - self.core_list += core_list - - def addCrashes(self, err): - """Add crash count when addFailure or addError callbacks is called by - unittest.Result. - """ - # Change this pattern if output format in tcutils.wrapper.py is - # changed. - match = re.search("Contrail service crashed\(([0-9]+)\)", str(err[1])) - if match: - self.crash_count += int(match.group(1)) - - -class ContrailHTMLTestRunner(HTMLTestRunner): - - """Contrail HTML Test runner with overrided getReportAttributes - and run method to customize the test report. - """ - - def __init__(self, stream=sys.stdout, verbosity=1, title=None, description=None): - super(ContrailHTMLTestRunner, self).__init__( - stream, verbosity, title, description) - - def run(self, test): - """Run the given test case or test suite. - Pass customized ContrailTestResult. 
- """ - result = ContrailTestResult(self.verbosity) - test(result) - self.stopTime = datetime.datetime.now() - self.generateReport(test, result) - print >>sys.stderr, '\nTime Elapsed: %s' % ( - self.stopTime - self.startTime) - return result - - def getReportAttributes(self, result): - """ - Return report attributes as a list of (name, value) - along with core information. - """ - attributes = super(ContrailHTMLTestRunner, - self).getReportAttributes(result) - if not result.core_count and not result.crash_count: - return attributes - - status = [] - for key, val in attributes: - if key == "Status": - attributes.remove((key, val)) - if val != 'none': - status.append(val) - - if result.core_count: - status.append('Cores %s' % result.core_count) - if result.crash_count: - status.append('Crashes %s' % result.crash_count) - status = ' '.join(status) - result.core_list = ', '.join(result.core_list) - attributes.append(('Status', status)) - attributes.append(('Cores List', result.core_list)) - - return attributes diff --git a/tcutils/control/__init__.py b/tcutils/control/__init__.py deleted file mode 100644 index e1c50e06f..000000000 --- a/tcutils/control/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Contorl-node Utils""" diff --git a/tcutils/control/cn_introspect_utils.py b/tcutils/control/cn_introspect_utils.py deleted file mode 100644 index 0bba50d87..000000000 --- a/tcutils/control/cn_introspect_utils.py +++ /dev/null @@ -1,278 +0,0 @@ -import logging as LOG -from lxml import etree -import re -from tcutils.verification_util import * -from tcutils.util import is_v6 -from netaddr import IPNetwork, AddrFormatError - -LOG.basicConfig(format='%(levelname)s: %(message)s', level=LOG.DEBUG) - - -class ControlNodeInspect (VerificationUtilBase): - - def __init__(self, ip, logger=LOG): - super(ControlNodeInspect, self).__init__(ip, 8083, XmlDrv, - logger=logger) - - def _join(self, *args): - """Joins the args with ':'""" - return ':'.join(args) - - def 
class ControlNodeInspect(VerificationUtilBase):

    """Introspect helper for a contrail control-node (HTTP port 8083)."""

    def __init__(self, ip, logger=LOG):
        super(ControlNodeInspect, self).__init__(ip, 8083, XmlDrv,
                                                 logger=logger)

    def _join(self, *args):
        """Joins the args with ':'"""
        return ':'.join(args)

    def _get_if_map_table_entry(self, match):
        """Look up one IFMap node by full node name (e.g.
        'virtual-network:default-domain:admin:vn1') and return its fields
        as a nested dict, or None when not found.
        """
        d = None
        # With paging, fetch only the matching element instead of the
        # whole table.
        table_name = re.match(r'(\S+?):', match)
        new_table_req = ('Snh_IFMapTableShowReq?table_name=' +
                         table_name.group(1) + '&search_string=' + match)
        p = self.dict_get(new_table_req)
        xp = p.xpath('./IFMapTableShowResp/ifmap_db/list/IFMapNodeShowInfo')
        if not xp:
            # sometime ./xpath dosen't work; work around —
            # should debug to find the root cause.
            xp = p.xpath('/IFMapTableShowResp/ifmap_db/list/IFMapNodeShowInfo')
        # list comprehension instead of filter() so len() works on py3 too
        found = [x for x in xp if x.xpath('./node_name')[0].text == match]
        if len(found) != 1:
            return d
        d = {}
        for e in found[0]:
            if e.tag != 'obj_info':
                d[e.tag] = e.text
                continue
            od = e.xpath('./list/IFMapObjectShowInfo')
            if not od:
                continue
            d[e.tag] = {}
            for eod in od[0]:
                if eod.tag != 'data':
                    d[e.tag][eod.tag] = eod.text
                    continue
                d[e.tag][eod.tag] = {}
                # Remove the CDATA wrapper, if present.
                # NOTE(review): the marker strings were lost in the
                # original source; assumed to be the standard CDATA
                # delimiters — confirm against a live introspect response.
                text = eod.text.replace('<![CDATA[', '').replace(']]>', '')
                nxml = etree.fromstring(text)
                for iqc in nxml:
                    if iqc.tag in ('virtual-DNS-data',
                                   'virtual-DNS-record-data'):
                        d[e.tag][eod.tag][iqc.tag] = dict(
                            (dns.tag, dns.text) for dns in iqc)
                    if iqc.tag == 'id-perms':
                        d[e.tag][eod.tag][iqc.tag] = {}
                        for idpc in iqc:
                            if idpc.tag in ('permissions', 'uuid'):
                                d[e.tag][eod.tag][iqc.tag][idpc.tag] = dict(
                                    (prm.tag, prm.text) for prm in idpc)
                            else:
                                d[e.tag][eod.tag][iqc.tag][
                                    idpc.tag] = idpc.text
        return d

    def get_if_map_peer_server_info(self, match=None):
        """Return IFMap peer-server info; 'match' selects the child
        element of IFMapPeerServerInfoResp to extract."""
        d = None
        try:
            p = self.dict_get('Snh_IFMapPeerServerInfoReq?')
            xpath = './IFMapPeerServerInfoResp/%s' % (match)
            d = EtreeToDict(xpath).get_all_entry(p)
        except Exception as e:
            print(e)
        finally:
            # Preserved original best-effort contract: always return,
            # even when the request failed (d stays None).
            return d

    def get_cn_domain(self, domain='default-domain'):
        # Not implemented on the control-node introspect.
        pass

    def get_cn_project(self, domain='default-domain', project='admin'):
        # Not implemented on the control-node introspect.
        pass

    def get_cn_vdns(self, vdns, domain='default-domain'):
        """Return the IFMap entry of a virtual-DNS."""
        return self._get_if_map_table_entry(
            self._join('virtual-DNS', domain, vdns))

    def get_cn_vdns_rec(self, vdns, rec_name, domain='default-domain'):
        """Return the IFMap entry of a virtual-DNS record."""
        return self._get_if_map_table_entry(
            self._join('virtual-DNS-record', domain, vdns, rec_name))

    def get_cn_config_ipam(self, domain='default-domain', project='admin',
                           ipam='default-network-ipam'):
        """Return the IFMap entry of a network-ipam."""
        return self._get_if_map_table_entry(
            self._join('network-ipam', domain, project, ipam))

    def get_cn_config_policy(self, domain='default-domain', project='admin',
                             policy='default-network-policy'):
        """Return the IFMap entry of a network-policy, or None."""
        policy_name = self._join('network-policy', domain, project, policy)
        path = ('Snh_IFMapTableShowReq?table_name=network-policy'
                '&search_string=%s' % (policy_name))
        xpath = './IFMapTableShowResp/ifmap_db/list/IFMapNodeShowInfo'
        p = self.dict_get(path)
        ifmap = EtreeToDict(xpath).get_all_entry(p)
        if ifmap['node_name'] == policy_name:
            return ifmap

    def get_cn_config_vn(self, domain='default-domain', project='admin',
                         vn_name='default-virtual-network'):
        """Return the IFMap entry of a virtual-network."""
        return self._get_if_map_table_entry(
            self._join('virtual-network', domain, project, vn_name))

    def get_cn_config_fip_pool(self, domain='default-domain',
                               project='admin',
                               vn_name='default-virtual-network',
                               fip_pool_name='default-floating-ip-pool'):
        """Return the IFMap entry of a floating-ip-pool."""
        return self._get_if_map_table_entry(
            self._join('floating-ip-pool', domain, project, vn_name,
                       fip_pool_name))

    def get_cn_routing_instance(self, ri_name):
        '''Returns a routing instance dictionary.'''
        path = 'Snh_ShowRoutingInstanceReq?name=%s' % ri_name
        xpath = '/ShowRoutingInstanceResp/instances/list/ShowRoutingInstance'
        p = self.dict_get(path)
        return EtreeToDict(xpath).get_all_entry(p)

    def get_cn_routing_instance_list(self):
        '''Returns a list of routing instance dictionaries.'''
        path = 'Snh_ShowRoutingInstanceReq'
        xpath = '/ShowRoutingInstanceResp/instances/list/ShowRoutingInstance'
        p = self.dict_get(path)
        return EtreeToDict(xpath).get_all_entry(p)

    def get_cn_route_table(self, ri_name):
        '''Returns a routing table dictionary of a specifc routing
        instance, includes both the unicast and multicast table.
        '''
        path = 'Snh_ShowRouteReq?name=%s' % ri_name
        xpath = '/ShowRouteResp/tables/list/ShowRouteTable'
        p = self.dict_get(path)
        return EtreeToDict(xpath).get_all_entry(p)

    def get_cn_rtarget_group(self, route_target):
        '''Returns the dictionary of the rtarget_group, or None.'''
        path = 'Snh_ShowRtGroupReq?'
        xpath = '/ShowRtGroupResp/rtgroup_list/list/ShowRtGroupInfo'
        p = self.dict_get(path)
        for group in EtreeToDict(xpath).get_all_entry(p):
            if group['rtarget'] == route_target:
                return group

    def get_cn_rtarget_table(self):
        '''Returns the routes of the bgp.rtarget.0 table.'''
        path = 'Snh_ShowRouteReq?x=bgp.rtarget.0'
        xpath = '/ShowRouteResp/tables/list/ShowRouteTable'
        p = self.dict_get(path)
        return EtreeToDict(xpath).get_all_entry(p)['routes']

    def get_cn_vpn_table(self, prefix):
        '''True when prefix is present in the bgp.l3vpn.0 (v4) or
        bgp.l3vpn-inet6.0 (v6) table.'''
        path = ('Snh_ShowRouteReq?x=bgp.l3vpn-inet6.0' if is_v6(prefix)
                else 'Snh_ShowRouteReq?x=bgp.l3vpn.0')
        xpath = '/ShowRouteResp/tables/list/ShowRouteTable'
        p = self.dict_get(path)
        rt = EtreeToDict(xpath).get_all_entry(p)
        for route in rt['routes']:
            if prefix in route['prefix']:
                return True
        return False

    def get_cn_route_table_entry(self, prefix, ri_name, table=None):
        '''Returns the paths for the requested prefix in the given routing
        instance, or None when absent.'''
        try:
            # Normalize to '<network>/<prefixlen>' form.
            net = IPNetwork(prefix)
            prefix = str(net.network) + '/' + str(net.prefixlen)
        except AddrFormatError:
            pass
        if not table:
            table = 'inet6.0' if is_v6(prefix) else 'inet.0'
        path = 'Snh_ShowRouteReq?x=%s.%s' % (ri_name, table)
        xpath = '/ShowRouteResp/tables/list/ShowRouteTable'
        p = self.dict_get(path)
        rt = EtreeToDict(xpath).get_all_entry(p)
        # get_all_entry returns a dict for one table, a list for several.
        tables = [rt] if isinstance(rt, dict) else rt
        for entry in tables:
            for route in entry['routes']:
                if route['prefix'] == prefix:
                    return route['paths']

    def get_cn_bgp_neigh_entry(self, encoding='All'):
        '''Returns BGP neighbor entries, optionally filtered by encoding
        (e.g. 'BGP' or 'XMPP').'''
        path = 'Snh_BgpNeighborReq?domain=&ip_address='
        xpath = '/BgpNeighborListResp/neighbors/list/BgpNeighborResp'
        p = self.dict_get(path)
        rt = EtreeToDict(xpath).get_all_entry(p)
        # BUG FIX: the original used "encoding is 'All'" — an identity
        # comparison that only works by accident of string interning.
        if encoding == 'All':
            return rt
        return [entry for entry in rt if entry['encoding'] == encoding]

    def policy_update(self, domain='default-domain', *arg):
        # Not implemented on the control-node introspect.
        pass

    def dissassociate_ip(self, domain='default-domain', *arg):
        # Not implemented on the control-node introspect.
        pass

    def get_cn_sec_grp(self, domain='default-domain', project='admin',
                       secgrp='default'):
        '''Return the IFMap entry of a security group, or None.'''
        sec_name = self._join('security-group', domain, project, secgrp)
        # NOTE(review): table_name uses an underscore here while every
        # other request uses hyphens — confirm against the introspect.
        path = ('Snh_IFMapTableShowReq?table_name=security_group'
                '&search_string=%s' % (sec_name))
        xpath = './IFMapTableShowResp/ifmap_db/list/IFMapNodeShowInfo'
        p = self.dict_get(path)
        ifmaps = EtreeToDict(xpath).get_all_entry(p)
        # has_key() replaced with 'in'/.get() for py3 compatibility.
        if isinstance(ifmaps, dict) and ifmaps.get('node_name') == sec_name:
            return ifmaps
        if isinstance(ifmaps, list):
            for ifmap in ifmaps:
                if ifmap['node_name'] == sec_name:
                    return ifmap

    def get_cn_sec_grp_acls(self, domain='default-domain', project='admin',
                            secgrp='default'):
        '''Return both ACL entries of a security group keyed by
        'egress-access-control-list'/'ingress-access-control-list', or
        False unless exactly the two ACLs are present.'''
        sec_name = self._join('access-control-list', domain, project, secgrp)
        egress = sec_name + ':' + 'egress-access-control-list'
        ingress = sec_name + ':' + 'ingress-access-control-list'
        path = ('Snh_IFMapTableShowReq?table_name=access-control-list'
                '&search_string=%s' % (sec_name))
        xpath = './IFMapTableShowResp/ifmap_db/list/IFMapNodeShowInfo'
        p = self.dict_get(path)
        ifmaps = EtreeToDict(xpath).get_all_entry(p)
        if isinstance(ifmaps, dict) or \
                (isinstance(ifmaps, list) and len(ifmaps) != 2):
            return False
        acls_dict = {}
        for ifmap in ifmaps:
            if ifmap['node_name'] == egress:
                acls_dict['egress-access-control-list'] = ifmap
            if ifmap['node_name'] == ingress:
                acls_dict['ingress-access-control-list'] = ifmap
        return acls_dict
""" Module to check and collect information about cores during the test."""

CORE_DIR = '/var/crashes'


class TestFailed(Exception):
    """Raised by test wrappers when a test must be marked as failed."""
    pass


def get_node_ips(inputs):
    """Return the de-duplicated list of node IP addresses in the setup.

    :param inputs: test-init object exposing cfgm_ips/bgp_ips/... attrs
    """
    node_ips = set()
    nodes = ['cfgm_ips', 'bgp_ips', 'collector_ips',
             'webui_ips', 'compute_ips']
    if inputs.orchestrator == 'openstack':
        nodes += ['openstack_ip']
    for node in nodes:
        ip = getattr(inputs, node)
        if isinstance(ip, str):
            # some attributes hold a single IP rather than a list
            ip = [ip]
        node_ips.update(ip)
    return list(node_ips)


def get_cores(inputs):
    '''Return {node_ip: [core file names]} for nodes that have cores.'''
    cores = {}
    for host in inputs.host_ips:
        username = inputs.host_data[host]['username']
        password = inputs.host_data[host]['password']
        core = get_cores_node(host, username, password)
        if core:
            cores[host] = core.split()
    return cores


def get_cores_node(node_ip, user, password):
    """Return the 'ls core.*' output (string) from CORE_DIR on one node."""
    # fabric is imported lazily so the pure helpers in this module
    # (find_new, get_node_ips) remain usable without fabric installed.
    from fabric.api import run, cd
    from fabric.context_managers import settings, hide
    with hide('everything'):
        with settings(
                host_string='%s@%s' % (user, node_ip), password=password,
                warn_only=True, abort_on_prompts=False):
            with cd(CORE_DIR):
                return run("ls core.* 2>/dev/null")


def find_new(initials, finals):
    """Return entries present in finals but not in initials.

    Both arguments map node -> list of items (cores/crashes); the result
    maps node -> new items only, omitting nodes with nothing new.
    """
    new = {}
    for node, final in finals.items():
        if node in initials:
            diff = list(set(final).difference(set(initials[node])))
            if diff:
                new[node] = diff
        else:
            # node not seen initially: everything on it is new
            new[node] = final
    return new


def get_service_crashes(inputs):
    """Return {node_ip: [crashed services]} across the test setup."""
    crashes = {}
    for node_ip in inputs.host_ips:
        username = inputs.host_data[node_ip]['username']
        password = inputs.host_data[node_ip]['password']
        service_crash = get_service_crashes_node(node_ip, username, password)
        if service_crash:
            crashes[node_ip] = service_crash
    return crashes


def get_service_crashes_node(node_ip, user, password):
    """Return the services reported inactive/failed by 'contrail-status'
    on one node (empty list when the node is clean)."""
    from fabric.api import run
    from fabric.context_managers import settings, hide
    with hide('everything'):
        with settings(
                host_string='%s@%s' % (user, node_ip), password=password,
                warn_only=True, abort_on_prompts=False):
            output = run("contrail-status")
    services = []
    if "Failed service list" in output:
        for line in output.split("\n"):
            if "Failed service list" in line:
                # dont iterate beyond this to look for service: status
                break
            service_status = line.split(":")
            service = service_status[0]
            if len(service_status) == 2:
                status = service_status[1].strip()
                if status in ("inactive", "failed"):
                    services.append(service)
    return services
# end get_service_crashes_node
ts=time.strftime("%Y%m%d%H%M%S") -# path = path+"/log_%s_%s" %( build_id, ts ) -# super(CustomFileHandler,self).__init__(path+"/"+fileName,mode) - if 'SCRIPT_TS' in os.environ: - ts = os.environ.get('SCRIPT_TS') - else: - ts = '' - if 'BUILD_ID' in os.environ: - build_id = os.environ.get('BUILD_ID') - path = os.environ.get('HOME') + '/logs/' + build_id + '_' + ts - try: - os.mkdir(path) - except OSError: - subprocess.call('mkdir -p %s' % (path), shell=True) - fileName = path + '/' + fileName - print "\nLog file : %s \n" % (os.path.realpath(fileName)) -# super(CustomFileHandler,self).__init__(fileName,mode) - logging.FileHandler.__init__(self, fileName, mode) -# end customFileHandler diff --git a/tcutils/db.py b/tcutils/db.py deleted file mode 100644 index 38ccebb3c..000000000 --- a/tcutils/db.py +++ /dev/null @@ -1,551 +0,0 @@ -import os -import shelve -import filelock - -def fqname_to_str(fqname): - if type(fqname) is list: - fqname = ':'.join(fqname) - return fqname - -class TestDB(object): - def __init__(self, filename='/var/tmp/test.db'): - self.db_file = filename - if not os.path.exists(self.db_file): - shelve.open(self.db_file).close() - self.lock = filelock.FileLock(self.db_file+'.lock') - self.lock.timeout = 60 - - def open(self, mode): - if mode == 'read': - self.orig_db = shelve.open(self.db_file, flag='r') - else: - self.orig_db = shelve.open(self.db_file) - self.db = dict(self.orig_db) - - def close(self, mode): - if mode == 'write': - self.orig_db['projects'] = self.db.get('projects', {}) - self.orig_db['vdns'] = self.db.get('vdns', {}) - self.orig_db['fip-pool'] = self.db.get('fip-pool', {}) - self.orig_db['logical_router'] = self.db.get('logical_router', {}) - self.orig_db['load_balancer'] = self.db.get('load_balancer', {}) - self.orig_db.close() - - def read(f): - def wrapper(self, *args, **kwargs): - self.open(mode='read') - result = f(self, *args, **kwargs) - self.close(mode='read') - return result - return wrapper - - def write(f): - def 
wrapper(self, *args, **kwargs): - try: - with self.lock.acquire(): - self.open(mode='write') - result = f(self, *args, **kwargs) - self.close(mode='write') - return result - except filelock.Timeout: - print 'Unable to acquire lock on', self.db_file - raise - return wrapper - - def get_vdns_dict(self, fqname): - if 'vdns' not in self.db: - self.db['vdns'] = dict() - if fqname not in self.db['vdns']: - raise Exception('vDNS %s not found in db'%fqname) - return self.db['vdns'][fqname] - - @read - def get_vdns_id(self, fqname): - vdns = self.get_vdns_dict(fqname_to_str(fqname)) - return vdns['uuid'] - - @write - def set_vdns_id(self, fqname, uuid): - fqname = fqname_to_str(fqname) - try: - self.get_vdns_dict(fqname) - except: - self.db['vdns'][fqname] = dict() - self.db['vdns'][fqname]['uuid'] = uuid - - @write - def delete_vdns(self, fqname): - fqname = fqname_to_str(fqname) - if fqname in self.db['vdns']: - del self.db['vdns'][fqname] - - @read - def list_vdns(self): - return self.db['vdns'].keys() - - @write - def add_vdns_to_ipam(self, vdns_fqname, ipam_fqname): - self._add_vdns_to_ipam(self, vdns_fqname, ipam_fqname) - - def _add_vdns_to_ipam(self, vdns_fqname, ipam_fqname): - vdns = self.get_vdns_dict(fqname_to_str(vdns_fqname)) - if 'ipam_refs' not in vdns: - vdns['ipam_refs'] = [] - vdns['ipam_refs'].append(fqname_to_str(ipam_fqname)) - vdns['ipam_refs'] = list(set(vdns['ipam_refs'])) - - @write - def delete_vdns_from_ipam(self, vdns_fqname, ipam_fqname): - self._delete_vdns_from_ipam(self, vdns_fqname, ipam_fqname) - - def _delete_vdns_from_ipam(self, vdns_fqname, ipam_fqname): - vdns = self.get_vdns_dict(fqname_to_str(vdns_fqname)) - ipam_fqname = fqname_to_str(ipam_fqname) - if 'ipam_refs' not in vdns or ipam_fqname not in vdns['ipam_refs']: - return - vdns['ipam_refs'].remove(ipam_fqname) - - def get_project_dict(self, fqname): - if 'projects' not in self.db: - self.db['projects'] = dict() - if fqname not in self.db['projects']: - raise Exception('Project 
%s not found in db'%fqname) - return self.db['projects'][fqname] - - @read - def get_project_id(self, fqname): - proj = self.get_project_dict(fqname_to_str(fqname)) - return proj['uuid'] - - @write - def set_project_id(self, fqname, uuid): - fqname = fqname_to_str(fqname) - try: - self.get_project_dict(fqname) - except: - self.db['projects'][fqname] = dict() - self.db['projects'][fqname]['uuid'] = uuid - - @write - def delete_project(self, fqname): - fqname = fqname_to_str(fqname) - if fqname in self.db['projects']: - del self.db['projects'][fqname] - - @read - def list_projects(self): - return self.db['projects'].keys() - - def get_ipam_dict(self, fqname): - project_fqname = ':'.join(fqname.split(':')[:-1]) - project = self.get_project_dict(project_fqname) - if 'ipams' not in project: - project['ipams'] = dict() - if fqname not in project['ipams']: - project['ipams'][fqname] = dict() - return project['ipams'][fqname] - - @read - def get_ipam_id(self, fqname): - ipam = self.get_ipam_dict(fqname_to_str(fqname)) - return ipam['uuid'] - - @write - def add_ipam(self, fqname, uuid, vdns_fqname=None): - ipam = self.get_ipam_dict(fqname_to_str(fqname)) - ipam['uuid'] = uuid - self._add_vdns_to_ipam(fqname_to_str(vdns_fqname), fqname) - ipam['vdns'] = vdns_fqname - - @write - def delete_ipam(self, fqname): - fqname = fqname_to_str(fqname) - ipam = self.get_ipam_dict(fqname) - if 'vdns' in ipam: - self._delete_vdns_from_ipam(ipam['vdns'], fqname) - proj = self.get_project_dict(fqname_to_str(':'.join(fqname.split(':')[:-1]))) - if fqname in proj['ipams']: - del proj['ipams'][fqname] - - @read - def list_ipams(self, proj_fqname): - proj = self.get_project_dict(fqname_to_str(proj_fqname)) - if 'ipams' not in proj: - return [] - return proj['ipams'].keys() - - def get_vn_dict(self, fqname): - project_fqname = ':'.join(fqname.split(':')[:-1]) - project = self.get_project_dict(project_fqname) - if 'virtual-networks' not in project: - project['virtual-networks'] = dict() - if 
fqname not in project['virtual-networks']: - project['virtual-networks'][fqname] = dict() - return project['virtual-networks'][fqname] - - @read - def get_virtual_network_id(self, fqname): - vn = self.get_vn_dict(fqname_to_str(fqname)) - return vn['uuid'] - - @read - def get_virtual_network(self, fqname): - vn = self.get_vn_dict(fqname_to_str(fqname)) - return (vn['uuid'], vn['subnets']) - - @write - def add_virtual_network(self, fqname, uuid, subnets=None): - vn = self.get_vn_dict(fqname_to_str(fqname)) - vn['uuid'] = uuid - vn['subnets'] = subnets - - @write - def delete_virtual_network(self, fqname): - try: - fqname = fqname_to_str(fqname) - proj = self.get_project_dict(fqname_to_str(':'.join(fqname.split(':')[:-1]))) - if fqname in proj['virtual-networks']: - del proj['virtual-networks'][fqname] - except: - pass - - @write - def add_lr_to_vn(self, fqname, router_id): - vn = self.get_vn_dict(fqname_to_str(fqname)) - vn['router'] = router_id - - @write - def delete_lr_from_vn(self, fqname, router_id): - vn = self.get_vn_dict(fqname_to_str(fqname)) - if 'router' in vn and vn['router'] == router_id: - del vn['router'] - - @read - def list_virtual_networks(self, proj_fqname): - proj = self.get_project_dict(fqname_to_str(proj_fqname)) - if 'virtual-networks' not in proj: - return [] - return proj['virtual-networks'].keys() - - def get_vm_dict(self, name, project_fqname): - project = self.get_project_dict(project_fqname) - if 'virtual-machines' not in project: - project['virtual-machines'] = dict() - if name not in project['virtual-machines']: - project['virtual-machines'][name] = dict() - return project['virtual-machines'][name] - - @write - def add_virtual_machine(self, name, proj_fqname, uuid, vn_ids, username='ubuntu', password='ubuntu'): - vm = self.get_vm_dict(fqname_to_str(name), fqname_to_str(proj_fqname)) - vm['uuid'] = uuid - vm['vns'] = vn_ids - vm['username'] = username - vm['password'] = password - - @write - def associate_fip_to_vm(self, name, 
proj_fqname, fip_id): - vm = self.get_vm_dict(fqname_to_str(name), fqname_to_str(proj_fqname)) - if not 'fip_ids' in vm: - vm['fip_ids'] = list() - vm['fip_ids'].append(fip_id) - vm['fip_ids'] = list(set(vm['fip_ids'])) - - @write - def disassociate_fip_from_vm(self, name, proj_fqname, fip_id): - vm = self.get_vm_dict(fqname_to_str(name), fqname_to_str(proj_fqname)) - if not 'fip_ids' in vm: - return - vm['fip_ids'].remove(fip_id) - - @read - def get_fip_id_assoc(self, name, proj_fqname): - vm = self.get_vm_dict(fqname_to_str(name), fqname_to_str(proj_fqname)) - return vm.get('fip_ids', []) - - @write - def delete_virtual_machine(self, name, proj_fqname): - proj = self.get_project_dict(fqname_to_str(proj_fqname)) - if name in proj['virtual-machines']: - del proj['virtual-machines'][name] - - @read - def get_virtual_machine(self, name, proj_fqname): - vm = self.get_vm_dict(fqname_to_str(name), fqname_to_str(proj_fqname)) - return (vm['uuid'], vm['vns'], vm['username'], vm['password']) - - @read - def get_creds_of_vm(self, vm_id, proj_fqname): - proj = self.get_project_dict(fqname_to_str(proj_fqname)) - for vm_name in proj['virtual-machines'].keys(): - vm = self.get_vm_dict(vm_name, fqname_to_str(proj_fqname)) - if vm_id == vm['uuid']: - return (vm['username'], vm['password']) - return (None, None) - - @read - def list_virtual_machines(self, proj_fqname): - proj = self.get_project_dict(fqname_to_str(proj_fqname)) - if 'virtual-machines' not in proj: - return [] - return proj['virtual-machines'].keys() - - @read - def list_vms_in_vn(self, vn_id, proj_fqname): - vms = list() - proj = self.get_project_dict(fqname_to_str(proj_fqname)) - if not proj.get('virtual-machines', None): - return vms - for vm_name in proj['virtual-machines'].keys(): - if vn_id in proj['virtual-machines'][vm_name]['vns']: - vm_dict = self.get_vm_dict(vm_name, fqname_to_str(proj_fqname)) - vms.append(vm_dict['uuid']) - return vms - - def get_fip_pool_dict(self, fqname): - if 'fip-pool' not in 
self.db: - self.db['fip-pool'] = dict() - if fqname not in self.db['fip-pool']: - raise Exception('Fip-Pool %s not found in db'%fqname) - return self.db['fip-pool'][fqname] - - @write - def add_fip_pool(self, fqname, uuid): - fqname = fqname_to_str(fqname) - try: - self.get_fip_pool_dict(fqname) - except: - self.db['fip-pool'][fqname] = dict() - self.db['fip-pool'][fqname]['uuid'] = uuid - - fip_dict = self.get_fip_pool_dict(fqname_to_str(fqname)) - fip_dict['uuid'] = uuid - - @read - def get_fip_pool_id(self, fqname): - fip_dict = self.get_fip_pool_dict(fqname_to_str(fqname)) - return fip_dict['uuid'] - - @write - def delete_fip_pool(self, fqname): - fqname = fqname_to_str(fqname) - if fqname in self.db['fip-pool']: - del self.db['fip-pool'][fqname] - - @read - def list_fip_pools(self): - return self.db['fip-pool'].keys() - - @read - def get_fips(self, fqname): - fip_dict = self.get_fip_pool_dict(fqname_to_str(fqname)) - return fip_dict.get('fip_ids', []) - - @write - def add_fip(self, fqname, fip_id): - fip_dict = self.get_fip_pool_dict(fqname_to_str(fqname)) - if 'fip_ids' not in fip_dict: - fip_dict['fip_ids'] = list() - fip_dict['fip_ids'].append(fip_id) - fip_dict['fip_ids'] = list(set(fip_dict['fip_ids'])) - - @write - def delete_fip(self, fqname, fip_id): - fip_dict = self.get_fip_pool_dict(fqname_to_str(fqname)) - if 'fip_ids' not in fip_dict: - return - fip_dict['fip_ids'].remove(fip_id) - - @read - def find_fip_pool_id(self, fip_id): - for fqname, value in self.db['fip-pool'].iteritems(): - if fip_id in value['fip_ids']: - return value['uuid'] - - def get_logical_router_dict(self, fqname): - if 'logical_router' not in self.db: - self.db['logical_router'] = dict() - if fqname not in self.db['logical_router']: - raise Exception('Logical router %s not found in db'%fqname) - return self.db['logical_router'][fqname] - - @write - def add_logical_router(self, fqname, uuid): - fqname = fqname_to_str(fqname) - try: - self.get_logical_router_dict(fqname) - except: 
- self.db['logical_router'][fqname] = dict() - self.db['logical_router'][fqname]['uuid'] = uuid - - logical_router = self.get_logical_router_dict(fqname_to_str(fqname)) - logical_router['uuid'] = uuid - - @read - def get_logical_router_id(self, fqname): - logical_router = self.get_logical_router_dict(fqname_to_str(fqname)) - return logical_router['uuid'] - - @write - def delete_logical_router(self, fqname): - fqname = fqname_to_str(fqname) - if fqname in self.db['logical_router']: - del self.db['logical_router'][fqname] - - @read - def list_logical_routers(self): - return self.db['logical_router'].keys() - - @read - def get_vns_of_lr(self, fqname): - logical_router = self.get_logical_router_dict(fqname_to_str(fqname)) - return logical_router.get('vns', []) - - @write - def add_vn_to_lr(self, fqname, vn_id): - logical_router = self.get_logical_router_dict(fqname_to_str(fqname)) - if 'vns' not in logical_router: - logical_router['vns'] = list() - logical_router['vns'].append(vn_id) - logical_router['vns'] = list(set(logical_router['vns'])) - - @write - def delete_vn_from_lr(self, fqname, vn_id): - logical_router = self.get_logical_router_dict(fqname_to_str(fqname)) - if 'vns' not in logical_router or vn_id not in logical_router['vns']: - return - logical_router['vns'].remove(vn_id) - - @write - def get_gw_of_lr(self, fqname): - logical_router = self.get_logical_router_dict(fqname_to_str(fqname)) - return logical_router.get('gw', None) - - @write - def set_gw_to_lr(self, fqname, vn_id): - logical_router = self.get_logical_router_dict(fqname_to_str(fqname)) - logical_router['gw'] = vn_id - - @write - def clear_gw_from_lr(self, fqname): - logical_router = self.get_logical_router_dict(fqname_to_str(fqname)) - if 'gw' in logical_router: - del logical_router['gw'] - - def get_load_balancer_dict(self, fqname): - if 'load_balancer' not in self.db: - self.db['load_balancer'] = dict() - if fqname not in self.db['load_balancer']: - raise Exception('Load balancer %s not found in 
db'%fqname) - return self.db['load_balancer'][fqname] - - @write - def add_load_balancer(self, fqname, uuid): - fqname = fqname_to_str(fqname) - try: - self.get_load_balancer_dict(fqname) - except: - self.db['load_balancer'][fqname] = dict() - self.db['load_balancer'][fqname]['uuid'] = uuid - - load_balancer = self.get_load_balancer_dict(fqname_to_str(fqname)) - load_balancer['uuid'] = uuid - - @write - def delete_load_balancer(self, fqname): - fqname = fqname_to_str(fqname) - if fqname in self.db['load_balancer']: - del self.db['load_balancer'][fqname] - - @read - def get_load_balancer_id(self, fqname): - load_balancer = self.get_load_balancer_dict(fqname_to_str(fqname)) - return load_balancer['uuid'] - - @write - def set_vip_to_lb(self, fqname, vip_id): - load_balancer = self.get_load_balancer_dict(fqname_to_str(fqname)) - load_balancer['vip'] = vip_id - - @write - def clear_vip_from_lb(self, fqname): - load_balancer = self.get_load_balancer_dict(fqname_to_str(fqname)) - if 'vip' in load_balancer: - del load_balancer['vip'] - - @read - def get_vip_of_lb(self, fqname): - load_balancer = self.get_load_balancer_dict(fqname_to_str(fqname)) - return load_balancer.get('vip', None) - - @write - def set_fip_on_vip(self, fqname, fip): - load_balancer = self.get_load_balancer_dict(fqname_to_str(fqname)) - load_balancer['fip'] = fip - - @write - def clear_fip_on_vip(self, fqname): - load_balancer = self.get_load_balancer_dict(fqname_to_str(fqname)) - if 'fip' in load_balancer: - del load_balancer['fip'] - - @read - def get_fip_on_vip(self, fqname): - load_balancer = self.get_load_balancer_dict(fqname_to_str(fqname)) - return load_balancer.get('fip', None) - - @write - def add_member_to_lb(self, fqname, member_id): - load_balancer = self.get_load_balancer_dict(fqname_to_str(fqname)) - if 'members' not in load_balancer: - load_balancer['members'] = list() - load_balancer['members'].append(member_id) - load_balancer['members'] = list(set(load_balancer['members'])) - - @write - 
def delete_member_from_lb(self, fqname, member_id): - load_balancer = self.get_load_balancer_dict(fqname_to_str(fqname)) - if 'members' not in load_balancer or member_id not in load_balancer['members']: - return - load_balancer['members'].remove(member_id) - - @read - def get_members_of_lb(self, fqname): - load_balancer = self.get_load_balancer_dict(fqname_to_str(fqname)) - return load_balancer.get('members', []) - - @read - def list_load_balancer(self): - return self.db['load_balancer'].keys() - - @read - def dump(self): - print self.db - -def main(): -# db = TestDB('db.1') - db = TestDB('/root/test.db.scale.load_balance') -# db.dump() -# l = list() -# for entry in db.list_logical_routers(): -# router_name = entry.split(':')[2] -# if router_name in l: -# import pdb; pdb.set_trace() -# print router_name -# l.append(router_name) -# if db.get_logical_router_id(entry) == '22c7f61b-aec3-4787-9dbf-98bee9f030fc': -# import pdb; pdb.set_trace() - for entry in db.list_fip_pools(): - print entry - fips = db.get_fips(entry) - import pdb; pdb.set_trace() - exit(0) - db.delete_project('default-domain:TestProject-LBaas') - db.set_project_id('default-domain:db-test', 123) - print db.get_project_id('default-domain:db-test') - db.add_virtual_network('default-domain:db-test:db-vn', 1234) - print db.get_virtual_network_id('default-domain:db-test:db-vn') - print db.list_virtual_networks('default-domain:db-test') - -if __name__ == "__main__": - main() diff --git a/tcutils/fabfile.py b/tcutils/fabfile.py deleted file mode 100644 index e6dfe116f..000000000 --- a/tcutils/fabfile.py +++ /dev/null @@ -1,162 +0,0 @@ -from fabric.operations import sudo, run, get, put, env -import paramiko -import time -import ncclient -import json -env.command_timeout = 120 - - -def sudo_command(cmd): - sudo(cmd) - - -def command(cmd): - run(cmd) - - -def fput(src, dest): - put(src, dest) - - -def retry(tries=5, delay=3): - '''Retries a function or method until it returns True. 
- delay sets the initial delay in seconds. - ''' - - if tries < 0: - raise ValueError("tries must be 0 or greater") - - if delay < 0: - raise ValueError("delay must be 0 or greater") - - def deco_retry(f): - def f_retry(*args, **kwargs): - mtries, mdelay = tries, delay # make mutable - - result = f(*args, **kwargs) # first attempt - rv = result - if type(result) is tuple: - rv = result[0] - while mtries > 0: - if rv == "True": # Done on success - if type(result) is tuple: - return ("True", result[1]) - return "True" - mtries -= 1 # consume an attempt - time.sleep(mdelay) # wait... - - result = f(*args, **kwargs) # Try again - rv = result - if type(result) is tuple: - rv = result[0] - if not "True" in rv: - if type(result) is tuple: - return ("False", result[1]) - return "False" # Ran out of tries :-( - else: - if type(result) is tuple: - return ("True", result[1]) - return "True" - - return f_retry # true decorator -> decorated function - return deco_retry # @retry(arg[, ...]) -> true decorator -# end retry - - -@retry(delay=5, tries=20) -def wait_for_ssh(timeout=5): - ip = env.host - try: - client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - client.connect(ip, username=env.user, - password=env.password, timeout=timeout) - # In some virtual clusters, client.connect passed, but later SSH cmds - # failed. 
So, better run a ssh cmd here itself before proceeding - client.exec_command('ls > /dev/null') - client.close() - except Exception, e: - client.close() - return "False" - print "True" - return "True" -# end wait_for_ssh - - -@retry(delay=5, tries=2) -def verify_socket_connection(port=22): - import socket - host = env.host - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - s.settimeout(2) - s.connect((host, int(port))) - s.shutdown(2) - print "Port %s reachable for host %s" % (str(port), host) - print "True" - return "True" - except socket.error as e: - print "Error on connect: %s" % e - print "Port %s NOT reachable for host %s" % (host, str(port)) - print "False" - return "False" - s.close() -# end verify_socket_connection - -#@retry(delay=5, tries=20) - - -def get_via_netconf(cmd, timeout=10, device='junos', hostkey_verify="False", format='text'): - if hostkey_verify == 'False': - hostkey_verify = bool(False) - timeout = int(timeout) - if device == 'junos': - device_params = {'name': 'junos'} - ip = env.host - from ncclient import manager - try: - conn = manager.connect(host=str(ip), username=env.user, password=env.password, - timeout=timeout, device_params=device_params, hostkey_verify=hostkey_verify) - get_config = conn.command(command=str(cmd), format=format) - if format == 'json': - op = json.loads(get_config._NCElement__root.text) - else: - op = get_config.tostring - print op - return "True" - except Exception, e: - return "False" -# end get_via_netconf - -#@retry(delay=5, tries=20) - - -def config_via_netconf(cmd_string, timeout=10, device='junos', hostkey_verify="False"): - if hostkey_verify == 'False': - hostkey_verify = bool(False) - timeout = int(timeout) - if device == 'junos': - device_params = {'name': 'junos'} - cmdList = cmd_string.split(';') - ip = env.host - from ncclient import manager - try: - conn = manager.connect(host=str(ip), username=env.user, password=env.password, - timeout=timeout, device_params=device_params, 
hostkey_verify=hostkey_verify) - conn.lock() - send_config = conn.load_configuration(action='set', config=cmdList) - print send_config.tostring - check_config = conn.validate() - print check_config.tostring - compare_config = conn.compare_configuration() - print compare_config.tostring - conn.commit() - if 'family mpls mode packet-based' in cmd_string: - conn.reboot() - conn.unlock() - conn.close_session() - print compare_config.tostring - return "True" - except Exception, e: - return "False" -# end config_via_netconf diff --git a/tcutils/filelock.py b/tcutils/filelock.py deleted file mode 100644 index cc0147196..000000000 --- a/tcutils/filelock.py +++ /dev/null @@ -1,385 +0,0 @@ -# This is free and unencumbered software released into the public domain. -# -# Anyone is free to copy, modify, publish, use, compile, sell, or -# distribute this software, either in source code form or as a compiled -# binary, for any purpose, commercial or non-commercial, and by any -# means. -# -# In jurisdictions that recognize copyright laws, the author or authors -# of this software dedicate any and all copyright interest in the -# software to the public domain. We make this dedication for the benefit -# of the public at large and to the detriment of our heirs and -# successors. We intend this dedication to be an overt act of -# relinquishment in perpetuity of all present and future rights to this -# software under copyright law. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. 
-# -# For more information, please refer to - -""" -A platform independent file lock that supports the with-statement. -""" - - -# Modules -# ------------------------------------------------ -import os -import threading -import time -try: - import warnings -except ImportError: - warnings = None - -try: - import msvcrt -except ImportError: - msvcrt = None - -try: - import fcntl -except ImportError: - fcntl = None - - -# Backward compatibility -# ------------------------------------------------ -try: - TimeoutError -except NameError: - TimeoutError = OSError - - -# Data -# ------------------------------------------------ -__all__ = ["Timeout", "FileLock"] -__version__ = "2.0.1" - - -# Exceptions -# ------------------------------------------------ -class Timeout(TimeoutError): - """ - Raised when the lock could not be acquired in *timeout* - seconds. - """ - - def __init__(self, lock_file): - """ - """ - #: The path of the file lock. - self.lock_file = lock_file - return None - - def __str__(self): - temp = "The file lock '{}' could not be acquired."\ - .format(self.lock_file) - return temp - - -# Classes -# ------------------------------------------------ -class BaseFileLock(object): - """ - Implements the base class of a file lock. - """ - - def __init__(self, lock_file, timeout = -1): - """ - """ - # The path to the lock file. - self._lock_file = lock_file - - # The file descriptor for the *_lock_file* as it is returned by the - # os.open() function. - # This file lock is only NOT None, if the object currently holds the - # lock. - self._lock_file_fd = None - - # The default timeout value. - self.timeout = timeout - - # We use this lock primarily for the lock counter. - self._thread_lock = threading.Lock() - - # The lock counter is used for implementing the nested locking - # mechanism. Whenever the lock is acquired, the counter is increased and - # the lock is only released, when this value is 0 again. 
- self._lock_counter = 0 - return None - - @property - def lock_file(self): - """ - The path to the lock file. - """ - return self._lock_file - - @property - def timeout(self): - """ - You can set a default timeout for the filelock. It will be used as - fallback value in the acquire method, if no timeout value (*None*) is - given. - - If you want to disable the timeout, set it to a negative value. - - A timeout of 0 means, that there is exactly one attempt to acquire the - file lock. - - .. versionadded:: 2.0.0 - """ - return self._timeout - - @timeout.setter - def timeout(self, value): - """ - """ - self._timeout = float(value) - return None - - # Platform dependent locking - # -------------------------------------------- - - def _acquire(self): - """ - Platform dependent. If the file lock could be - acquired, self._lock_file_fd holds the file descriptor - of the lock file. - """ - raise NotImplementedError() - - def _release(self): - """ - Releases the lock and sets self._lock_file_fd to None. - """ - raise NotImplementedError() - - # Platform independent methods - # -------------------------------------------- - - @property - def is_locked(self): - """ - True, if the object holds the file lock. - - .. versionchanged:: 2.0.0 - - This was previously a method and is now a property. - """ - return self._lock_file_fd is not None - - def acquire(self, timeout=None, poll_intervall=0.05): - """ - Acquires the file lock or fails with a :exc:`Timeout` error. - - .. code-block:: python - - # You can use this method in the context manager (recommended) - with lock.acquire(): - pass - - # Or you use an equal try-finally construct: - lock.acquire() - try: - pass - finally: - lock.release() - - :arg float timeout: - The maximum time waited for the file lock. - If ``timeout <= 0``, there is no timeout and this method will - block until the lock could be acquired. - If ``timeout`` is None, the default :attr:`~timeout` is used. 
- - :arg float poll_intervall: - We check once in *poll_intervall* seconds if we can acquire the - file lock. - - :raises Timeout: - if the lock could not be acquired in *timeout* seconds. - - .. versionchanged:: 2.0.0 - - This method returns now a *proxy* object instead of *self*, - so that it can be used in a with statement without side effects. - """ - # Use the default timeout, if no timeout is provided. - if timeout is None: - timeout = self.timeout - - # Increment the number right at the beginning. - # We can still undo it, if something fails. - with self._thread_lock: - self._lock_counter += 1 - - try: - start_time = time.time() - while True: - with self._thread_lock: - if not self.is_locked: - self._acquire() - - if self.is_locked: - break - elif timeout >= 0 and time.time() - start_time > timeout: - raise Timeout(self._lock_file) - else: - time.sleep(poll_intervall) - except: - # Something did go wrong, so decrement the counter. - with self._thread_lock: - self._lock_counter = max(0, self._lock_counter - 1) - - raise - - # This class wraps the lock to make sure __enter__ is not called - # twiced when entering the with statement. - # If we would simply return *self*, the lock would be acquired again - # in the *__enter__* method of the BaseFileLock, but not released again - # automatically. - class ReturnProxy(object): - - def __init__(self, lock): - self.lock = lock - return None - - def __enter__(self): - return self.lock - - def __exit__(self, exc_type, exc_value, traceback): - self.lock.release() - return None - - return ReturnProxy(lock = self) - - def release(self, force = False): - """ - Releases the file lock. - - Please note, that the lock is only completly released, if the lock - counter is 0. - - :arg bool force: - If true, the lock counter is ignored and the lock is released in - every case. 
- """ - with self._thread_lock: - - if self.is_locked: - self._lock_counter -= 1 - - if self._lock_counter == 0 or force: - self._release() - self._lock_counter = 0 - return None - - def __enter__(self): - self.acquire() - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.release() - return None - - def __del__(self): - self.release(force = True) - return None - - -# Windows locking mechanism -if msvcrt: - class FileLock(BaseFileLock): - - def _acquire(self): - open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC - - try: - fd = os.open(self._lock_file, open_mode) - except OSError: - pass - else: - try: - msvcrt.locking(fd, msvcrt.LK_NBLCK, 1) - except (IOError, OSError): - os.close(fd) - else: - self._lock_file_fd = fd - return None - - def _release(self): - msvcrt.locking(self._lock_file_fd, msvcrt.LK_UNLCK, 1) - os.close(self._lock_file_fd) - self._lock_file_fd = None - - try: - os.remove(self._lock_file) - # Probably another instance of the application - # that acquired the file lock. - except OSError: - pass - return None - -# Unix locking mechanism -elif fcntl: - class FileLock(BaseFileLock): - - def _acquire(self): - open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC - fd = os.open(self._lock_file, open_mode) - - try: - fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) - except (IOError, OSError): - os.close(fd) - else: - self._lock_file_fd = fd - return None - - def _release(self): - fcntl.flock(self._lock_file_fd, fcntl.LOCK_UN) - os.close(self._lock_file_fd) - self._lock_file_fd = None - - try: - os.remove(self._lock_file) - # Probably another instance of the application - # that acquired the file lock. - except OSError: - pass - return None - -# The "hard" lock is not available. But we can watch the existence of a file. 
-else: - class FileLock(BaseFileLock): - - def _acquire(self): - open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC - try: - fd = os.open(self._lock_file, open_mode) - except (IOError, OSError): - pass - else: - self._lock_file_fd = fd - return None - - def _release(self): - os.close(self._lock_file_fd) - self._lock_file_fd = None - - try: - os.remove(self._lock_file) - # The file is already deleted and that's what we want. - except OSError: - pass - return None - - if warnings is not None: - warnings.warn("only soft file lock is available") diff --git a/tcutils/parsers/__init__.py b/tcutils/parsers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tcutils/parsers/flow_rate_parse.py b/tcutils/parsers/flow_rate_parse.py deleted file mode 100644 index 02cca0db9..000000000 --- a/tcutils/parsers/flow_rate_parse.py +++ /dev/null @@ -1,31 +0,0 @@ -"parser to parse the 'flow -r' output.""" - -import re -class FlowRateParser: - "Parser to parse flow -r output" - def __init__(self, filename): - file=open(filename,'r') - self.lines = file.readlines() - self.flow_rate =[] - self.parse() - - def parse(self): - pattern = "Flow setup rate =\s+(-?\d+)\s+flows/sec" - for line in self.lines: - match = re.search(pattern, line) - self.flow_rate.append(int(match.group(1))) - - def flowrate(self): - flow_rate_filtered = [] - for item in self.flow_rate: - #Removing negative and flows ceated by copy/ping metadata actions. 
- if item > 100: - flow_rate_filtered.append(item) - flow_rate_filtered.sort() - length = len(flow_rate_filtered) - if length % 2 == 0: - mid = length/2 - flow_rate = (flow_rate_filtered[mid] + flow_rate_filtered[mid-1])/2 - else: - flow_rate = flow_rate_filtered[((length +1)/2)-1] - return flow_rate diff --git a/tcutils/parsers/netperfparse.py b/tcutils/parsers/netperfparse.py deleted file mode 100644 index 14e8ed0de..000000000 --- a/tcutils/parsers/netperfparse.py +++ /dev/null @@ -1,98 +0,0 @@ -"""Parser to parse the netperf output.""" - -import re -from itertools import dropwhile - -class NetPerfParser(object): - """Parser to parse the netperf output.""" - def __init__(self, output): - output = output.split('\r\n') - out_put = [x for x in output if x != ''] - self.output = out_put - self.parsed_output = {} - self.parse() - - def parse(self): - get_out = lambda yy: list(dropwhile(lambda x: x.count(yy) != True, self.output)) - if get_out('TCP STREAM TEST'): - new_out = get_out('TCP STREAM TEST') - pattern = "\s*(\d+)\s+(\d+)\s+(\d+)\s+([0-9\.]+)\s+([0-9\.]+)" - match = re.search(pattern, new_out[-1]) - (self.parsed_output['recv_sock_size'], - self.parsed_output['send_sock_size'], - self.parsed_output['send_msg_size'], - self.parsed_output['elapsed_time'], - self.parsed_output['throughput']) = match.groups() - - pattern = "\s*bytes\s+bytes\s+bytes\s+secs\.\s+([0-9\^]+bits/sec)" - match = re.search(pattern, new_out[-2]) - self.parsed_output['throughput_bits_per_sec'] = match.group(1) - return self.parsed_output - elif get_out('UDP STREAM TEST'): - new_out = get_out('UDP STREAM TEST') - pattern = "(\d+)\s+(\d*)\s+([0-9\.]+)\s+(\d+)\s+(\d?)\s+([0-9\.]+)" - match = re.search(pattern, new_out[-3]) - (self.parsed_output['sock_size'], - self.parsed_output['message_size'], - self.parsed_output['elapsed_time'], - self.parsed_output['msg_okay'], - self.parsed_output['msg_errors'], - self.parsed_output['throughput']) = match.groups() - - pattern = 
"\s*bytes\s+bytes\s+secs\s+#\s+#\s+([0-9\^]+bits/sec)" - match = re.search(pattern, new_out[-4]) - self.parsed_output['throughput_bits_per_sec'] = match.group(1) - return self.parsed_output - elif get_out('TCP REQUEST/RESPONSE TEST'): - new_out = get_out('TCP REQUEST/RESPONSE TEST') - pattern = "\s*(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+([0-9\.]+)\s+([0-9\.]+)" - match = re.search(pattern, new_out[-2]) - (self.parsed_output['send_sock_bytes'], - self.parsed_output['recv_sock_bytes'], - self.parsed_output['req_size_bytes'], - self.parsed_output['resp_size_bytes'], - self.parsed_output['elapsed_time'], - self.parsed_output['tran_rate']) = match.groups() - - pattern = "\s*bytes\s+Bytes\s+bytes\s+bytes\s+secs.\s+(per sec)" - match = re.search(pattern, new_out[-3]) - self.parsed_output['transactions_per_sec'] = match.group(1) - return self.parsed_output - elif get_out('UDP REQUEST/RESPONSE TEST'): - new_out = get_out('UDP REQUEST/RESPONSE TEST') - pattern = "\s*(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+([0-9\.]+)\s+([0-9\.]+)" - match = re.search(pattern, new_out[-2]) - (self.parsed_output['send_sock_bytes'], - self.parsed_output['recv_sock_bytes'], - self.parsed_output['req_size_bytes'], - self.parsed_output['resp_size_bytes'], - self.parsed_output['elapsed_time'], - self.parsed_output['tran_rate']) = match.groups() - - pattern = "\s*bytes\s+Bytes\s+bytes\s+bytes\s+secs.\s+(per sec)" - match = re.search(pattern, new_out[-3]) - self.parsed_output['transactions_per_sec'] = match.group(1) - return self.parsed_output - - def get_throughput(self): - throughput=self.parsed_output['throughput']+" "+ self.parsed_output['throughput_bits_per_sec'] - return (throughput) - - def get_trans_rate(self): - trans_rate= self.parsed_output['tran_rate'] + " " + self.parsed_output['transactions_per_sec'] - return trans_rate - - def get_elapsed_time(self): - return int(self.parsed_output['elapsed_time']) - - def get_throughput_in_bits_per_sec(self): - return self.parsed_output['throughput_bits_per_sec'] - - 
def get_recv_socket_size(self): - return int(self.parsed_output['recv_sock_size']) - - def get_send_socket_size(self): - return int(self.parsed_output['send_sock_size']) - - def get_send_message_size(self): - return int(self.parsed_output['send_msg_size']) diff --git a/tcutils/parsers/pingparse.py b/tcutils/parsers/pingparse.py deleted file mode 100644 index 51a28244c..000000000 --- a/tcutils/parsers/pingparse.py +++ /dev/null @@ -1,27 +0,0 @@ -"parser to parse the ping output.""" - -import re - - -class PingParser(object): - - """Parser to parse the ping output.""" - - def __init__(self, output): - self.output = output - self.parsed_output = {} - self.parse() - - def parse(self): - match = re.search( - "rtt\s+(min/avg/max/mdev)\s+=\s+(\d+.\d+/\d+.\d+/\d+.\d+/\d+.\d+)\s+(\w+)", self.output) - output_req = [] - output_req.append(match.group(1)) - output_req.append(match.group(2)) - self.parsed_output = dict( - zip(output_req[0].split('/'), output_req[1].split('/'))) - self.parsed_output['unit'] = match.group(3) - - def get_ping_latency(self): - ping_output=self.parsed_output['avg']+" "+self.parsed_output['unit'] - return ping_output diff --git a/tcutils/pkgs/Traffic/README b/tcutils/pkgs/Traffic/README deleted file mode 100644 index 30d598ca1..000000000 --- a/tcutils/pkgs/Traffic/README +++ /dev/null @@ -1,251 +0,0 @@ - -Traffic package developed for send/recv packets from a linux host, both from -physical and virtual host. This is just an wrraper over Scapy a powerful -interactive packet manipulation package. - -Scapy Documentation: http://www.secdev.org/projects/scapy/doc/ - -The directory traffic/examples contains sample scripts intended to give an idea -how traffic pacakage may be used for crafting, sending, receving, capturing -and parsing packets. - - -NOTE: This package is dependent on scapy, so make sure to install scapy before - using this package. 
Use "ubunut-traffic" as image name when creating VM's - using VMFixture(image_name = 'ubuntu-traffic') - -How To: -========= -1. Build the Traffic package using "python setup.py sdist" -2. Install the Traffic package in the desired hosts, using "python setup.py install" - >>> vm_fixture.install_pkg("Traffic") #Performs the above two steps for the users. - -3. Create traffic stream using "traffic.core.stream" - Refer "Creating traffic streams" section in this README. - -4. Create traffic profile with stream using "traffic.core.profile" - Refer "Creating traffic profiles" section in this README. - -5. Create send helper using traffic.core.helpers.Sender()" - Refer "Creating Sender" section in this README. - -6. Create receive helper using traffic.core.helpers.Receiver()" - Refer "Creating Receiver" section in this README. - -7. Start reciever -8. Start sender -9. Do your other testing here -10. Poll reciever (optional: To make sure traffic flows) -11. Poll sender (optional: To make sure traffic flows) -12. Stop sender -13.Stop receiver - -For StandardProfile and BurstProfile stop() call is not necessary. We should poll() to make sure -the expected Number of packets are sent/received. -or -Sleep till the required Number of packets is sent/received and then call stop() - -Supported streams: -================== -1. ICMP -2. UDP -3. TCP -To get the supported header fields that can be crafted use stream.help() as below. -This follows the scapy convention of naming the various protocol header fields. 
- ->>> from tcutils.pkgs.Traffic.traffic.core import stream ->>> stream.help() -ICMPHeader : ('type', 'code', 'chksum', 'id', 'seq', 'ts_ori', 'ts_rxts_tx', 'gw', 'ptr', 'reserved', 'addr_mask') -IPHeader : ('version', 'ihl', 'tos', 'iplen', 'id', 'ipflags', 'frag', 'ttl', 'proto', 'ipchksum', 'src', 'dst', 'options') -TCPHeader : ('sport', 'dport', 'seq', 'ack', 'dataofs', 'reserved', 'flags', 'window', 'chksum', 'urgptr') -TCPHeader : ('EOL', 'NOP', 'MSS', 'WScale', 'SAckOK', 'SAck', 'Timestamp', 'AltChkSum', 'AltChkSumOpt') -UDPHeader : ('sport', 'dport', 'len', 'chksum') ->>> - - -Supported Profile types: -======================== -1. StandardProfile - "N" number of packets are sent -2. ContinuousProfile - Packets are sent continuously till the user stop it. -3. BurstProfile - "X" burst count of Packets are bursted/sent at burst interval continuously/"Y" count. -4. ContinuousSportRange (Supports only UDP currently; will be extended to support TCP) - This takes 'startport' and 'endport' args, creates packet with L4 sport from the range - startport till endport and sents them continuously(round robin) till the user stop it. - - -Creating traffic stream: -======================= ->>> sys.path.append(os.path.realpath('tcutils/pkgs/Traffic')) ->>> from traffic.core.stream import Stream ->>> stream = Stream(protocol="ip", proto="udp", dport="8000", src="10.1.1.100", dst="10.2.2.200") - - -Creating traffic Profile: -========================= ->>> sys.path.append(os.path.realpath('tcutils/pkgs/Traffic')) ->>> from traffic.core.profile import create, ContinuousProfile, StandardProfile, BurstProfile ->>> from traffic.core.stream import Stream ->>> stream = Stream(protocol="ip", proto="udp", dport="8000", src="10.1.1.100", dst="10.2.2.200") - -#send packets continuously with payload of size 100 bytes, and to verify the chksum of the packet. 
-#look for packets with udp port 8000 -#listen at 10.2.2.200:8000(Gets the ip from the stream above) ->>> profile = ContinuousProfile(stream=stream, size=100, capfilter="udp port 8000", chksum=True) - -#send 100 packets with payload "spamspamspam" -#look for packets with udp port 8000 -#listen at 10.2.2.200:8000(Gets the ip from the stream above) ->>> profile = StandardProfile(stream=stream, count=100, payload="spamspamspam", capfilter="udp port 8000") - -#send 100 packets at the interval of 60 secs for 10 times. -#look for packets with udp port 8000 -#listen at 20.2.2.100:8000(Gets the ip from the profile.listener); can be used when sending traffic to (Floating IP) ->>> profile = BusrtProfile(stream=stream, count=10, burst_count=100, burst_interval=60 listener="20.2.2.100", capfilter="udp port 8000") - -#send packets range with 'startport' to 'endport' as L4 sport continuously(round robin) with payload of size 100 bytes -#look for packets with udp port 8000 -#listen at 10.2.2.200:8000(Gets the ip from the stream above) ->>> profile = ContinuousSportRange(stream=stream, startport=8001, endport=8200) - -Creating Sender: -================= -Create a sender, a helper to start/stop the traffic, also holds the total packets sent/received. ->>> from traffic.core.helpers import Host ->>> from traffic.core.helpers import Sender ->>> local_host = Host() #with credentials of the host from where the traffic send/recv is triggerred. ->>> remote_host = Host() #with credentials of the host from where the actual traffic is sent. ->>> sender = Sender(name="send-udp", profile=profile, lhost=local_host, rhost=remote_host) - -#To start sending packets ->>> sender.start() - -#To poll sender for sent/received packets. ->>> sender.poll() -['Sent=1000\n', 'Received=1000'] - -#To stop sending packets and to populate the packets "sent" and "recv" attribute of "Sender". 
->>> sender.stop() -['Sent=2000\n', 'Received=2000'] ->>> print sender.sent, sender.recv -2000, 2000 - - -Creating Receiver: -================= -Create a Receiver, a helper to start/stop listening for packets, also holds the total packets received. ->>> from traffic.core.helpers import Host ->>> from traffic.core.helpers import Receiver ->>> local_host = Host() #with credentials of the host from where the traffic recv is triggerred. ->>> remote_host = Host() #with credentials of the host at which the actual traffic is recieved. ->>> sender = Receiver(name="recv-udp", profile=profile, lhost=local_host, rhost=remote_host) - -#To start receiving packets ->>> receiver.start() - -#To poll reciever for received packets. ->>> receiver.poll() -['Received=3000', 'Corrupted=0'] - -#To stop receiving packets and to populate the packets "recv" attribute of "Receiver". ->>> receiver.stop() -['Received=4000', 'Corrupted=0'] ->>> print receiver.recv -4000 - - -Example to send icmp traffic: -================================ - -#This is required as we pickle the Profile object and send it across network; -#The oject should be instansiated from traffic.core and not from -#tcutils.pkgs.Traffic.traffic.core for the uncpickle to work properly. Because -#in our testing scenario, we install the Traffic package in the VM and import -#the traffic package from traffic/ and not from tcutils/pkgs/Traffic/traffic. -sys.path.append(os.path.realpath('tcutils/pkgs/Traffic')) - -from traffic.core.stream import Stream -from traffic.core.profile import create, ContinuousProfile - - -#Install the Trafic package as below in the host that used for sending -#receiving traffic. 
-vm1_fixture.install_pkg("Traffic") -vm2_fixture.install_pkg("Traffic") - -#Create traffic stream -stream = Stream(protocol="ip", proto="icmp", src=vm1_fixture.vm_ip, - dst=vm2_fixture.vm_ip) - -#Create traffic profile -profile = ContinuousProfile(stream=stream, capfilter="icmp") - -#Start receiving/capturing traffic, If ICMP traffic it just captures -#packet, but in case of UDP/TCP, this creates a listener socket to -#receive packets and also capture packets. -receiver = Receiver("icmp", profile, local_host, recv_host, self.inputs.logger) -receiver.start() - -#Start sending packets., If ICMP it also receive the ICMP reply back -sender = Sender("icmp", profile, local_host, send_host, self.inputs.logger) -sender.start() - -#Do other testing stuff -sleep(5) - -#Stop sending -sender.stop() - -#Stop receiving -receiver.stop() - -print sender.sent, receiver.recv -assert "sender.sent == receiver.recv", "Icmp traffic failed" - -Example to send UDP traffic to FIP: -=================================== - #Send UDP traffic - fvm_fixture.install_pkg("Traffic") - vm1_fixture.install_pkg("Traffic") - stream = Stream(protocol="ip", proto="udp", src=fvm_fixture.vm_ip, - dst=fip, dport=9000) - profile = ContinuousProfile(stream=stream, listener=vm1_fixture.vm_ip, capfilter="udp port 8000") - local_host = Host(self.inputs.cfgm_ip, , ) - send_host = Host(fvm_fixture.local_ip) - recv_host = Host(vm1_fixture.local_ip) - sender = Sender("sendudp", profile, local_host, send_host, self.inputs.logger) - receiver = Receiver("recvudp", profile, local_host, recv_host, self.inputs.logger) - receiver.start() - sender.start() - #Poll to make sure traffic flows, optional - sender.poll() - receiver.poll() - sender.stop() - receiver.stop() - print sender.sent, receiver.recv - assert "sender.sent == receiver.recv", "UDP traffic to fip:%s failed" % fip - -Example to send TCP traffic: -============================ - #Send TCP traffic - fvm_fixture.install_pkg("Traffic") - 
vm1_fixture.install_pkg("Traffic") - stream = Stream(protocol="ip", proto="tcp", src=fvm_fixture.vm_ip, - dst=vm1_fixture.vm_ip, dport=9000) - profile = ContinuousProfile(stream=stream) - local_host = Host(self.inputs.cfgm_ip, , ) - send_host = Host(fvm_fixture.local_ip) - recv_host = Host(vm1_fixture.local_ip) - sender = Sender("sendtcp", profile, local_host, send_host, self.inputs.logger) - receiver = Receiver("recvtcp", profile, local_host, recv_host, self.inputs.logger) - receiver.start() - sender.start() - #Poll to make sure traffic flows, optional - sender.poll() - receiver.poll() - sender.stop() - receiver.stop() - print sender.sent, receiver.recv - assert "sender.sent == receiver.recv", "TCP traffic to fip:%s failed" % fip diff --git a/tcutils/pkgs/Traffic/__init__.py b/tcutils/pkgs/Traffic/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tcutils/pkgs/Traffic/setup.py b/tcutils/pkgs/Traffic/setup.py deleted file mode 100644 index 33ea9a4a5..000000000 --- a/tcutils/pkgs/Traffic/setup.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python - -from distutils.core import setup - -setup(name="traffic", - version="1.0", - author="Ignatious Johnson", - author_email="ijohnson@juniper.net", - description=("Traffic generator package."), - packages=['traffic', - 'traffic.core', - 'traffic.utils', ], - scripts=['traffic/scripts/sendpkts', 'traffic/scripts/recvpkts'] - ) diff --git a/tcutils/pkgs/Traffic/traffic/__init__.py b/tcutils/pkgs/Traffic/traffic/__init__.py deleted file mode 100644 index 7d1034b7e..000000000 --- a/tcutils/pkgs/Traffic/traffic/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Traffic generator package using scapy - - Scapy Documentation: http://www.secdev.org/projects/scapy/doc/ - -""" diff --git a/tcutils/pkgs/Traffic/traffic/core/__init__.py b/tcutils/pkgs/Traffic/traffic/core/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tcutils/pkgs/Traffic/traffic/core/generator.py 
b/tcutils/pkgs/Traffic/traffic/core/generator.py deleted file mode 100644 index bf63f1546..000000000 --- a/tcutils/pkgs/Traffic/traffic/core/generator.py +++ /dev/null @@ -1,374 +0,0 @@ -"""Module to send packets. -""" -import os -import socket -import signal -import traceback -from time import sleep -from optparse import OptionParser -from multiprocessing import Process, Event - -from scapy.all import send, sr1, sendpfast -from scapy.packet import Raw -from scapy.layers.inet import Ether, IP, UDP, TCP, ICMP -from scapy.layers.inet6 import IPv6, ICMPv6EchoRequest - -try: - # Running from the source repo "test". - from tcutils.pkgs.Traffic.traffic.core.profile import * - from tcutils.pkgs.Traffic.traffic.core.tcpclient import * - from tcutils.pkgs.Traffic.traffic.utils.logger import LOGGER, get_logger - from tcutils.pkgs.Traffic.traffic.utils.globalvars import LOG_LEVEL - from tcutils.pkgs.Traffic.traffic.utils.util import is_v6 -except ImportError: - # Distributed and installed as package - from traffic.core.profile import * - from traffic.core.tcpclient import * - from traffic.utils.logger import LOGGER, get_logger - from traffic.utils.globalvars import LOG_LEVEL - from traffic.utils.util import is_v6 - -LOGGER = "%s.core.generator" % LOGGER -log = get_logger(name=LOGGER, level=LOG_LEVEL) -SRC_PORT = 8000 - - -class CreatePkt(object): - - def __init__(self, profile): - self.profile = profile - self.stream = profile.stream - self._str_port_to_int() - log.debug("Stream: %s", self.stream.__dict__) - log.debug("Stream L3: %s", self.stream.l3.__dict__) - if self.stream.l4 is not None: - log.debug("Stream L4: %s", self.stream.l4.__dict__) - self.pkt = None - self._create() - if isinstance(self.profile, ContinuousSportRange): - self.pkts = self._create_pkts() - - def _str_port_to_int(self): - try: - self.stream.l4.sport = int(self.stream.l4.sport) - except AttributeError: - if self.stream.get_l4_proto() in ['udp', 'tcp']: - self.stream.l4.sport = SRC_PORT - try: - 
self.stream.l4.dport = int(self.stream.l4.dport) - except AttributeError: - pass - - def _create(self): - l2_hdr = None - # To incease rate, we need to send pkt at L2 usinf sendpfast - if isinstance(self.profile, ContinuousSportRange): - l2_hdr = self._l2_hdr() - l3_hdr = self._l3_hdr() - l4_hdr = self._l4_hdr() - if self.stream.get_l4_proto() == 'icmpv6': - self.profile.size = 0 - self.payload = self._payload() - if l2_hdr: - log.debug("L2 Header: %s", `l2_hdr`) - self.pkt = l2_hdr - if l3_hdr: - log.debug("L3 Header: %s", `l3_hdr`) - if not self.pkt: - self.pkt = l3_hdr - else: - self.pkt = self.pkt / l3_hdr - if l4_hdr: - log.debug("L4 Header: %s", `l4_hdr`) - self.pkt = self.pkt / l4_hdr - if self.payload: - log.debug("Payload: %s", self.payload) - self.pkt = self.pkt / self.payload - - def _create_pkts(self): - pkts = [self.pkt] - for sport in range(self.profile.startport, self.profile.endport + 1): - self.pkt = None - self.stream.l4.__dict__.update({'sport': sport}) - self._create() - pkts.append(self.pkt) - - return pkts - - def _l4_hdr(self): - if self.stream.l4 is not None: - l4_header = self.stream.l4.__dict__ - proto = self.stream.get_l4_proto() - if proto == 'tcp': - return TCP(**l4_header) - elif proto == 'udp': - return UDP(**l4_header) - elif proto == 'icmp': - return ICMP(**l4_header) - elif proto == 'icmpv6': - return ICMPv6EchoRequest() - else: - log.error("Unsupported L4 protocol %s."%proto) - - def _l3_hdr(self): - l3_header = self.stream.l3.__dict__ - - if not l3_header: - return None - if self.stream.protocol == 'ip': - return IP(**l3_header) - elif self.stream.protocol == 'ipv6': - return IPv6(**l3_header) - else: - log.error("Unsupported L3 protocol.") - - def _l2_hdr(self): - return Ether() - - def _payload(self): - if self.profile.payload: - return self.profile.payload - if self.profile.size: - return Raw(RandString(size=self.profile.size)) - else: - return None - - -class GeneratorBase(object): - - def __init__(self, name, profile): - 
self.profile = profile - self.creater = CreatePkt(self.profile) - self.pkt = self.creater.pkt - self.count = 0 - self.recv_count = 0 - self.resultsfile = "/tmp/%s.results" % name - self.update_result("Sent=%s\nReceived=%s" % - (self.count, self.recv_count)) - - def update_result(self, result): - fd = open(self.resultsfile, 'w') - fd.write(result) - fd.flush() - fd.close() - os.system('sync') - - -class Generator(Process, GeneratorBase): - - def __init__(self, name, profile): - Process.__init__(self) - GeneratorBase.__init__(self, name, profile) - self.stopit = Event() - self.stopped = Event() - - def send_recv(self, pkt, timeout=2): - # Should wait for the ICMP reply when sending ICMP request. - # So using scapy's "sr1". - log.debug("Sending: %s", `pkt`) - proto = self.profile.stream.get_l4_proto() - if proto == "icmp" or proto == "icmpv6": - p = sr1(pkt, timeout=timeout) - if p: - log.debug("Received: %s", `pkt`) - self.recv_count += 1 - else: - send(pkt) - self.count += 1 - self.update_result("Sent=%s\nReceived=%s" % - (self.count, self.recv_count)) - - def _standard_traffic(self): - for i in range(self.profile.count): - self.send_recv(self.pkt) - self.stopped.set() - - def _continuous_traffic(self): - while not self.stopit.is_set(): - self.send_recv(self.pkt) - self.stopped.set() - - def _burst_traffic(self): - for i in range(self.profile.count): - for j in range(self.profile.burst_count): - self.send_recv(self.pkt) - sleep(self.profile.burst_interval) - self.stopped.set() - - def _continuous_sport_range_traffic(self): - self.pkts = self.creater.pkts - while not self.stopit.is_set(): - sendpfast(self.pkts, pps=self.profile.pps) - self.count += len(self.pkts) - self.update_result("Sent=%s\nReceived=%s" % - (self.count, self.recv_count)) - if self.stopit.is_set(): - break - self.stopped.set() - - def _start(self): - # Preserve the order of the if-elif, because the Profiles are - # inherited from StandardProfile, So all the profiles will be - # instance of 
StandardProfile - if isinstance(self.profile, ContinuousSportRange): - self._continuous_sport_range_traffic() - elif isinstance(self.profile, ContinuousProfile): - self._continuous_traffic() - elif isinstance(self.profile, BurstProfile): - self._burst_traffic() - elif isinstance(self.profile, StandardProfile): - self._standard_traffic() - - def run(self): - try: - self._start() - except Exception, err: - log.warn(traceback.format_exc()) - finally: - log.info("Total packets sent: %s", self.count) - log.info("Total packets received: %s", self.recv_count) - self.update_result("Sent=%s\nReceived=%s" % - (self.count, self.recv_count)) - - def stop(self): - if not self.is_alive(): - return - - if (isinstance(self.profile, ContinuousProfile) or - isinstance(self.profile, ContinuousSportRange)): - self.stopit.set() - - while (self.is_alive() and not self.stopped.is_set()): - continue - if self.is_alive(): - self.terminate() - - -class TCPGenerator(GeneratorBase): - - def __init__(self, name, profile): - super(TCPGenerator, self).__init__(name, profile) - self.stopit = False - self.stopped = False - - def start(self): - sport = self.profile.stream.l4.sport - self.client = TCPClient(self, sport, debug=5) - table = 'ip6tables' if is_v6(self.profile.stream.l3.src) else 'iptables' - # Kernal will send RST packet during TCP hand shake before the scapy - # sends ACK, So drop RST packets sent by Kernal - os.system( - '%s -A OUTPUT -p tcp --tcp-flags RST RST -s %s -j DROP' % - (table, self.profile.stream.l3.src)) - # DO TCP Three way Hand Shake - self.client.runbg() - if isinstance(self.profile, ContinuousSportRange): - self.clients = self.start_clients() - self._start() - - def start_clients(self): - clients = [] - for sport in range(self.profile.startport, self.profile.endport + 1): - client = TCPClient(self, sport, debug=5) - self.client.runbg() - clients.append(client) - return clients - - def _start(self): - # Preserve the order of the if-elif, because the Profiles are - # 
inherited from StandardProfile, So all the profiles will be - # instance of StandardProfile - if isinstance(self.profile, ContinuousSportRange): - data = self.creater.payload - while not self.stopit: - for tcpclient in self.clients: - tcpclient.io.tcp.send(data) - self.stopped = True - return - elif isinstance(self.profile, ContinuousProfile): - data = self.creater.payload - while not self.stopit: - self.client.io.tcp.send(data) - self.stopped = True - return - elif isinstance(self.profile, BurstProfile): - for i in range(self.profile.count): - for j in range(self.profile.burst_count): - data = self.creater.payload - self.client.io.tcp.send(data) - sleep(self.profile.burst_interval) - elif isinstance(self.profile, StandardProfile): - for i in range(self.profile.count): - data = self.creater.payload - self.client.io.tcp.send(data) - burst = self.profile.burst_count or 1 - - while True: - try: - with open(self.resultsfile, 'r') as rfile: - sent = re.search("(Sent)=([0-9]+)", rfile.read()) - if sent: - sent = int(sent.group(2)) - if sent == (self.profile.count * burst): - break - except IOError: - continue - self.stopped = True - - def stop(self): - if (isinstance(self.profile, ContinuousProfile) or - isinstance(self.profile, ContinuousSportRange)): - self.stopit = True - - if not self.stopped: - return - # Trriger the TCPClient to RESET the connection. 
- self.client.io.tcp.send("STOP_STREAM") - self.client.stop() - - -class PktGenerator(object): - - def __init__(self, params): - self.params = params - self.profile = load(params.profile) - log.debug("Profile: %s", self.profile.__dict__) - - def handler(self, signum, frame): - self.generator.stop() - - def start(self): - # Set the signal handler - signal.signal(signal.SIGTERM, self.handler) - if self.profile.stream.get_l4_proto() == 'tcp': - self.generator = TCPGenerator(self.params.name, self.profile) - self.generator.start() - else: - self.generator = Generator(self.params.name, self.profile) - self.generator.daemon = True - self.generator.start() - self.generator.join() - - -class GenArgParser(object): - - def parse(self): - parser = OptionParser() - parser.add_option("-n", "--name", - dest="name", - help="Name for this traffic profile.") - parser.add_option("-p", "--profile", - dest="profile", - help="Stream profile to be used for sending traffic.") - parser.add_option("-S", "--stop", - dest="stop", - action="store_true", - default=False, - help="Stop this traffic Generator.") - parser.add_option("-P", "--poll", - dest="poll", - action="store_true", - default=False, - help="Poll for packets sent/recv at traffic Generator.") - opts, args = parser.parse_args() - return opts diff --git a/tcutils/pkgs/Traffic/traffic/core/helpers.py b/tcutils/pkgs/Traffic/traffic/core/helpers.py deleted file mode 100644 index 0fc3ea7fe..000000000 --- a/tcutils/pkgs/Traffic/traffic/core/helpers.py +++ /dev/null @@ -1,189 +0,0 @@ -"""Helper module to start/stop traffic. -""" -import re -from time import sleep - -from fabric.api import run -from fabric.operations import put -from fabric.context_managers import settings, hide -from tcutils.util import run_fab_cmd_on_node - -try: - # Running from the source repo "test". 
- from tcutils.pkgs.Traffic.traffic.core.profile import * - from tcutils.pkgs.Traffic.traffic.utils.logger import LOGGER, get_logger - from tcutils.pkgs.Traffic.traffic.utils.globalvars import LOG_LEVEL -except ImportError: - # Distributed and installed as package - from traffic.core.profile import * - from traffic.utils.logger import LOGGER, get_logger - from traffic.utils.globalvars import LOG_LEVEL - - -LOGGER = "%s.core.helper" % LOGGER -LOG = get_logger(name=LOGGER, level=LOG_LEVEL) - - -class SSHError(Exception): - pass - - -class Host(object): - - """Stores the credentials of a host. - """ - - def __init__(self, ip, user="root", password="C0ntrail123", key=None): - self.ip = ip - self.user = user - self.password = password - self.key = key - - -class Helper(object): - - def __init__(self, lhost, rhost, log=LOG): - self.lhost = lhost - self.rhost = rhost - self.log = log - - def get_sshkey(self): - with settings(host_string='%s@%s' % (self.lhost.user, self.lhost.ip), - password=self.lhost.password, warn_only=True, - abort_on_prompts=False): - out = put('~/.ssh/id_rsa', '/tmp/id_rsa') - out = run('chmod 600 /tmp/id_rsa') - return '/tmp/id_rsa' - - def runcmd(self, cmd): - """Run remote command.""" - output = None -# keyfile = self.get_sshkey() -# ssh_cmd = 'ssh -o StrictHostKeyChecking=no -i %s %s@%s \"%s\"' % ( -# keyfile, self.rhost.user, self.rhost.ip, cmd) - self.log.debug('On host %s exec: %s'%(self.rhost.ip, cmd)) - with hide('everything'): - with settings( - host_string='%s@%s' % (self.lhost.user, self.lhost.ip), - password=self.lhost.password, warn_only=True, abort_on_prompts=False): - self.log.debug("Executing: %s", cmd) - retry = 6 - while True: - output = '' -# output=run(ssh_cmd) - output = run_fab_cmd_on_node( - host_string='%s@%s' % (self.rhost.user, self.rhost.ip), - password='ubuntu', as_sudo=True, cmd=cmd) - if (not output) and retry: - self.log.error( - "Scapy issue while sending/receiving packets. 
Will retry after 5 secs.") - sleep(5) - retry -= 1 - continue - if ("Connection timed out" in output or - "Connection refused" in output) and retry: - self.log.debug( - "SSH timeout, sshd might not be up yet. will retry after 5 secs.") - sleep(5) - retry -= 1 - continue - elif "Connection timed out" in output: - raise SSHError(output) - else: - break - self.log.debug(output) - return output - - -class Sender(Helper): - - def __init__(self, name, profile, lhost, rhost, log=LOG): - super(Sender, self).__init__(lhost, rhost, log) - self.name = name - self.pktheader = profile.stream.all_fields - # Pickle the profile object, so that it can be sent across network. - self.profile = create(profile) - # Initialize the packet sent/recv count - self.sent = None - self.recv = None - - def start(self): - # Start send; launches the "sendpkts" script in the VM - self.log.debug("Sender: VM '%s' in Compute '%s'", - self.rhost.ip, self.lhost.ip) - self.log.info("Sending traffic with '%s'", self.pktheader) - out = self.runcmd("sendpkts --name %s -p %s" % - (self.name, self.profile)) - if 'Daemon already running' in out: - errmsg = "Traffic stream with name '%s' already present in VM '%s' \ - at compute '%s'" % (self.name, self.rhost.ip, self.lhost.ip) - assert False, errmsg - - def poll(self): - """Polls for the number of packets sent/received. - This api can be used when trraffic is live. 
- """ - # Polls for the packets sent; launches the "sendpkts" script in - # the VM with --poll option - result = self.runcmd("sendpkts --name %s --poll" % self.name) - sent = re.search("(Sent)=([0-9]+)", result) - if sent: - self.sent = int(sent.group(2)) - recv = re.search("(Received)=([0-9]+)", result) - if recv: - self.recv = int(recv.group(2)) - - def stop(self): - # Stop send; launches the "sendpkts" script in the VM with --stop - # option - result = self.runcmd("sendpkts --name %s --stop" % self.name) - sent = re.search("(Sent)=([0-9]+)", result) - if sent: - self.sent = int(sent.group(2)) - recv = re.search("(Received)=([0-9]+)", result) - if recv: - self.recv = int(recv.group(2)) - self.log.info("Finished sending traffic with '%s'", self.pktheader) - - -class Receiver(Helper): - - def __init__(self, name, profile, lhost, rhost, log=LOG): - super(Receiver, self).__init__(lhost, rhost, log) - self.name = name - # Pickle the profile object, so that it can be sent across network. - self.profile = create(profile) - # Initialize the packet recv count - self.recv = None - self.corrupt = None - - def start(self): - # Start send; launches the "recvpkts" script in the VM - self.log.debug("Receiver: VM '%s' in Compute '%s'", - self.rhost.ip, self.lhost.ip) - self.runcmd("recvpkts --name %s -p %s" % (self.name, self.profile)) - - def poll(self): - """Polls for the number of packets received. - This api can be used when traffic is live. 
- """ - # Polls for packets recieve; launches the "recvpkts" script in - # the VM with --poll option - result = self.runcmd("recvpkts --name %s --poll" % self.name) - recv = re.search("(Received)=([0-9]+)", result) - if recv: - self.recv = int(recv.group(2)) - corrupt = re.search("(Corrupted)=([0-9]+)", result) - if corrupt: - self.corrupt = int(corrupt.group(2)) - - def stop(self): - # Stop send; launches the "recvpkts" script in the VM with --stop - # option - result = self.runcmd("recvpkts --name %s --stop" % self.name) - recv = re.search("(Received)=([0-9]+)", result) - if recv: - self.recv = int(recv.group(2)) - corrupt = re.search("(Corrupted)=([0-9]+)", result) - if corrupt: - self.corrupt = int(corrupt.group(2)) diff --git a/tcutils/pkgs/Traffic/traffic/core/listener.py b/tcutils/pkgs/Traffic/traffic/core/listener.py deleted file mode 100644 index 73e70a692..000000000 --- a/tcutils/pkgs/Traffic/traffic/core/listener.py +++ /dev/null @@ -1,443 +0,0 @@ -"""Module for creating socket to receive packets. -""" -import errno -import socket -import signal -import traceback -from select import select -from multiprocessing import Process, Queue -from optparse import OptionParser - -from scapy.data import * -from scapy.config import conf -from scapy.utils import PcapReader -from scapy.all import plist -from scapy.layers.inet import IP, TCP, UDP, ICMP -from scapy.layers.inet6 import IPv6, ICMPv6EchoRequest - -try: - # Running from the source repo "test". 
- from tcutils.pkgs.Traffic.traffic.core.profile import * - from tcutils.pkgs.Traffic.traffic.utils.logger import LOGGER, get_logger - from tcutils.pkgs.Traffic.traffic.utils.globalvars import LOG_LEVEL - from tcutils.pkgs.Traffic.traffic.utils.util import * -except ImportError: - # Distributed and installed as package - from traffic.core.profile import * - from traffic.utils.logger import LOGGER, get_logger - from traffic.utils.globalvars import LOG_LEVEL - from traffic.utils.util import * - - -LOGGER = "%s.core.listener" % LOGGER -log = get_logger(name=LOGGER, level=LOG_LEVEL) - -MTU = 65565 - - -class CaptureBase(Process): - - def __init__(self, name, **kwargs): - super(CaptureBase, self).__init__() - self.kwargs = kwargs - log.debug("Filter is: %s", self.kwargs['filter']) - self.capture = True - self.pcap = [] - self.filtered_pcap = [] - self.corrupted_pcap = [] - self.resultsfile = "/tmp/%s.results" % name - - @conf.commands.register - def sniff(self, count=0, store=1, timeout=None, stopperTimeout=None, stopper=None, chksum=False, *arg, **karg): - """Sniff packets - sniff([count=0,] [store=1,] [stopper] + args) -> list of packets - - count: number of packets to capture. 
0 means infinity - store: wether to store sniffed packets or discard them - timeout: stop sniffing after a given time (default: None) - stopperTimeout: break the select to check the returned value of - stopper: function returning true or false to stop the sniffing process - """ - self.chksum = chksum - c = 0 # Total packets - - L2socket = conf.L2listen - self.sock = L2socket(type=ETH_P_ALL, *arg, **karg) - - if timeout is not None: - stoptime = time.time() + timeout - remain = None - - if stopperTimeout is not None: - stopperStoptime = time.time() + stopperTimeout - remainStopper = None - last_pkt = None - while self.capture: - if timeout is not None: - remain = stoptime - time.time() - if remain <= 0: - break - sel = select([self.sock], [], [], remain) - if self.sock in sel[0]: - p = self.sock.recv(MTU) - else: - p = self.sock.recv(MTU) - - if p is None: - continue - if p == last_pkt: - last_pkt = None - # Sniff sniffs packet twice; workarund for it - # When time permits, we should debug this - log.debug("Duplicate, Skip counting this packet") - continue - last_pkt = p - log.debug(`p`) - # Discard the first ssh keepalive packet - try: - dport = p[TCP].dport - sport = p[TCP].sport - if dport == 22 or sport == 22: - log.debug("Discard the ssh keepalive packet") - continue - except IndexError: - pass - if store: - self.pcap.append(p) - if self.count_tcp(p): - c += 1 - log.debug("Total packets received: %s", c) - self.update_result(c, len(self.corrupted_pcap)) - if count > 0 and c >= count: - break - if stopper and stopper(p): - break - continue - - if self.count_icmp(p): - c += 1 - log.debug("Total packets received: %s", c) - self.update_result(c, len(self.corrupted_pcap)) - if count > 0 and c >= count: - break - if stopper and stopper(p): - break - continue - - if self.count_udp(p): - c += 1 - log.debug("Total packets received: %s", c) - self.update_result(c, len(self.corrupted_pcap)) - if count > 0 and c >= count: - break - if stopper and stopper(p): - break - 
continue - - def checksum(self, p, proto): - return self.verify_l3_checksum(p) and self.verify_l4_checksum(p, proto) - - def verify_l3_checksum(self, p): - try: - l3_chksum = p[IP].chksum - except IndexError: - log.debug("skipping checksum verification for v6 packets") - return True - log.debug("Received L3 checksum: %s", l3_chksum) - del p[IP].chksum - p = p.__class__(str(p)) - log.debug("Calculated L3 checksum: %s", p[IP].chksum) - if p[IP].chksum == l3_chksum: - return True - return False - - def verify_l4_checksum(self, p, proto): - try: - l4_chksum = p[proto].chksum - del p[proto].chksum - except AttributeError: - l4_chksum = p[proto].cksum - del p[proto].cksum - log.debug("Received L4 checksum: %s", l4_chksum) - p = p.__class__(str(p)) - try: - calc_l4_chksum = p[proto].chksum - except AttributeError: - calc_l4_chksum = p[proto].cksum - log.debug("Calculated L4 checksum: %s", calc_l4_chksum) - if calc_l4_chksum == l4_chksum: - return True - return False - - def count_tcp(self, p): - try: - proto = p[IP].proto - af = IP - except IndexError: - try: - proto = p[IPv6].nh - af = IPv6 - except IndexError: - return 0 - if proto == 6: - log.debug("Protocol is TCP") - if self.chksum and not self.checksum(p, TCP): - self.corrupted_pcap.append(p) - if ((af is IPv6) or not p[IP].frag == "MF") and p[TCP].flags == 24: - # count only TCP PUSH ACK packet. - log.debug("Packet is unfagmented and tcp flag is PUSH") - self.filtered_pcap.append(p) - return 1 - return 0 - - def count_udp(self, p): - try: - proto = p[IP].proto - af = IP - except IndexError: - try: - proto = p[IPv6].nh - af = IPv6 - except IndexError: - return 0 - if proto == 17: - log.debug("Protocol is UDP") - if self.chksum and not self.checksum(p, UDP): - self.corrupted_pcap.append(p) - if af is IPv6 or not p[IP].frag == "MF": - # count only unfragmented packet. 
- log.debug("Packet is unfagmented") - self.filtered_pcap.append(p) - return 1 - return 0 - - def count_icmp(self, p): - try: - icmp_type = p[ICMP].type - proto = ICMP - except IndexError: - try: - icmp_type = p[IPv6][ICMPv6EchoRequest].type - proto = ICMPv6EchoRequest - except IndexError: - return 0 - if (proto is ICMP and icmp_type == 8) or \ - (proto is ICMPv6EchoRequest and icmp_type == 128): - # count only ICMP Echo Request - log.debug("ICMP echo request") - self.filtered_pcap.append(p) - if self.chksum and not self.checksum(p, proto): - self.corrupted_pcap.append(p) - return 1 - return 0 - - def run(self): - try: - self.sniff(**self.kwargs) - except socket.error as (code, msg): - if code != errno.EINTR: - raise - except Exception, err: - log.warn(traceback.format_exc()) - finally: - self.sock.close() - self.pcap = plist.PacketList(self.filtered_pcap, "Sniffed") - log.debug("Total packets received: %s", len(self.pcap)) - self.update_result(len(self.pcap), len(self.corrupted_pcap)) - - def update_result(self, recv, corrupt): - result = "Received=%s\nCorrupted=%s" % (recv, corrupt) - fd = open(self.resultsfile, 'w') - fd.write(result) - fd.flush() - fd.close() - - def stop(self): - self.capture = False - self.terminate() - self.sock.close() - - -class ListenerBase(Process): - - def __init__(self, sock): - super(ListenerBase, self).__init__() - self.sock = sock - self.listen = True - - def run(self): - try: - while self.listen: - pkt = self.sock.recv(MTU) - except socket.error as (code, msg): - if code != errno.EINTR: - raise - - def stop(self): - self.listen = False - self.terminate() - self.sock.close() - - -class UDPListener(ListenerBase): - - def __init__(self, ip, port): - af = socket.AF_INET - if is_v6(ip): - af = socket.AF_INET6 - sock = socket.socket(af, socket.SOCK_DGRAM) - sock.bind((ip, int(port))) - super(UDPListener, self).__init__(sock) - - -class TCPListener(ListenerBase): - - def __init__(self, ip, port): - af = socket.AF_INET - if is_v6(ip): - af 
= socket.AF_INET6 - sock = socket.socket(af, socket.SOCK_STREAM) - sock.bind((ip, int(port))) - sock.listen(1) - super(TCPListener, self).__init__(sock) - - def run(self): - while self.listen: - conn, address = self.sock.accept() - # self.sock.recv(MTU) - - -class PktListener(object): - - def __init__(self, params): - self.profile_name = params.name - self.profile = load(params.profile) - self.stream = self.profile.stream - log.debug("Profile: %s", self.profile.__dict__) - log.debug("Stream: %s", self.stream.__dict__) - log.debug("Stream L3: %s", self.stream.l3.__dict__) - if self.stream.l4 is not None: - log.debug("Stream L4: %s", self.stream.l4.__dict__) - self.create_listener() - self.create_sniffer() - self.pcap = 0 - - def _join(self, *args): - return " ".join(args) - - def _make_filter(self): - capfilter = '' - proto = self.stream.get_l4_proto() - if proto: - proto = 'icmp6' if proto == 'icmpv6' else proto - capfilter = self._join(capfilter, proto) - - if hasattr(self.stream.l4, 'dport'): - capfilter = self._join( - capfilter, "port", str(self.stream.l4.dport)) - - return capfilter - - def create_listener(self): - if self.profile.listener: - listen_at = self.profile.listener - else: - listen_at = self.stream.l3.dst - - self.listener = None - if self.stream.get_l4_proto() == 'tcp': - self.listener = TCPListener(listen_at, self.stream.l4.dport) - elif self.stream.get_l4_proto() == 'udp': - self.listener = UDPListener(listen_at, self.stream.l4.dport) - if self.listener: - self.listener.daemon = 1 - - def _standard_traffic(self): - count = self.profile.count - return count - - def _burst_traffic(self): - count = self.profile.burst_count * self.profile.count - return count - - def _continuous_traffic(self): - pass - - def create_sniffer(self): - kwargs = {} - if self.profile.iface: - kwargs.update({'iface': self.profile.iface}) - if not self.profile.capfilter: - capfilter = self._make_filter() - else: - capfilter = self.profile.capfilter - kwargs.update({'filter': 
capfilter}) - - if (isinstance(self.profile, ContinuousProfile) or - isinstance(self.profile, ContinuousSportRange)): - self._continuous_traffic() - elif isinstance(self.profile, BurstProfile): - kwargs.update({'count': self._burst_traffic()}) - elif isinstance(self.profile, StandardProfile): - kwargs.update({'count': self._standard_traffic()}) - - if self.profile.stopper: - kwargs.update({'stopper': self.profile.stopper}) - if self.profile.timeout: - kwargs.update({'timeout': self.profile.timeout}) - if self.profile.chksum: - kwargs.update({'chksum': self.profile.chksum}) - - self.sniffer = CaptureBase(self.profile_name, **kwargs) - self.sniffer.daemon = 1 - - def start(self): - # Set the signal handler - signal.signal(signal.SIGTERM, self.handler) - try: - if self.listener: - self.listener.start() - self.sniffer.start() - self.sniffer.join() - except Exception, err: - log.warn(traceback.format_exc()) - finally: - self.stop() - - def stop(self): - try: - self.sniffer.stop() - if self.listener: - self.listener.stop() - except: - pass - finally: - self.pcap = len(self.sniffer.pcap) - - def handler(self, signum, frame): - self.stop() - - -class ListenerArgParser(object): - - def parse(self): - parser = OptionParser() - parser.add_option("-n", "--name", - dest="name", - help="Name for this traffic profile.") - parser.add_option("-p", "--profile", - dest="profile", - help="Traffic profile to be used to receive packets.") - parser.add_option("-S", "--stop", - dest="stop", - action="store_true", - default=False, - help="Stop this traffic listener.") - parser.add_option("-P", "--poll", - dest="poll", - action="store_true", - default=False, - help="poll for packets recieved at traffic listener.") - - opts, args = parser.parse_args() - return opts diff --git a/tcutils/pkgs/Traffic/traffic/core/profile.py b/tcutils/pkgs/Traffic/traffic/core/profile.py deleted file mode 100644 index 6eaa6c3b2..000000000 --- a/tcutils/pkgs/Traffic/traffic/core/profile.py +++ /dev/null @@ 
-1,120 +0,0 @@ -"""Module holding various stream profiles. -""" - -from cPickle import dumps, loads - -try: - # Running from the source repo "test". - from tcutils.pkgs.Traffic.traffic.core.stream import * - from tcutils.pkgs.Traffic.traffic.utils.logger import LOGGER, get_logger - from tcutils.pkgs.Traffic.traffic.utils.globalvars import LOG_LEVEL -except ImportError: - # Distributed and installed as package - from traffic.core.stream import * - from traffic.utils.logger import LOGGER, get_logger - from traffic.utils.globalvars import LOG_LEVEL - - -LOGGER = "%s.core.listener" % LOGGER -log = get_logger(name=LOGGER, level=LOG_LEVEL) - -ENCRYPT = [("\n", "#"), (" ", "@"), ("(", "{"), ("'", "}")] - - -def create(obj): - """Creates the string representation of the profile object. - Which can be passed as command line argument to the sendpkts/recvpkts - scripts that are run in another machine(VM, Host). - """ - objs = dumps(obj) - for actual, encrypt in ENCRYPT: - objs = objs.replace(actual, encrypt) - return "\"%s\"" % objs - - -def load(objs): - """Converts the string representation of the profile object to Object. - Which will be used by the listener and generator modules in another - Machine(VM , Host). - """ - ENCRYPT.reverse() - for actual, encrypt in ENCRYPT: - objs = objs.replace(encrypt, actual) - return loads(objs) - - -class StandardProfile(object): - - def __init__( - self, stream, size=100, count=10, payload=None, capfilter=None, - stopper=None, timeout=None, iface=None, listener=None, chksum=False): - self.stream = stream - # payload size in bytes - self.size = size - # Payload to tbe sent in the packets. - self.payload = payload - # Number of packets to send/send per burst. - self.count = count - # Tcp dump style filter to be applied when capturing the packets. - # If filter is not given, the dfault filter will be framed automatically - # using the source/destination IP and source/destination port attributes - # of the stream. 
- self.capfilter = capfilter - # Stop packet capturing, once this stopper function returns true - # This function will be applied on each received packets. - self.stopper = stopper - # Timeout usually goes with stopper.say the stopper function waiting for - # a particular packet and its never received...timeout avoids dead - # lock. - self.timeout = timeout - # Interface to capture packets, if None captures in all interface. - self.iface = iface - # Listener IP address; for tcp and udp receivers - self.listener = listener - # to verify checksum of the received packet. - self.chksum = chksum - - # Number of bursts - self.burst_count = None - # Interval between burst in seconds - self.burst_interval = None - - -class ContinuousProfile(StandardProfile): - - def __init__( - self, stream, size=100, count=10, payload=None, capfilter=None, - stopper=None, timeout=None, iface=None, listener=None, - chksum=False): - # count = 0; means send packets continuously - super(ContinuousProfile, self).__init__(stream, size, count, payload, - capfilter, stopper, timeout, iface, listener, chksum) - - -class BurstProfile(StandardProfile): - - def __init__( - self, stream, size=100, count=10, payload=None, capfilter=None, - stopper=None, timeout=None, burst_count=10, listener=None, - chksum=False, burst_interval=10): - super(BurstProfile, self).__init__(stream, size, count, payload, - capfilter, stopper, timeout, iface, listener, chksum) - # Number of bursts - self.burst_count = burst_count - # Interval between burst in seconds - self.burst_interval = burst_interval - - -class ContinuousSportRange(StandardProfile): - - def __init__(self, stream, size=100, startport=8001, endport=8100, - payload=None, capfilter=None, stopper=None, timeout=None, - iface=None, listener=None, chksum=False, pps=500): - super( - ContinuousSportRange, self).__init__(stream, size=size, payload=payload, - capfilter=capfilter, stopper=stopper, timeout=timeout, iface=iface, - listener=listener, chksum=chksum) - - 
self.startport = startport - self.endport = endport - self.pps = pps diff --git a/tcutils/pkgs/Traffic/traffic/core/stream.py b/tcutils/pkgs/Traffic/traffic/core/stream.py deleted file mode 100644 index 95bb9af10..000000000 --- a/tcutils/pkgs/Traffic/traffic/core/stream.py +++ /dev/null @@ -1,221 +0,0 @@ -"""Module to create traffic stream. - -It just parses the arguments given by the user and fills up the approprite -protocol header. - -This needs to be extended for new protocol streams with new protocol. -""" - -import sys -import inspect - -try: - # Running from the source repo "test". - from tcutils.pkgs.Traffic.traffic.utils.logger import LOGGER, get_logger - from tcutils.pkgs.Traffic.traffic.utils.globalvars import LOG_LEVEL - from tcutils.pkgs.Traffic.traffic.utils.util import is_v6, is_v4 -except ImportError: - # Distributed and installed as package - from traffic.utils.logger import LOGGER, get_logger - from traffic.utils.globalvars import LOG_LEVEL - from traffic.utils.util import is_v6, is_v4 - - -LOGGER = "%s.core.listener" % LOGGER -log = get_logger(name=LOGGER, level=LOG_LEVEL) - - -def help(header="all"): - """lists the keywords of fields available in currenlty implemented - protocols. - This is a helper method to the users to get the list of fields, - before creating a stream. - - Usage: - import stream - stream.help() - stream.help("IPHeader") - """ - clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass) - if not header == "all": - clsmembers = filter(lambda x: x[0] == header, clsmembers) - for clsname, clsmember in clsmembers: - clsobj = clsmember() - clsattrs = dir(clsobj) - if "fields" in clsattrs: - print clsname, ": ", clsobj.fields - if "options" in clsattrs: - print clsname, ": ", clsobj.options - - -class Stream(object): - - def __init__(self, **kwargs): - if not kwargs: - # Just for getting Help. 
- return - self.all_fields = kwargs - - try: - self.protocol = self.all_fields['protocol'] - except KeyError: - self.protocol = "ip" # Defualt L3 protocol. - dst = self.all_fields['dst'] - if is_v6(dst): - self.protocol = "ipv6" - try: - proto = self.all_fields['proto'] - except KeyError, err: - print err, "Must specify proto." - if 'dst' in self.all_fields.keys(): - self.all_fields['dst'] = str(self.all_fields['dst']) - - self.l2 = self._eth_header() - if self.protocol == 'ip': - self.l3 = self._ip_header() - elif self.protocol == 'ipv6': - self.l3 = self._ip6_header() - if proto == 'tcp': - self.l4 = self._tcp_header() - elif proto == 'udp': - self.l4 = self._udp_header() - elif proto == 'icmp': - self.l4 = self._icmp_header() - - def _eth_header(self): - return {} - - def _ip_header(self): - return IPHeader(**self.all_fields).get_header() - - def _ip6_header(self): - return IP6Header(**self.all_fields).get_header() - - def _tcp_header(self): - return TCPHeader(**self.all_fields).get_header() - - def _udp_header(self): - return UDPHeader(**self.all_fields).get_header() - - def _icmp_header(self): - if self.protocol == 'ipv6': - return None - return ICMPHeader(**self.all_fields).get_header() - - def get_l4_proto(self): - return getattr(self.l3, 'proto', None) or \ - getattr(self.l3, 'nh', None).lower() - - -class Header(object): - - def __init__(self, fields={}): - for key, val in fields.items(): - self.__setattr__(key, val) - - -class AnyHeader(object): - - def __init__(self, **kwargs): - self.all_fields = kwargs - try: - self.all_fields.update({'sport': int(self.all_fields['sport'])}) - self.all_fields.update({'dport': int(self.all_fields['dport'])}) - except KeyError: - pass - - def create_header(self, fields): - header = {} - for field in fields: - if field in self.all_fields.keys(): - if field == "iplen": # UDP also has len - field = "len" - if field == "ipflags": # TCP also has flags - field = "flags" - header.update({field: self.all_fields[field]}) - - 
return header - - -class TCPHeader(AnyHeader): - - def __init__(self, **kwargs): - super(TCPHeader, self).__init__(**kwargs) - # Set got from "fields_desc" attribute of protocol headers in scapy. - self.fields = ("sport", "dport", "seq", "ack", "dataofs", "reserved", - "flags", "window", "chksum", "urgptr") - self.options = ("EOL", "NOP", "MSS", "WScale", "SAckOK", "SAck", - "Timestamp", "AltChkSum", "AltChkSumOpt") - - def get_header(self): - header = self.create_header(self.fields) - options = self.create_header(self.options) - - if options: - header.update({'options': options}) - - return Header(header) - - -class UDPHeader(AnyHeader): - - def __init__(self, **kwargs): - super(UDPHeader, self).__init__(**kwargs) - # Set got from "fields_desc" attribute of protocol headers in scapy. - self.fields = ("sport", "dport", "len", "chksum") - - def get_header(self): - header = self.create_header(self.fields) - - return Header(header) - - -class ICMPHeader(AnyHeader): - - def __init__(self, **kwargs): - super(ICMPHeader, self).__init__(**kwargs) - # Set got from "fields_desc" attribute of protocol headers in scapy. - self.fields = ("type", "code", "chksum", "id", "seq", "ts_ori", "ts_rx" - "ts_tx", "gw", "ptr", "reserved", "addr_mask") - - def get_header(self): - header = self.create_header(self.fields) - - return Header(header) - - -class IPHeader(AnyHeader): - - def __init__(self, **kwargs): - super(IPHeader, self).__init__(**kwargs) - # Set got from "fields_desc" attribute of protocol headers in scapy. - self.fields = ("version", "ihl", "tos", "iplen", "id", "ipflags", - "frag", "ttl", "proto", "ipchksum", "src", "dst", - "options") - - def get_header(self): - header = self.create_header(self.fields) - - return Header(header) - - -class IP6Header(AnyHeader): - - def __init__(self, **kwargs): - super(IP6Header, self).__init__(**kwargs) - # Set got from "fields_desc" attribute of protocol headers in scapy. 
- self.fields = ("version", "tc", "fl", "iplen", "nh", "proto", - "hlim", "ttl", "src", "dst") - - def get_header(self): - header = self.create_header(self.fields) - hdr_obj = Header(header) - if hasattr(hdr_obj, 'proto'): - hdr_obj.nh = hdr_obj.proto.upper() - if 'ICMP' in hdr_obj.nh: - hdr_obj.nh = 'ICMPv6' - del hdr_obj.proto - if hasattr(hdr_obj, 'ttl'): - hdr_obj.hlim = hdr_obj.ttl - del hdr_obj.ttl - return hdr_obj diff --git a/tcutils/pkgs/Traffic/traffic/core/tcpclient.py b/tcutils/pkgs/Traffic/traffic/core/tcpclient.py deleted file mode 100644 index 03aaf84ee..000000000 --- a/tcutils/pkgs/Traffic/traffic/core/tcpclient.py +++ /dev/null @@ -1,184 +0,0 @@ -"""TCP Client module built on top of scapy Automaton""" - -from scapy.all import * -try: - # Running from the source repo "test". - from tcutils.pkgs.Traffic.traffic.core.profile import * - from tcutils.pkgs.Traffic.traffic.utils.logger import LOGGER, get_logger - from tcutils.pkgs.Traffic.traffic.utils.globalvars import LOG_LEVEL -except ImportError: - # Distributed and installed as package - from traffic.core.profile import * - from traffic.utils.logger import LOGGER, get_logger - from traffic.utils.globalvars import LOG_LEVEL - -LOGGER = "%s.core.tcpclient" % LOGGER -log = get_logger(name=LOGGER, level=LOG_LEVEL) - - -class TCPClient(Automaton): - - """ Controlled TCP_client to send RST, which can be trrigered from the - main thread""" - - def parse_args(self, gen, sport, *args, **kargs): - log.debug("Parsing the args") - self.gen = gen - l3_hdr = self.gen.creater._l3_hdr() - l4_hdr = self.gen.creater._l4_hdr() - - self.pkt = l3_hdr / l4_hdr - self.pkt[TCP].flags = 0 - self.pkt[TCP].seq = random.randrange(0, 2 ** 32) - - self.src = self.gen.profile.stream.l3.src - self.dst = self.gen.profile.stream.l3.dst - self.sport = sport - self.dport = self.gen.profile.stream.l4.dport - self.swin = self.pkt[TCP].window - self.dwin = 1 - self.rcvbuf = "" - self.count = 0 - self.recv_count = 0 - bpf = "host %s and 
host %s and port %i and port %i" % (self.src, - self.dst, - self.sport, - self.dport) - log.debug("BPF: %s", bpf) - -# bpf=None - Automaton.parse_args(self, filter=bpf, **kargs) - - def master_filter(self, pkt): - log.debug("Master filter") - return (((IP in pkt and - pkt[IP].src == self.dst and - pkt[IP].dst == self.src) or - (IPv6 in pkt and - pkt[IPv6].src == self.dst and - pkt[IPv6].dst == self.src) - ) and TCP in pkt and - pkt[TCP].sport == self.dport and - pkt[TCP].dport == self.sport and - # XXX: seq/ack 2^32 wrap up - self.pkt[TCP].seq >= pkt[TCP].ack and - ((self.pkt[TCP].ack == 0) or (self.pkt[TCP].ack <= pkt[TCP].seq <= self.pkt[TCP].ack + self.swin))) - - @ATMT.state(initial=1) - def START(self): - log.debug("state = START") - pass - - @ATMT.state() - def SYN_SENT(self): - log.debug("state = SYN_SENT") - pass - - @ATMT.state() - def ESTABLISHED(self): - log.debug("state = ESTABLISHED") - pass - - @ATMT.state() - def LAST_ACK(self): - log.debug("state = LAST_ACK") - pass - - @ATMT.state(final=1) - def CLOSED(self): - log.debug("state = CLOSED") - pass - - @ATMT.condition(START) - def connect(self): - raise self.SYN_SENT() - - @ATMT.action(connect) - def send_syn(self): - log.info("Sending SYN") - self.pkt[TCP].flags = "S" - log.debug(`self.pkt`) - self.send(self.pkt) - self.pkt[TCP].seq += 1 - - @ATMT.receive_condition(SYN_SENT) - def synack_received(self, pkt): - if pkt[TCP].flags & 0x3f == 0x12: - raise self.ESTABLISHED().action_parameters(pkt) - - @ATMT.action(synack_received) - def send_ack_of_synack(self, pkt): - log.info("Received SYN ACK") - self.pkt[TCP].ack = pkt[TCP].seq + 1 - self.pkt[TCP].flags = "A" - log.info("Sending ACK for SYN ACK") - log.debug(`self.pkt`) - self.send(self.pkt) - - @ATMT.receive_condition(ESTABLISHED) - def incoming_data_received(self, pkt): - if not isinstance(pkt[TCP].payload, NoPayload) and not isinstance(pkt[TCP].payload, Padding): - raise self.ESTABLISHED().action_parameters(pkt) - - 
@ATMT.action(incoming_data_received) - def receive_data(self, pkt): - log.debug("Received data in ESTABLISHED state.") - data = str(pkt[TCP].payload) - if data and self.pkt[TCP].ack == pkt[TCP].seq: - self.pkt[TCP].ack += len(data) - self.pkt[TCP].flags = "A" - log.debug(`self.pkt`) - self.send(self.pkt) - self.rcvbuf += data - if pkt[TCP].flags & 8 != 0: # PUSH - self.oi.tcp.send(self.rcvbuf) - self.rcvbuf = "" - - @ATMT.ioevent(ESTABLISHED, name="tcp", as_supersocket="tcplink") - def outgoing_data_received(self, fd): - raise self.ESTABLISHED().action_parameters(fd.recv()) - - @ATMT.action(outgoing_data_received) - def send_data(self, d): - log.debug("Got '%s' to send at ESTABLISHED state.", d) - if d == "STOP_STREAM": - log.debug("Sending RST") - # User requested to reset the TCP connection - self.pkt[TCP].flags = "R" - log.debug(`self.pkt`) - self.send(self.pkt) - return - - self.count += 1 - self.pkt[TCP].flags = "PA" - log.debug(`self.pkt / d`) - self.send(self.pkt / d) - self.pkt[TCP].seq += len(d) - self.gen.update_result("Sent=%s\nReceived=%s" % - (self.count, self.recv_count)) - - @ATMT.receive_condition(ESTABLISHED) - def reset_received(self, pkt): - log.debug("Recevied RST") - if pkt[TCP].flags & 4 != 0: - raise self.CLOSED() - - @ATMT.receive_condition(ESTABLISHED) - def fin_received(self, pkt): - if pkt[TCP].flags & 0x1 == 1: - raise self.LAST_ACK().action_parameters(pkt) - - @ATMT.action(fin_received) - def send_finack(self, pkt): - log.debug("Sending FIN ACK") - self.pkt[TCP].flags = "FA" - self.pkt[TCP].ack = pkt[TCP].seq + 1 - log.debug(`self.pkt`) - self.send(self.pkt) - self.pkt[TCP].seq += 1 - - @ATMT.receive_condition(LAST_ACK) - def ack_of_fin_received(self, pkt): - log.debug("Sending ACK for FIN") - if pkt[TCP].flags & 0x3f == 0x10: - raise self.CLOSED() diff --git a/tcutils/pkgs/Traffic/traffic/core/tests/generator_ut.py b/tcutils/pkgs/Traffic/traffic/core/tests/generator_ut.py deleted file mode 100644 index e69de29bb..000000000 diff --git 
a/tcutils/pkgs/Traffic/traffic/core/tests/listener_ut.py b/tcutils/pkgs/Traffic/traffic/core/tests/listener_ut.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tcutils/pkgs/Traffic/traffic/core/tests/stream_ut.py b/tcutils/pkgs/Traffic/traffic/core/tests/stream_ut.py deleted file mode 100644 index f4111b416..000000000 --- a/tcutils/pkgs/Traffic/traffic/core/tests/stream_ut.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Unittest for stream module.""" - -import unittest - -import tcutils.pkgs.Traffic.traffic.core.stream as stream - - -class TestStream(unittest.TestCase): - - def test_help(self): - stream.help() - stream.help("IPHeader") - -if __name__ == '__main__': - unittest.main() diff --git a/tcutils/pkgs/Traffic/traffic/scripts/recvpkts b/tcutils/pkgs/Traffic/traffic/scripts/recvpkts deleted file mode 100755 index 8cb89d81f..000000000 --- a/tcutils/pkgs/Traffic/traffic/scripts/recvpkts +++ /dev/null @@ -1,68 +0,0 @@ -#! /usr/bin/python - -"""Module to receive packets. -""" -import sys - -try: - # Running from the source repo "test". 
- from tcutils.pkgs.Traffic.traffic.utils.daemon import Daemon - from tcutils.pkgs.Traffic.traffic.utils.logger import get_logger, LOGGER - from tcutils.pkgs.Traffic.traffic.core.listener import ListenerArgParser, PktListener - from tcutils.pkgs.Traffic.traffic.utils.globalvars import LOG_LEVEL -except ImportError: - # Distributed and installed as package - from traffic.utils.daemon import Daemon - from traffic.core.listener import ListenerArgParser, PktListener - from traffic.utils.logger import LOGGER, get_logger - from traffic.utils.globalvars import LOG_LEVEL - -LOGGER = "%s.scripts.recvpkts" % LOGGER -log = get_logger(name=LOGGER, level=LOG_LEVEL) - - -def poll(name): - fd = open("/tmp/%s.results" % name, "r") - results = fd.readlines() - fd.close() - return results - - -class PktRcvDaemon(Daemon): - - def __init__(self, args, pidfile, stdin, stdout, stderr): - super(PktRcvDaemon, self).__init__(pidfile, stdin, stdout, stderr) - self.args = args - - def run(self): - self.listener = PktListener(self.args) - self.listener.start() - - def stop(self): - self._stop() - return poll(self.args.name) - - -def main(): - args = ListenerArgParser().parse() - if args.poll: - return poll(args.name) - - pidfile = '/tmp/%s.pid' % args.name - if args.stop: - prefix = "stop" - else: - prefix = "start" - logfile = '/tmp/%s-recvpkts-%s.log' % (prefix, args.name) - open(logfile, 'w+') - daemon = PktRcvDaemon(args, pidfile, logfile, logfile, logfile) - if args.stop: - return daemon.stop() - else: - return daemon.start() - - -if __name__ == "__main__": - result = main() - print result - sys.exit(0) diff --git a/tcutils/pkgs/Traffic/traffic/scripts/sendpkts b/tcutils/pkgs/Traffic/traffic/scripts/sendpkts deleted file mode 100755 index 7713fc799..000000000 --- a/tcutils/pkgs/Traffic/traffic/scripts/sendpkts +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/python - -"""User interface to send packets. -""" -import sys -from logging import INFO - -try: - # Running from the source repo "test". 
- from tcutils.pkgs.Traffic.traffic.utils.daemon import Daemon - from tcutils.pkgs.Traffic.traffic.utils.logger import get_logger, LOGGER - from tcutils.pkgs.Traffic.traffic.core.generator import GenArgParser, PktGenerator - from tcutils.pkgs.Traffic.traffic.utils.globalvars import LOG_LEVEL -except ImportError: - # Distributed and installed as package - from traffic.utils.daemon import Daemon - from traffic.utils.logger import get_logger, LOGGER - from traffic.core.generator import GenArgParser, PktGenerator - from traffic.utils.globalvars import LOG_LEVEL - -LOGGER = "%s.scripts.sendpkts" % LOGGER -log = get_logger(name=LOGGER, level=LOG_LEVEL) - - -def poll(name): - fd = open("/tmp/%s.results" % name, "r") - results = fd.readlines() - fd.close() - return results - - -class PktGenDaemon(Daemon): - - def __init__(self, args, pidfile, stdin, stdout, stderr): - super(PktGenDaemon, self).__init__(pidfile, stdin, stdout, stderr) - self.args = args - - def run(self): - self.gen = PktGenerator(self.args) - self.gen.start() - - def stop(self): - self._stop() - return poll(self.args.name) - - -def main(): - args = GenArgParser().parse() - if args.poll: - return poll(args.name) - - pidfile = '/tmp/%s.pid' % args.name - if args.stop: - prefix = "stop" - else: - prefix = "start" - logfile = '/tmp/%s-sendpkts-%s.log' % (prefix, args.name) - open(logfile, 'w+') - daemon = PktGenDaemon(args, pidfile, logfile, logfile, logfile) - if args.stop: - return daemon.stop() - else: - return daemon.start() - -if __name__ == "__main__": - results = main() - print results - sys.exit(0) diff --git a/tcutils/pkgs/Traffic/traffic/scripts/tests/recvpkts_ut.py b/tcutils/pkgs/Traffic/traffic/scripts/tests/recvpkts_ut.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tcutils/pkgs/Traffic/traffic/scripts/tests/sendpkts_ut.py b/tcutils/pkgs/Traffic/traffic/scripts/tests/sendpkts_ut.py deleted file mode 100644 index e69de29bb..000000000 diff --git 
a/tcutils/pkgs/Traffic/traffic/utils/__init__.py b/tcutils/pkgs/Traffic/traffic/utils/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tcutils/pkgs/Traffic/traffic/utils/daemon.py b/tcutils/pkgs/Traffic/traffic/utils/daemon.py deleted file mode 100644 index f7aa219f8..000000000 --- a/tcutils/pkgs/Traffic/traffic/utils/daemon.py +++ /dev/null @@ -1,147 +0,0 @@ -"""Module to make any python script a a daemon. -""" -import os -import sys -import time -import atexit -from signal import SIGTERM - -try: - # Running from the source repo "test". - from tcutils.pkgs.Traffic.traffic.utils.logger import LOGGER, get_logger -except ImportError: - # Distributed and installed as package - from traffic.utils.logger import LOGGER, get_logger - -LOGGER = "%s.core.listener" % LOGGER -log = get_logger(name=LOGGER) - - -class Daemon(object): - - """ - A generic daemon class. - - Usage: subclass the Daemon class and override the run() method - """ - - def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'): - self.stdin = stdin - self.stdout = stdout - self.stderr = stderr - self.pidfile = pidfile - - def daemonize(self): - """ - do the UNIX double-fork magic, see Stevens' "Advanced - Programming in the UNIX Environment" for details (ISBN 0201563177) - http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 - """ - try: - pid = os.fork() - if pid > 0: - # exit first parent - sys.exit(0) - except OSError, e: - sys.stderr.write("fork #1 failed: %d (%s)\n" % - (e.errno, e.strerror)) - sys.exit(1) - - # decouple from parent environment - os.chdir("/") - os.setsid() - os.umask(0) - - # do second fork - try: - pid = os.fork() - if pid > 0: - # exit from second parent - sys.exit(0) - except OSError, e: - sys.stderr.write("fork #2 failed: %d (%s)\n" % - (e.errno, e.strerror)) - sys.exit(1) - - # redirect standard file descriptors - sys.stdout.flush() - sys.stderr.flush() - si = file(self.stdin, 'r') - so = file(self.stdout, 'a+') - se = 
file(self.stderr, 'a+', 0) - os.dup2(si.fileno(), sys.stdin.fileno()) - os.dup2(so.fileno(), sys.stdout.fileno()) - os.dup2(se.fileno(), sys.stderr.fileno()) - - # write pidfile - atexit.register(self.delpid) - pid = str(os.getpid()) - file(self.pidfile, 'w+').write("%s\n" % pid) - - def delpid(self): - os.remove(self.pidfile) - - def start(self): - """ - Start the daemon - """ - # Check for a pidfile to see if the daemon already runs - try: - pf = file(self.pidfile, 'r') - pid = int(pf.read().strip()) - pf.close() - except IOError: - pid = None - - if pid: - message = "pidfile %s already exist. Daemon already running?\n" - sys.stderr.write(message % self.pidfile) - sys.exit(1) - - # Start the daemon - self.daemonize() - self.run() - - def _stop(self, pid=None): - """ - Stop the daemon - """ - if not pid: - try: - pf = file(self.pidfile, 'r') - pid = int(pf.read().strip()) - pf.close() - except IOError: - pid = None - - if not pid: - message = "pidfile %s does not exist. Daemon not running?\n" - sys.stderr.write(message % self.pidfile) - return # not an error in a restart - - # Try killing the daemon process - try: - while 1: - os.kill(pid, SIGTERM) - time.sleep(0.1) - except OSError, err: - err = str(err) - if err.find("No such process") > 0: - if os.path.exists(self.pidfile): - os.remove(self.pidfile) - else: - log.error(str(err)) - sys.exit(1) - - def restart(self): - """ - Restart the daemon - """ - self.stop() - self.start() - - def run(self): - """ - You should override this method when you subclass Daemon. It will be called after the process has been - daemonized by start() or restart(). - """ diff --git a/tcutils/pkgs/Traffic/traffic/utils/globalvars.py b/tcutils/pkgs/Traffic/traffic/utils/globalvars.py deleted file mode 100644 index 3348b2412..000000000 --- a/tcutils/pkgs/Traffic/traffic/utils/globalvars.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Hold all global variables of the traffic package. 
-""" - -import logging - -LOG_LEVEL = logging.INFO -# Uncomment the following when debugging. -#LOG_LEVEL = logging.DEBUG diff --git a/tcutils/pkgs/Traffic/traffic/utils/logger.py b/tcutils/pkgs/Traffic/traffic/utils/logger.py deleted file mode 100644 index 7bc58b50d..000000000 --- a/tcutils/pkgs/Traffic/traffic/utils/logger.py +++ /dev/null @@ -1,47 +0,0 @@ -import logging -import os -import sys - -# logging/format definitions -BANNER_WIDTH = 70 -LOG_BASE = "contrail" -LOGGER = "%s.traffic" % LOG_BASE - -DEFAULT_LEVEL = logging.DEBUG -DEFAULT_FORMAT = "%(asctime)s %(levelname)-8s| %(message)s" -LOG_HANDLERS = [] - - -def get_logger(name=LOGGER, level=DEFAULT_LEVEL, format=None): - """Configures a basic logger. - On the first invocation, the root logger will be configured. - """ - - root_logger = logging.getLogger('') - - # If the root logger already has a handler, don't add a new one. - if not (LOG_HANDLERS or root_logger.handlers): - # Configure the root logger - - root_logger.setLevel(logging.WARNING) - - stdout_handler = logging.StreamHandler(sys.stdout) - LOG_HANDLERS.append(stdout_handler) - - # stderr_handler = logging.StreamHandler(sys.stderr) - # stderr_handler.setLevel(logging.WARNING) - # LOG_HANDLERS.append(stderr_handler) - - if format is None: - format = DEFAULT_FORMAT - - formatter = logging.Formatter(format, datefmt="%H:%M:%S") - - for handler in LOG_HANDLERS: - root_logger.addHandler(handler) - handler.setFormatter(formatter) - - ret_log = logging.getLogger(name) - ret_log.setLevel(level) - - return ret_log diff --git a/tcutils/pkgs/Traffic/traffic/utils/tests/daemon_ut.py b/tcutils/pkgs/Traffic/traffic/utils/tests/daemon_ut.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tcutils/pkgs/Traffic/traffic/utils/tests/logger_ut.py b/tcutils/pkgs/Traffic/traffic/utils/tests/logger_ut.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tcutils/pkgs/Traffic/traffic/utils/util.py 
b/tcutils/pkgs/Traffic/traffic/utils/util.py deleted file mode 100644 index 61b1ab3bf..000000000 --- a/tcutils/pkgs/Traffic/traffic/utils/util.py +++ /dev/null @@ -1,15 +0,0 @@ -import socket - -def is_v4(address): - try: - socket.inet_pton(socket.AF_INET, address) - except socket.error: - return False - return True - -def is_v6(address): - try: - socket.inet_pton(socket.AF_INET6, address) - except socket.error: - return False - return True diff --git a/tcutils/pkgs/__init__.py b/tcutils/pkgs/__init__.py deleted file mode 100644 index 41f811dc9..000000000 --- a/tcutils/pkgs/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""Package directory to hold re-distributable python packages. -""" diff --git a/tcutils/pkgs/install.py b/tcutils/pkgs/install.py deleted file mode 100644 index 8a74131cc..000000000 --- a/tcutils/pkgs/install.py +++ /dev/null @@ -1,209 +0,0 @@ -"""Module to build and install specific package in a specific host.""" - -import os -import importlib -import logging as LOG -from time import sleep - -from fabric.api import run -from fabric.operations import put -from fabric.context_managers import settings, hide -from tcutils.util import run_fab_cmd_on_node, fab_put_file_to_vm - -LOG.basicConfig(format='%(levelname)s: %(message)s', level=LOG.DEBUG) - -SETUP_SCRIPT = 'setup.py' - - -class SSHError(Exception): - pass - - -def build_pkg(pkgdir, pkgsrc, log=LOG): - """Builds the specific package. - - Requires setup.py to be present in the "pkgdir" - """ - builder = Builder(pkgdir, pkgsrc, log) - return builder.build() - - -def install_pkg(pkgdir, pkgdst, log=LOG): - """Copies the package to the specific host - and installs it in the site-packages. - - Requires setup.py to be present in the "pkgdir" - """ - installer = Installer(pkgdir, pkgdst, log) - return installer.install() - - -def build_and_install(pkgdir, pkgsrc, pkgdst, log=LOG): - """Builds the specific package, copies it to the specific host - and installs it in the site-packages. 
- - Requires setup.py to be present in the "pkgdir" - """ - pass - builder = Builder(pkgdir, pkgsrc, log) - if not builder.build(): - return False - - installer = Installer(pkgdir, pkgsrc, pkgdst, log) - return installer.install() - - -class PkgHost(object): - - def __init__(self, host, vm_node_ip=None, user="root", password="C0ntrail123", key=None): - self.host = host - # if None vm_node_ip is same as the host. - if not vm_node_ip: - self.vm_node_ip = host - else: - self.vm_node_ip = vm_node_ip - self.user = user - self.password = password - self.key = key - - -class BuildInstallBase(object): - - def __init__(self, pkgdir, pkgsrc, log): - self.pkgsrc = pkgsrc - self.log = log - pkg = importlib.import_module('tcutils.pkgs.%s' % pkgdir) - pkg = os.path.abspath(pkg.__file__) - self.log.debug("pkg path: %s", pkg) - # If already complied. - self.pkg_path = pkg.replace("__init__.pyc", "") - self.pkg_path = self.pkg_path.replace("__init__.py", "") - self.dist_path = os.path.join(self.pkg_path, "dist") - - def build(self): - pass - - def install(self): - pass - - -class Builder(BuildInstallBase): - - def __init__(self, pkgdir, pkgsrc, log): - super(Builder, self).__init__(pkgdir, pkgsrc, log) - - def build(self): - with hide('everything'): - with settings(host_string='%s@%s' % (self.pkgsrc.user, - self.pkgsrc.host), - password=self.pkgsrc.password, warn_only=True, - abort_on_prompts=False): - if os.path.isfile(os.path.join(self.pkg_path, SETUP_SCRIPT)): - run("cd %s; python %s sdist" % - (self.pkg_path, SETUP_SCRIPT)) - else: - self.log.error("No setup script found at: %s" % - self.pkg_path) - return False - - return True - - -class Installer(BuildInstallBase): - - def __init__(self, pkgdir, pkgsrc, pkgdst, log): - super(Installer, self).__init__(pkgdir, pkgsrc, log) - self.pkgdst = pkgdst - - def copy_to_vm(self, pkg, host): - output = None - self.log.debug("Copying Package %s to VM" % (str(pkg))) - try: - with hide('everything'): - with settings(host_string='%s@%s' % 
(self.pkgsrc.user, host), - password=self.pkgsrc.password, warn_only=True, - abort_on_prompts=False): - output = fab_put_file_to_vm(host_string='%s@%s' % ( - self.pkgdst.user, self.pkgdst.host), - password=self.pkgdst.password, src=pkg, - dest='~/') - self.log.debug(str(output)) - self.log.debug( - "Copied the distro from compute '%s' to VM '%s'", host, self.pkgdst.host) - except Exception, errmsg: - self.logger.exception( - "Exception: %s occured when copying %s" % (errmsg, pkg)) - finally: - return - - def execute_in_vm(self, cmd, host): - output = None - with hide('everything'): - with settings(host_string='%s@%s' % (self.pkgsrc.user, host), - password=self.pkgsrc.password, warn_only=True, - abort_on_prompts=False): - retry = 6 - while True: - output = '' - output = run_fab_cmd_on_node( - host_string='%s@%s' % ( - self.pkgdst.user, self.pkgdst.host), - password=self.pkgdst.password, cmd=cmd, - as_sudo=True) - if ("Connection timed out" in output or - "Connection refused" in output) and retry: - self.log.debug( - "SSH timeout, sshd might not be up yet. will retry after 5 secs.") - sleep(5) - retry -= 1 - continue - elif "Connection timed out" in output: - raise SSHError(output) - else: - break - self.log.debug(output) - return output - - def install(self): - # Look for the pkg distro. - with hide('everything'): - with settings(host_string='%s@%s' % (self.pkgsrc.user, - self.pkgsrc.host), password=self.pkgsrc.password, - warn_only=True, abort_on_prompts=False): - distro = run("cd %s; ls" % self.dist_path) - if (distro == '' or "No such file or directory" in distro): - self.log.error( - "No distribution package found at: %s, Build one." % - self.dist_path) - return False - - # copy distro to the compute node/node in which the vm is present. 
- pkgsrc_host = self.pkgsrc.host - dist_path = self.dist_path - if self.pkgsrc.host != self.pkgsrc.vm_node_ip: - self.log.debug("Cfgm and compute are different; copy the distro from cfgm '%s'" - " to compute '%s'", self.pkgsrc.host, self.pkgsrc.vm_node_ip) - pkgsrc_host = self.pkgsrc.vm_node_ip - dist_path = "/tmp/" - with hide('everything'): - with settings(host_string='%s@%s' % (self.pkgsrc.user, - pkgsrc_host), - password=self.pkgsrc.password, warn_only=True, - abort_on_prompts=False): - put(os.path.join(self.dist_path, distro), dist_path) - self.log.debug( - "Copied the distro to compute '%s'", pkgsrc_host) - - # Copy the pkg to VM and install in it. - distro_dir = distro.replace(".tar.gz", "") - scpout = self.copy_to_vm(os.path.join(dist_path, distro), pkgsrc_host) - self.log.debug(scpout) - # Remove the distro dir if present - out = self.execute_in_vm("rm -rf %s" % distro_dir, pkgsrc_host) - out = self.execute_in_vm("tar -xvzf %s" % distro, pkgsrc_host) - self.log.debug(out) - out = self.execute_in_vm("cd %s; python %s install" % (distro_dir, - SETUP_SCRIPT), pkgsrc_host) - self.log.debug(out) - - return True diff --git a/tcutils/pkgs/syn_ack_test/syn_client.py b/tcutils/pkgs/syn_ack_test/syn_client.py deleted file mode 100755 index 069d47dfe..000000000 --- a/tcutils/pkgs/syn_ack_test/syn_client.py +++ /dev/null @@ -1,30 +0,0 @@ -#! 
/usr/bin/env python - -from scapy.all import * -import sys - -ip_remote = sys.argv[1] -ip_local = sys.argv[2] - -os.system( - 'iptables -A OUTPUT -p tcp --tcp-flags RST RST -s %s -j DROP' % - ip_local) - -ip=IP(dst=ip_remote) -# Generate random source port number -port=8100 - -# Create SYN packet -SYN=ip/TCP(sport=port, dport=8000, flags="S", seq=42) - -# Send SYN and receive SYN,ACK -SYNACK=sr1(SYN) -print SYNACK - -# Create ACK packet -ACK=ip/TCP(sport=SYNACK.dport, dport=8000, flags="A", seq=SYNACK.ack, ack=SYNACK.seq + 1) - -# SEND our ACK packet -send(ACK) - -print "SUCCESS" diff --git a/tcutils/pkgs/syn_ack_test/syn_server.py b/tcutils/pkgs/syn_ack_test/syn_server.py deleted file mode 100755 index f7fbe8a4d..000000000 --- a/tcutils/pkgs/syn_ack_test/syn_server.py +++ /dev/null @@ -1,21 +0,0 @@ -#! /usr/bin/env python - -from scapy.all import * -from time import sleep -import sys - -ip_remote = sys.argv[1] -ip_local = sys.argv[2] - -os.system( - 'iptables -A OUTPUT -p tcp --tcp-flags RST RST -s %s -j DROP' % - ip_local) - -server = conf.L3socket(filter='host %s' % ip_remote) -ip=IP(dst=ip_remote) -SYN = server.recv() -sleep(182) -SYNACK = ip/TCP(sport=SYN.dport, dport=SYN.sport, flags="SA", seq=1001, ack=SYN.seq + 1) -sr1(SYNACK) - -print "SUCCESS" diff --git a/tcutils/pkgs/traceroute_2.0.18-1_amd64.deb b/tcutils/pkgs/traceroute_2.0.18-1_amd64.deb deleted file mode 100644 index 151058953..000000000 Binary files a/tcutils/pkgs/traceroute_2.0.18-1_amd64.deb and /dev/null differ diff --git a/tcutils/poc.py b/tcutils/poc.py deleted file mode 100644 index d2d99639a..000000000 --- a/tcutils/poc.py +++ /dev/null @@ -1,45 +0,0 @@ -from functools import (partial, wraps) - - -def template(args): - - def wrapper(func): - func.template = args - return func - - return wrapper - - -def method_partial(func, *parameters, **kparms): - @wraps(func) - def wrapped(self, *args, **kw): - kw.update(kparms) - return func(self, *(args + parameters), **kw) - return wrapped - - -class 
TemplateTestCase(type): - - def __new__(cls, name, bases, attr): - - new_methods = {} - - for method_name in attr: - if hasattr(attr[method_name], "template"): - source = attr[method_name] - source_name = method_name.lstrip("_") - - for test_name, args in source.template.items(): - parg, kwargs = args - - new_name = "%s_%s" % (source_name, test_name) - new_methods[new_name] = method_partial( - source, *parg, **kwargs) - new_methods[new_name].__name__ = new_name - - attr.update(new_methods) - return type(name, bases, attr) - - -def Call(*args, **kwargs): - return (args, kwargs) diff --git a/tcutils/rsyslog_utils.py b/tcutils/rsyslog_utils.py deleted file mode 100644 index 701433744..000000000 --- a/tcutils/rsyslog_utils.py +++ /dev/null @@ -1,208 +0,0 @@ -from fabric.api import run -from fabric.context_managers import settings -import time -COLLECTOR_CONF_FILE = '/etc/contrail/contrail-collector.conf' -RSYSLOG_CONF_FILE = '/etc/rsyslog.conf' - -def restart_collector_to_listen_on_port( - self, - collector_ip, - port_no=35999): - try: - with settings(host_string='%s@%s' % (self.inputs.username, - collector_ip), password=self.inputs.password, - warn_only=True, abort_on_prompts=False): - cmd = "grep 'syslog_port' " + COLLECTOR_CONF_FILE - output = run('%s' % (cmd), pty=True) - if output: - output = output.rstrip() - cmd = "sed -i '/" + str(output) + "/c\ syslog_port=" \ - + str(port_no) + "' " + COLLECTOR_CONF_FILE - run('%s' % (cmd), pty=True) - # Restart vizd if port no has been changed. - cmd = "service contrail-collector restart" - run('%s' % (cmd), pty=True) - cmd = "service contrail-collector status | grep 'RUNNING'" - output = run('%s' % (cmd), pty=True) - if not 'RUNNING' in output: - self.logger.error( - "contrail-collector service restart failure!!") - else: - cmd = "sed -i '/DEFAULT/ a \ syslog_port=" + \ - str(port_no) + "' " + COLLECTOR_CONF_FILE - run('%s' % (cmd), pty=True) - # Restart vizd if port no has been changed. 
- cmd = "service contrail-collector restart" - run('%s' % (cmd), pty=True) - cmd = "service contrail-collector status | grep 'RUNNING'" - output = run('%s' % (cmd), pty=True) - if not 'RUNNING' in output: - self.logger.error( - "contrail-collector service restart failure!!") - except Exception as e: - self.logger.exception( - "Got exception at restart_collector_to_listen_on_port as %s" % - (e)) -# end restart_collector_to_listen_on_port - - -def update_rsyslog_client_connection_details( - self, - node_ip, - server_ip='127.0.0.1', - protocol='udp', - port=35999, - restart=False): - try: - with settings(host_string='%s@%s' % (self.inputs.username, node_ip), - password=self.inputs.password, - warn_only=True, abort_on_prompts=False): - - if protocol == 'tcp': - protocol = '@@' - else: - protocol = '@' - connection_string = protocol + server_ip + ':' + str(port) - - cmd = "grep '@\{1,2\}" - cmd = cmd + "[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}' " - cmd = cmd + RSYSLOG_CONF_FILE - output = run('%s' % (cmd), pty=True) - if output: - output = output.rstrip() - cmd = "sed -i '/" + \ - str(output) + "/c\*.* " + str(connection_string) \ - + "' " + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - else: - cmd = "echo '*.* " + connection_string + \ - "' >> " + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - - if restart is True: - # restart rsyslog service - cmd = "service rsyslog restart" - run('%s' % (cmd), pty=True) - cmd = "service rsyslog status | grep 'running'" - output = run('%s' % (cmd), pty=True) - if not 'running' in output: - self.logger.error("rsyslog service restart failure!!") - - except Exception as e: - self.logger.exception( - "Got exception at update_rsyslog_client_connection_details as %s" % - (e)) - -# end update_syslog_client_connection_details - - -def restart_rsyslog_client_to_send_on_port( - self, - node_ip, - server_ip, - port_no=35999): - try: - with settings(host_string='%s@%s' % (self.inputs.username, node_ip), - 
password=self.inputs.password, - warn_only=True, abort_on_prompts=False): - - # update Working Directory in rsyslog.conf - cmd = "grep 'WorkDirectory' " + RSYSLOG_CONF_FILE - output = run('%s' % (cmd), pty=True) - if output: - output = output.rstrip() - cmd = "sed -i '/$WorkDirectory/c\\\$WorkDirectory \/var\/tmp' " - cmd = cmd + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - else: - cmd = "echo '$WorkDirectory /var/tmp' >> " + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - - # update Queue file name in rsyslog.conf - cmd = "grep 'ActionQueueFileName' " + RSYSLOG_CONF_FILE - output = run('%s' % (cmd), pty=True) - if output: - output = output.rstrip() - cmd = "sed -i '/" + \ - str(output) + "/c\\\$ActionQueueFileName fwdRule1' " - cmd = cmd + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - else: - cmd = "echo '$ActionQueueFileName fwdRule1' >> " - cmd = cmd + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - - # update Max Disk Space for remote logging packets in - # rsyslog.conf - cmd = "grep 'ActionQueueMaxDiskSpace' " + RSYSLOG_CONF_FILE - output = run('%s' % (cmd), pty=True) - if output: - output = output.rstrip() - cmd = "sed -i '/" + \ - str(output) + "/c\\\$ActionQueueMaxDiskSpace 1g' " - cmd = cmd + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - else: - cmd = "echo '$ActionQueueMaxDiskSpace 1g' >> " - cmd = cmd + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - - # update Queue save on shutdown - cmd = "grep 'ActionQueueSaveOnShutdown' " + RSYSLOG_CONF_FILE - output = run('%s' % (cmd), pty=True) - if output: - output = output.rstrip() - cmd = "sed -i '/" + \ - str(output) + "/c\\\$ActionQueueSaveOnShutdown on' " - cmd = cmd + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - else: - cmd = "echo '$ActionQueueSaveOnShutdown on' >> " - cmd = cmd + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - - # update Queue type - cmd = "grep 'ActionQueueType' " + RSYSLOG_CONF_FILE - output = run('%s' % (cmd), pty=True) - if output: - output = output.rstrip() 
- cmd = "sed -i '/" + \ - str(output) + "/c\\\$ActionQueueType LinkedList' " - cmd = cmd + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - else: - cmd = "echo '$ActionQueueType LinkedList' >> " - cmd = cmd + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - - # update Connection resume retry count - cmd = "grep 'ActionResumeRetryCount' " + RSYSLOG_CONF_FILE - output = run('%s' % (cmd), pty=True) - if output: - output = output.rstrip() - cmd = "sed -i '/" + \ - str(output) + "/c\\\$ActionResumeRetryCount -1' " - cmd = cmd + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - else: - cmd = "echo '$ActionResumeRetryCount -1' >> " - cmd = cmd + RSYSLOG_CONF_FILE - run('%s' % (cmd), pty=True) - - # update rsyslog client-server connection details - update_rsyslog_client_connection_details(self, node_ip, server_ip) - - # restart rsyslog service - cmd = "service rsyslog restart" - run('%s' % (cmd), pty=True) - cmd = "service rsyslog status | grep 'running'" - output = run('%s' % (cmd), pty=True) - if not 'running' in output: - self.logger.error("rsyslog service restart failure!!") - - except Exception as e: - self.logger.exception( - "Got exception at restart_rsyslog_client_to_send_on_port as %s" % - (e)) - -# end restart_rsyslog_client_to_send_on_port diff --git a/tcutils/services.py b/tcutils/services.py deleted file mode 100644 index ff0fa7f27..000000000 --- a/tcutils/services.py +++ /dev/null @@ -1,17 +0,0 @@ -""" Module to perfome command line operation in the services.""" - -from fabric.api import run, cd -from fabric.context_managers import settings, hide - - -def execute_cmd_in_node(node, user, passwd, cmd): - with hide('everything'): - with settings(host_string='%s@%s' % (user, node), password=password, - warn_only=True, abort_on_prompts=False): - output = run(cmd) - return output - - -def get_status(node, user, passwd, service): - cmd = "service %s status" % serivce - return execute_cmd_in_node(node, user, passwd, cmd) diff --git a/tcutils/tcpdump_utils.py 
b/tcutils/tcpdump_utils.py deleted file mode 100644 index 8ca56e449..000000000 --- a/tcutils/tcpdump_utils.py +++ /dev/null @@ -1,70 +0,0 @@ -# utils to start and stop tcpdump on VM -import logging - -from util import retry -from tcutils.commands import ssh, execute_cmd, execute_cmd_out -from tcutils.util import get_random_name - -def start_tcpdump_for_intf(ip, username, password, interface, filters='-v', logger=None): - if not logger: - logger = logging.getLogger(__name__) - session = ssh(ip, username, password) - pcap = '/tmp/%s_%s.pcap' % (interface, get_random_name()) - cmd = 'tcpdump -ni %s -U %s -w %s' % (interface, filters, pcap) - execute_cmd(session, cmd, logger) - return (session, pcap) - -def stop_tcpdump_for_intf(session, pcap, logger=None): - if not logger: - logger = logging.getLogger(__name__) - cmd = 'kill $(ps -ef|grep tcpdump | grep pcap| awk \'{print $2}\')' - execute_cmd(session, cmd, logger) - return True - -def start_tcpdump_for_vm_intf(obj, vm_fix, vn_fq_name, filters='-v'): - compute_ip = vm_fix.vm_node_ip - compute_user = obj.inputs.host_data[compute_ip]['username'] - compute_password = obj.inputs.host_data[compute_ip]['password'] - vm_tapintf = obj.orch.get_vm_tap_interface(vm_fix.tap_intf[vn_fq_name]) - return start_tcpdump_for_intf(compute_ip, compute_user, - compute_password, vm_tapintf, filters) - -def stop_tcpdump_for_vm_intf(obj, session, pcap): - return stop_tcpdump_for_intf(session, pcap) - -@retry(delay=2, tries=6) -def verify_tcpdump_count(obj, session, pcap, exp_count=None): - - cmd = 'tcpdump -r %s | wc -l' % pcap - out, err = execute_cmd_out(session, cmd, obj.logger) - count = int(out.strip('\n')) - result = True - if exp_count is not None: - if count != exp_count: - obj.logger.warn("%s packets are found in tcpdump output file %s but \ - expected %s" % (count, pcap, exp_count)) - result = False - else: - if count == 0: - obj.logger.warn("No packets are found in tcpdump output file %s but \ - expected some packets" % (pcap)) - 
result = False - - if result: - obj.logger.info( - "%s packets are found in tcpdump output as expected", - count) - stop_tcpdump_for_vm_intf(obj, session, pcap) - return result - -def search_in_pcap(session, pcap, search_string): - cmd = 'tcpdump -v -r %s | grep "%s"' % (pcap, search_string) - out, err = execute_cmd_out(session, cmd) - if search_string in out: - return True - else: - return False -# end search_in_pcap - -def delete_pcap(session, pcap): - execute_cmd_out(session, 'rm -f %s' % (pcap)) diff --git a/tcutils/tcpechoclient.py b/tcutils/tcpechoclient.py deleted file mode 100644 index b56424574..000000000 --- a/tcutils/tcpechoclient.py +++ /dev/null @@ -1,82 +0,0 @@ -import socket -import sys -import time -import argparse - -def parse_cli(args): - '''Define and Parse arguments for the script''' - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument('--servers', action='store', required=True, nargs='+', - help='List of servers') - parser.add_argument('--dports', action='store', nargs='+', - default=[50000], type=int, help='List of dst ports') - parser.add_argument('--flows', action='store', default='1', type=int, - help='No of flows per dst port[1]') - parser.add_argument('--slow', action='store_true', - help='Enable slow mode where in there is a pause between each send') - parser.add_argument('--retry', action='store_true', - help='Retry connecting to service indefinitely, if down') - parser.add_argument('--count', action='store', default='0', type=int, - help='No of echo pkts to send, <=0 means indefinite') - pargs = parser.parse_args(args) - return pargs - -def run(args): - socks = list() - connects = 0 - servers = args.servers - dports = args.dports - sport_start = 20000 - n_tranx = 1 if args.slow else 10 - - for server in servers: - for port in dports: - for sport in range(sport_start, sport_start+args.flows): - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - 
#s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) - s.bind(('', sport)) - service = (server, port) - while True: - try: - s.connect(service) - connects += 1 - break - except socket.error as e: - print 'service', service, 'seems to be down.', e - if args.retry: - time.sleep(5) - continue - raise - socks.append(s) - - print 'Able to successfully create ', connects, 'connections' - message = 'Hello' - - iter = 1 - while True: - for s in socks: - for i in range(0, n_tranx): - s.send(message) - for s in socks: - data = s.recv(1024) - print data - if not data: - print 'closing socket', s.getsockname() - s.close() - iter = iter + 1 - if args.count > 0 and iter > args.count: - for s in socks: - s.close() - print 'sent and received', args.count, 'echos' - break - if args.slow: - time.sleep(1) - -def main(): - args = parse_cli(sys.argv[1:]) - run(args) - -if __name__ == '__main__': - main() - diff --git a/tcutils/tcpechoserver.py b/tcutils/tcpechoserver.py deleted file mode 100644 index f51ff716b..000000000 --- a/tcutils/tcpechoserver.py +++ /dev/null @@ -1,94 +0,0 @@ -import select -import socket -import sys -import Queue - -min=int(sys.argv[1]) if len(sys.argv) > 1 else 50000 -max=int(sys.argv[2]) if len(sys.argv) > 2 else 50000 -sockets= list() -# Create a TCP/IP socket -for port in range(min, max+1): - socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - socket.setblocking(0) - # Bind the socket to the port - service = ('', port) - print 'starting up on %s port %s' % service - socket.bind(service) - - # Listen for incoming connections - socket.listen(5) - sockets.append(socket) - -# Sockets from which we expect to read -inputs = [s for s in sockets] - -# Sockets to which we expect to write -outputs = [ ] - -# Outgoing message queues (socket:Queue) -message_queues = {} - -while inputs: - - # Wait for at least one of the sockets to be ready for processing - readable, writable, exceptional = select.select(inputs, outputs, inputs) - # Handle inputs - for s in 
readable: - if s in sockets: - # A "readable" server socket is ready to accept a connection - connection, client_address = s.accept() - print 'new connection from', client_address - connection.setblocking(0) - inputs.append(connection) - - # Give the connection a queue for data we want to send - message_queues[connection] = Queue.Queue() - else: - try: - data = s.recv(1024) - except Exception as e: - print e - continue - if data: - # A readable client socket has data - message_queues[s].put(data) - # Add output channel for response - if s not in outputs: - outputs.append(s) - else: - # Interpret empty result as closed connection - print 'closing', s, 'after reading no data' - # Stop listening for input on the connection - if s in outputs: - outputs.remove(s) - inputs.remove(s) - s.close() - - # Remove message queue - del message_queues[s] - - # Handle outputs - for s in writable: - try: - next_msg = message_queues[s].get_nowait() - except Queue.Empty: - # No messages waiting so stop checking for writability. 
- outputs.remove(s) - else: - try: - s.send(next_msg) - except Exception as e: - print e - - # Handle "exceptional conditions" - for s in exceptional: - # Stop listening for input on the connection - inputs.remove(s) - if s in outputs: - outputs.remove(s) - print 'closing socket', s - s.close() - - # Remove message queue - del message_queues[s] - diff --git a/tcutils/templates/pktgen_template.py b/tcutils/templates/pktgen_template.py deleted file mode 100644 index 9846aa7b6..000000000 --- a/tcutils/templates/pktgen_template.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -modprobe pktgen - -function pgset() { - local result - - echo $1 > $PGDEV - - result=$(cat $PGDEV | fgrep "Result: OK:") - if [ "$result" = "" ]; then - cat $PGDEV | fgrep Result: - fi -} - -PGDEV=/proc/net/pktgen/kpktgend_0 -pgset "rem_device_all" - -PGDEV=/proc/net/pktgen/kpktgend_0 -pgset "add_device eth0" - -PGDEV=/proc/net/pktgen/eth0 -pgset "clone_skb 0" -pgset "pkt_size $__pkt_size__" -pgset "count $__count__" -pgset "delay 0" -pgset "dst $__dst_ip__" -pgset "src_min $__src_ip__" -pgset "src_max 10.100.12.252" -pgset "udp_dst_min $__dst_port_mim__" -pgset "udp_dst_max $__dst_port_max__" -pgset "udp_src_min $__src_port_min__" -pgset "udp_src_max $__src_port_max__" - - -PGDEV=/proc/net/pktgen/pgctrl -echo "Starting...ctrl^C to stop" -pgset "start" -echo "Done diff --git a/tcutils/test_lib/__init__.py b/tcutils/test_lib/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tcutils/test_lib/test_utils.py b/tcutils/test_lib/test_utils.py deleted file mode 100644 index f7d02ca1e..000000000 --- a/tcutils/test_lib/test_utils.py +++ /dev/null @@ -1,18 +0,0 @@ -# Add common test utils, which can be used by all test scripts.. 
-from netaddr import IPNetwork - -def assertEqual(a, b, error_msg): - '''Assert with error msg''' - assert (a == b), error_msg - -def get_ip_list_from_prefix(prefix): - - return map(str, IPNetwork(prefix).iter_hosts()) - -def get_min_max_ip_from_prefix(prefix): - - ip_list = get_ip_list_from_prefix(prefix) - min_ip = ip_list[0] - max_ip = ip_list[-1] - return [min_ip, max_ip] - diff --git a/tcutils/tests/commands_ut.py b/tcutils/tests/commands_ut.py deleted file mode 100644 index 0742350d5..000000000 --- a/tcutils/tests/commands_ut.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Unittest for commands module.""" - -import unittest -from time import sleep - -from tcutils.commands import Command - - -class TestCommand(unittest.TestCase): - - def test_command(self): - ping = Command("ping localhost") - ping.start() - sleep(2) - r, o, e = ping.stop() - assert 'PING' in o - -if __name__ == '__main__': - unittest.main() diff --git a/tcutils/tests/cores_ut.py b/tcutils/tests/cores_ut.py deleted file mode 100644 index cf63128ae..000000000 --- a/tcutils/tests/cores_ut.py +++ /dev/null @@ -1,2 +0,0 @@ -"""Unittests for cores module. 
-""" diff --git a/tcutils/timeout.py b/tcutils/timeout.py deleted file mode 100644 index 299f52e8d..000000000 --- a/tcutils/timeout.py +++ /dev/null @@ -1,28 +0,0 @@ -# From http://stackoverflow.com/questions/2281850/timeout-function-if-it-takes-too-long-to-finish -# -# Usage : -# with timeout(seconds=3): -# sleep(4) - -from threading import Timer -import thread - -from time import sleep - -class TimeoutError(Exception): - pass - -class timeout: - def __init__(self, seconds=1, error_message='Timeout'): - self.seconds = seconds - self.error_message = error_message - self.timer = Timer(seconds, self.handle_timeout) - - def handle_timeout(self): - raise TimeoutError(self.error_message) - - def __enter__(self): - self.timer.start() - - def __exit__(self, type, value, traceback): - self.timer.cancel() diff --git a/tcutils/topo/__init__.py b/tcutils/topo/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tcutils/topo/sdn_topo_setup.py b/tcutils/topo/sdn_topo_setup.py deleted file mode 100644 index 1a806c032..000000000 --- a/tcutils/topo/sdn_topo_setup.py +++ /dev/null @@ -1,240 +0,0 @@ -import os -import copy -from common.openstack_libs import nova_client as mynovaclient -from common.openstack_libs import nova_exception as novaException -import fixtures -import testtools -import topo_steps -from common.contrail_test_init import ContrailTestInit -from vn_test import * -from vn_policy_test import * -from quantum_test import * -from vnc_api_test import * -from nova_test import * -from vm_test import * -from common.connections import ContrailConnections -from floating_ip import * -from policy_test import * -from contrail_fixtures import * -from tcutils.agent.vna_introspect_utils import * -from topo_helper import * -from vnc_api import vnc_api -from vnc_api.gen.resource_test import * -try: - from webui_test import * -except ImportError: - pass - -class sdnTopoSetupFixture(fixtures.Fixture): - - def __init__(self, connections, topo): - self.ini_file = 
os.environ.get('TEST_CONFIG_FILE') - self.connections = connections - self.inputs = self.connections.inputs - self.quantum_h = self.connections.quantum_h - self.nova_h = self.connections.nova_h - self.vnc_lib = self.connections.vnc_lib - self.orch = self.connections.orch - self.logger = self.inputs.logger - self.topo = topo - if self.inputs.verify_thru_gui(): - self.browser = self.connections.browser - self.browser_openstack = self.connections.browser_openstack - self.webui = WebuiTest(self.connections, self.inputs) - # end __init__ - - def setUp(self): - super(sdnTopoSetupFixture, self).setUp() - # end setUp - - def topo_setup(self, config_option='openstack', skip_verify='no', flavor='contrail_flavor_small', vms_on_single_compute=False, VmToNodeMapping=None): - '''Take topology to be configured as input and return received & configured topology -collection - of dictionaries. we return received topology as some data is updated and is required for - reference. - Bring up with 2G RAM to support multiple traffic streams..For scaling tests, min of 8192 is recommended. - Available config_option for SDN topo setup - 1. 'openstack': Configures all sdn entities like VN,policy etc using Openstack API - a. Project: Keystone - b. Policy: Quantum - c. IPAM: Contrail API - d. VN: Quantum - e. VM: Nova - 2. 'contrail': Configures all sdn entities like VN,policy etc using Contrail API - a. Project: Keystone - b. Policy: Contrail API - c. IPAM: Contrail API - d. VN: Contrail API - e. 
VM: Nova - ''' - config_option = 'contrail' if self.inputs.orchestrator == 'vcenter' else config_option - self.result = True - self.err_msg = [] - self.flavor = flavor - self.skip_verify = skip_verify - self.public_vn_present = False - self.fvn_vm_map = False - self.fvn_fixture = None - self.fip_fixture = None - self.si_fixture = {} - self.fip_fixture_dict = { - } - self.secgrp_fixture = None - topo_helper_obj = topology_helper(self.topo) - self.topo.vmc_list = topo_helper_obj.get_vmc_list() - self.topo.policy_vn = topo_helper_obj.get_policy_vn() - self.logger.info("Starting setup") - topo_steps.createUser(self) - topo_steps.createProject(self) - topo_steps.createSec_group(self, option=config_option) - topo_steps.createServiceTemplate(self) - topo_steps.createServiceInstance(self) - topo_steps.createIPAM(self, option=config_option) - topo_steps.createVN(self, option=config_option) - topo_steps.createPolicy(self, option=config_option) - topo_steps.attachPolicytoVN(self, option=config_option) - # If vm to node pinning is defined then pass it on to create VM method. 
- if VmToNodeMapping is not None: - topo_steps.createVMNova( - self, config_option, vms_on_single_compute, VmToNodeMapping) - else: - topo_steps.createVMNova(self, config_option, vms_on_single_compute) - topo_steps.createPublicVN(self) - topo_steps.verifySystemPolicy(self) - # prepare return data - config_topo = { - 'project': self.project_fixture, 'policy': self.policy_fixt, 'vn': self.vn_fixture, 'vm': self.vm_fixture, - 'fip': [self.public_vn_present, self.fvn_fixture, self.fip_fixture, self.fvn_vm_map, self.fip_fixture_dict], - 'si': self.si_fixture, 'st': self.st_fixture, 'sec_grp': self.secgrp_fixture, 'ipam': self.ipam_fixture} - if self.err_msg != []: - self.result = False - updated_topo = copy.copy(self.topo) - return {'result': self.result, 'msg': self.err_msg, 'data': [updated_topo, config_topo]} - # end topo_setup - - def sdn_topo_setup(self, config_option='openstack', skip_verify='no', flavor='contrail_flavor_small', vms_on_single_compute=False): - '''This is wrapper script which internally calls topo_setup to setup sdn topology based on topology. - This wrapper is basically used to configure multiple projects and it support assigning of FIP to VM from public VN. 
- ''' - topo = {} - topo_objs = {} - config_topo = {} - result = True - err_msg = [ - ] - total_vm_cnt = 0 - fip_possible = False - - # If a vm to compute node mapping is defined pass it on to topo_setup() - try: - if self.topo.vm_node_map: - VmToNodeMapping = self.topo.vm_node_map - else: - VmToNodeMapping = None - except: - VmToNodeMapping = None - - self.public_vn_present = False - self.fvn_vm_map = False - self.fip_ip_by_vm = { - } - self.fvn_fixture = None - self.fip_fixture = None - self.fip_fixture_dict = {} - topo_name = self.topo.__class__ - if 'project_list' in dir(self.topo): - self.projectList = self.topo.project_list - else: - self.projectList = [self.inputs.project_name] - for project in self.projectList: - setup_obj = {} - topo_obj = topo_name() - # expect class topology elements to be defined under method - # "build_topo_" - try: - topo[project] = eval("topo_obj." + self.topo.topo_of_project[project] + "(" + - "project='" + project + - "',username='" + self.topo.user_of_project[project] + - "',password='" + self.topo.pass_of_project[project] + - "',config_option='" + config_option + - "')") - except (NameError, AttributeError): - topo[project] = eval("topo_obj.build_topo_" + project + "()") - - setup_obj[project] = self.useFixture( - sdnTopoSetupFixture(self.connections, topo[project])) - out = setup_obj[project].topo_setup( - config_option, skip_verify, flavor, vms_on_single_compute, VmToNodeMapping) - if out['result'] == True: - topo_objs[project], config_topo[project] = out['data'] - total_vm_cnt = total_vm_cnt + len(config_topo[project]['vm']) - fip_info = config_topo[project]['fip'] - # If public VN present, get the public vn and FIP fixture obj - if fip_info[0]: - self.public_vn_present = True - self.fvn_fixture = fip_info[1] - self.fip_fixture = fip_info[2] - # If floating ip pools are created in VN's and supposed to be - # assigned to VM's in other VN - if fip_info[3]: - self.fvn_vm_map = True - self.fip_fixture_dict = fip_info[4] - 
self.logger.info("Setup completed for project %s with result %s" % - (project, out['result'])) - if out['result'] == False: - result = False - err_msg.append(out['msg']) - # Allocate and Associate floating IP to VM,if there is any provision to - # do so - fip_possible = topo_steps.verify_fip_associate_possible( - self, vm_cnt=total_vm_cnt) - if fip_possible: - topo_steps.allocateNassociateFIP(self, config_topo) - - self.config_topo = config_topo - # Extra steps to assign FIP from VNs configured with FIP pool to VMs as defined in topology - topo_steps.createAllocateAssociateVnFIPPools(self) - - if len(self.projectList) == 1 and 'admin' in self.projectList: - return {'result': result, 'msg': err_msg, 'data': [topo_objs[self.inputs.project_name], config_topo[self.inputs.project_name], [fip_possible, self.fip_ip_by_vm]]} - else: - return {'result': result, 'msg': err_msg, 'data': [topo_objs, config_topo, [fip_possible, self.fip_ip_by_vm]]} - - # end sdn_topo_setup - - def verify_sdn_topology(self, topo_objects, config_topo): - """Verify basic components of sdn topology. 
Takes topo_objects and config_topo as input parameter""" - for project in topo_objects.keys(): - # verify projects - assert config_topo[project]['project'][ - project].verify_on_setup(), "One or more verifications failed for Project:%s" % project - # verify security-groups - for sec_grp in topo_objects[project].sg_list: - assert config_topo[project]['sec_grp'][sec_grp].verify_on_setup( - ), "One or more verifications failed for Security-Group:%s" % sec_grp - # verify virtual-networks and ipams - for vnet in topo_objects[project].vnet_list: - assert config_topo[project]['vn'][vnet].verify_on_setup_without_collector( - ), "One or more verifications failed for VN:%s" % vnet - if vnet in topo_objects[project].vn_ipams.keys(): - ipam = topo_objects[project].vn_ipams[vnet] - assert config_topo[project]['ipam'][ - ipam].verify_on_setup(), "One or more verifications failed for IPAM:%s" % ipam - # verify policy - for policy in topo_objects[project].policy_list: - assert config_topo[project]['policy'][ - policy].verify_on_setup(), "One or more verifications failed for Policy:%s" % policy - # verify virtual-machines - for vmc in topo_objects[project].vmc_list: - assert config_topo[project]['vm'][ - vmc].verify_on_setup(), "One or more verifications failed for VM:%s" % vmc - return True - # end verify_sdn_topology - - def cleanUp(self): - if self.inputs.fixture_cleanup == 'yes': - super(sdnTopoSetupFixture, self).cleanUp() - else: - self.logger.info('Skipping sdn topology config cleanup') - # end cleanUp - -# end sdnSetupFixture diff --git a/tcutils/topo/topo_helper.py b/tcutils/topo/topo_helper.py deleted file mode 100644 index 8ceadfd80..000000000 --- a/tcutils/topo/topo_helper.py +++ /dev/null @@ -1,50 +0,0 @@ -''' Take logical topology object and provide methods to extend/derive data from user topology to help verifications ''' - - -class topology_helper (): - - def __init__(self, topology_obj): - self.topo_dict = topology_obj.__dict__ - self.vnet_list = 
self.topo_dict['vnet_list'] - self.vn_policy = self.topo_dict['vn_policy'] - self.policy_list = self.topo_dict['policy_list'] - self.rules = self.topo_dict['rules'] - if 'si_list' in self.topo_dict: - self.si_list = self.topo_dict['si_list'] - else: - self.si_list = [] - self.vmc_list = [] - self.policy_vn = {} - self.pol_si = {} - self.si_pol = {} - - - def get_vmc_list(self): - vn_vm_l = self.topo_dict['vn_of_vm'] - for key, value in vn_vm_l.iteritems(): - self.vmc_list.append(key) - return self.vmc_list - - def get_policy_vn(self): - for policy in self.policy_list: - self.policy_vn[policy] = [] - for vn in self.vnet_list: - if policy in self.vn_policy[vn]: - self.policy_vn[policy].append(vn) - # print "added vn %s to list for policy %s" %(vn, policy) - # print "completed parsing vn %s policy list" %(vn) - # print "completed building vn list for policy %s, list is %s" - # %(policy, self.policy_vn[policy]) - return self.policy_vn - - def get_vm_of_vn(self): - '''return vm list by vn; data of format {vn1: [vmc1, vmc2], vn2: [vmc3, vmc4]}''' - self.vm_of_vn = {} - # self.vn_of_vm= {'vmc0': 'vnet0', 'vmc1': 'vnet1'} format... 
- vn_vm_l = self.topo_dict['vn_of_vm'] - for vn in self.vnet_list: - self.vm_of_vn[vn] = [] - for k, v in vn_vm_l.items(): - self.vm_of_vn[v].append(k) - return self.vm_of_vn -# end diff --git a/tcutils/topo/topo_steps.py b/tcutils/topo/topo_steps.py deleted file mode 100644 index 02074c3d3..000000000 --- a/tcutils/topo/topo_steps.py +++ /dev/null @@ -1,908 +0,0 @@ -''' This module provides utils for setting up sdn topology given the topo inputs''' -import os -import copy -from common.openstack_libs import nova_client as mynovaclient -from common.openstack_libs import nova_exception as novaException -import fixtures -import topo_steps -from common.contrail_test_init import ContrailTestInit -from vn_test import * -from vn_policy_test import * -from quantum_test import * -from vnc_api_test import * -from nova_test import * -from vm_test import * -from common.connections import ContrailConnections -from floating_ip import * -from policy_test import * -from contrail_fixtures import * -from user_test import UserFixture -from tcutils.agent.vna_introspect_utils import * -from topo_helper import * -from vnc_api import vnc_api -from vnc_api.gen.resource_test import * -from netaddr import * -from common.policy import policy_test_helper -from svc_template_fixture import SvcTemplateFixture -from svc_instance_fixture import SvcInstanceFixture -from security_group import SecurityGroupFixture -try: - from webui_test import * -except ImportError: - pass - -def createUser(self): - self.logger.info("Setup step: Creating User") - if not ( - (self.topo.username == 'admin' or self.topo.username is None) and ( - self.topo.project == 'admin')): - self.user_fixture = self.useFixture( - UserFixture( - connections=self.connections, - username=self.topo.username, password=self.topo.password)) - return self -# end createUser - -def createProject(self): - self.logger.info("Setup step: Creating Project") - self.project_fixture = {} - self.project_fixture[self.topo.project] = self.useFixture( - 
ProjectFixture( - project_name=self.topo.project, vnc_lib_h=self.vnc_lib, - username=self.topo.username, password=self.topo.password, - connections=self.connections)) - if not ( - (self.topo.username == 'admin' or self.topo.username is None) and ( - self.topo.project == 'admin')): - self.logger.info( - "provision user %s with role as admin in tenant %s" % - (self.topo.username, self.topo.project)) - self.user_fixture.add_user_to_tenant( - self.topo.project, - self.topo.username, - 'admin') - self.project_inputs = ContrailTestInit( - self.ini_file, stack_user=self.project_fixture[ - self.topo.project].username, stack_password=self.project_fixture[ - self.topo.project].password, project_fq_name=[ - 'default-domain', self.topo.project], logger=self.logger) - self.project_connections = ContrailConnections( - self.project_inputs, - self.logger) - self.project_parent_fixt = self.useFixture( - ProjectTestFixtureGen(self.vnc_lib, project_name=self.topo.project)) - if self.skip_verify == 'no': - assert self.project_fixture[ - self.topo.project].verify_on_setup() - return self -# end createProject - - -def createSec_group(self, option='contrail'): - if option == 'openstack': - create_sg_quantum(self) - elif option == 'contrail': - create_sg_contrail(self) - else: - self.logger.error("invalid config option %s" % option) - return self -# end of createSec_group - -def create_sg_quantum(self): - if hasattr(self.topo, 'sg_list'): - self.sg_uuid = {} - self.secgrp_fixture = {} - for sg_name in self.topo.sg_list: - result = True - msg = [] - self.logger.info("Setup step: Creating Security Group") - self.secgrp_fixture[sg_name] = self.useFixture( - SecurityGroupFixture( - inputs=self.inputs, - connections=self.project_connections, - domain_name=self.topo.domain, - project_name=self.topo.project, - secgrp_name=sg_name, - secgrp_entries=self.topo.sg_rules[sg_name],option='neutron')) - self.sg_uuid[sg_name] = self.secgrp_fixture[sg_name].secgrp_id - if self.skip_verify == 'no': - ret, 
msg = self.secgrp_fixture[sg_name].verify_on_setup() - assert ret, "Verifications for security group is :%s failed and its error message: %s" % ( - sg_name, msg) - return self -# end of create_sg_quantum - -def create_sg_contrail(self): - if hasattr(self.topo, 'sg_list'): - self.sg_uuid = {} - self.secgrp_fixture = {} - for sg_name in self.topo.sg_list: - result = True - msg = [] - self.logger.info("Setup step: Creating Security Group") - self.secgrp_fixture[sg_name] = self.useFixture( - SecurityGroupFixture( - inputs=self.inputs, - connections=self.project_connections, - domain_name=self.topo.domain, - project_name=self.topo.project, - secgrp_name=sg_name, - secgrp_entries=self.topo.sg_rules[sg_name],option='contrail')) - self.sg_uuid[sg_name] = self.secgrp_fixture[sg_name].secgrp_id - if self.skip_verify == 'no': - ret, msg = self.secgrp_fixture[sg_name].verify_on_setup() - assert ret, "Verifications for security group is :%s failed and its error message: %s" % ( - sg_name, msg) - return self -# end of create_sg_contrail - - -def createPolicy(self, option='openstack'): - if option == 'openstack' or self.inputs.orchestrator == 'vcenter': - createPolicyFixtures(self) - elif option == 'contrail': - createPolicyContrail(self) - else: - self.logger.error("invalid config option %s" % option) - return self -# end createPolicy - - -def createPolicyFixtures(self, option='openstack'): - self.logger.info("Setup step: Creating Policies") - self.policy_fixt = {} - self.conf_policy_objs = {} - d = [p for p in self.topo.policy_list] - to_be_created_pol = (p for p in d if d) - for policy_name in to_be_created_pol: - self.policy_fixt[policy_name] = self.useFixture( - PolicyFixture(policy_name=policy_name, - rules_list=self.topo.rules[policy_name], - inputs=self.project_inputs, - connections=self.project_connections)) - if self.skip_verify == 'no': - ret = self.policy_fixt[policy_name].verify_on_setup() - if ret['result'] == False: - self.logger.error( - "Policy %s verification 
failed after setup" % policy_name) - assert ret['result'], ret['msg'] - for vn in self.topo.vnet_list: - self.conf_policy_objs[vn] = [] - for policy_name in self.topo.vn_policy[vn]: - self.conf_policy_objs[vn].append( - self.policy_fixt[policy_name].policy_obj) - return self -# end createPolicyOpenstack - - -def createPolicyContrail(self): - self.logger.info("Setup step: Creating Policies") - self.policy_fixt = {} - self.conf_policy_objs = {} - d = [p for p in self.topo.policy_list] - to_be_created_pol = (p for p in d if d) - for policy_name in to_be_created_pol: - self.policy_fixt[policy_name] = self.useFixture( - NetworkPolicyTestFixtureGen( - self.vnc_lib, - network_policy_name=policy_name, - parent_fixt=self.project_parent_fixt, - network_policy_entries=PolicyEntriesType( - self.topo.rules[policy_name]))) - policy_read = self.vnc_lib.network_policy_read( - id=str(self.policy_fixt[policy_name]._obj.uuid)) - if not policy_read: - self.logger.error("Policy:%s read on API server failed" % - policy_name) - assert False, "Policy %s read failed on API server" % policy_name - for vn in self.topo.vnet_list: - self.conf_policy_objs[vn] = [] - for policy_name in self.topo.vn_policy[vn]: - self.conf_policy_objs[vn].append( - self.policy_fixt[policy_name]._obj) - return self -# end createPolicyContrail - - -def createIPAM(self, option='openstack'): - self.logger.info("Setup step: Creating IPAM's") - track_created_ipam = [] - self.ipam_fixture = {} - self.conf_ipam_objs = {} - default_ipam_name = self.topo.project + "-default-ipam" - if 'vn_ipams' in dir(self.topo): - print "topology has IPAM specified, need to create for each VN" - for vn in self.topo.vnet_list: - self.conf_ipam_objs[vn] = [] - if vn in self.topo.vn_ipams: - ipam_name = self.topo.vn_ipams[vn] - else: - ipam_name = default_ipam_name - if ipam_name in track_created_ipam: - if option == 'contrail': - self.conf_ipam_objs[vn] = self.ipam_fixture[ipam_name].obj - else: - self.conf_ipam_objs[vn] = 
self.ipam_fixture[ - ipam_name].fq_name - continue - print "creating IPAM %s" % ipam_name - self.ipam_fixture[ipam_name] = self.useFixture( - IPAMFixture( - project_obj=self.project_fixture[ - self.topo.project], - name=ipam_name)) - if self.skip_verify == 'no': - assert self.ipam_fixture[ - ipam_name].verify_on_setup(), "verification of IPAM:%s failed" % ipam_name - track_created_ipam.append(ipam_name) - if option == 'contrail': - self.conf_ipam_objs[vn] = self.ipam_fixture[ipam_name].obj - else: - self.conf_ipam_objs[vn] = self.ipam_fixture[ipam_name].fq_name - else: - ipam_name = default_ipam_name - print "creating project default IPAM %s" % ipam_name - self.ipam_fixture[ipam_name] = self.useFixture( - IPAMFixture( - project_obj=self.project_fixture[ - self.topo.project], - name=ipam_name)) - if self.skip_verify == 'no': - assert self.ipam_fixture[ - ipam_name].verify_on_setup(), "verification of IPAM:%s failed" % ipam_name - for vn in self.topo.vnet_list: - if option == 'contrail': - self.conf_ipam_objs[vn] = self.ipam_fixture[ipam_name].obj - else: - self.conf_ipam_objs[vn] = self.ipam_fixture[ipam_name].fq_name - return self -# end createIPAM - - -def createVN_Policy(self, option='openstack'): - if option == 'openstack': - createVN_Policy_OpenStack(self) - elif option == 'contrail': - createVN_Policy_Contrail(self) - else: - self.logger.error("invalid config option %s" % option) - return self -# end createVN_Policy - - -def createVN(self, option='openstack'): - if option == 'openstack' or self.inputs.orchestrator == 'vcenter': - createVNOrch(self) - elif option == 'contrail': - createVNContrail(self) - else: - self.logger.error("invalid config option %s" % option) - return self -# end createVN - - -def createVNOrch(self): - self.logger.info("Setup step: Creating VN's") - self.vn_fixture = {} - self.vn_of_cn = {} - for vn in self.topo.vnet_list: - router_asn = None - rt_number = None - if hasattr(self.topo, 'vn_params'): - if self.topo.vn_params.has_key(vn): - 
if self.topo.vn_params[vn].has_key('router_asn'): - router_asn = self.topo.vn_params[vn]['router_asn'] - if self.topo.vn_params[vn].has_key('rt_number'): - rt_number = self.topo.vn_params[vn]['rt_number'] - - self.vn_fixture[vn] = self.useFixture( - VNFixture(project_name=self.topo.project, - connections=self.project_connections, vn_name=vn, - inputs=self.project_inputs, subnets=self.topo.vn_nets[vn], - ipam_fq_name=self.conf_ipam_objs[vn], router_asn=router_asn, - rt_number=rt_number)) - if self.skip_verify == 'no': - ret = self.vn_fixture[vn].verify_on_setup() - assert ret, "One or more verifications for VN:%s failed" % vn - # Initialize compute's VN list - for cn in self.inputs.compute_names: - self.vn_of_cn[self.inputs.compute_info[cn]] = [] - return self -# end create_VN_only_OpenStack - - -def attachPolicytoVN(self, option='openstack'): - self.vn_policy_fixture = {} - for vn in self.topo.vnet_list: - self.vn_policy_fixture[vn] = self.useFixture( - VN_Policy_Fixture( - connections=self.project_connections, - vn_name=vn, - vn_obj=self.vn_fixture, - vn_policys=self.topo.vn_policy[vn], - project_name=self.topo.project, - options=option, - policy_obj=self.conf_policy_objs)) - if self.skip_verify == 'no': - ret = self.vn_fixture[vn].verify_on_setup() - assert ret, "One or more verifications for VN:%s failed" % vn - for policy_name in self.topo.vn_policy[vn]: - ret = self.policy_fixt[policy_name].verify_on_setup() - if ret['result'] == False: - self.logger.error( - "Policy %s verification failed after setup" % - policy_name) - assert ret['result'], ret['msg'] - return self -# end attachPolicytoVN - - -def attachPolicytoVN(self, option='contrail'): - self.vn_policy_fixture = {} - for vn in self.topo.vnet_list: - self.vn_policy_fixture[vn] = self.useFixture( - VN_Policy_Fixture( - connections=self.project_connections, - vn_name=vn, - options=option, - policy_obj=self.conf_policy_objs, - vn_obj=self.vn_fixture, - vn_policys=self.topo.vn_policy[vn], - 
project_name=self.topo.project)) - return self -# end attachPolicytoVN - - -def createVNContrail(self): - self.logger.info("Setup step: Creating VN's") - self.vn_fixture = {} - self.vn_of_cn = {} - - for vn in self.topo.vnet_list: - router_asn = None - rt_number = None - rt_obj = None - if hasattr(self.topo, 'vn_params'): - if self.topo.vn_params.has_key(vn): - if self.topo.vn_params[vn].has_key('router_asn'): - router_asn = self.topo.vn_params[vn]['router_asn'] - if self.topo.vn_params[vn].has_key('rt_number'): - rt_number = self.topo.vn_params[vn]['rt_number'] - - rt_val = "target:%s:%s" % (router_asn, rt_number) - rt_obj = RouteTargetList([rt_val]) - - for ipam_info in self.topo.vn_nets[vn]: - ipam_info = list(ipam_info) - ipam_info[0] = self.conf_ipam_objs[vn] - ipam_info = tuple(ipam_info) - self.vn_fixture[vn] = self.useFixture( - VirtualNetworkTestFixtureGen( - self.vnc_lib, - virtual_network_name=vn, - parent_fixt=self.project_parent_fixt, - id_perms=IdPermsType( - enable=True), - network_ipam_ref_infos=[ipam_info], - route_target_list=rt_obj)) - vn_read = self.vnc_lib.virtual_network_read( - id=str(self.vn_fixture[vn]._obj.uuid)) - if vn_read: - self.logger.info("VN created successfully %s " % (vn)) - if not vn_read: - self.logger.error("VN %s read on API server failed" % vn) - assert False, "VN:%s read failed on API server" % vn - # Initialize compute's VN list - for cn in self.inputs.compute_names: - self.vn_of_cn[self.inputs.compute_info[cn]] = [] - return self -# end createVNContrail - - -def createVN_Policy_OpenStack(self): - self.logger.info("Setup step: Creating VN's") - self.vn_fixture = {} - self.vn_of_cn = {} - for vn in self.topo.vnet_list: - self.vn_fixture[vn] = self.useFixture( - VNFixture( - project_name=self.topo.project, - connections=self.project_connections, - vn_name=vn, - inputs=self.project_inputs, - subnets=self.topo.vn_nets[vn], - policy_objs=self.conf_policy_objs[vn], - ipam_fq_name=self.conf_ipam_objs[vn])) - if self.skip_verify 
== 'no': - ret = self.vn_fixture[vn].verify_on_setup() - assert ret, "One or more verifications for VN:%s failed" % vn - # Initialize compute's VN list - for cn in self.inputs.compute_names: - self.vn_of_cn[self.inputs.compute_info[cn]] = [] - return self -# end createVN_Policy_OpenStack - - -def createVN_Policy_Contrail(self): - self.logger.info("Setup step: Creating VN's") - self.vn_fixture = {} - self.vn_of_cn = {} - for vn in self.topo.vnet_list: - ref_tuple = [] - for conf_policy in self.conf_policy_objs[vn]: - ref_tuple.append( - (conf_policy, - VirtualNetworkPolicyType( - sequence=SequenceType( - major=0, - minor=0)))) - for ipam_info in self.topo.vn_nets[vn]: - ipam_info = list(ipam_info) - ipam_info[0] = self.conf_ipam_objs[vn] - ipam_info = tuple(ipam_info) - self.vn_fixture[vn] = self.useFixture( - VirtualNetworkTestFixtureGen( - self.vnc_lib, - virtual_network_name=vn, - parent_fixt=self.project_parent_fixt, - id_perms=IdPermsType( - enable=True), - network_policy_ref_infos=ref_tuple, - network_ipam_ref_infos=[ipam_info])) - vn_read = self.vnc_lib.virtual_network_read( - id=str(self.vn_fixture[vn]._obj.uuid)) - if not vn_read: - self.logger.error("VN %s read on API server failed" % vn) - assert False, "VN:%s read failed on API server" % vn - # Initialize compute's VN list - for cn in self.inputs.compute_names: - self.vn_of_cn[self.inputs.compute_info[cn]] = [] - return self -# end createVN_Policy_Contrail - - -def createVMNova( - self, - option='openstack', - vms_on_single_compute=False, - VmToNodeMapping=None): - self.logger.info("Setup step: Creating VM's") - self.vm_fixture = {} - host_list = self.connections.orch.get_hosts() - vm_image_name = os.environ['ci_image'] if os.environ.has_key('ci_image') else 'ubuntu-traffic' - - for vm in self.topo.vmc_list: - sec_gp = [] - if option == 'contrail': - vn_read = self.vnc_lib.virtual_network_read( - id=str(self.vn_fixture[self.topo.vn_of_vm[vm]].getObj().uuid)) - vn_obj = self.orch.get_vn_obj_if_present( - 
vn_read.name, - project_id=self.project_fixture[ - self.topo.project].uuid) - else: - vn_obj = self.vn_fixture[self.topo.vn_of_vm[vm]].obj - if hasattr(self.topo, 'sg_of_vm'): - if self.topo.sg_of_vm.has_key(vm): - for sg in self.topo.sg_of_vm[vm]: - sec_gp.append(self.sg_uuid[sg]) - else: - pass - if vms_on_single_compute: - self.vm_fixture[vm] = self.useFixture( - VMFixture( - project_name=self.topo.project, - connections=self.project_connections, - vn_obj=vn_obj, - flavor=self.flavor, - image_name=vm_image_name, - vm_name=vm, - sg_ids=sec_gp, - node_name=host_list[0])) - else: - # If vm is pinned to a node get the node name from node IP and pass - # it on to VM creation method. - if VmToNodeMapping is not None and len(VmToNodeMapping) != 0: - IpToNodeName = self.inputs.host_data[ - VmToNodeMapping[vm]]['name'] - self.vm_fixture[vm] = self.useFixture( - VMFixture( - project_name=self.topo.project, - connections=self.project_connections, - vn_obj=vn_obj, - flavor=self.flavor, - image_name=vm_image_name, - vm_name=vm, - sg_ids=sec_gp, - node_name=IpToNodeName)) - else: - self.vm_fixture[vm] = self.useFixture( - VMFixture( - project_name=self.topo.project, - connections=self.project_connections, - vn_obj=vn_obj, - flavor=self.flavor, - image_name=vm_image_name, - sg_ids=sec_gp, - vm_name=vm)) - - # We need to retry following section and scale it up if required (for slower VM environment) - # TODO: Use @retry annotation instead - if "TEST_RETRY_FACTOR" in os.environ: - retry_factor = os.environ.get("TEST_RETRY_FACTOR") - else: - retry_factor = "1.0" - retry_count = math.floor(5 * float(retry_factor)) - - # added here 30 seconds sleep - #import time; time.sleep(30) - self.logger.info( - "Setup step: Verify VM status and install Traffic package... ") - for vm in self.topo.vmc_list: - if self.skip_verify == 'no': - # Include retry to handle time taken by less powerful computes or - # if launching more VMs... 
- retry = 0 - while True: - vm_verify_out = self.vm_fixture[vm].verify_on_setup() - retry += 1 - if vm_verify_out or retry > 2: - break - if not vm_verify_out: - m = "on compute %s - vm %s verify failed after setup" % ( - self.vm_fixture[vm].vm_node_ip, self.vm_fixture[vm].vm_name) - self.err_msg.append(m) - assert vm_verify_out, self.err_msg - else: - # Even if vm verify is set to skip, run minimum needed - # verifications.. - vm_verify_out = self.vm_fixture[vm].mini_verify_on_setup() - if not vm_verify_out: - m = "%s - mini_vm_verify in agent after setup failed" % self.vm_fixture[ - vm].vm_node_ip - self.err_msg.append(m) - assert vm_verify_out, self.err_msg - - vm_node_ip = self.inputs.host_data[ - self.orch.get_host_of_vm( - self.vm_fixture[vm].vm_obj)]['host_ip'] - self.vn_of_cn[vm_node_ip].append(self.topo.vn_of_vm[vm]) - - # In some less powerful computes, VM takes more time to come up.. including retry... - # each call to wait_till_vm_is_up inturn includes 20 retries with 5s - # sleep. 
- retry = 0 - while True: - out = self.vm_fixture[vm].wait_till_vm_is_up() - retry += 1 - if out or retry > 2: - break - if not out: - self.logger.debug('VM Console log : %s' % ( - vm_fixture[vm].get_console_output())) - assert out, "VM %s failed to come up in node %s" % (vm, vm_node_ip) - - assert self.vm_fixture[vm].wait_for_ssh_on_vm() - # Add compute's VN list to topology object based on VM creation - self.topo.__dict__['vn_of_cn'] = self.vn_of_cn - - # Provision static route if defined in topology - createStaticRouteBehindVM(self) - - return self -# end createVMNova - - -def createPublicVN(self): - if 'public_vn' in dir(self.topo): - fip_pool_name = self.inputs.fip_pool_name - fvn_name = self.topo.public_vn - fip_subnets = [self.inputs.fip_pool] - mx_rt = self.inputs.mx_rt - self.fvn_fixture = self.useFixture( - VNFixture( - project_name=self.topo.project, - connections=self.project_connections, - vn_name=fvn_name, - inputs=self.project_inputs, - subnets=fip_subnets, - router_asn=self.inputs.router_asn, - rt_number=mx_rt)) - assert self.fvn_fixture.verify_on_setup() - self.logger.info('created public VN:%s' % fvn_name) - self.fip_fixture = self.useFixture( - FloatingIPFixture( - project_name=self.topo.project, - inputs=self.project_inputs, - connections=self.project_connections, - pool_name=fip_pool_name, - vn_id=self.fvn_fixture.vn_id, - vn_name=fvn_name)) - assert self.fip_fixture.verify_on_setup() - self.logger.info('created FIP Pool:%s under Project:%s' % - (fip_pool_name, self.topo.project)) - self.public_vn_present = True - return self -# end createPublicVN - - -def verifySystemPolicy(self): - result, err_msg = policy_test_helper.comp_rules_from_policy_to_system(self) - self.result = result - if err_msg: - self.err_msg = err_msg - else: - self.err_msg = [] - return self.result, self.err_msg - - -def verify_fip_associate_possible(self, vm_cnt): - self.cn_inspect = self.connections.cn_inspect - if not self.public_vn_present: - return False - - if 
len(self.inputs.ext_routers) >= 1: - router_name = self.inputs.ext_routers[0][0] - router_ip = self.inputs.ext_routers[0][1] - for host in self.inputs.bgp_ips: - # Verify the connection between all control nodes and MX(if - # present) - cn_bgp_entry = self.cn_inspect[host].get_cn_bgp_neigh_entry() - if isinstance(cn_bgp_entry, type(dict())): - if cn_bgp_entry['peer_address'] == router_ip: - if cn_bgp_entry['state'] != 'Established': - return False - else: - for entry in cn_bgp_entry: - if entry['peer_address'] == router_ip: - if entry['state'] != 'Established': - return False - else: - self.logger.info( - 'No MX connectivity exists for this setup, we can use normal way to pump traffic') - return False - fip_pool = IPNetwork(self.inputs.fip_pool) - if fip_pool.size <= 3: - self.logger.info( - 'FIP pool is not sufficient to allocate FIPs to all VM') - return False - if vm_cnt <= (fip_pool.size - 3): - self.logger.info('FIP pool is sufficient to allocate FIPs to all VM') - return True - else: - self.logger.info( - 'FIP pool is not sufficient to allocate FIPs to all VM') - return False -# end verify_fip_associate_possible - - -def allocateNassociateFIP(self, config_topo): - self.fip_ip_by_vm = {} - for project in self.projectList: - self.logger.info("Share public-pool with project:%s" % project) - pool_share = self.fip_fixture.assoc_project(project) - self.addCleanup(self.fip_fixture.deassoc_project, project) - for vmfixt in config_topo[project]['vm']: - if self.inputs.is_gui_based_config(): - self.fip_fixture.create_and_assoc_fip_webui( - self.fvn_fixture.vn_id, - config_topo[project]['vm'][vmfixt].vm_id) - else: - fip_id = self.fip_fixture.create_and_assoc_fip( - self.fvn_fixture.vn_id, - config_topo[project]['vm'][vmfixt].vm_id) - assert self.fip_fixture.verify_fip( - fip_id, config_topo[project]['vm'][vmfixt], self.fvn_fixture) - self.fip_ip_by_vm[vmfixt] = config_topo[project]['vm'][ - vmfixt].chk_vmi_for_fip(vn_fq_name=self.fvn_fixture.vn_fq_name) - 
self.addCleanup(self.fip_fixture.disassoc_and_delete_fip, fip_id) - return self -# end allocateNassociateFIP - - -def createStaticRouteBehindVM(self): - try: - self.topo.vm_static_route - except AttributeError: - return self - for vm_name in self.topo.vm_static_route: - vm_fixt = self.vm_fixture[vm_name] - prefix = self.topo.vm_static_route[vm_name] - vm_uuid = vm_fixt.vm_id - vm_ip = vm_fixt.vm_ip - vm_tap_intf = vm_fixt.tap_intf - vmi = vm_tap_intf[vm_fixt.vn_fq_name] - vmi_id = vmi['uuid'] - vm_route_table_name = "%s_rt" % vm_name - self.logger.info( - "Provisioning static route %s behind vm - %s in project %s." % - (prefix, vm_name, self.topo.project)) - self.vm_fixture[vm_name].provision_static_route( - prefix=prefix, - tenant_name=self.topo.project, - virtual_machine_interface_id=vmi_id, - route_table_name=vm_route_table_name, - user=self.topo.username, - password=self.topo.password) - return self -# end createStaticRouteBehindVM - -def createServiceTemplate(self): - self.st_fixture = {} - if not hasattr(self.topo, 'st_list'): - return self - - self.logger.info("Setup step: Creating Service Templates") - for st_name in self.topo.st_list: - self.st_fixture[st_name] = self.useFixture( - SvcTemplateFixture( - connections=self.project_connections, - inputs=self.project_inputs, - domain_name=self.topo.domain, - st_name=st_name, - svc_img_name=self.topo.st_params[st_name]['svc_img_name'], - svc_type=self.topo.st_params[st_name]['svc_type'], - if_list=self.topo.st_params[st_name]['if_list'], - svc_mode=self.topo.st_params[st_name]['svc_mode'], - svc_scaling=self.topo.st_params[st_name]['svc_scaling'], - flavor=self.topo.st_params[st_name]['flavor'], - ordered_interfaces=self.topo.st_params[st_name]['ordered_interfaces'])) - if self.skip_verify == 'no': - assert self.st_fixture[st_name].verify_on_setup() - return self -# end createServiceTemplate - -def checkNAddAdminRole(self): - if not ((self.topo.username == 'admin' or self.topo.username == None) and 
(self.topo.project == 'admin')): - self.logger.info("Adding user 'admin' to non-default tenant %s with admin role" %self.topo.project) - self.user_fixture.add_user_to_tenant(self.topo.project, 'admin', 'admin') - return self -#end checkNAddAdminRole - -def checkNAddAdminRole(self): - if not ( - (self.topo.username == 'admin' or self.topo.username is None) and ( - self.topo.project == 'admin')): - self.logger.info( - "Adding user 'admin' to non-default tenant %s with admin role" % - self.topo.project) - self.user_fixture.add_user_to_tenant( - self.topo.project, - 'admin', - 'admin') - return self -# end checkNAddAdminRole - - -def createServiceInstance(self): - self.si_fixture = {} - if not hasattr(self.topo, 'si_list'): - return self - - self.logger.info("Setup step: Creating Service Instances") - # For SVC case to work in non-admin tenant, link "admin" user - checkNAddAdminRole(self) - for si_name in self.topo.si_list: - self.si_fixture[si_name] = self.useFixture( - SvcInstanceFixture( - connections=self.project_connections, - inputs=self.project_inputs, - domain_name=self.topo.domain, - project_name=self.topo.project, - si_name=si_name, - svc_template=self.st_fixture[ - self.topo.si_params[si_name]['svc_template']].st_obj, - if_list=self.topo.si_params[si_name]['if_list'], - left_vn_name=self.topo.si_params[si_name]['left_vn'])) - - self.logger.info("Setup step: Verify Service Instances") - for si_name in self.topo.si_list: - # Irrespective of verify flag, run minimum verification to make sure SI is up.. - # Include retry to handle time taken by less powerful computes .. 
- retry = 0 - while True: - ret, msg = self.si_fixture[si_name].verify_si() - retry += 1 - if ret or retry > 2: - break - # In case of failure, set verify flag to get more data, even if global - # verify flag is diabled - if not ret: - self.skip_verify = 'no' - - if self.skip_verify == 'no': - ret, msg = self.si_fixture[si_name].verify_on_setup(report=False) - - if not ret: - m = "service instance %s verify failed after setup with error %s" % ( - si_name, msg) - self.err_msg.append(m) - assert ret, self.err_msg - - return self -# end createServiceInstance - - -def allocNassocFIP(self): - # Need Floating VN fixture in current project and destination VM fixtures from all projects - # topology rep: self.fvn_vm_map = {'project1': - # {'vnet1':{'project1': ['vmc2'], 'project2': ['vmc4']}}, - # {'vnet2':{'project1': ['vmc21'], 'project2': ['vmc14']}} - for vn_proj, fvn_vm_map in self.topo.fvn_vm_map.iteritems(): - for vn_name, map in fvn_vm_map.iteritems(): - # {'project1': ['vmc2', 'vmc3'], 'project2': ['vmc4']}, - for vm_proj, vm_list in map.iteritems(): - for index in range(len(vm_list)): - # Get VM fixture from config_topo - vm_fixture = self.config_topo[ - vm_proj]['vm'][vm_list[index]] - self.vn_fixture = self.config_topo[vn_proj]['vn'] - assigned_fip = vm_fixture.chk_vmi_for_fip( - vn_fq_name=self.vn_fixture[vn_name].vn_fq_name) - self.logger.info( - 'Allocating and associating FIP from %s VN pool in project %s to %s VM in project %s' % - (vn_name, vn_proj, vm_list[index], vm_proj)) - if self.inputs.is_gui_based_config(): - self.fip_fixture_dict[vn_name].create_and_assoc_fip_webui( - self.vn_fixture[vn_name].vn_id, - self.vm_fixture[self.topo.fvn_vm_map[vn_name][index]].vm_id, - self.topo.fvn_vm_map[vn_name]) - self.addCleanup( - self.fip_fixture_dict[vn_name].disassoc_and_delete_fip_webui, - self.vm_fixture[self.topo.fvn_vm_map[vn_name][index]].vm_id) - else: - fip_id = self.fip_fixture_dict[vn_name].create_and_assoc_fip( - self.vn_fixture[vn_name].vn_id, - 
vm_fixture.vm_id) - if fip_id: - assert self.fip_fixture_dict[vn_name].verify_fip( - fip_id, vm_fixture, self.vn_fixture[vn_name]) - self.logger.info('alloc&assoc FIP %s' % (fip_id)) - self.addCleanup( - self.fip_fixture_dict[vn_name].deassoc_project, - vn_proj) - self.addCleanup( - self.fip_fixture_dict[vn_name].disassoc_and_delete_fip, - fip_id) - else: - # To handle repeat test runs without config cleanup, in which case, new FIP is assigned to VMI every time causing pool exhaustion - # Need to revisit check to skip assigning FIP if VMI - # already has a FIP from FIP-VN's - self.logger.info( - 'Ignoring create_and_assoc_fip error as it can happen due to FIP pool exhaustion..') - - return self -# end allocNassocFIP - - - -def createAllocateAssociateVnFIPPools(self): - if 'fvn_vm_map' in dir(self.topo): - # topology rep: self.fip_pools= {'project1': {'p1-vn1-pool1': - # {'host_vn': 'vnet1', 'target_projects': ['project1', 'project2']}}, - for fip_proj, fip_info in self.topo.fip_pools.iteritems(): - for fip_pool_name, info in fip_info.iteritems(): - vn_name = info['host_vn'] - self.vn_fixture = self.config_topo[fip_proj]['vn'] - self.fip_fixture_dict[vn_name] = self.useFixture( - FloatingIPFixture( - project_name=fip_proj, - inputs=self.inputs, - connections=self.connections, - pool_name=fip_pool_name, - vn_id=self.vn_fixture[vn_name].vn_id)) - assert self.fip_fixture_dict[vn_name].verify_on_setup() - self.logger.info( - 'created FIP Pool:%s in Virtual Network:%s under Project:%s' % - (fip_pool_name, self.fip_fixture_dict[vn_name].pub_vn_name, fip_proj)) - self.config_topo[fip_proj]['fip'][3] = True - self.config_topo[fip_proj]['fip'][4] = self.fip_fixture_dict - self.fvn_vm_map = True - allocNassocFIP(self) - return self -# end createAllocateAssociateVnFIPPools - -if __name__ == '__main__': - ''' Unit test to invoke sdn topo setup utils.. 
''' - -# end __main__ diff --git a/tcutils/topo/ui_topo_steps.py b/tcutils/topo/ui_topo_steps.py deleted file mode 100644 index c869f6ba0..000000000 --- a/tcutils/topo/ui_topo_steps.py +++ /dev/null @@ -1,118 +0,0 @@ -''' This module provides utils for setting up sdn topology given the ui topo inputs''' -import os -import copy -import fixtures -import topo_steps -from common.contrail_test_init import ContrailTestInit -from common.connections import ContrailConnections -from contrail_fixtures import * -try: - from webui_test import * -except ImportError: - pass - - -def createPort(self, option='contrail'): - if not hasattr(self.topo, 'port_list'): - self.logger.info("No port configs found in topo file") - return True - result = True - self.logger.info("Setup step: Creating port ") - for port in self.topo.port_list: - mac = self.topo.port_params[port]['mac'] - net = self.topo.port_params[port]['net'] - fixed_ip = self.topo.port_params[port]['fixed_ip'] - device_owner = self.topo.port_params[port]['device_owner'] - sg = self.topo.port_params[port]['sg'] - fip = self.topo.port_params[port]['fip'] - subnet = self.topo.port_params[port]['subnet'] - state = self.topo.port_params[port]['state'] - port_name = self.topo.port_params[port]['port_name'] - if not self.webui.create_port( - net, - subnet, - mac, - state, - port_name, - fixed_ip, - fip, - sg, - device_owner): - result = result and False - return result -# end createPort - - -def createRouter(self, option='contrail'): - if not hasattr(self.topo, 'router_list'): - self.logger.info("No router configs found in topo file") - return True - result = True - self.logger.info("Setup step: Creating Router") - for router in self.topo.router_list: - router_name = router - state = self.topo.router_list[router]['state'] - gateway = self.topo.router_list[router]['gateway'] - networks = self.topo.router_list[router]['networks'] - snat = self.topo.router_list[router]['snat'] - if not self.webui.create_router( - router_name, - 
networks, - state, - gateway, - snat): - result = result and False - return result - # end createPort - - -def createDnsServer(self, option='contrail'): - if not hasattr(self.topo, 'dns_server_list'): - self.logger.info("No dns server configs found in topo file") - return True - result = True - self.logger.info("Setup step: Creating DNS Server") - for dserver in self.topo.dns_server_list: - server_name = dserver - domain_name = self.topo.dns_server_params[dserver]['domain_name'] - rr_order = self.topo.dns_server_params[dserver]['rr_order'] - fip_record = self.topo.dns_server_params[dserver]['fip_record'] - ipam_list = self.topo.dns_server_params[dserver]['ipam_list'] - dns_forwarder = self.topo.dns_server_params[dserver]['dns_forwarder'] - ttl = self.topo.dns_server_params[dserver]['ttl'] - if not self.webui.create_dns_server( - server_name, - domain_name, - rr_order, - fip_record, - ipam_list, - ttl, - dns_forwarder): - result = result and False - return result -# end createDnsServer - - -def createDnsRecord(self, option='contrail'): - if not hasattr(self.topo, 'dns_record_list'): - self.logger.info("No DNS record configs found in topo file") - return True - result = True - self.logger.info("Setup step: Creating DNS Record") - for dns_record in self.topo.dns_record_list: - host_name = self.topo.dns_record_params[dns_record]['host_name'] - server_name = self.topo.dns_record_params[dns_record]['server_name'] - ip_address = self.topo.dns_record_params[dns_record]['ip_address'] - record_type = self.topo.dns_record_params[dns_record]['type'] - dns_class = self.topo.dns_record_params[dns_record]['dns_class'] - ttl = self.topo.dns_record_params[dns_record]['ttl'] - if not self.webui.create_dns_record( - server_name, - host_name, - ip_address, - record_type, - dns_class, - ttl): - result = result and False - return result -# end createDnsRecord diff --git a/tcutils/traffic_utils/base_traffic.py b/tcutils/traffic_utils/base_traffic.py deleted file mode 100644 index 
db1000018..000000000 --- a/tcutils/traffic_utils/base_traffic.py +++ /dev/null @@ -1,36 +0,0 @@ -# general traffic class to use different traffic tools to trigger traffic based on input tool/utils -# if no tool is passed, then netcat traditional is used for tcp/udp and -# scapy is used for icmp - -import os -import sys -sys.path.append(os.path.realpath('tcutils/traffic_utils')) -from time import sleep - -NETCAT = 'netcat' -SCAPY = 'scapy' -TCP = 'tcp' -UDP = 'udp' - - -class BaseTraffic(): - - @staticmethod - def factory(tool=None, proto=None): - - if tool and not (tool == NETCAT or tool == SCAPY): - # tool not supported, return False - return False - - if not tool and (proto == TCP or proto == UDP): - tool = NETCAT - if not tool and not (proto == TCP or proto == UDP): - tool = SCAPY - - if tool == NETCAT: - from netcat_traffic import Netcat - return Netcat() - elif tool == SCAPY: - from scapy_traffic import Scapy - return Scapy() - diff --git a/tcutils/traffic_utils/netcat_traffic.py b/tcutils/traffic_utils/netcat_traffic.py deleted file mode 100644 index 2bead8e80..000000000 --- a/tcutils/traffic_utils/netcat_traffic.py +++ /dev/null @@ -1,177 +0,0 @@ -# traffic generator using nc.traditional, supports only tcp and udp protocol - -from tcutils.util import retry -from base_traffic import * -from tcutils.util import get_random_name -default_data = '*****This is default data.*****' -result_file = '/tmp/nc' - -class Netcat(BaseTraffic): - - def __init__(self): - - self.sender = None - self.receiver = None - self.sent = None - self.recv = None - self.result_file = result_file + '_' + get_random_name() + '.result' - - def start( - self, - sender_vm_fix, - receiver_vm_fix, - proto, - sport, - dport, - pkt_count=1): - - self.sender_vm_fix = sender_vm_fix - self.receiver_vm_fix = receiver_vm_fix - self.proto = proto - self.sport = sport - self.dport = dport - self.inputs = sender_vm_fix.inputs - self.logger = self.inputs.logger - if pkt_count: - self.pkt_count = pkt_count 
- else: - self.pkt_count = 1 - - result, pid_recv = self.start_nc_receiver() - if not result: - self.logger.error("netcat could not start on receiver") - return False - sleep(1) - result, sent = self.start_nc_sender() - if not result: - self.logger.error("netcat could not start on sender") - return False - - self.sent = sent - self.receiver = pid_recv - return True - - - def stop(self): - - if self.receiver: - cmd = 'kill -s SIGINT %s' % self.receiver - output = self.receiver_vm_fix.run_cmd_on_vm(cmds=[cmd], as_sudo=True) - self.logger.debug("Result of killing netcat on VM: %s" % output) - self.receiver = None - - return self.get_packet_count() - - - def get_packet_count(self): - - sent, self.recv = self.get_packet_count_nc(self.receiver_vm_fix) - - self.logger.info("Sent : %s, Received: %s" % (self.sent, self.recv)) - return (self.sent, self.recv) - - def get_packet_count_nc(self, vm_fix): - - cmd = 'cat %s' % self.result_file - output = vm_fix.run_cmd_on_vm(cmds=[cmd], as_sudo=True) - self.logger.debug("output for count: %s" % output) - - if 'rcvd' in output[cmd]: - recv = output[cmd].split('rcvd ')[1].split('\r')[0] - else: - recv = 0 - - if 'sent' in output[cmd]: - sent = output[cmd].split('sent ')[1].split(',')[0] - else: - sent = 0 - - return (int(sent), int(recv)) - - - @retry(delay=3, tries=3) - def start_nc_receiver(self): - - pid_recv = None - result = False - if self.proto == 'udp': - cmd = 'nc.traditional -l -s %s -p %s -u -vv 2>%s 1>%s' % ( - self.receiver_vm_fix.vm_ip, self.dport, self.result_file, self.result_file) - elif self.proto == 'tcp': - cmd = 'nc.traditional -l -s %s -p %s -vv 2>%s 1>%s' % ( - self.receiver_vm_fix.vm_ip, self.dport, self.result_file, self.result_file) - output = self.receiver_vm_fix.run_cmd_on_vm( - cmds=[cmd], - as_sudo=True, - as_daemon=True) - self.logger.debug("output for starting nc on recvr: %s" % output[cmd]) - - cmd = 'pidof nc.traditional' - output = self.receiver_vm_fix.run_cmd_on_vm(cmds=[cmd], as_sudo=True) - 
self.logger.debug("output for cmd %s: %s" % (cmd, output[cmd])) - if 'received nonzero return code 1 while executing' in output[cmd]: - self.logger.warn( - "nc.traditional could not start properly on receiver, retrying after 3 second") - result = False - return result, pid_recv - - if '\r\n' in output[cmd]: - pid_recv = output[cmd].split('\r\n')[1].split('\r')[0] - result = True - elif '\r' in output[cmd]: - pid_recv = output[cmd].split('\r')[0] - result = True - else: - result = False - - return result, pid_recv - - - @retry(delay=3, tries=3) - def start_nc_sender( - self, - data=default_data): - - pid_sender = None - sent = 0 - result = False - - if self.proto == 'udp': - cmd1 = 'echo -e "%s" | nc.traditional %s %s -s %s -p %s -u -vv 2>%s 1>%s' % ( - data, self.receiver_vm_fix.vm_ip, self.dport, self.sender_vm_fix.vm_ip, self.sport, self.result_file, self.result_file) - elif self.proto == 'tcp': - cmd1 = 'echo -e "%s" | nc.traditional %s %s -s %s -p %s -vv 2>%s 1>%s' % ( - data, self.receiver_vm_fix.vm_ip, self.dport, self.sender_vm_fix.vm_ip, self.sport, self.result_file, self.result_file) - - for i in xrange(self.pkt_count): - output = self.sender_vm_fix.run_cmd_on_vm( - cmds=[cmd1], - as_sudo=True, - as_daemon=True) - sleep(0.5) - cmd = 'pidof nc.traditional' - output = self.sender_vm_fix.run_cmd_on_vm(cmds=[cmd], as_sudo=True) - self.logger.debug("output for cmd %s: %s" % (cmd, output[cmd])) - if 'received nonzero return code 1 while executing' in output[cmd]: - self.logger.warn( - "nc.traditional could not start properly on sender, retrying after 3 seconds") - result = False - return result, sent - - if '\r\n' in output[cmd]: - pid_sender = output[cmd].split('\r\n')[1].split('\r')[0] - result = True - elif '\r' in output[cmd]: - pid_sender = output[cmd].split('\r')[0] - result = True - else: - result = False - if result: - cmd = 'kill -s SIGINT %s' % pid_sender - output = self.sender_vm_fix.run_cmd_on_vm(cmds=[cmd], as_sudo=True) - self.logger.debug("output 
to kill on sender : %s" % output) - sent1, recv1 = self.get_packet_count_nc(self.sender_vm_fix) - sent = sent + sent1 - sleep(0.5) - - return result, sent diff --git a/tcutils/traffic_utils/pktgen_traffic.py b/tcutils/traffic_utils/pktgen_traffic.py deleted file mode 100644 index f3ff286fe..000000000 --- a/tcutils/traffic_utils/pktgen_traffic.py +++ /dev/null @@ -1,51 +0,0 @@ -#utils to start and stop traffic on VM -import vm_test -from util import retry -from tcutils.commands import ssh, execute_cmd, execute_cmd_out - - -def start_traffic_pktgen( - vm_fix, - src_min_ip='', - src_max_ip='', - dest_ip='', - dest_min_port='', - dest_max_port=''): - """ This routine is for generation of UDP flows using pktgen. Only UDP packets are generated using this routine. - """ - vm_fix.logger.info("Sending traffic...") - try: - cmd = '~/pktgen_new.sh %s %s %s %s %s' % (src_min_ip, - src_max_ip, - dest_ip, - dest_min_port, - dest_max_port) - vm_fix.run_cmd_on_vm(cmds=[cmd], as_sudo=True) - except Exception as e: - vm_fix.logger.exception("Got exception at start_traffic as %s" % (e)) -# end start_traffic - - -def stop_traffic_pktgen(vm_fix): - vm_fix.logger.info("Stopping traffic...") - try: - cmd = 'killall ~/pktgen_new.sh' - vm_fix.run_cmd_on_vm(cmds=[cmd], as_sudo=True) - except Exception as e: - vm_fix.logger.exception("Got exception at stop_traffic as %s" % (e)) - - -def start_traffic_pktgen_between_vm( - sr_vm_fix, - dst_vm_fix, - dest_min_port=10000, - dest_max_port=10000): - """This method starts traffic between VMs using pktgen""" - - start_traffic_pktgen( - sr_vm_fix, - src_min_ip=sr_vm_fix.vm_ip, - src_max_ip=sr_vm_fix.vm_ip, - dest_ip=dst_vm_fix.vm_ip, - dest_min_port=dest_min_port, - dest_max_port=dest_max_port) diff --git a/tcutils/traffic_utils/scapy_traffic.py b/tcutils/traffic_utils/scapy_traffic.py deleted file mode 100644 index 3ee4a1da3..000000000 --- a/tcutils/traffic_utils/scapy_traffic.py +++ /dev/null @@ -1,108 +0,0 @@ -from traffic.core.stream import 
Stream -from traffic.core.helpers import Host, Sender, Receiver -from traffic.core.profile import StandardProfile,\ - ContinuousProfile -from tcutils.util import get_random_name -from base_traffic import * -sys.path.append(os.path.realpath('tcutils/pkgs/Traffic')) - -class Scapy(BaseTraffic): - - def __init__(self): - - self.sender = None - self.receiver = None - self.sent = None - self.recv = None - - def start( - self, - sender_vm, - receiver_vm, - proto, - sport, - dport, - pkt_count=None, - fip=None): - - self.sender_vm = sender_vm - self.receiver_vm = receiver_vm - self.proto = proto - self.sport = sport - self.dport = dport - self.inputs = sender_vm.inputs - self.logger = self.inputs.logger - self.pkt_count = pkt_count - self.fip = fip - - if self.fip: - stream = Stream( - protocol="ip", - sport=self.sport, - dport=self.dport, - proto=self.proto, - src=self.sender_vm.vm_ip, - dst=self.fip) - else: - stream = Stream( - protocol="ip", - sport=self.sport, - dport=self.dport, - proto=self.proto, - src=self.sender_vm.vm_ip, - dst=self.receiver_vm.vm_ip) - profile_kwargs = {'stream': stream} - if self.fip: - profile_kwargs.update({'listener': self.receiver_vm.vm_ip}) - if self.pkt_count: - profile_kwargs.update({'count': self.pkt_count}) - profile = StandardProfile(**profile_kwargs) - else: - profile = ContinuousProfile(**profile_kwargs) - - # Set VM credentials - send_node = Host(self.sender_vm.vm_node_ip, - self.sender_vm.inputs.host_data[self.sender_vm.vm_node_ip]['username'], - self.sender_vm.inputs.host_data[self.sender_vm.vm_node_ip]['password']) - recv_node = Host(self.receiver_vm.vm_node_ip, - self.sender_vm.inputs.host_data[self.receiver_vm.vm_node_ip]['username'], - self.sender_vm.inputs.host_data[self.receiver_vm.vm_node_ip]['password']) - send_host = Host(self.sender_vm.local_ip, - self.sender_vm.vm_username, self.sender_vm.vm_password) - recv_host = Host(self.receiver_vm.local_ip, - self.receiver_vm.vm_username, self.receiver_vm.vm_password) - - # 
Create send, receive helpers - random = get_random_name() - send_name = 'send' + self.proto + '_' + random - recv_name = 'recv' + self.proto + '_' + random - sender = Sender(send_name, - profile, send_node, send_host, self.logger) - receiver = Receiver(recv_name, - profile, recv_node, recv_host, self.logger) - - # start traffic - receiver.start() - sender.start() - - self.sender = sender - self.receiver = receiver - return True - - - def stop(self): - - # stop traffic - self.sender.stop() - self.receiver.stop() - - self.sent = self.sender.sent - self.recv= self.receiver.recv - - return self.get_packet_count() - - def get_packet_count(self): - - self.logger.info("Sent : %s, Received: %s" % (self.sent, self.recv)) - return (self.sent, self.recv) - diff --git a/tcutils/util.py b/tcutils/util.py deleted file mode 100644 index 5c4a268a7..000000000 --- a/tcutils/util.py +++ /dev/null @@ -1,834 +0,0 @@ -import math -import subprocess -import os -import re -import time -from collections import defaultdict, MutableMapping -from netaddr import * -import pprint -from fabric.operations import get, put, sudo -from fabric.api import run, env -import logging as log -import threading -from functools import wraps -import errno -import signal -import uuid -import string -import random -import fcntl -import socket -import struct -from fabric.exceptions import CommandTimeout -from fabric.contrib.files import exists -from fabric.context_managers import settings, hide -import ConfigParser -from testtools.testcase import TestSkipped -import functools -import testtools -from fabfile import * - -log.basicConfig(format='%(levelname)s: %(message)s', level=log.DEBUG) - -sku_dict={'2014.1':'icehouse','2014.2':'juno','2015.1':'kilo'} - -# Code borrowed from http://wiki.python.org/moin/PythonDecoratorLibrary#Retry - - -def retry(tries=5, delay=3): - '''Retries a function or method until it returns True. - delay sets the initial delay in seconds. - ''' - - # Update test retry count. 
- retry_factor = get_os_env("TEST_RETRY_FACTOR") or "1.0" - tries = math.floor(tries * float(retry_factor)) - if tries < 0: - raise ValueError("tries must be 0 or greater") - - # Update test delay interval. - delay_factor = get_os_env("TEST_DELAY_FACTOR") or "1.0" - delay = math.floor(delay * float(delay_factor)) - if delay < 0: - raise ValueError("delay must be 0 or greater") - - def deco_retry(f): - def f_retry(*args, **kwargs): - mtries, mdelay = tries, delay # make mutable - - result = f(*args, **kwargs) # first attempt - rv = result - final = False - if type(result) is tuple: - rv = result[0] - if 'final' in result: - final = True - if type(result) is dict: - rv = result['result'] - if 'final' in result.keys() and result['final']: - final = True - while mtries > 0: - if rv is True: # Done on success - if type(result) is tuple: - return (True, result[1]) - if type(result) is dict: - return {'result': True, 'msg': result['msg']} - else: - return True - if final: - break - mtries -= 1 # consume an attempt - time.sleep(mdelay) # wait... 
- - result = f(*args, **kwargs) # Try again - rv = result - if type(result) is tuple: - rv = result[0] - if type(result) is dict: - rv = result['result'] - if not rv: - if type(result) is tuple: - return (False, result[1]) - if type(result) is dict: - return {'result': False, 'msg': result['msg']} - return False # Ran out of tries :-( - else: - if type(result) is tuple: - return (True, result[1]) - if type(result) is dict: - return {'result': True, 'msg': result['msg']} - else: - return True - - return f_retry # true decorator -> decorated function - return deco_retry # @retry(arg[, ...]) -> true decorator -# end retry - - -def web_invoke(httplink): - output = None - try: - cmd = 'curl ' + httplink - output = subprocess.check_output(cmd, shell=True) - except Exception, e: - output = None - print e - return output - return output -# end web_invoke - -# function to get match count of a list of string from a string -# will return a dictionary - - -def get_string_match_count(string_list, string_where_to_search): - - print ('insdie function get_string_match_count') - list_of_string = [] - list_of_string = string_list - print string_where_to_search - d = defaultdict(int) - for i in list_of_string: - d[i] += string_where_to_search.count(i) - return d - - -def get_os_env(var): - if var in os.environ: - return os.environ.get(var) - else: - return None -# end get_os_env - - -def _escape_some_chars(text): - chars = ['"', '='] - for char in chars: - text = text.replace(char, '\\\\' + char) - return text -# end escape_chars - - -def remove_unwanted_output(text): - ''' Fab output usually has content like [ x.x.x.x ] out : - ''' - return_list = text.split('\n') - - return_list1 = [] - for line in return_list: - line_split = line.split(' out: ') - if len(line_split) == 2: - return_list1.append(line_split[1]) - else: - if ' out:' not in line: - return_list1.append(line) - real_output = '\n'.join(return_list1) - return real_output - - -def run_netconf_on_node(host_string, password, 
cmds, op_format='text'): - ''' - Run netconf from node to a VM.Usecase: vSRX or vMX or any netconf supporting device. - ''' - (username, host_ip) = host_string.split('@') - timeout = 10 - device = 'junos' - hostkey_verify = "False" - # Sometimes, during bootup, there could be some intermittent conn. issue - tries = 1 - output = None - copy_fabfile_to_agent() - while tries > 0: - if 'show' in cmds: - cmd_str = 'fab -u %s -p "%s" -H %s -D -w --hide status,user,running get_via_netconf:\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"' % ( - username, password, host_ip, cmds, timeout, device, hostkey_verify, op_format) - else: - cmd_str = 'fab -u %s -p "%s" -H %s -D -w --hide status,user,running config_via_netconf:\"%s\",\"%s\",\"%s\",\"%s\"' % ( - username, password, host_ip, cmds, timeout, device, hostkey_verify) - print cmd_str - output = run(cmd_str) - if ((output) and ('Fatal error' in output)): - tries -= 1 - time.sleep(5) - else: - break - # end while - return output -# end run_netconf_on_node - - -def copy_fabfile_to_agent(): - src = 'tcutils/fabfile.py' - dst = '~/fabfile.py' - if 'fab_copied_to_hosts' not in env.keys(): - env.fab_copied_to_hosts = list() - if not env.host_string in env.fab_copied_to_hosts: - if not exists(dst): - put(src, dst) - env.fab_copied_to_hosts.append(env.host_string) - -def run_fab_cmd_on_node(host_string, password, cmd, as_sudo=False, timeout=120, as_daemon=False, raw=False): - ''' - Run fab command on a node. 
Usecase : as part of script running on cfgm node, can run a cmd on VM from compute node - - If raw is True, will return the fab _AttributeString object itself without removing any unwanted output - ''' - cmd = _escape_some_chars(cmd) - (username, host_ip) = host_string.split('@') - copy_fabfile_to_agent() - cmd_str = 'fab -u %s -p "%s" -H %s -D -w --hide status,user,running ' % ( - username, password, host_ip) - if as_daemon: - cmd_str += '--no-pty ' - cmd = 'nohup ' + cmd + ' &' - if username == 'root': - as_sudo = False - elif username == 'cirros': - cmd_str += ' -s "/bin/sh -l -c" ' - if as_sudo: - cmd_str += 'sudo_command:\"%s\"' % (cmd) - else: - cmd_str += 'command:\"%s\"' % (cmd) - # Sometimes, during bootup, there could be some intermittent conn. issue - print cmd_str - tries = 1 - output = None - while tries > 0: - if timeout: - try: - output = sudo(cmd_str, timeout=timeout) - except CommandTimeout: - return output - else: - output = run(cmd_str) - if ((output) and ('Fatal error' in output)): - tries -= 1 - time.sleep(5) - else: - break - # end while - - if not raw: - real_output = remove_unwanted_output(output) - else: - real_output = output - return real_output -# end run_fab_cmd_on_node - - -def fab_put_file_to_vm(host_string, password, src, dest): - copy_fabfile_to_agent() - (username, host_ip) = host_string.split('@') - cmd_str = 'fab -u %s -p "%s" -H %s -D -w --hide status,user,running fput:\"%s\",\"%s\"' % ( - username, password, host_ip, src, dest) - log.debug(cmd_str) - output = run(cmd_str) - real_output = remove_unwanted_output(output) -# end fab_put_file_to_vm - - -def fab_check_ssh(host_string, password): - copy_fabfile_to_agent() - (username, host_ip) = host_string.split('@') - cmd_str = 'fab -u %s -p "%s" -H %s -D -w --hide status,user,running verify_socket_connection:22' % ( - username, password, host_ip) - log.debug(cmd_str) - output = run(cmd_str) - if 'True' in output: - return True - return False -# end fab_check_ssh - - -def 
retry_for_value(tries=5, delay=3): - '''Retries a function or method until it returns True. - delay sets the initial delay in seconds. - ''' - tries = tries * 1.0 - tries = math.floor(tries) - if tries < 0: - raise ValueError("tries must be 0 or greater") - - if delay <= 0: - raise ValueError("delay must be greater than 0") - - def deco_retry(f): - def f_retry(*args, **kwargs): - mtries, mdelay = tries, delay # make mutable - result = None - while (mtries > 0): - result = f(*args, **kwargs) # first attempt - if result: - return result - else: - mtries -= 1 # consume an attempt - time.sleep(mdelay) - return result - return f_retry # true decorator -> decorated function - return deco_retry # @retry(arg[, ...]) -> true decorator - -# end retry_for_value - - -class threadsafe_iterator: - - """Takes an iterator/generator and makes it thread-safe by - serializing call to the `next` method of given iterator/generator. - """ - - def __init__(self, it): - self.it = it - self.lock = threading.Lock() - - def __iter__(self): - return self - - def next(self): - with self.lock: - return self.it.next() -# end threadsafe_iterator - - -def threadsafe_generator(f): - """A decorator that takes a generator function and makes it thread-safe. 
- """ - def g(*a, **kw): - return threadsafe_iterator(f(*a, **kw)) - return g -# end thread_safe generator - - -class TimeoutError(Exception): - pass - - -def timeout(seconds=10, error_message=os.strerror(errno.ETIME)): - '''Takes a I/O function and raises time out exception if function is stuck for specified time''' - def decorator(func): - def _handle_timeout(signum, frame): - raise TimeoutError(error_message) - - def wrapper(*args, **kwargs): - signal.signal(signal.SIGALRM, _handle_timeout) - signal.alarm(seconds) - try: - result = func(*args, **kwargs) - finally: - signal.alarm(0) - return result - - return wraps(func)(wrapper) - - return decorator -# End timeout - - -def get_dashed_uuid(id): - ''' Return a UUID with dashes ''' - return(str(uuid.UUID(id))) - - -def get_plain_uuid(id): - ''' Remove the dashes in a uuid ''' - return id.replace('-', '') - - -def get_random_string(size=8, chars=string.digits): - return ''.join(random.choice(chars) for _ in range(size)) - - -def get_random_name(prefix=None): - if not prefix: - prefix = 'random' - return prefix + '-' + get_random_string() - - -def gen_str_with_spl_char(size, char_set=None): - if char_set: - special_chars = char_set - else: - special_chars = ['<', '>', '&', '%', '.', '_', ',', '"', ' ', '$'] - char_set = ''.join(special_chars) + \ - string.ascii_uppercase[:6] + string.digits[:6] - return ''.join(random.choice(char_set) for _ in range(size)) - - -def is_v4(address): - try: - ip = IPNetwork(address) - if ip.version == 4: - return True - except AddrFormatError: - pass - return False - - -def is_v6(address): - try: - ip = IPNetwork(address) - if ip.version == 6: - return True - except AddrFormatError: - pass - return False - - -def is_mac(address): - try: - mac = EUI(address) - if mac.version == 48: - return True - except AddrFormatError: - pass - return False - - -def get_af_type(address): - try: - if is_v4(address): - return 'v4' - if is_v6(address): - return 'v6' - if is_mac(address): - return 'mac' - 
except: - pass - return None - - -def get_af_from_cidrs(cidrs): - af_list = list(map(get_af_type, cidrs)) - if 'v4' in af_list and 'v6' in af_list: - return 'dual' - return af_list[0] - - -def is_valid_af(af): - valid_address_families = ['v4', 'v6'] - if af in valid_address_families: - return True - return False - - -def update_reserve_cidr(cidr): - if not cidr: - return - current = os.getenv('RESERVED_CIDRS', '').split(',') - current.extend([cidr]) - env = dict(RESERVED_CIDRS=','.join(current).strip(',')) - os.environ.update(env) - -SUBNET_MASK = {'v4': {'min': 8, 'max': 29, 'default': 24}, - 'v6': {'min': 64, 'max': 125, 'default': 64}} - - -def is_valid_subnet_mask(plen, af='v4'): - ''' - Minimum v4 subnet mask is 8 and max 29 - Minimum v6 subnet mask is 64 and max 125(openstack doesnt support 127) - ''' - plen = int(plen) - if plen < SUBNET_MASK[af]['min'] or plen > SUBNET_MASK[af]['max']: - return False - return True - - -def is_reserved_address(address): - ''' - Check whether a particular address is reserved and should not be allocated. 
- RESERVED_CIDRS env variable will take comma separated list of cidrs - ''' - reserved_cidrs = os.getenv('RESERVED_CIDRS', None) - if reserved_cidrs: - cidrs = list(set(reserved_cidrs.split(','))) # Handling duplicates - for cidr in cidrs: - if not cidr.strip(): # taking care of empty commas - continue - if not cidr_exclude(address, cidr.strip()): - return True - return False - - -def is_valid_address(address): - ''' Validate whether the address provided is routable unicast address ''' - addr = IPAddress(address) - if addr.is_loopback() or addr.is_reserved() or addr.is_private()\ - or addr.is_link_local() or addr.is_multicast(): - return False - return True - - -def get_random_cidr(mask=None, af='v4'): - ''' Generate a random subnet based on netmask and address family ''' - if not is_valid_af(af=af): - raise ValueError("Address family not supported %s" % af) - if mask is None: - mask = SUBNET_MASK[af]['default'] - if type(mask) is int: - mask = str(mask) - if not is_valid_subnet_mask(plen=mask, af=af): - raise ValueError("Invalid subnet mask %s for af %s" % (mask, af)) - while (True): - if af == 'v6': - min = 0x2001000000000000 - max = 0x3fffffffffffffff - address = socket.inet_ntop(socket.AF_INET6, - struct.pack('>2Q', - random.randint(min, max), - random.randint(0, 2 ** 64))) - elif af == 'v4': - address = socket.inet_ntop(socket.AF_INET, - struct.pack('>I', - random.randint(2 ** 24, 2 ** 32))) - if is_reserved_address(address): - continue - if is_valid_address(address): - return '%s/%s' % (str(IPNetwork(address + '/' + mask).network), mask) - - -def get_random_cidrs(stack): - subnets = list() - if 'v4' in stack or 'dual' in stack: - subnets.append(get_random_cidr(af='v4')) - if 'v6' in stack or 'dual' in stack: - subnets.append(get_random_cidr(af='v6')) - return subnets - - -def get_an_ip(cidr, offset=2): - ''' - Fetch an ip from the subnet - default offset is 2 as 0 points to subnet and 1 is taken by gateway - This stands good for openstack v6 implementation as 
of Juno - ''' - return str(IPNetwork(cidr)[offset]) - - -def get_subnet_broadcast(cidr): - return str(IPNetwork(cidr).broadcast) - - -def get_default_cidr(stack='dual'): - return [str(IPNetwork(x).supernet()[0]) for x in get_random_cidrs(stack=stack)] - - -# Min support mask is /30 or /126 -def get_random_ip(cidr): - first = IPNetwork(cidr).first - last = IPNetwork(cidr).last - if first + 2 >= last: - return cidr - return get_an_ip(cidr, offset=random.randint(2, last - first - 1)) - - -def get_random_string_list(max_list_length, prefix='', length=8): - final_list = [] - list_length = random.randint(0, max_list_length) - for i in range(0, list_length): - final_list.append(prefix + '-' + get_random_string(length)) - return final_list - - -def get_random_mac(): - return ':'.join(map(lambda x: "%02x" % x, [0x00, 0x16, 0x3E, - random.randint(0x00, 0x7F), random.randint( - 0x00, 0xFF), - random.randint(0x00, 0xFF)])) - - -def search_arp_entry(arp_output, ip_address=None, mac_address=None): - ''' - arp_output : output of 'arp -an' - Returns a tuple (ip, mac) if ip_address or mac_address matched - ''' - if ip_address: - match_string = ip_address - elif mac_address: - match_string = mac_address - else: - return (None, None) - for line in arp_output.splitlines(): - search_obj = None - if match_string in line: - search_obj = re.search( - '\? 
\((.*)\) at ([0-9:a-f]+)', line, re.M | re.I) - if search_obj: - (ip, mac) = (search_obj.group(1), search_obj.group(2)) - return (ip, mac) - return (None, None) - - -def get_random_rt(): - return str(random.randint(9000000, 4294967295)) - - -def get_random_boolean(): - bool_list = [True, False] - return random.choice(bool_list) - - -def get_uuid(): - return str(uuid.uuid1()) - - -def compare(val1, val2, operator='subset'): - if type(val1) is bool: - val1 = str(val1) - if type(val2) is bool: - val2 = str(val2) - if type(val1) is list and type(val2) is list: - val1 = sorted(val1) - val2 = sorted(val2) - if operator == 'subset': - return val1 <= val2 - else: - return val1 == val2 - - -def run_once(f): - '''A decorator which can be used to call a function only once - ''' - def wrapper(*args, **kwargs): - if not wrapper.has_run: - wrapper.has_run = True - return f(*args, **kwargs) - wrapper.has_run = False - return wrapper - - -class Lock: - - def __init__(self, filename): - self.filename = filename - # This will create it if it does not exist already - self.handle = open(filename, 'w') - - # Bitwise OR fcntl.LOCK_NB if you need a non-blocking lock - def acquire(self): - fcntl.flock(self.handle, fcntl.LOCK_EX) - - def release(self): - fcntl.flock(self.handle, fcntl.LOCK_UN) - - def __del__(self): - self.handle.close() - - -def read_config_option(config, section, option, default_option): - ''' Read the config file. 
If the option/section is not present, return the default_option - ''' - if not config: - return default_option - try: - val = config.get(section, option) - if val.lower() == 'true': - val = True - elif val.lower() == 'false' or val.lower() == 'none': - val = False - elif not val: - val = default_option - return val - except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): - return default_option -# end read_config_option - - -def copy_file_to_server(host, src, dest, filename, force=False): - - fname = "%s/%s" % (dest, filename) - with settings(host_string='%s@%s' % (host['username'], - host['ip']), password=host['password'], - warn_only=True, abort_on_prompts=False): - if not exists(fname) or force: - time.sleep(random.randint(1, 10)) - put(src, dest) -# end copy_file_to_server - - -def get_random_vxlan_id(): - return random.randint(1, 16777215) - - -def get_random_asn(): - return random.randint(1, 64511) - - -class v4OnlyTestException(TestSkipped): - pass - - -class custom_dict(MutableMapping, dict): - - ''' - custom dict wrapper around dict which could be used in scenarios - where setitem can be deffered until getitem is requested - - MutableMapping was reqd to inherit clear,get,free etal - - :param callback: callback function which would create value upon keynotfound - :param env_key : Key under env incase the dict can be shared across testcases - ''' - - def __init__(self, callback, env_key=None): - self.callback = callback - self.env_key = env_key - if self.env_key and self.env_key not in env: - env[self.env_key] = dict() - - def __getitem__(self, key): - try: - return dict.__getitem__(self, key) - except KeyError: - if self.env_key and key in env[self.env_key]: - return env[self.env_key][key] - self[key] = self.callback(key) - return dict.__getitem__(self, key) - - def __setitem__(self, key, value): - if self.env_key: - env[self.env_key][key] = value - dict.__setitem__(self, key, value) - - def __delitem__(self, key): - if self.env_key: - del 
env[self.env_key][key] - dict.__delitem__(self, key) - - def __iter__(self): - return dict.__iter__(self) - - def __len__(self): - return dict.__len__(self) - - def __keytransform__(self, key): - return key - - def __contains__(self, key): - if self.env_key: - return True if key in env[self.env_key] else False - else: - return True if key in self else False - - -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - try: - f = '/tmp/%s.lock' % (str(cls.__name__)) - lock = Lock(f) - lock.acquire() - if cls not in cls._instances: - cls._instances[cls] = super( - Singleton, cls).__call__(*args, **kwargs) - finally: - lock.release() - return cls._instances[cls] -# end Singleton - - -def skip_because(*args, **kwargs): - """A decorator useful to skip tests hitting known bugs or specific orchestrator - @param bug: optional bug number causing the test to skip - @param orchestrator: optional orchestrator to be checked to skip test - @param feature: optional feature to be checked to skip test - """ - def decorator(f): - @functools.wraps(f) - def wrapper(self, *func_args, **func_kwargs): - skip = False - if "orchestrator" in kwargs and 'address_family' in kwargs: - if ((kwargs["orchestrator"] in self.inputs.orchestrator)\ - and (kwargs['address_family'] in self.inputs.address_family)): - skip = True - msg = "Skipped as not supported in %s orchestration setup" %self.inputs.orchestrator - raise testtools.TestCase.skipException(msg) - - if "orchestrator" in kwargs and 'address_family' not in kwargs: - if kwargs["orchestrator"] in self.inputs.orchestrator: - skip = True - msg = "Skipped as not supported in %s orchestration setup" %self.inputs.orchestrator - raise testtools.TestCase.skipException(msg) - - if "feature" in kwargs: - if not self.orch.is_feature_supported(kwargs["feature"]): - skip = True - msg = "Skipped as feature %s not supported in %s \ - orchestration setup" %(kwargs["feature"],self.inputs.orchestrator) - raise 
testtools.TestCase.skipException(msg) - - if 'ha_setup' in kwargs: - if ((not self.inputs.ha_setup ) and (kwargs["ha_setup"] == False)): - skip = True - msg = "Skipped as not supported in non-HA setup" - raise testtools.TestCase.skipException(msg) - - if "bug" in kwargs: - skip = True - if not kwargs['bug'].isdigit(): - raise ValueError('bug must be a valid bug number') - msg = "Skipped until Bug: %s is resolved." % kwargs["bug"] - raise testtools.TestCase.skipException(msg) - return f(self, *func_args, **func_kwargs) - return wrapper - return decorator - -def get_build_sku(openstack_node_ip, openstack_node_password='c0ntrail123', user='root'): - build_sku = get_os_env("SKU") - if build_sku is not None: - return str(build_sku).lower() - else: - host_str='%s@%s' % (user, openstack_node_ip) - pswd=openstack_node_password - cmd = 'nova-manage version' - env.host_string=openstack_node_ip - tries = 10 - while not build_sku and tries: - try: - output = run_fab_cmd_on_node(host_str, pswd, cmd, timeout=10, as_sudo=True) - build_sku = sku_dict[re.findall("[0-9]{4}.[0-9]+",output)[0]] - except NetworkError, e: - time.sleep(1) - pass - tries -= 1 - return build_sku diff --git a/tcutils/vdns/__init__.py b/tcutils/vdns/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tcutils/vdns/dns_introspect_utils.py b/tcutils/vdns/dns_introspect_utils.py deleted file mode 100755 index 4c6a512f3..000000000 --- a/tcutils/vdns/dns_introspect_utils.py +++ /dev/null @@ -1,60 +0,0 @@ -import logging as LOG - -from tcutils.verification_util import * - -LOG.basicConfig(format='%(levelname)s: %(message)s', level=LOG.DEBUG) - - -class DnsAgentInspect (VerificationUtilBase): - - def __init__(self, ip, logger=LOG): - super(DnsAgentInspect, self).__init__( - ip, 8092, XmlDrv, logger=logger) - - def get_dnsa_dns_list(self, domain='default-domain'): - ''' - method: get_dnsa_dns_list returns a list - returns None if not found, a dict w/ attrib. 
eg: - ''' - path = 'Snh_ShowAgentXmppDnsData?' - xpath = './data/list/AgentDnsData' - dns_agent = self.dict_get(path) - dns_agents = EtreeToDict(xpath).get_all_entry(dns_agent) - - dns_data = {} - for data in dns_agents: - dns_data.update({data['agent']: data['agent_data']}) - return dns_data - # end of get_dnsa_dns_list - - def get_dnsa_config(self, domain='default-domain'): - ''' - method: get_dnsa_config returns a list - returns None if not found, a dict w/ attrib. - ''' - path = 'Snh_ShowVirtualDnsServers?' - xpath = './virtual_dns_servers/list/VirtualDnsServersSandesh/virtual_dns' - virtual_dns = self.dict_get(path) - virtual_dns_data = EtreeToDict(xpath).get_all_entry(virtual_dns) - - if type(virtual_dns_data) == type(dict()): - virtual_dns_data = [virtual_dns_data] - - return_vdns_data = [] - for vdata in virtual_dns_data: - dns_data = {} - dns_data['virtual_dns'] = vdata - # get the record data - record_data = self.get_rec_data( - vdns_server=vdata['VirtualDnsTraceData']['name']) - dns_data['records'] = record_data - return_vdns_data.append(dns_data) - return return_vdns_data - - def get_rec_data(self, vdns_server): - path = 'Snh_ShowVirtualDnsRecords?x=%s' % vdns_server - xpath = './records' - rec_data = self.dict_get(path) - return_data = EtreeToDict(xpath).get_all_entry(rec_data) - return return_data['records'] - # end of get_dnsa_config diff --git a/tcutils/verification_util.py b/tcutils/verification_util.py deleted file mode 100644 index 02c506cdd..000000000 --- a/tcutils/verification_util.py +++ /dev/null @@ -1,230 +0,0 @@ -import os -import json -import urllib2 -import requests -import logging as LOG -from lxml import etree - -LOG.basicConfig(format='%(levelname)s: %(message)s', level=LOG.INFO) - - -class JsonDrv (object): - _DEFAULT_HEADERS = { - 'Content-type': 'application/json; charset="UTF-8"', - } - _authn_port = 35357 - _DEFAULT_AUTHN_URL = "/v2.0/tokens" - - def __init__(self, vub, logger=LOG, args=None): - self.log = logger - self._vub = vub 
- self._headers = None - self._args = args - - def _auth(self): - if self._args: - if os.getenv('OS_AUTH_URL'): - url = os.getenv('OS_AUTH_URL') + '/tokens' - else: - url = "http://%s:%s%s" % (self._args.openstack_ip, - self._authn_port, - self._DEFAULT_AUTHN_URL) - insecure = bool(os.getenv('OS_INSECURE',True)) - verify = not insecure - self._authn_body = \ - '{"auth":{"passwordCredentials":{"username": "%s", "password": "%s"}, "tenantName":"%s"}}' % ( - self._args.stack_user, self._args.stack_password, - self._args.project_name) - response = requests.post(url, data=self._authn_body, - headers=self._DEFAULT_HEADERS, - verify=verify) - if response.status_code == 200: - # plan is to re-issue original request with new token - authn_content = json.loads(response.text) - self._auth_token = authn_content['access']['token']['id'] - self._headers = {'X-AUTH-TOKEN': self._auth_token} - return - raise RuntimeError('Authentication Failure') - - def load(self, url, retry=True): - self.log.debug("Requesting: %s", url) - resp = requests.get(url, headers=self._headers) - if resp.status_code == 401: - if retry: - self._auth() - return self.load(url, False) - if resp.status_code == 200: - return json.loads(resp.text) - - self.log.debug("Response Code: %d" % resp.status_code) - return None - - -class XmlDrv (object): - - def __init__(self, vub, logger=LOG, args=None): - self.log = logger - self._vub = vub - if args: - pass - - def load(self, url): - try: - self.log.debug("Requesting: %s", url) - resp = requests.get(url) - return etree.fromstring(resp.text) - except requests.ConnectionError, e: - self.log.error("Socket Connection error: %s", str(e)) - return None - - -class VerificationUtilBase (object): - - def __init__(self, ip, port, drv=JsonDrv, logger=LOG, args=None): - self.log = logger - self._ip = ip - self._port = port - self._drv = drv(self, logger=logger, args=args) - self._force_refresh = False - - def get_force_refresh(self): - return self._force_refresh - - def 
set_force_refresh(self, force=False): - self._force_refresh = force - return self.get_force_refresh() - - def _mk_url_str(self, path=''): - if path.startswith('http:'): - return path - return "http://%s:%d/%s" % (self._ip, self._port, path) - - def dict_get(self, path=''): - try: - if path: - return self._drv.load(self._mk_url_str(path)) - except urllib2.HTTPError: - return None - # end dict_get - - -def elem2dict(node, alist=False): - d = list() if alist else dict() - for e in node.iterchildren(): - #key = e.tag.split('}')[1] if '}' in e.tag else e.tag - if e.tag == 'list': - value = elem2dict(e, alist=True) - else: - value = e.text if e.text else elem2dict(e) - if type(d) == type(list()): - d.append(value) - else: - d[e.tag] = value - return d - -class Result (dict): - def __init__(self, d={}): - super(Result, self).__init__() - if type(d) is not dict and hasattr(d, 'tag'): - d = elem2dict(d) - self.update(d) - - def xpath(self, *plist): - ''' basic path ''' - d = self - try: - for p in plist: - d = d[p] - return d - except KeyError, e: - return None - - -class EtreeToDict(object): - - """Converts the xml etree to dictionary/list of dictionary.""" - - def __init__(self, xpath): - self.xpath = xpath - self.xml_list = ['policy-rule'] - - def _handle_list(self, elems): - """Handles the list object in etree.""" - a_list = [] - for elem in elems.getchildren(): - rval = self._get_one(elem, a_list) - if 'element' in rval.keys(): - a_list.append(rval['element']) - elif 'list' in rval.keys(): - a_list.append(rval['list']) - else: - a_list.append(rval) - - if not a_list: - return None - return a_list - - def _get_one(self, xp, a_list=None): - """Recrusively looks for the entry in etree and converts to dictionary. - - Returns a dictionary. 
- """ - val = {} - - child = xp.getchildren() - if not child: - val.update({xp.tag: xp.text}) - return val - - for elem in child: - if elem.tag == 'list': - val.update({xp.tag: self._handle_list(elem)}) - - if elem.tag == 'data': - # Remove CDATA; if present - text = elem.text.replace("") - nxml = etree.fromstring(text) - rval = self._get_one(nxml, a_list) - else: - rval = self._get_one(elem, a_list) - - if elem.tag in self.xml_list: - val.update({xp.tag: self._handle_list(xp)}) - if elem.tag in rval.keys(): - val.update({elem.tag: rval[elem.tag]}) - elif 'SandeshData' in elem.tag: - val.update({xp.tag: rval}) - else: - val.update({elem.tag: rval}) - return val - - def get_all_entry(self, path): - """All entries in the etree is converted to the dictionary - - Returns the list of dictionary/didctionary. - """ - xps = path.xpath(self.xpath) - if not xps: - # sometime ./xpath dosen't work; work around - # should debug to find the root cause. - xps = path.xpath(self.xpath.strip('.')) - if type(xps) is not list: - return self._get_one(xps) - - val = [] - for xp in xps: - val.append(self._get_one(xp)) - if len(val) == 1: - return val[0] - return val - - def find_entry(self, path, match): - """Looks for a particular entry in the etree. - - Returns the element looked for/None. 
- """ - xp = path.xpath(self.xpath) - f = filter(lambda x: x.text == match, xp) - if len(f): - return f[0].text - return None diff --git a/tcutils/wrappers.py b/tcutils/wrappers.py deleted file mode 100644 index 626db300c..000000000 --- a/tcutils/wrappers.py +++ /dev/null @@ -1,163 +0,0 @@ -""" Module wrrapers that can be used in the tests.""" - -import traceback, os -from functools import wraps -from testtools.testcase import TestSkipped -import cgitb -import cStringIO -from datetime import datetime -from tcutils.util import v4OnlyTestException - -from cores import * - -def detailed_traceback(): - buf = cStringIO.StringIO() - cgitb.Hook(format="text", file=buf).handle(sys.exc_info()) - tb_txt = buf.getvalue() - buf.close() - return tb_txt - -def preposttest_wrapper(function): - """Decorator to perform pretest and posttest validations. - when a test is wrraped with this decorator - 1. Logs the test start with test doc string - 2. Checks connection states - 3. Collects cores/crashes before test - 4. Executes the test - 5. Collects cores/crashes after test - 6. Compares pre-cores/crashes with post-cores/crashes to decide test result. - 7. Logs the test result. - """ - @wraps(function) - def wrapper(self, *args, **kwargs): - core_count = 0 - crash_count = 0 - log = self.inputs.logger - log.info('=' * 80) - log.info('STARTING TEST : %s', function.__name__) - start_time = datetime.now().replace(microsecond=0) - # if 'ci_image' in os.environ.keys(): - # if os.environ['stop_execution_flag'] == 'set': - # assert False, "test failed skipping further tests. 
Refer to the logs for further analysis" - doc = function.__doc__ - if doc: - log.info('TEST DESCRIPTION : %s', doc) - errmsg = [] - nodes = get_node_ips(self.inputs) - initial_cores = get_cores(self.inputs) - if initial_cores: - log.warn("Test is running with cores: %s", initial_cores) - - initial_crashes = get_service_crashes(self.inputs) - if initial_crashes: - log.warn("Test is running with crashes: %s", initial_crashes) - - testfail = None - testskip = None - try: - # check state of the connections. - if not self.inputs.verify_control_connection( - connections=self.connections): - log.warn("Pre-Test validation failed.." - " Skipping test %s" % (function.__name__)) - #WA for bug 1362020 - # assert False, "Test did not run since Pre-Test validation failed\ - # due to BGP/XMPP connection issue" - - # else: - result = None - (test_valid, reason) = self.is_test_applicable() - if not test_valid: - raise self.skipTest(reason) - result = function(self, *args, **kwargs) - except KeyboardInterrupt: - raise - except (TestSkipped, v4OnlyTestException), msg: - testskip = True - log.info(msg) - result = True - raise - except Exception, testfail: - test_fail_trace = detailed_traceback() - # Stop the test in the fail state for debugging purpose - if self.inputs.stop_on_fail: - print test_fail_trace - print "Failure occured; Stopping test for debugging." 
- import pdb - pdb.set_trace() - finally: - cleanupfail = None - cleanup_trace = '' - while self._cleanups: - cleanup, args, kwargs = self._cleanups.pop(-1) - try: - cleanup(*args, **kwargs) - except KeyboardInterrupt: - raise - except Exception, cleanupfail: - #result.addError(self, sys.exc_info()) - cet, cei, ctb = sys.exc_info() - formatted_traceback = ''.join(traceback.format_tb(ctb)) - cleanup_trace += '\n{0}\n{1}:\n{2}'.format( - formatted_traceback, - cet.__name__, - cei.message) - - final_cores = get_cores(self.inputs) - cores = find_new(initial_cores, final_cores) - - final_crashes = get_service_crashes(self.inputs) - crashes = find_new(initial_crashes, final_crashes) - - if testfail: - log.error(test_fail_trace) - errmsg.append("Test failed: %s" % test_fail_trace) - - if cleanupfail: - log.error(cleanup_trace) - errmsg.append("Cleanup failed: %s" % cleanup_trace) - - if cores: - for node, corelist in cores.items(): - core_count += len(corelist) - # Preserve this msg format, it is used by - # tcutils.contrailtestrunner - msg = "Cores found(%s): %s" % (core_count, cores) - log.error(msg) - errmsg.append(msg) - if crashes: - for node, crashlist in crashes.items(): - crash_count += len(crashlist) - # Preserve this msg format, it is used by - # tcutils.contrailtestrunner - msg = "Contrail service crashed(%s): %s" % ( - crash_count, crashes) - log.error(msg) - errmsg.append(msg) - - test_time = datetime.now().replace(microsecond=0) - start_time - if cores == {} and crashes == {} and not testfail and \ - not cleanupfail and result is None: - log.info("END TEST : %s : PASSED[%s]", - function.__name__, test_time) - log.info('-' * 80) - elif cores or crashes or testfail or cleanupfail or result is False: - log.info('') - log.info("END TEST : %s : FAILED[%s]", - function.__name__, test_time) - log.info('-' * 80) - if 'ci_image' in os.environ.keys(): - os.environ['stop_execution_flag'] = 'set' - raise TestFailed("\n ".join(errmsg)) - elif testskip: - log.info('') - 
log.info('END TEST : %s : SKIPPED[%s]', - function.__name__, test_time) - log.info('-' * 80) - else: - log.info('') - log.info('END TEST : %s : PASSED[%s]', - function.__name__, test_time) - log.info('-' * 80) - - return wrapper diff --git a/test.py b/test.py deleted file mode 100644 index d0bed0867..000000000 --- a/test.py +++ /dev/null @@ -1,152 +0,0 @@ -import functools -import os -import time -from testtools import content, content_type - -import fixtures -import testresources -import testtools -from common.contrail_test_init import ContrailTestInit -from common import log_orig as logging -#from common import config -import logging as std_logging -from tcutils.util import get_random_name - -def attr(*args, **kwargs): - """A decorator which applies the testtools attr decorator - - This decorator applies the testtools.testcase.attr if it is in the list of - attributes to testtools we want to apply. - """ - - def decorator(f): - if 'type' in kwargs and isinstance(kwargs['type'], str): - f = testtools.testcase.attr(kwargs['type'])(f) - elif 'type' in kwargs and isinstance(kwargs['type'], list): - for attr in kwargs['type']: - f = testtools.testcase.attr(attr)(f) - return f - - return decorator - -#LOG = logging.getLogger(__name__) -std_logging.getLogger('urllib3.connectionpool').setLevel(std_logging.WARN) -std_logging.getLogger('paramiko.transport').setLevel(std_logging.WARN) -std_logging.getLogger('keystoneclient.session').setLevel(std_logging.WARN) -std_logging.getLogger('keystoneclient.httpclient').setLevel(std_logging.WARN) -std_logging.getLogger('neutronclient.client').setLevel(std_logging.WARN) -# -#CONF = config.CONF - -class TagsHack(object): - def id(self): - orig = super(TagsHack, self).id() - tags = os.getenv('TAGS', '') - if not tags: - return orig - else: - fn = self._get_test_method() - attributes = getattr(fn, '__testtools_attrs', None) - tags = tags.split(" ") - if attributes: - for tag in tags: - if tag in attributes: - return orig - # A hack to 
please testtools to get uniq testcase names - return get_random_name() - -class BaseTestCase(TagsHack, - testtools.testcase.WithAttributes, - testtools.TestCase, - testresources.ResourcedTestCase): - - setUpClassCalled = False - - @classmethod - def setUpClass(cls): - if hasattr(super(BaseTestCase, cls), 'setUpClass'): - super(BaseTestCase, cls).setUpClass() - cls.setUpClassCalled = True - - if 'TEST_CONFIG_FILE' in os.environ : - cls.ini_file= os.environ.get('TEST_CONFIG_FILE') - else: - cls.ini_file= 'sanity_params.ini' - cls.Logger = logging.ContrailLogger(cls.__name__) - cls.Logger.setUp() - cls.logger = cls.Logger.logger - cls.inputs = ContrailTestInit(cls.ini_file,logger = cls.logger) - - @classmethod - def tearDownClass(cls): - #cls.logger.cleanUp() - if hasattr(super(BaseTestCase, cls), 'tearDownClass'): - super(BaseTestCase, cls).tearDownClass() - - def setUp(self): - super(BaseTestCase, self).setUp() - if not self.setUpClassCalled: - raise RuntimeError("setUpClass did not call the super's" - " setUpClass in the " - + self.__class__.__name__) - - test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) - try: - test_timeout = int(test_timeout) - except ValueError: - test_timeout = 0 - if test_timeout > 0: - self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) - - if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or - os.environ.get('OS_STDOUT_CAPTURE') == '1'): - stdout = self.useFixture(fixtures.StringStream('stdout')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) - if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or - os.environ.get('OS_STDERR_CAPTURE') == '1'): - stderr = self.useFixture(fixtures.StringStream('stderr')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) - if (os.environ.get('OS_LOG_CAPTURE') != 'False' and - os.environ.get('OS_LOG_CAPTURE') != '0'): - log_format = '%(asctime)-15s %(message)s' - self.useFixture(fixtures.LoggerFixture(nuke_handlers=False, - format=log_format)) -# import 
pdb;pdb.set_trace() -# logger = self.useFixture(log.Contrail_Logger(cls.__name__)) -# - - def cleanUp(self): - super(BaseTestCase, self).cleanUp() - - def addDetail(self, logfile, text): - if type(text) is str: - super(BaseTestCase, self).addDetail(logfile, - content.text_content(text)) - else: - super(BaseTestCase, self).addDetail(logfile, text) - - def is_test_applicable(self): - return (True, None) - - -def call_until_true(func, duration, sleep_for): - """ - Call the given function until it returns True (and return True) or - until the specified duration (in seconds) elapses (and return - False). - - :param func: A zero argument callable that returns True on success. - :param duration: The number of seconds for which to attempt a - successful call of the function. - :param sleep_for: The number of seconds to sleep after an unsuccessful - invocation of the function. - """ - now = time.time() - timeout = now + duration - while now < timeout: - if func(): - return True - LOG.debug("Sleeping for %d seconds", sleep_for) - time.sleep(sleep_for) - now = time.time() - return False diff --git a/tools/__init__.py b/tools/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tools/check_test_discovery.sh b/tools/check_test_discovery.sh deleted file mode 100755 index fd2c76509..000000000 --- a/tools/check_test_discovery.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -echo "Validating if test discovery passes in scripts/ and serial_scripts" -echo "" -GIVEN_TEST_PATH=$OS_TEST_PATH - -export PYTHONPATH=$PATH:$PWD/scripts:$PWD/fixtures -export OS_TEST_PATH=${GIVEN_TEST_PATH:-./scripts}; testr list-tests || exit 1 -export PYTHONPATH=$PATH:$PWD/serial_scripts:$PWD/fixtures -export OS_TEST_PATH=${GIVEN_TEST_PATH:-./serial_scripts}; testr list-tests || exit 1 diff --git a/tools/configure.py b/tools/configure.py deleted file mode 100644 index d61f3c01a..000000000 --- a/tools/configure.py +++ /dev/null @@ -1,321 +0,0 @@ -import argparse -import sys 
-import string -import json -import os -from fabric.api import env, run, local, lcd -from fabric.context_managers import settings, hide - - -def get_address_family(): - address_family = os.getenv('AF', 'dual') - # ToDo: CI to execute 'v4' testcases alone for now - if os.getenv('GUESTVM_IMAGE', None): - address_family = 'v4' - return address_family - - -def configure_test_env(contrail_fab_path='/opt/contrail/utils', test_dir='/contrail-test'): - """ - Configure test environment by creating sanity_params.ini and sanity_testbed.json files - """ - sys.path.insert(0, contrail_fab_path) - from fabfile.testbeds import testbed - from fabfile.utils.host import get_openstack_internal_vip,\ - get_control_host_string, get_authserver_ip, get_admin_tenant_name, \ - get_authserver_port, get_env_passwords, get_authserver_credentials, \ - get_vcenter_ip, get_vcenter_port, get_vcenter_username, \ - get_vcenter_password, get_vcenter_datacenter, get_vcenter_compute - from fabfile.utils.multitenancy import get_mt_enable - from fabfile.utils.interface import get_data_ip - - cfgm_host = env.roledefs['cfgm'][0] - - with settings(warn_only=True): - with lcd(contrail_fab_path): - if local('git branch').succeeded: - fab_revision = local('git log --format="%H" -n 1', capture=True) - else: - with settings(host_string=cfgm_host): - fab_revision = run('cat /opt/contrail/contrail_packages/VERSION') - with lcd(test_dir): - if local('git branch').succeeded: - revision = local('git log --format="%H" -n 1', capture=True) - else: - with settings(host_string=cfgm_host): - revision = run('cat /opt/contrail/contrail_packages/VERSION') - - sanity_testbed_dict = { - 'hosts': [], - 'vgw': [], - 'esxi_vms':[], - 'hosts_ipmi': [], - 'tor':[], - } - - sample_ini_file = test_dir + '/' + 'sanity_params.ini.sample' - with open(sample_ini_file, 'r') as fd_sample_ini: - contents_sample_ini = fd_sample_ini.read() - sanity_ini_templ = string.Template(contents_sample_ini) - - with settings(host_string = 
env.roledefs['openstack'][0]): - openstack_host_name = run("hostname") - - with settings(host_string = env.roledefs['cfgm'][0]): - cfgm_host_name = run("hostname") - - control_host_names = [] - for control_host in env.roledefs['control']: - with settings(host_string = control_host): - host_name = run("hostname") - control_host_names.append(host_name) - - cassandra_host_names = [] - if 'database' in env.roledefs.keys(): - for cassandra_host in env.roledefs['database']: - with settings(host_string = cassandra_host): - host_name = run("hostname") - cassandra_host_names.append(host_name) - - internal_vip = get_openstack_internal_vip() - for host_string in env.roledefs['all']: - if host_string in env.roledefs.get('test',[]): - continue - host_ip = host_string.split('@')[1] - with settings(host_string = host_string): - host_name = run("hostname") - - host_dict = {} - - host_dict['ip'] = host_ip - host_dict['data-ip']= get_data_ip(host_string)[0] - if host_dict['data-ip'] == host_string.split('@')[1]: - host_dict['data-ip'] = get_data_ip(host_string)[0] - host_dict['control-ip']= get_control_host_string(host_string).split('@')[1] - - host_dict['name'] = host_name - host_dict['username'] = host_string.split('@')[0] - host_dict['password'] =get_env_passwords(host_string) - host_dict['roles'] = [] - - if not internal_vip: - if host_string in env.roledefs['openstack']: - role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}} - host_dict['roles'].append(role_dict) - - if host_string in env.roledefs['cfgm']: - role_dict = {'type': 'cfgm', 'params': {'collector': host_name, 'cassandra': ' '.join(cassandra_host_names)}} - - if internal_vip: - role_dict['openstack'] = 'contrail-vip' - else: - role_dict['openstack'] = openstack_host_name - - host_dict['roles'].append(role_dict) - - if host_string in env.roledefs['control']: - role_dict = {'type': 'bgp', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}} - host_dict['roles'].append(role_dict) - - if 
'database' in env.roledefs.keys() and host_string in env.roledefs['database']: - role_dict = { 'type': 'database', 'params': {'cassandra': ' '.join(cassandra_host_names)} } - host_dict['roles'].append(role_dict) - - if host_string in env.roledefs['compute']: - role_dict = {'type': 'compute', 'params': {'collector': cfgm_host_name, 'cfgm': cfgm_host_name}} - role_dict['params']['bgp'] = [] - if len(env.roledefs['control']) == 1: - role_dict['params']['bgp'] = control_host_names - else: - for control_node in control_host_names: - role_dict['params']['bgp'].append(control_node) - # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))]) - host_dict['roles'].append(role_dict) - - if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']: - role_dict = { 'type': 'collector', 'params': {'cassandra': ' '.join(cassandra_host_names)} } - host_dict['roles'].append(role_dict) - - if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']: - role_dict = { 'type': 'webui', 'params': {'cfgm': cfgm_host_name} } - host_dict['roles'].append(role_dict) - - sanity_testbed_dict['hosts'].append(host_dict) - if env.has_key('vgw'): sanity_testbed_dict['vgw'].append(env.vgw) - - # Read ToR config - sanity_tor_dict = {} - if env.has_key('tor_agent'): - sanity_testbed_dict['tor_agent'] = env.tor_agent - - # Read any tor-host config - if env.has_key('tor_hosts'): - sanity_testbed_dict['tor_hosts'] = env.tor_hosts - - # Read any MX config (as physical_router ) - if env.has_key('physical_routers'): - sanity_testbed_dict['physical_routers'] = env.physical_routers - - esxi_hosts = getattr(testbed, 'esxi_hosts', None) - if esxi_hosts: - for esxi in esxi_hosts: - host_dict = {} - host_dict['ip'] = esxi_hosts[esxi]['ip'] - host_dict['data-ip'] = host_dict['ip'] - host_dict['control-ip'] = host_dict['ip'] - host_dict['name'] = esxi - host_dict['username'] = esxi_hosts[esxi]['username'] - host_dict['password'] = 
esxi_hosts[esxi]['password'] - host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm']['host'] - host_dict['roles'] = [] - sanity_testbed_dict['hosts'].append(host_dict) - sanity_testbed_dict['esxi_vms'].append(host_dict) - # Adding vip VIP dict for HA test setup - - with settings(host_string = env.roledefs['openstack'][0]): - if internal_vip: - host_dict = {} - host_dict['data-ip']= get_authserver_ip() - host_dict['control-ip']= get_authserver_ip() - host_dict['ip']= get_authserver_ip() - host_dict['name'] = 'contrail-vip' - with settings(host_string = env.roledefs['cfgm'][0]): - host_dict['username'] = host_string.split('@')[0] - host_dict['password'] = get_env_passwords(host_string) - host_dict['roles'] = [] - role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}} - host_dict['roles'].append(role_dict) - sanity_testbed_dict['hosts'].append(host_dict) - - # get host ipmi list - if env.has_key('hosts_ipmi'): - sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi) - - # generate json file and copy to cfgm - sanity_testbed_json = json.dumps(sanity_testbed_dict) - - stop_on_fail = env.get('stop_on_fail', False) - mail_to = env.get('mail_to', '') - log_scenario = env.get('log_scenario', 'Sanity') - stack_user, stack_password = get_authserver_credentials() - stack_tenant = get_admin_tenant_name() - # Few hardcoded variables for sanity environment - # can be removed once we move to python3 and configparser - stack_domain = 'default-domain' - webserver_host = '10.204.216.50' - webserver_user = 'bhushana' - webserver_password = 'bhu@123' - webserver_log_path = '/home/bhushana/Documents/technical/logs/' - webserver_report_path = '/home/bhushana/Documents/technical/sanity' - webroot = 'Docs/logs' - mail_server = '10.204.216.49' - mail_port = '25' - fip_pool_name = 'floating-ip-pool' - public_virtual_network='public' - public_tenant_name='admin' - fixture_cleanup = 'yes' - generate_html_report = 'True' - key = 'key1' - mailSender = 
'contrailbuild@juniper.net' - - use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5', False) - orch = getattr(env, 'orchestrator', 'openstack') - router_asn = getattr(testbed, 'router_asn', '') - public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '') - public_vn_subnet = getattr(testbed, 'public_vn_subnet', '') - ext_routers = getattr(testbed, 'ext_routers', '') - router_info = str(ext_routers) - test_verify_on_setup = getattr(env, 'test_verify_on_setup', True) - webui = getattr(testbed, 'webui', False) - horizon = getattr(testbed, 'horizon', False) - ui_config = getattr(testbed, 'ui_config', False) - ui_browser = getattr(testbed, 'ui_browser', False) - if 'mail_server' in env.keys(): - mail_server = env.mail_server - mail_port = env.mail_port - - vcenter_dc = '' - if orch == 'vcenter': - public_tenant_name='vCenter' - - if env.has_key('vcenter'): - if env.vcenter: - vcenter_dc = env.vcenter['datacenter'] - - sanity_params = sanity_ini_templ.safe_substitute( - {'__testbed_json_file__' : 'sanity_testbed.json', - '__nova_keypair_name__' : key, - '__orch__' : orch, - '__stack_user__' : stack_user, - '__stack_password__' : stack_password, - '__auth_ip__' : get_authserver_ip(), - '__auth_port__' : get_authserver_port(), - '__stack_tenant__' : stack_tenant, - '__stack_domain__' : stack_domain, - '__multi_tenancy__' : get_mt_enable(), - '__address_family__' : get_address_family(), - '__log_scenario__' : log_scenario, - '__generate_html_report__': generate_html_report, - '__fixture_cleanup__' : fixture_cleanup, - '__webserver__' : webserver_host, - '__webserver_user__' : webserver_user, - '__webserver_password__' : webserver_password, - '__webserver_log_dir__' : webserver_log_path, - '__webserver_report_dir__': webserver_report_path, - '__webroot__' : webroot, - '__mail_server__' : mail_server, - '__mail_port__' : mail_port, - '__sender_mail_id__' : mailSender, - '__receiver_mail_id__' : mail_to, - '__http_proxy__' : env.get('http_proxy', ''), - 
'__ui_browser__' : ui_browser, - '__ui_config__' : ui_config, - '__horizon__' : horizon, - '__webui__' : webui, - '__devstack__' : False, - '__public_vn_rtgt__' : public_vn_rtgt, - '__router_asn__' : router_asn, - '__router_name_ip_tuples__': router_info, - '__public_vn_name__' : fip_pool_name, - '__public_virtual_network__':public_virtual_network, - '__public_tenant_name__' :public_tenant_name, - '__public_vn_subnet__' : public_vn_subnet, - '__test_revision__' : revision, - '__fab_revision__' : fab_revision, - '__test_verify_on_setup__': test_verify_on_setup, - '__stop_on_fail__' : stop_on_fail, - '__ha_setup__' : getattr(testbed, 'ha_setup', ''), - '__ipmi_username__' : getattr(testbed, 'ipmi_username', ''), - '__ipmi_password__' : getattr(testbed, 'ipmi_password', ''), - '__vcenter_dc__' : vcenter_dc, - '__vcenter_server__' : get_vcenter_ip(), - '__vcenter_port__' : get_vcenter_port(), - '__vcenter_username__' : get_vcenter_username(), - '__vcenter_password__' : get_vcenter_password(), - '__vcenter_datacenter__' : get_vcenter_datacenter(), - '__vcenter_compute__' : get_vcenter_compute(), - '__use_devicemanager_for_md5__' : use_devicemanager_for_md5, - }) - - ini_file = test_dir + '/' + 'sanity_params.ini' - testbed_json_file = test_dir + '/' + 'sanity_testbed.json' - with open(ini_file, 'w') as ini: - ini.write(sanity_params) - - with open(testbed_json_file,'w') as tb: - tb.write(sanity_testbed_json) - - -def main(argv=sys.argv): - ap = argparse.ArgumentParser( - description='Configure test environment') - ap.add_argument('contrail_test_directory', type=str, - help='contrail test directory') - ap.add_argument('-p','--contrail-fab-path', type=str, default='/opt/contrail/utils', - help='Contrail fab path on local machine') - args = ap.parse_args() - - configure_test_env(args.contrail_fab_path, args.contrail_test_directory) - -if __name__ == "__main__": - sys.exit(not main(sys.argv)) diff --git a/tools/contrail-test b/tools/contrail-test deleted file mode 100755 
index b2042c6ad..000000000 --- a/tools/contrail-test +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env python -""" -Lists/Executes the tests in the contrail test modules. - -Usage: - Lists the test features: - ------------------------ - python contrail-test list - - Lists the tests in set of features: - ----------------------------------- - python contrail-test list -f .... - Note: .... are the feature - listed by the above command - - Lists the tests in a test module: - --------------------------------- - python contrail-test list -p - - Run the list of tests: - ---------------------- - python contrail-test run -T .... - Note: .... are the tests - listed by the avove command. -""" - -import os -import sys -import argparse - -from fabric.api import lcd, local, settings, hide - -FEATURE_ROOTS = ('scripts', 'serial_scripts') -NON_FEATURE_DIRS = ('serial_scripts', 'ubuntu_repo', 'tcutils') - -def parse_args(): - parser = argparse.ArgumentParser() - defaults = { - } - parser.set_defaults(**defaults) - parser.add_argument("operation", help="Operation to be performed [list | run]") - parser.add_argument("-t", "--testroot", help="contrail-test root directory") - parser.add_argument("-p", "--testmodulepath", help="Directory path of the specific test module") - parser.add_argument("-f", "--testfeatures", nargs='+', help="List of test features") - parser.add_argument("-T", "--tests", nargs='+', help="List of tests to execute") - - return parser.parse_args(sys.argv[1:]) - - -class ContrailTest(object): - def __init__(self, cmd_args): - self.operation = cmd_args.operation - self.testmodulepath = cmd_args.testmodulepath - self.testfeatures = cmd_args.testfeatures - self.tests = cmd_args.tests - - self.cwd = os.getcwd() - self.testroot = self.cwd.strip(os.path.basename(self.cwd)) - self.python_paths = [self.testroot, - '%sfixtures/' % self.testroot, - '%sscripts/' % self.testroot, - '%sserial_scripts/' % self.testroot, - '%stcutils/pkgs/Traffic/' % self.testroot, - 
'%stcutils/traffic_utils/' % self.testroot] - self.discover_args = args = "-t %s " % self.testroot - - def _print(self, output): - print "\n".join(output) - - def do(self): - """Do tests listing or execution""" - if self.operation == 'list': - self.list_any() - elif self.operation == 'run': - self.run_any() - - def list_any(self): - """Lists the test features and - tests of a test feature or set of test modules.""" - if self.testmodulepath: - self._print(self.list_tests()) - elif self.testfeatures: - self.list_feature_tests() - else: - self.list_features() - - def list_features(self): - """Lists the test features.""" - features = [] - for feature_root in FEATURE_ROOTS: - feature_list = [elem for elem in os.listdir('%s%s' % - (self.testroot, feature_root)) - if os.path.isdir(self.testroot + feature_root + - '/' + elem) and elem not in - NON_FEATURE_DIRS] - features += list(set(feature_list) - set(features)) - self._print(features) - return - - def list_feature_tests(self): - """Lists the tests of a test feature.""" - tests = [] - for test_feature in self.testfeatures: - for feature_root in FEATURE_ROOTS: - feature_dir = "%s%s/%s" % (self.testroot, feature_root, test_feature) - if os.path.isdir(feature_dir): - tests += self.discover(feature_dir) - self._print(tests) - - def list_tests(self, sub_mod=None): - """Lists the tests of a test module.""" - # List the tests in a specific test module - return self.discover(self.testmodulepath) - - def discover(self, args): - """Discovers tests.""" - args = self.discover_args + args - with settings(hide('everything'), warn_only=True): - env_vars = "PYTHONPATH=%s" % ":".join(self.python_paths) - tests = local("%s python -m testtools.run discover %s --list" % - (env_vars, args), capture=True) - return tests.split('\n') - - def run_any(self): - """Executes the set of tests.""" - env_vars = "PYTHONPATH=%s" % ":".join(self.python_paths) - with lcd(self.testroot): - with settings(warn_only=True): - for test in self.tests: - local("%s 
python -m testtools.run %s" % (env_vars, test)) - - -def main(): - ContrailTest(parse_args()).do() - - -if __name__ == '__main__': - sys.exit(main()) - diff --git a/tools/parse_result.py b/tools/parse_result.py deleted file mode 100644 index 1d354c94a..000000000 --- a/tools/parse_result.py +++ /dev/null @@ -1,42 +0,0 @@ -import sys -from lxml import etree as ET - -def filter_by_tests(doc, value_list = ["process-returncode"]): - elem = doc.xpath("/testsuite/testcase[@name='process-returncode']") - root = doc.getroot() - tests = int(root.get('tests')) - failures = int(root.get('failures')) - for el in elem: - root.remove(el) - tests -= 1 - failures -= 1 - root.set('failures',str(failures)) - root.set('tests',str(tests)) - return doc - -def change_tests_name(doc): - root = doc.getroot() - try: - elem = doc.xpath("/testsuite/testcase") - for el in elem: - classname = el.get('classname').split('.')[-1] - name = el.get('name') - name = "%s.%s"%(classname,name) - el.set('name',name) - el = elem[0] - pkg = el.get('classname').split('.')[0] - root.set('name',pkg) - except Exception as e: - print 'could not change test cases names' - -def write_to_a_file(file): - with open(file, 'w') as the_file: - the_file.write(ET.tostring(doc)) - -files = sys.argv[1:] -for file in files: - doc = ET.parse(file) - filter_by_tests(doc) - change_tests_name(doc) - write_to_a_file(file) - diff --git a/tools/patches/junitxml.patch b/tools/patches/junitxml.patch deleted file mode 100644 index 1ab5549ff..000000000 --- a/tools/patches/junitxml.patch +++ /dev/null @@ -1,17 +0,0 @@ ---- __init__.py 2015-05-01 19:38:01.000000000 +0530 -+++ __init__.py 2015-05-01 19:37:44.144733110 +0530 -@@ -165,10 +165,11 @@ - run. 
- """ - duration = self._duration(self._run_start) -+ - self._stream.write('\n' % (len(self.errors), -+ 'skipped="%d" tests="%d" time="%0.3f">\n' % (len(self.errors), - len(self.failures) + len(getattr(self, "unexpectedSuccesses", ())), -- self.testsRun, duration)) -+ len(self.skipped),self.testsRun, duration)) - self._stream.write(''.join(self._results)) - self._stream.write('\n') - - diff --git a/tools/patches/unittest2-discover.patch b/tools/patches/unittest2-discover.patch deleted file mode 100644 index 347300d17..000000000 --- a/tools/patches/unittest2-discover.patch +++ /dev/null @@ -1,16 +0,0 @@ -diff -r b2efb7df637b discover.py ---- a/discover.py Thu Mar 24 00:31:02 2011 -0400 -+++ b/discover.py Thu Nov 28 12:02:19 2013 +0000 -@@ -82,7 +82,11 @@ - """ - testMethodPrefix = 'test' - sortTestMethodsUsing = cmp -- suiteClass = unittest.TestSuite -+ try: -+ import unittest2 -+ suiteClass = unittest2.TestSuite -+ except ImportError: -+ suiteClass = unittest.TestSuite - _top_level_dir = None - - def loadTestsFromTestCase(self, testCaseClass): diff --git a/tools/report_gen.py b/tools/report_gen.py deleted file mode 100644 index cbe978007..000000000 --- a/tools/report_gen.py +++ /dev/null @@ -1,473 +0,0 @@ -import os -import re -import sys -import json -import time -import socket -import smtplib -import getpass -import ConfigParser -import datetime - -from fabric.api import env, run, cd -from fabric.operations import get, put -from fabric.context_managers import settings, hide -from fabric.exceptions import NetworkError -from tcutils.util import * -from tcutils.custom_filehandler import * - -CORE_DIR = '/var/crashes' - - -class ContrailTestInit: - - def __init__(self, ini_file): - self.build_id = None - self.bgp_stress = False - self.config = ConfigParser.ConfigParser() - self.config.read(ini_file) - self.orch = read_config_option(self.config, 'Basic', 'orchestrator', - 'openstack') - self.prov_file = read_config_option(self.config, - 'Basic', 'provFile', None) - 
self.log_scenario = read_config_option(self.config, - 'Basic', 'logScenario', 'Sanity') - if 'EMAIL_SUBJECT' in os.environ and os.environ['EMAIL_SUBJECT'] != '': - self.log_scenario = os.environ.get('EMAIL_SUBJECT') - if 'EMAIL_SUBJECT_PREFIX' in os.environ: - self.log_scenario = '%s %s' % (os.environ.get('EMAIL_SUBJECT_PREFIX'), - self.log_scenario) - self.ext_rtr = read_config_option( - self.config, 'router', 'router_info', 'None') - self.keystone_ip = read_config_option(self.config, - 'Basic', 'auth_ip', None) - self.ui_browser = read_config_option(self.config, - 'ui', 'browser', None) - cwd = os.getcwd() - log_path = ('%s' + '/logs/') % cwd - for file in os.listdir(log_path): - if file.startswith("results_summary") and file.endswith(".txt"): - self.bgp_stress = True - - # Web Server related details - self.web_server = read_config_option(self.config, - 'WebServer', 'host', None) - self.web_server_user = read_config_option(self.config, - 'WebServer', 'username', None) - self.web_server_password = read_config_option(self.config, - 'WebServer', 'password', None) - self.web_server_report_path = read_config_option(self.config, - 'WebServer', 'reportPath', None) - self.web_server_log_path = read_config_option(self.config, - 'WebServer', 'logPath', None) - self.web_root = read_config_option(self.config, - 'WebServer', 'webRoot', None) - # Mail Setup - self.smtpServer = read_config_option(self.config, - 'Mail', 'server', None) - self.smtpPort = read_config_option(self.config, - 'Mail', 'port', '25') - self.mailTo = read_config_option(self.config, - 'Mail', 'mailTo', None) - self.mailSender = read_config_option(self.config, - 'Mail', 'mailSender', 'contrailbuild@juniper.net') - self.ts = self.get_os_env('SCRIPT_TS') or \ - datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S') - self.single_node = self.get_os_env('SINGLE_NODE_IP') - self.jenkins_trigger = self.get_os_env('JENKINS_TRIGGERED') - self.os_type = {} - self.report_details_file = 'report_details_%s.ini' % 
(self.ts) - self.distro = None - - # end __init__ - - def setUp(self): - if self.single_node != '': - self.prov_data = self._create_prov_data() - else: - self.prov_data = self._read_prov_file() - (self.build_id, self.sku) = self.get_build_id() - self.setup_detail = '%s %s~%s' % (self.get_distro(), self.build_id, - self.sku) - self.build_folder = self.build_id + '_' + self.ts - self.html_log_link = 'http://%s/%s/%s/junit-noframes.html' % ( - self.web_server, self.web_root, self.build_folder) - self.log_link = 'http://%s/%s/%s/logs/' % (self.web_server, self.web_root, - self.build_folder) - self.os_type = self.get_os_version() - self.username = self.host_data[self.cfgm_ip]['username'] - self.password = self.host_data[self.cfgm_ip]['password'] - self.sm_pkg = self.get_os_env('SERVER_MANAGER_INSTALLER') - self.contrail_pkg = self.get_os_env('CONTRAIL_PACKAGE') - self.puppet_pkg = self.get_os_env('PUPPET_PKG') - self.write_report_details() - if self.ui_browser: - self.upload_png_files() - # end setUp - - def upload_png_files(self): - self.build_folder = self.build_id + '_' + self.ts - self.web_server_path = self.web_server_log_path + \ - '/' + self.build_folder + '/' - cwd = os.getcwd() - log_path = ('%s' + '/logs/') % cwd - elem = log_path + '*.png' - try: - with hide('everything'): - with settings(host_string=self.web_server, - user=self.web_server_user, - password=self.web_server_password, - warn_only=True, abort_on_prompts=False): - run('mkdir -p %s' % (self.web_server_path)) - output = put(elem, self.web_server_path) - put('logs', self.web_server_path) - except Exception, e: - print 'Error occured while uploading the png files to the Web Server ', e - pass - # end upload_png_files - - def get_os_env(self, var, default=''): - if var in os.environ: - return os.environ.get(var) - else: - return default - # end get_os_env - - def get_os_version(self): - ''' - Figure out the os type on each node in the cluster - ''' - - if self.os_type: - return self.os_type - for 
host_ip in self.host_ips: - username = self.host_data[host_ip]['username'] - password = self.host_data[host_ip]['password'] - with settings( - host_string='%s@%s' % (username, host_ip), password=password, - warn_only=True, abort_on_prompts=False): - output = run('uname -a') - if 'el6' in output: - self.os_type[host_ip] = 'centos_el6' - if 'fc17' in output: - self.os_type[host_ip] = 'fc17' - if 'xen' in output: - self.os_type[host_ip] = 'xenserver' - if 'Ubuntu' in output: - self.os_type[host_ip] = 'ubuntu' - return self.os_type - # end get_os_version - - def _read_prov_file(self): - prov_file = open(self.prov_file, 'r') - prov_data = prov_file.read() - json_data = json.loads(prov_data) - self.host_names = [] - self.cfgm_ip = '' - self.cfgm_ips = [] - self.cfgm_control_ips = [] - self.cfgm_names = [] - self.collector_ips = [] - self.collector_control_ips = [] - self.collector_names = [] - self.database_ips = [] - self.database_names = [] - self.compute_ips = [] - self.compute_names = [] - self.compute_control_ips = [] - self.compute_info = {} - self.bgp_ips = [] - self.bgp_control_ips = [] - self.bgp_names = [] - self.ds_server_ip = [] - self.ds_server_name = [] - self.host_ips = [] - self.webui_ips = [] - self.host_data = {} - self.physical_routers_data = {} - self.vgw_data = {} - for host in json_data['hosts']: - self.host_names.append(host['name']) - host_ip = str(IPNetwork(host['ip']).ip) - host_data_ip = str(IPNetwork(host['data-ip']).ip) - host_control_ip = str(IPNetwork(host['control-ip']).ip) - self.host_ips.append(host_ip) - self.host_data[host_ip] = host - self.host_data[host_data_ip] = host - self.host_data[host_control_ip] = host - self.host_data[host['name']] = host - self.host_data[host['name']]['host_ip'] = host_ip - self.host_data[host['name']]['host_data_ip'] = host_data_ip - self.host_data[host['name']]['host_control_ip'] = host_control_ip - roles = host["roles"] - for role in roles: - if role['type'] == 'openstack': - if self.keystone_ip: - 
self.openstack_ip = self.keystone_ip - else: - self.openstack_ip = host_ip - self.keystone_ip = host_ip - if role['type'] == 'cfgm': - self.cfgm_ip = host_ip - self.cfgm_ips.append(host_ip) - self.cfgm_control_ips.append(host_control_ip) - self.cfgm_control_ip = host_control_ip - self.cfgm_names.append(host['name']) - self.ds_server_ip.append(host_ip) - self.ds_server_name.append(host['name']) - self.masterhost = self.cfgm_ip - self.hostname = host['name'] - if role['type'] == 'compute': - self.compute_ips.append(host_ip) - self.compute_names.append(host['name']) - self.compute_info[host['name']] = host_ip - self.compute_control_ips.append(host_control_ip) - if role['type'] == 'bgp': - - self.bgp_ips.append(host_ip) - self.bgp_control_ips.append(host_control_ip) - self.bgp_names.append(host['name']) -# if role['type'] == 'collector' : -# self.collector_ip= host_ip - if role['type'] == 'webui': - self.webui_ip = host_ip - self.webui_ips.append(host_ip) - if role['type'] == 'collector': - self.collector_ip = host_ip - self.collector_ips.append(host_ip) - self.collector_control_ips.append(host_control_ip) - self.collector_names.append(host['name']) - if role['type'] == 'database': - self.database_ip = host_ip - self.database_ips.append(host_ip) - self.database_names.append(host['name']) - if json_data.has_key('physical_routers'): - self.physical_routers_data = json_data['physical_routers'] - if json_data.has_key('vgw'): - self.vgw_data = json_data['vgw'] - return json.loads(prov_data) - # end _read_prov_file - - def _create_prov_data(self): - ''' Creates json data for a single node only. 
- - ''' - single_node = self.single_node - self.cfgm_ip = single_node - self.cfgm_ips = [single_node] - self.bgp_ips = [single_node] - self.compute_ips = [single_node] - self.host_ips = [single_node] - self.collector_ip = single_node - self.collector_ips = [single_node] - self.database_ip = single_node - self.database_ips = [single_node] - self.webui_ip = single_node - self.openstack_ip = single_node - json_data = {} - self.host_data = {} - hostname = socket.gethostbyaddr(single_node)[0] - self.hostname = hostname - self.compute_names = [self.hostname] - self.compute_info = {hostname: single_node} - json_data['hosts'] = [{ - 'ip': single_node, - 'name': hostname, - 'username': self.username, - 'password': self.password, - 'roles': [ - {"params": {"collector": hostname, "cfgm": hostname}, - "type": "bgp"}, - - {"params": {"bgp": [hostname, hostname], "cfgm": - hostname, "collector": hostname}, "type": "compute"}, - {"params": {"collector": hostname}, "type": "cfgm"}, - {"params": {"cfgm": hostname}, "type": "webui"}, - {"type": "collector"} - ] - }] - self.host_data[single_node] = json_data['hosts'][0] - return json_data - # end _create_prov_data - - def get_pwd(self): - if 'EMAIL_PWD' in os.environ: - self.p = os.environ.get('EMAIL_PWD') - else: - self.p = getpass.getpass( - prompt='Enter password for ' + self.mailSender + ' : ') - # end get_pwd - - def get_node_name(self, ip): - return self.host_data[ip]['name'] - - def _get_stress_test_summary(self): - cwd = os.getcwd() - log_path = ('%s' + '/logs/') % cwd - for file in os.listdir(log_path): - if file.startswith("results_summary") and file.endswith(".txt"): - file_fq_name = log_path + '/' + file - f = open(file_fq_name, 'r') - file_contents = f.read() - f.close() - return file_contents - # end _get_stress_test_summary - - def _get_phy_topology_detail(self): - detail = '' - compute_nodes = [self.get_node_name(x) for x in self.compute_ips] - bgp_nodes = [self.get_node_name(x) for x in self.bgp_ips] - 
collector_nodes = [self.get_node_name(x) for x in self.collector_ips] - cfgm_nodes = [self.get_node_name(x) for x in self.cfgm_ips] - webui_node = self.get_node_name(self.webui_ip) - ext_rtr = unicode(self.ext_rtr.strip('[()]').split(',')[0]) - phy_dev = [] - phy_dev = self.physical_routers_data.keys() - phy_dev.append(ext_rtr) - if self.orch == 'openstack': - openstack_node = self.get_node_name(self.openstack_ip) - database_nodes = [self.get_node_name(x) for x in self.database_ips] - - newline = '
' - detail = newline - detail += 'Config Nodes : %s %s' % (cfgm_nodes, newline) - detail += 'Control Nodes : %s %s' % (bgp_nodes, newline) - detail += 'Compute Nodes : %s %s' % (compute_nodes, newline) - if self.orch == 'openstack': - detail += 'Openstack Node : %s %s' % (openstack_node, newline) - detail += 'WebUI Node : %s %s' % (webui_node, newline) - detail += 'Analytics Nodes : %s %s' % (collector_nodes, newline) - detail += 'Physical Devices : %s %s' % (phy_dev, newline) - if self.ui_browser: - detail += 'Browser : %s %s' % (self.ui_browser, newline) - return detail - # end _get_phy_topology_detail - - def write_report_details(self): - - phy_topology = self._get_phy_topology_detail() - details_h = open(self.report_details_file, 'w') - config = ConfigParser.ConfigParser() - config.add_section('Test') - config.set('Test', 'Build', self.build_id) - config.set('Test', 'Distro_Sku', self.setup_detail) - config.set('Test', 'timestamp', self.ts) - config.set('Test', 'Report', self.html_log_link) - config.set('Test', 'LogsLocation', self.log_link) - config.set('Test', 'Cores', self.get_cores()) - - if (self.sm_pkg or self.contrail_pkg or self.puppet_pkg): - config.set('Test', 'sm_pkg', self.sm_pkg) - config.set('Test', 'contrail_pkg', self.contrail_pkg) - config.set('Test', 'puppet_pkg', self.puppet_pkg) - - if self.bgp_stress: - bgp_stress_test_summary = self._get_stress_test_summary() - config.set('Test', 'BGP Stress Test Summary', bgp_stress_test_summary) - config.set('Test', 'Topology', phy_topology) - config.set('Test', 'logScenario', self.log_scenario) - if self.ui_browser: - config.set('Test', 'Browser', self.ui_browser) - - debug_logs_location = '' - if self.jenkins_trigger: - debug_logs_location = "/cs-shared/test_runs" \ - "/%s/%s" % (self.host_data[self.cfgm_ips[0]]['name'], self.ts) - config.set('Test', 'CoreLocation', debug_logs_location) - config.write(details_h) - details_h.close() - # end - - def get_build_id(self): - if self.build_id: - return 
self.build_id - build_id = None - cmd = 'contrail-version | grep contrail-config | head -1 | awk \'{print $2}\'' - alt_cmd = 'contrail-version | grep contrail-nodemgr | head -1 | awk \'{print $2}\'' - tries = 50 - while not build_id and tries: - try: - build_id = self.run_cmd_on_server(self.cfgm_ips[0], cmd) - if not build_id: - build_id = self.run_cmd_on_server( - self.cfgm_ips[0], alt_cmd) - except NetworkError, e: - time.sleep(1) - pass - tries -= 1 - build_sku = self.get_os_env("SKU") - if build_sku is None: - build_sku=get_build_sku(self.openstack_ip,self.host_data[self.openstack_ip]['password']) - if (build_id.count('.') > 2): - build_id = build_id.rsplit('.', 2)[0] - return [build_id.rstrip('\n'), build_sku] - - def get_distro(self): - if self.distro: - return self.distro - cmd = ''' - if [ -f /etc/lsb-release ]; then (cat /etc/lsb-release | grep DISTRIB_DESCRIPTION | cut -d "=" -f2 ) - else - cat /etc/redhat-release | sed s/\(Final\)// - fi - ''' - try: - self.distro = self.run_cmd_on_server(self.cfgm_ips[0], cmd) - self.distro = self.distro.replace(')', '') - self.distro = self.distro.replace('(', '') - except NetworkError, e: - self.distro = '' - return self.distro - # end get_distro - - def run_cmd_on_server(self, server_ip, issue_cmd, username=None, password=None, pty=True): - if server_ip in self.host_data.keys(): - if not username: - username = self.host_data[server_ip]['username'] - if not password: - password = self.host_data[server_ip]['password'] - with hide('everything'): - with settings( - host_string='%s@%s' % (username, server_ip), password=password, - warn_only=True, abort_on_prompts=False): - output = run('%s' % (issue_cmd), pty=pty) - return output - # end run_cmd_on_server - - def get_cores(self): - '''Get the list of cores in all the nodes in the test setup - ''' - self.cores = {} - for host in self.host_ips: - username = self.host_data[host]['username'] - password = self.host_data[host]['password'] - core = self.get_cores_node(host, 
username, password) - if core: - self.cores.update({host: core.split()}) - # end for - return self.cores - - def get_cores_node(self, node_ip, user, password): - """Get the list of cores in one of the nodes in the test setup. - """ - cores = {} - with hide('everything'): - with settings( - host_string='%s@%s' % (user, node_ip), password=password, - warn_only=True, abort_on_prompts=False): - with cd(CORE_DIR): - core = run("ls core.* 2>/dev/null") - return core - -# end - -# accept sanity_params.ini, report_details.ini, result.xml - - -def main(arg1): - obj = ContrailTestInit(arg1) - obj.setUp() - # obj.upload_to_webserver(arg2) - obj.get_cores() -if __name__ == "__main__": - main(sys.argv[1]) diff --git a/tools/send_mail.py b/tools/send_mail.py deleted file mode 100644 index d33f4c0a7..000000000 --- a/tools/send_mail.py +++ /dev/null @@ -1,55 +0,0 @@ -from email.mime.text import MIMEText -import smtplib -import subprocess -import ConfigParser -import sys -import os -from tcutils.util import read_config_option - -def send_mail(config_file, file_to_send, report_details): - config = ConfigParser.ConfigParser() - config.read(config_file) - report_config = ConfigParser.ConfigParser() - report_config.read(report_details) - distro_sku = report_config.get('Test','Distro_Sku') - smtpServer = read_config_option(config, 'Mail', 'server', '10.204.216.49') - smtpPort = read_config_option(config, 'Mail', 'port', '25') - mailSender = read_config_option(config, 'Mail', 'mailSender', 'contrailbuild@juniper.net') - mailTo = read_config_option(config, 'Mail', 'mailTo', 'contrail-build@juniper.net') - - if 'EMAIL_SUBJECT' in os.environ and os.environ['EMAIL_SUBJECT'] != '': - logScenario = os.environ.get('EMAIL_SUBJECT') - else: - logScenario = report_config.get('Test', 'logScenario') - - if not mailTo or not smtpServer: - print 'Mail destination not configured. 
Skipping' - return True - fp = open(file_to_send, 'rb') - msg = MIMEText(fp.read(), 'html') - fp.close() - - msg['Subject'] = '[Build %s] ' % ( - distro_sku) + logScenario + ' Report' - msg['From'] = mailSender - msg['To'] = mailTo - - s = None - try: - s = smtplib.SMTP(smtpServer, smtpPort) - except Exception, e: - print "Unable to connect to Mail Server" - return False - s.ehlo() - try: - s.sendmail(mailSender, mailTo.split(","), msg.as_string()) - s.quit() - except smtplib.SMTPException, e: - print 'Error while sending mail' - return False - return True -# end send_mail - -if __name__ == "__main__": - #send_mail('sanity_params.ini','report/junit-noframes.html') - send_mail(sys.argv[1], sys.argv[2], sys.argv[3]) diff --git a/tools/stop_on_fail.py b/tools/stop_on_fail.py deleted file mode 100644 index c97f1c753..000000000 --- a/tools/stop_on_fail.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/python -# -# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. -# - -import sys -import argparse -import ConfigParser -from lxml import etree as ET - -class PassParcentageCalculator(object): - - def __init__(self, args_str=None): - self._args = None - if not args_str: - args_str = ' '.join(sys.argv[1:]) - self._parse_args(args_str) - - self.stop_on_failure() - - # end __init__ - - def get_test_count(self,doc): - return int(self.get_attr_from_xml(doc,'tests')) - # end get_test_count - - def get_failure_count(self,doc): - return int(self.get_attr_from_xml(doc,'failures')) - # end get_failure_count - - def get_attr_from_xml(self,doc,attr): - root = doc.getroot() - count = root.get(attr) - return count - # end get_attr_from_xml - - def calculate_pass_parcentage(self,files): - self.test_count = 0 - self.fail_count = 0 - for file in self._args.files: - doc = ET.parse(file) - self.test_count += self.get_test_count(doc) - self.fail_count += self.get_failure_count(doc) - try: - self.percentage=(float(self.fail_count)*100/float(self.test_count)) - except Exception as e: - 
print 'Probably division by 0' - self.percentage = 0 - # end calculate_pass_parcentage - - def stop_on_failure(self): - files = self._args.files - self.calculate_pass_parcentage(files) - if self.percentage >= int(self._args.threshold): - print 'Failed tests %s percent corssed the expected limit %s percent'%(str(self.percentage),str(self._args.threshold)) - sys.exit(1) - else: - print 'Failures within limit %s percent'%(str(self._args.threshold)) - sys.exit(0) - # end stop_on_failure - - def _parse_args(self, args_str): - ''' - Eg. python stop_on_fail.py - --files result.xml - --threshold 12 - ''' - # Source any specified config/ini file - # Turn off help, so we print all options in response to -h - conf_parser = argparse.ArgumentParser(add_help=False) - - conf_parser.add_argument("-c", "--conf_file", - help="Specify config file", metavar="FILE") - args, remaining_argv = conf_parser.parse_known_args(args_str.split()) - - defaults = { - 'files': 'result.xml', - 'threshold': '12', - } - - if args.conf_file: - config = ConfigParser.SafeConfigParser() - config.read([args.conf_file]) - defaults.update(dict(config.items("DEFAULTS"))) - - # Override with CLI options - # Don't surpress add_help here so it will handle -h - parser = argparse.ArgumentParser( - # Inherit options from config_parser - parents=[conf_parser], - # print script description with -h/--help - description=__doc__, - # Don't mess with format of description - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - parser.set_defaults(**defaults) - - parser.add_argument( - "--files",nargs='+' - ,help="Files to be checked for test failed counts") - parser.add_argument("--threshold", help="Percentage of tests expected to be failed") - - self._args = parser.parse_args(remaining_argv) - - # end _parse_args - -# end class PassParcentageCalculator - - -def main(args_str=None): - PassParcentageCalculator(args_str) -# end main - -if __name__ == "__main__": - main() diff --git a/tools/tor/cacert.pem 
b/tools/tor/cacert.pem deleted file mode 100644 index 307c4add1..000000000 --- a/tools/tor/cacert.pem +++ /dev/null @@ -1,70 +0,0 @@ -Certificate: - Data: - Version: 1 (0x0) - Serial Number: 1 (0x1) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, ST=CA, O=Open vSwitch, OU=switchca, CN=OVS switchca CA Certificate (2015 May 26 19:45:13) - Validity - Not Before: May 26 14:15:13 2015 GMT - Not After : May 23 14:15:13 2025 GMT - Subject: C=US, ST=CA, O=Open vSwitch, OU=switchca, CN=OVS switchca CA Certificate (2015 May 26 19:45:13) - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (2048 bit) - Modulus: - 00:cd:04:88:31:46:06:3f:a9:bf:6f:f3:4a:e7:f5: - 13:6c:62:57:7c:2a:1c:17:7e:79:b3:90:8e:06:70: - 98:a9:15:98:cc:2f:0e:32:ab:45:cc:4d:a9:af:62: - 7c:33:42:1d:48:09:da:37:0a:80:57:ef:78:19:fd: - 3e:8f:01:21:d0:fc:7e:23:fe:90:1e:7b:ee:6e:55: - de:3e:16:55:85:3f:61:18:8e:4e:19:21:43:db:ce: - 4b:9f:38:0b:60:58:02:7c:55:89:44:b6:53:6c:c0: - 40:35:a2:3a:ad:fc:a9:ff:ba:4f:f3:d1:59:d7:2c: - f2:1a:eb:eb:46:0f:f6:89:35:6e:30:e6:49:a8:82: - e9:29:6d:40:bc:22:1d:8d:f7:1e:5c:45:f2:ee:e4: - 7a:13:b9:8d:b1:e4:9c:75:b2:a8:3e:51:28:0a:d4: - 54:1f:69:1f:69:34:54:8a:d0:ec:98:f5:3f:de:84: - c7:79:6b:fa:42:7e:1f:35:ec:e8:8e:7a:96:19:44: - 20:2b:fb:f0:3f:c1:90:70:ef:33:4a:9b:81:13:9d: - 1a:6d:0c:30:b6:f7:5b:49:ee:8e:c4:07:97:2a:21: - 85:b4:25:14:29:78:74:89:fe:7e:d2:07:da:76:89: - 9b:93:a9:de:0c:49:2b:d3:17:96:cf:b2:9d:0e:61: - 94:ef - Exponent: 65537 (0x10001) - Signature Algorithm: sha1WithRSAEncryption - ba:be:0b:30:62:cd:07:b0:bd:81:67:59:5f:71:d4:f0:d9:4f: - ac:ee:b5:80:59:a7:52:c3:8a:ca:bd:70:88:57:3e:b8:49:a0: - 39:62:71:40:40:4b:a9:9a:d7:ee:9a:27:23:97:dc:61:e6:44: - 14:27:61:15:42:9a:7d:47:f3:12:81:e4:96:a7:6e:39:39:b3: - 70:15:a4:1a:0f:e3:92:f1:9b:80:2c:b3:24:1f:74:95:a8:22: - 43:62:52:15:d5:03:13:0f:53:76:76:11:59:bd:1d:6b:58:1d: - fd:d7:e2:86:80:db:fa:96:49:e4:a9:d1:04:43:ed:fe:c0:d2: - d4:52:63:d4:ba:1a:2c:c8:da:43:2a:35:49:98:de:0f:f1:60: 
- 6f:8d:65:6e:42:31:1d:4a:93:77:cf:62:7e:5b:66:84:7f:75: - c7:ac:22:65:76:0f:83:a8:33:10:e3:30:ff:80:37:7a:12:3e: - d3:28:dd:09:51:2a:35:9e:82:9d:a8:fb:c8:e9:2b:07:2c:fe: - bb:19:67:38:72:33:88:4d:fa:c6:61:76:50:64:e7:2e:7f:7c: - 65:2d:72:d6:11:65:d6:c5:16:94:99:89:ea:ea:7f:1c:e8:47: - dc:f0:1d:4e:6f:75:fb:6d:71:af:84:4a:c3:0c:23:15:04:4a: - c0:db:12:ec ------BEGIN CERTIFICATE----- -MIIDeDCCAmACAQEwDQYJKoZIhvcNAQEFBQAwgYExCzAJBgNVBAYTAlVTMQswCQYD -VQQIEwJDQTEVMBMGA1UEChMMT3BlbiB2U3dpdGNoMREwDwYDVQQLEwhzd2l0Y2hj -YTE7MDkGA1UEAxMyT1ZTIHN3aXRjaGNhIENBIENlcnRpZmljYXRlICgyMDE1IE1h -eSAyNiAxOTo0NToxMykwHhcNMTUwNTI2MTQxNTEzWhcNMjUwNTIzMTQxNTEzWjCB -gTELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUwEwYDVQQKEwxPcGVuIHZTd2l0 -Y2gxETAPBgNVBAsTCHN3aXRjaGNhMTswOQYDVQQDEzJPVlMgc3dpdGNoY2EgQ0Eg -Q2VydGlmaWNhdGUgKDIwMTUgTWF5IDI2IDE5OjQ1OjEzKTCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBAM0EiDFGBj+pv2/zSuf1E2xiV3wqHBd+ebOQjgZw -mKkVmMwvDjKrRcxNqa9ifDNCHUgJ2jcKgFfveBn9Po8BIdD8fiP+kB577m5V3j4W -VYU/YRiOThkhQ9vOS584C2BYAnxViUS2U2zAQDWiOq38qf+6T/PRWdcs8hrr60YP -9ok1bjDmSaiC6SltQLwiHY33HlxF8u7kehO5jbHknHWyqD5RKArUVB9pH2k0VIrQ -7Jj1P96Ex3lr+kJ+HzXs6I56lhlEICv78D/BkHDvM0qbgROdGm0MMLb3W0nujsQH -lyohhbQlFCl4dIn+ftIH2naJm5Op3gxJK9MXls+ynQ5hlO8CAwEAATANBgkqhkiG -9w0BAQUFAAOCAQEAur4LMGLNB7C9gWdZX3HU8NlPrO61gFmnUsOKyr1wiFc+uEmg -OWJxQEBLqZrX7ponI5fcYeZEFCdhFUKafUfzEoHklqduOTmzcBWkGg/jkvGbgCyz -JB90lagiQ2JSFdUDEw9TdnYRWb0da1gd/dfihoDb+pZJ5KnRBEPt/sDS1FJj1Loa -LMjaQyo1SZjeD/Fgb41lbkIxHUqTd89ifltmhH91x6wiZXYPg6gzEOMw/4A3ehI+ -0yjdCVEqNZ6Cnaj7yOkrByz+uxlnOHIziE36xmF2UGTnLn98ZS1y1hFl1sUWlJmJ -6up/HOhH3PAdTm91+21xr4RKwwwjFQRKwNsS7A== ------END CERTIFICATE----- diff --git a/tools/tor/contrail-ovs-tool.sh b/tools/tor/contrail-ovs-tool.sh deleted file mode 100644 index a635d7176..000000000 --- a/tools/tor/contrail-ovs-tool.sh +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env bash -# Tool to bringup and manage openvswitch switches -# Currenlty supported on Ubuntu 14.04 only -# Usage Example: -# bash -x 
contrail-ovs-tool.sh --name br0 -t 10.204.216.195 -r ssl:10.204.216.184:6632 -p /root/sc-privkey.pem -c /root/sc-cert.pem -b /tmp/br0-cacert.pem -T init - -# Note : Only one instance of openvswitch is runnable on a node -# An attempt was made where multiple independent openvswitch could run on the same node -# The intention was to use the same node for multiple testbeds -# But once a second ovs-vswitchd/ovsdb-server was started, the bridge interfaces were -# getting removed from kernel and would never get added again -# Possibly, similar to http://openvswitch.org/pipermail/discuss/2013-April/009623.html -# - -ovs_path="/usr/share/openvswitch/" -function usage { - echo "Usage: $0 [OPTION]..." - echo "Setup openvswitch" - - echo "-n, --name Name of the openvswitch" - echo "-t, --tunnel-ip Tunnel IP " - echo "-R, --restart Restart ovs processes" - echo "-r, --remote Remote ip (ptcp/ssl connect string)" - echo "-p, --privkey private key file path" - echo "-c, --certprivkey cert for private key file path" - echo "-b, --bootstrap-ca-cert Bootstrap CA Cert file path" - echo "-T, --task one of stop, start, restart, init" - echo "" -} - - -if ! 
options=$(getopt -o hn:t:Rr:p:c:b:T: -l help,name:,tunnel-ip:,restart,remote:,privkey:,certprivkey:,bootstrap-ca-cert:task: -- "$@") -then - # parse error - usage - exit 1 -fi - -restart=0 -task="init_ovs" - -eval set -- $options -while [ $# -gt 0 ]; do - case "$1" in - -h|--help) usage; exit;; - -n|--name) name=$2; shift;; - -t|--tunnel-ip) tunnel_ip=$2; shift;; - -R|--restart) restart=1;; - -r|--remote) remote=$2; shift;; - -p|--privkey) privkey=$2; shift;; - -c|--certprivkey) certprivkey=$2; shift;; - -b|--bootstrap-ca-cert) bootstrap_ca=$2; shift;; - -T|--task) task=$2; shift;; - esac - shift -done - -echo "remote : $remote" -echo "privkey: $privkey" -echo "pubkey: $certprivkey" -echo "Boostrap-ca-cert : $bootstrap_ca" -echo "tunnel ip : $tunnel_ip" -echo "name : $name" - -function die -{ - local message=$1 - [ -z "$message" ] && message="Died" - echo "${BASH_SOURCE[1]}: line ${BASH_LINENO[0]}: ${FUNCNAME[1]}: $message." >&2 - exit 1 -} - -function add_repo { - repo_string="deb http://ppa.launchpad.net/vshn/openvswitch/ubuntu trusty main" - grep -q "$repo_string" /etc/apt/sources.list || (echo "$repo_string" >> /etc/apt/sources.list && apt-get update) -} - -function install_openvswitch { - apt-get -y --force-yes install openvswitch-switch openvswitch-common openvswitch-vtep openvswitch-datapath-dkms python-openvswitch || die "Unable to install openvswitch" -} - -function start_vswitch_procs { - cmd_args="" - if test -n $privkey; then - cmd_args=$cmd_args" -p "$privkey - fi - if test -n $certprivkey; then - cmd_args=$cmd_args" -c "$certprivkey - fi - if test -n $bootstrap_ca; then - cmd_args=$cmd_args" --bootstrap-ca-cert="$bootstrap_ca - fi - ovsdb-server --pidfile=/var/run/openvswitch/ovsdb-server-${name}.pid --detach --log-file=/var/log/openvswitch/ovsdb-server-${name}.log -vinfo --remote=punix:/var/run/openvswitch/db-${name}.sock --remote=db:hardware_vtep,Global,managers --remote=$remote $cmd_args /etc/openvswitch/ovs-${name}.db 
/etc/openvswitch/vtep-${name}.db - common_arg=" --db unix:/var/run/openvswitch/db-${name}.sock " - #ovs-vsctl $common_arg set-controller $name punix:/var/run/openvswitch/${name}.controller - sleep 5 - ovs-vswitchd --log-file=/var/log/openvswitch/ovs-vswitchd-${name}.log -vinfo --pidfile=ovs-vswitchd-${name}.pid unix:/var/run/openvswitch/db-${name}.sock --detach - sleep 5 - ovs-vsctl $common_arg add-br $name - ifconfig $name up - vtep-ctl $common_arg add-ps $name - vtep-ctl $common_arg set Physical_Switch $name tunnel_ips=$tunnel_ip - python $ovs_path/scripts/ovs-vtep $common_arg --log-file=/var/log/openvswitch/ovs-vtep-${name}.log -v info --pidfile=/var/run/openvswitch/ovs-vtep-${name}.pid --detach $name -} - -function stop_vswitch_procs { - # Stop ovsdb-server - pid_folder="/var/run/openvswitch" - pkill -f ovs-${name}.db - rm -f $pid_folder/ovsdb-server-${name}.pid - # Stop ovs-vswitchd - pkill -f db-${name}.sock - rm -f $pid_folder/ovs-vswitchd-${name}.pid - # Stop ovs-vtep - pkill -f ovs-vtep-${name}.pid - rm -f $pid_folder/ovs-vtep-${name}.pid - service openvswitch-switch stop - sleep 2 -} - -function setup_openvswitch { - stop_vswitch_procs - rm -f /etc/openvswitch/ovs-${name}*.db /etc/openvswitch/vtep-${name}.db - ovsdb-tool create /etc/openvswitch/ovs-${name}.db $ovs_path/vswitch.ovsschema - ovsdb-tool create /etc/openvswitch/vtep-${name}.db $ovs_path/vtep.ovsschema - - start_vswitch_procs - -} - -function check_supported_platform { - if [ -f /etc/lsb-release ]; then - grep -q "14.04" /etc/lsb-release || die "Supported only on Ubuntu 14.04 " - else - die "Supported only on Ubuntu 14.04 " - fi -} - -function init_ovs { - check_supported_platform - add_repo - echo "manual" > /etc/init/openvswitch-vswitch.override - install_openvswitch - setup_openvswitch -} - -function restart_ovs { - stop_vswitch_procs - start_vswitch_procs -} - -function stop_ovs { - stop_vswitch_procs -} - -function start_ovs { - start_vswitch_procs -} - - -case $task in - "init") init_ovs 
- ;; - "restart") restart_ovs - ;; - "stop") stop_ovs - ;; - "start") start_ovs - ;; -esac diff --git a/tools/tor/ovs-vtep b/tools/tor/ovs-vtep deleted file mode 100755 index 260d5fe21..000000000 --- a/tools/tor/ovs-vtep +++ /dev/null @@ -1,531 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2013 Nicira, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Limitations: -# - Doesn't support multicast other than "unknown-dst" - -import argparse -import re -import subprocess -import sys -import time -import types - -import ovs.dirs -import ovs.util -import ovs.daemon -import ovs.unixctl.server -import ovs.vlog - - -VERSION = "0.99" - -root_prefix = "" -db = "" - -__pychecker__ = 'no-reuseattr' # Remove in pychecker >= 0.8.19. 
-vlog = ovs.vlog.Vlog("ovs-vtep") -exiting = False - -Tunnel_Ip = "" -Lswitches = {} -Bindings = {} -ls_count = 0 -tun_id = 0 - -def call_prog(prog, args_list): - cmd = [prog, "-vconsole:off"] + args_list - output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate() - if len(output) == 0 or output[0] == None: - output = "" - else: - output = output[0].strip() - return output - -def ovs_vsctl(args): - if db: - args = ' --db=%s ' % (db) + args - return call_prog("ovs-vsctl", args.split()) - -def ovs_ofctl(args): - return call_prog("ovs-ofctl", args.split()) - -def vtep_ctl(args): - if db: - args = ' --db=%s ' % (db) + args - return call_prog("vtep-ctl", args.split()) - - -def unixctl_exit(conn, unused_argv, unused_aux): - global exiting - exiting = True - conn.reply(None) - - -class Logical_Switch(object): - def __init__(self, ls_name): - global ls_count - self.name = ls_name - ls_count += 1 - self.short_name = "vtep_ls" + str(ls_count) - vlog.info("creating lswitch %s (%s)" % (self.name, self.short_name)) - self.ports = {} - self.tunnels = {} - self.local_macs = set() - self.remote_macs = {} - self.unknown_dsts = set() - self.tunnel_key = 0 - self.setup_ls() - - def __del__(self): - vlog.info("destroying lswitch %s" % self.name) - - def setup_ls(self): - column = vtep_ctl("--columns=tunnel_key find logical_switch " - "name=%s" % self.name) - tunnel_key = column.partition(":")[2].strip() - if (tunnel_key and type(eval(tunnel_key)) == types.IntType): - self.tunnel_key = tunnel_key - vlog.info("using tunnel key %s in %s" - % (self.tunnel_key, self.name)) - else: - self.tunnel_key = 0 - vlog.warn("invalid tunnel key for %s, using 0" % self.name) - - ovs_vsctl("--may-exist add-br %s" % self.short_name) - ovs_vsctl("br-set-external-id %s vtep_logical_switch true" - % self.short_name) - ovs_vsctl("br-set-external-id %s logical_switch_name %s" - % (self.short_name, self.name)) - - vtep_ctl("clear-local-macs %s" % self.name) - vtep_ctl("add-mcast-local %s 
unknown-dst %s" % (self.name, Tunnel_Ip)) - - ovs_ofctl("del-flows %s" % self.short_name) - ovs_ofctl("add-flow %s priority=0,action=drop" % self.short_name) - - def update_flood(self): - flood_ports = self.ports.values() - - # Traffic flowing from one 'unknown-dst' should not be flooded to - # port belonging to another 'unknown-dst'. - for tunnel in self.unknown_dsts: - port_no = self.tunnels[tunnel][0] - ovs_ofctl("add-flow %s table=1,priority=1,in_port=%s,action=%s" - % (self.short_name, port_no, ",".join(flood_ports))) - - # Traffic coming from a VTEP physical port should only be flooded to - # one 'unknown-dst' and to all other physical ports that belong to that - # VTEP device and this logical switch. - for tunnel in self.unknown_dsts: - port_no = self.tunnels[tunnel][0] - flood_ports.append(port_no) - break - - ovs_ofctl("add-flow %s table=1,priority=0,action=%s" - % (self.short_name, ",".join(flood_ports))) - - def add_lbinding(self, lbinding): - vlog.info("adding %s binding to %s" % (lbinding, self.name)) - port_no = ovs_vsctl("get Interface %s ofport" % lbinding) - self.ports[lbinding] = port_no - ovs_ofctl("add-flow %s in_port=%s,action=learn(table=1," - "priority=1000,idle_timeout=300,cookie=0x5000," - "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," - "output:NXM_OF_IN_PORT[]),resubmit(,1)" - % (self.short_name, port_no)) - - self.update_flood() - - def del_lbinding(self, lbinding): - vlog.info("removing %s binding from %s" % (lbinding, self.name)) - port_no = self.ports[lbinding] - ovs_ofctl("del-flows %s in_port=%s" % (self.short_name, port_no)); - del self.ports[lbinding] - self.update_flood() - - def add_tunnel(self, tunnel): - global tun_id - vlog.info("adding tunnel %s" % tunnel) - encap, ip = tunnel.split("/") - - if encap != "vxlan_over_ipv4": - vlog.warn("unsupported tunnel format %s" % encap) - return - - tun_id += 1 - tun_name = "vx" + str(tun_id) - - ovs_vsctl("add-port %s %s -- set Interface %s type=vxlan " - "options:key=%s options:remote_ip=%s" - % 
(self.short_name, tun_name, tun_name, self.tunnel_key, ip)) - - for i in range(10): - port_no = ovs_vsctl("get Interface %s ofport" % tun_name) - if port_no != "-1": - break - elif i == 9: - vlog.warn("couldn't create tunnel %s" % tunnel) - ovs_vsctl("del-port %s %s" % (self.short_name, tun_name)) - return - - # Give the system a moment to allocate the port number - time.sleep(0.5) - - self.tunnels[tunnel] = (port_no, tun_name) - - ovs_ofctl("add-flow %s table=0,priority=1000,in_port=%s," - "actions=resubmit(,1)" - % (self.short_name, port_no)) - - def del_tunnel(self, tunnel): - vlog.info("removing tunnel %s" % tunnel) - - port_no, tun_name = self.tunnels[tunnel] - ovs_ofctl("del-flows %s table=0,in_port=%s" - % (self.short_name, port_no)) - ovs_vsctl("del-port %s %s" % (self.short_name, tun_name)) - - del self.tunnels[tunnel] - - def update_local_macs(self): - flows = ovs_ofctl("dump-flows %s cookie=0x5000/-1,table=1" - % self.short_name).splitlines() - macs = set() - for f in flows: - mac = re.split(r'.*dl_dst=(.*) .*', f) - if len(mac) == 3: - macs.add(mac[1]) - - for mac in macs.difference(self.local_macs): - vlog.info("adding local ucast %s to %s" % (mac, self.name)) - vtep_ctl("add-ucast-local %s %s %s" % (self.name, mac, Tunnel_Ip)) - - for mac in self.local_macs.difference(macs): - vlog.info("removing local ucast %s from %s" % (mac, self.name)) - vtep_ctl("del-ucast-local %s %s" % (self.name, mac)) - - self.local_macs = macs - - def add_remote_mac(self, mac, tunnel): - port_no = self.tunnels.get(tunnel, (0,""))[0] - if not port_no: - return - - ovs_ofctl("add-flow %s table=1,priority=1000,dl_dst=%s,action=%s" - % (self.short_name, mac, port_no)) - - def del_remote_mac(self, mac): - ovs_ofctl("del-flows %s table=1,dl_dst=%s" % (self.short_name, mac)) - - def update_remote_macs(self): - remote_macs = {} - unknown_dsts = set() - tunnels = set() - parse_ucast = True - - mac_list = vtep_ctl("list-remote-macs %s" % self.name).splitlines() - for line in mac_list: 
- if (line.find("mcast-mac-remote") != -1): - parse_ucast = False - continue - - entry = re.split(r' (.*) -> (.*)', line) - if len(entry) != 4: - continue - - if parse_ucast: - remote_macs[entry[1]] = entry[2] - else: - if entry[1] != "unknown-dst": - continue - - unknown_dsts.add(entry[2]) - - tunnels.add(entry[2]) - - old_tunnels = set(self.tunnels.keys()) - - for tunnel in tunnels.difference(old_tunnels): - self.add_tunnel(tunnel) - - for tunnel in old_tunnels.difference(tunnels): - self.del_tunnel(tunnel) - - for mac in remote_macs.keys(): - if (self.remote_macs.get(mac) != remote_macs[mac]): - self.add_remote_mac(mac, remote_macs[mac]) - - for mac in self.remote_macs.keys(): - if not remote_macs.has_key(mac): - self.del_remote_mac(mac) - - self.remote_macs = remote_macs - - if (self.unknown_dsts != unknown_dsts): - self.unknown_dsts = unknown_dsts - self.update_flood() - - def update_stats(self): - # Map Open_vSwitch's "interface:statistics" to columns of - # vtep's logical_binding_stats. Since we are using the 'interface' from - # the logical switch to collect stats, packets transmitted from it - # is received in the physical switch and vice versa. - stats_map = {'tx_packets':'packets_to_local', - 'tx_bytes':'bytes_to_local', - 'rx_packets':'packets_from_local', - 'rx_bytes':'bytes_from_local'} - - # Go through all the logical switch's interfaces that end with "-l" - # and copy the statistics to logical_binding_stats. 
- for interface in self.ports.iterkeys(): - if not interface.endswith("-l"): - continue - vlan, pp_name, logical = interface.split("-") - uuid = vtep_ctl("get physical_port %s vlan_stats:%s" - % (pp_name, vlan)) - if not uuid: - continue - - for (mapfrom, mapto) in stats_map.iteritems(): - value = ovs_vsctl("get interface %s statistics:%s" - % (interface, mapfrom)).strip('"') - vtep_ctl("set logical_binding_stats %s %s=%s" - % (uuid, mapto, value)) - - def run(self): - self.update_local_macs() - self.update_remote_macs() - self.update_stats() - -def add_binding(ps_name, binding, ls): - vlog.info("adding binding %s" % binding) - - vlan, pp_name = binding.split("-") - pbinding = binding+"-p" - lbinding = binding+"-l" - - # Create a patch port that connects the VLAN+port to the lswitch. - # Do them as two separate calls so if one side already exists, the - # other side is created. - ovs_vsctl("add-port %s %s " - " -- set Interface %s type=patch options:peer=%s" - % (ps_name, pbinding, pbinding, lbinding)) - ovs_vsctl("add-port %s %s " - " -- set Interface %s type=patch options:peer=%s" - % (ls.short_name, lbinding, lbinding, pbinding)) - - port_no = ovs_vsctl("get Interface %s ofport" % pp_name) - patch_no = ovs_vsctl("get Interface %s ofport" % pbinding) - vlan_ = vlan.lstrip('0') - if vlan_: - ovs_ofctl("add-flow %s in_port=%s,dl_vlan=%s,action=strip_vlan,%s" - % (ps_name, port_no, vlan_, patch_no)) - ovs_ofctl("add-flow %s in_port=%s,action=mod_vlan_vid:%s,%s" - % (ps_name, patch_no, vlan_, port_no)) - else: - ovs_ofctl("add-flow %s in_port=%s,action=%s" - % (ps_name, port_no, patch_no)) - ovs_ofctl("add-flow %s in_port=%s,action=%s" - % (ps_name, patch_no, port_no)) - - # Create a logical_bindings_stats record. 
- if not vlan_: - vlan_ = "0" - vtep_ctl("set physical_port %s vlan_stats:%s=@stats --\ - --id=@stats create logical_binding_stats packets_from_local=0"\ - % (pp_name, vlan_)) - - ls.add_lbinding(lbinding) - Bindings[binding] = ls.name - -def del_binding(ps_name, binding, ls): - vlog.info("removing binding %s" % binding) - - vlan, pp_name = binding.split("-") - pbinding = binding+"-p" - lbinding = binding+"-l" - - port_no = ovs_vsctl("get Interface %s ofport" % pp_name) - patch_no = ovs_vsctl("get Interface %s ofport" % pbinding) - vlan_ = vlan.lstrip('0') - if vlan_: - ovs_ofctl("del-flows %s in_port=%s,dl_vlan=%s" - % (ps_name, port_no, vlan_)) - ovs_ofctl("del-flows %s in_port=%s" % (ps_name, patch_no)) - else: - ovs_ofctl("del-flows %s in_port=%s" % (ps_name, port_no)) - ovs_ofctl("del-flows %s in_port=%s" % (ps_name, patch_no)) - - ls.del_lbinding(lbinding) - - # Destroy the patch port that connects the VLAN+port to the lswitch - ovs_vsctl("del-port %s %s -- del-port %s %s" - % (ps_name, pbinding, ls.short_name, lbinding)) - - # Remove the record that links vlan with stats in logical_binding_stats. 
- vtep_ctl("remove physical_port %s vlan_stats %s" % (pp_name, vlan)) - - del Bindings[binding] - -def handle_physical(ps_name): - # Gather physical ports except the patch ports we created - ovs_ports = ovs_vsctl("list-ports %s" % ps_name).split() - ovs_port_set = set([port for port in ovs_ports if port[-2:] != "-p"]) - - vtep_pp_set = set(vtep_ctl("list-ports %s" % ps_name).split()) - - for pp_name in ovs_port_set.difference(vtep_pp_set): - vlog.info("adding %s to %s" % (pp_name, ps_name)) - vtep_ctl("add-port %s %s" % (ps_name, pp_name)) - - for pp_name in vtep_pp_set.difference(ovs_port_set): - vlog.info("deleting %s from %s" % (pp_name, ps_name)) - vtep_ctl("del-port %s %s" % (ps_name, pp_name)) - - new_bindings = set() - for pp_name in vtep_pp_set: - binding_set = set(vtep_ctl("list-bindings %s %s" - % (ps_name, pp_name)).splitlines()) - - for b in binding_set: - vlan, ls_name = b.split() - if ls_name not in Lswitches: - Lswitches[ls_name] = Logical_Switch(ls_name) - - binding = "%s-%s" % (vlan, pp_name) - ls = Lswitches[ls_name] - new_bindings.add(binding) - - if Bindings.has_key(binding): - if Bindings[binding] == ls_name: - continue - else: - del_binding(ps_name, binding, Lswitches[Bindings[binding]]) - - add_binding(ps_name, binding, ls) - - - dead_bindings = set(Bindings.keys()).difference(new_bindings) - for binding in dead_bindings: - ls_name = Bindings[binding] - ls = Lswitches[ls_name] - - del_binding(ps_name, binding, ls) - - if not len(ls.ports): - ovs_vsctl("del-br %s" % Lswitches[ls_name].short_name) - del Lswitches[ls_name] - -def setup(ps_name): - br_list = ovs_vsctl("list-br").split() - if (ps_name not in br_list): - ovs.util.ovs_fatal(0, "couldn't find OVS bridge %s" % ps_name, vlog) - - vtep_ctl('set physical_switch %s description="OVS VTEP Emulator"' % (ps_name)) - tunnel_ips = vtep_ctl("get physical_switch %s tunnel_ips" - % ps_name).strip('[]"').split(", ") - if len(tunnel_ips) != 1 or not tunnel_ips[0]: - ovs.util.ovs_fatal(0, "exactly 
one 'tunnel_ips' should be set", vlog) - - global Tunnel_Ip - Tunnel_Ip = tunnel_ips[0] - - ovs_ofctl("del-flows %s" % ps_name) - - # Remove any logical bridges from the previous run - for br in br_list: - if ovs_vsctl("br-get-external-id %s vtep_logical_switch" - % br) == "true": - # Remove the remote side of any logical switch - ovs_ports = ovs_vsctl("list-ports %s" % br).split() - for port in ovs_ports: - port_type = ovs_vsctl("get Interface %s type" - % port).strip('"') - if port_type != "patch": - continue - - peer = ovs_vsctl("get Interface %s options:peer" - % port).strip('"') - if (peer): - ovs_vsctl("del-port %s" % peer) - - ovs_vsctl("del-br %s" % br) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("ps_name", metavar="PS-NAME", - help="Name of physical switch.") - parser.add_argument("--root-prefix", metavar="DIR", - help="Use DIR as alternate root directory" - " (for testing).") - parser.add_argument("--version", action="version", - version="%s %s" % (ovs.util.PROGRAM_NAME, VERSION)) - parser.add_argument("--db", metavar="FILE", - help="Database connection method" - "Example : unix:/var/run/openvswitch/db-br0.sock") - - ovs.vlog.add_args(parser) - ovs.daemon.add_args(parser) - args = parser.parse_args() - ovs.vlog.handle_args(args) - ovs.daemon.handle_args(args) - - global root_prefix - global db - if args.root_prefix: - root_prefix = args.root_prefix - if args.db: - db = args.db - - ps_name = args.ps_name - - ovs.daemon.daemonize() - - ovs.unixctl.command_register("exit", "", 0, 0, unixctl_exit, None) - error, unixctl = ovs.unixctl.server.UnixctlServer.create(None, - version=VERSION) - if error: - ovs.util.ovs_fatal(error, "could not create unixctl server", vlog) - - setup(ps_name) - - while True: - unixctl.run() - if exiting: - break - - handle_physical(ps_name) - - for ls_name, ls in Lswitches.items(): - ls.run() - - poller = ovs.poller.Poller() - unixctl.wait(poller) - poller.timer_wait(1000) - poller.block() - - 
unixctl.close() - -if __name__ == '__main__': - try: - main() - except SystemExit: - # Let system.exit() calls complete normally - raise - except: - vlog.exception("traceback") - sys.exit(ovs.daemon.RESTART_EXIT_CODE) diff --git a/tools/tor/sc-cert.pem b/tools/tor/sc-cert.pem deleted file mode 100644 index b9a5d0a4d..000000000 --- a/tools/tor/sc-cert.pem +++ /dev/null @@ -1,70 +0,0 @@ -Certificate: - Data: - Version: 1 (0x0) - Serial Number: 3 (0x3) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, ST=CA, O=Open vSwitch, OU=switchca, CN=OVS switchca CA Certificate (2015 May 26 19:45:13) - Validity - Not Before: May 26 14:19:05 2015 GMT - Not After : May 23 14:19:05 2025 GMT - Subject: C=US, ST=CA, O=Open vSwitch, OU=Open vSwitch certifier, CN=sc id:bd335593-9da1-41c7-a216-486dae463d61 - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (2048 bit) - Modulus: - 00:d7:dc:be:4b:4d:2f:3a:52:f4:88:89:a9:dd:6e: - a1:b1:2d:34:fb:59:a5:43:d9:06:be:6c:ef:a7:4e: - 7b:14:f9:18:d1:48:7c:b0:00:aa:77:8e:93:f4:a7: - 1c:a7:a7:89:40:da:27:17:19:99:8f:7f:91:f6:4c: - c5:a1:92:55:12:66:7d:23:00:54:2a:8b:b9:9b:26: - 96:d0:88:eb:ab:42:27:d5:fa:89:14:20:61:64:0b: - dc:7d:ac:34:38:71:f1:95:bb:c2:06:5a:8b:c1:69: - 56:b3:67:7f:1c:72:f5:48:0f:78:bf:70:a6:b2:6a: - a8:99:50:3f:38:4e:a6:cc:d5:91:27:d4:5a:f6:b4: - 5e:bc:e8:5a:25:89:f4:68:c9:a7:a9:40:16:f4:38: - 5d:76:13:0d:04:16:f6:72:05:d2:42:c2:58:05:9d: - f4:a5:84:a2:19:ec:84:4c:15:49:e0:21:81:7d:d4: - 97:24:69:34:1b:40:fb:7b:2f:f0:9e:c9:1d:cc:44: - 18:4f:bb:cb:4f:51:05:b2:2b:ed:34:a6:a7:e7:7c: - 9b:15:d2:dc:b9:5f:e8:84:fd:d4:e1:7f:ba:80:ec: - 2f:39:be:d5:2b:e3:4e:39:d8:9e:66:82:bd:fe:7f: - 7d:18:53:f5:f9:99:6c:08:09:6b:a4:5d:30:28:56: - e6:ed - Exponent: 65537 (0x10001) - Signature Algorithm: sha1WithRSAEncryption - 98:ac:70:73:75:cd:a9:31:5c:a0:9d:a6:57:5f:9b:82:c4:d3: - 13:44:d3:dc:8a:01:0b:a1:13:03:d3:a2:65:3f:1a:6f:07:79: - d8:e5:bb:72:64:03:fc:ee:ce:a7:5e:36:e1:99:e2:8a:46:97: - 
0e:0f:b3:ca:c1:81:d4:eb:f7:f0:df:92:ca:4c:88:ad:82:1e: - 98:1f:7d:f1:20:42:5e:ad:18:ad:44:68:76:bb:f4:cf:15:92: - 19:d8:a0:7c:39:5e:96:07:21:ba:a7:66:a5:98:d0:fc:40:88: - 09:e9:9d:46:d3:a8:c8:92:b8:1c:d1:ba:7b:ff:45:85:fd:1e: - 03:4a:a6:88:72:dc:9d:93:2c:50:7d:b3:ee:db:98:c4:97:11: - 76:e5:3b:97:4d:c2:61:3a:f7:c3:2c:70:13:66:10:33:70:93: - de:b0:f7:be:31:9f:88:f2:42:40:e2:de:85:83:79:14:0c:d2: - 81:2d:33:a1:81:e0:52:b8:bc:31:48:e0:2f:06:b5:39:f9:50: - 6a:43:be:36:ff:e4:14:34:0a:d5:24:10:ba:b0:8e:10:01:da: - f8:44:f4:fd:5b:00:e8:b8:6e:a7:3a:9e:ac:dc:39:bf:dd:bb: - ab:35:70:08:2e:d3:c8:cf:82:d3:1c:63:8c:9e:fb:8c:ae:8d: - ff:d6:28:2e ------BEGIN CERTIFICATE----- -MIIDfjCCAmYCAQMwDQYJKoZIhvcNAQEFBQAwgYExCzAJBgNVBAYTAlVTMQswCQYD -VQQIEwJDQTEVMBMGA1UEChMMT3BlbiB2U3dpdGNoMREwDwYDVQQLEwhzd2l0Y2hj -YTE7MDkGA1UEAxMyT1ZTIHN3aXRjaGNhIENBIENlcnRpZmljYXRlICgyMDE1IE1h -eSAyNiAxOTo0NToxMykwHhcNMTUwNTI2MTQxOTA1WhcNMjUwNTIzMTQxOTA1WjCB -hzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUwEwYDVQQKEwxPcGVuIHZTd2l0 -Y2gxHzAdBgNVBAsTFk9wZW4gdlN3aXRjaCBjZXJ0aWZpZXIxMzAxBgNVBAMTKnNj -IGlkOmJkMzM1NTkzLTlkYTEtNDFjNy1hMjE2LTQ4NmRhZTQ2M2Q2MTCCASIwDQYJ -KoZIhvcNAQEBBQADggEPADCCAQoCggEBANfcvktNLzpS9IiJqd1uobEtNPtZpUPZ -Br5s76dOexT5GNFIfLAAqneOk/SnHKeniUDaJxcZmY9/kfZMxaGSVRJmfSMAVCqL -uZsmltCI66tCJ9X6iRQgYWQL3H2sNDhx8ZW7wgZai8FpVrNnfxxy9UgPeL9wprJq -qJlQPzhOpszVkSfUWva0XrzoWiWJ9GjJp6lAFvQ4XXYTDQQW9nIF0kLCWAWd9KWE -ohnshEwVSeAhgX3UlyRpNBtA+3sv8J7JHcxEGE+7y09RBbIr7TSmp+d8mxXS3Llf -6IT91OF/uoDsLzm+1SvjTjnYnmaCvf5/fRhT9fmZbAgJa6RdMChW5u0CAwEAATAN -BgkqhkiG9w0BAQUFAAOCAQEAmKxwc3XNqTFcoJ2mV1+bgsTTE0TT3IoBC6ETA9Oi -ZT8abwd52OW7cmQD/O7Op1424ZniikaXDg+zysGB1Ov38N+SykyIrYIemB998SBC -Xq0YrURodrv0zxWSGdigfDlelgchuqdmpZjQ/ECICemdRtOoyJK4HNG6e/9Fhf0e -A0qmiHLcnZMsUH2z7tuYxJcRduU7l03CYTr3wyxwE2YQM3CT3rD3vjGfiPJCQOLe -hYN5FAzSgS0zoYHgUri8MUjgLwa1OflQakO+Nv/kFDQK1SQQurCOEAHa+ET0/VsA -6LhupzqerNw5v927qzVwCC7TyM+C0xxjjJ77jK6N/9YoLg== ------END CERTIFICATE----- diff --git a/tools/tor/sc-privkey.pem b/tools/tor/sc-privkey.pem 
deleted file mode 100644 index ff6e30b0a..000000000 --- a/tools/tor/sc-privkey.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA19y+S00vOlL0iImp3W6hsS00+1mlQ9kGvmzvp057FPkY0Uh8 -sACqd46T9Kccp6eJQNonFxmZj3+R9kzFoZJVEmZ9IwBUKou5myaW0Ijrq0In1fqJ -FCBhZAvcfaw0OHHxlbvCBlqLwWlWs2d/HHL1SA94v3CmsmqomVA/OE6mzNWRJ9Ra -9rRevOhaJYn0aMmnqUAW9DhddhMNBBb2cgXSQsJYBZ30pYSiGeyETBVJ4CGBfdSX -JGk0G0D7ey/wnskdzEQYT7vLT1EFsivtNKan53ybFdLcuV/ohP3U4X+6gOwvOb7V -K+NOOdieZoK9/n99GFP1+ZlsCAlrpF0wKFbm7QIDAQABAoIBAQCUAf9Ykmu3ifmx -RPL7qUekvGJ/lVAD3WMKXpdUGDO8ha/Gj/UumMZJg7FfJtY2/mDV3r6+0AbCpa3n -O2ynBDbgjvVucmNDaNqyzxrJkZhFrO6zgT1MV4dvmdJtOQ31Retnx32Z7au1ygxw -pp45W3ljF3iriwpBe3Bu7t7qNMOZ3fNj30lubQ4aAD4ipkgWeDBhCUU2g/PzGN8g -XyWEU2Utx8qwH2ZY36Jm2hBr8YrUR51Tq1hs+moD5nQcKJx/q9967T3PzRgDxpzm -EmloH21OVEvUNQa0L4hYa8H+12ZTojOl0QQpv4WxsM2XTTDoZez6k6k49dxIwn3P -o8vc7GfhAoGBAO+fK5OVzyzifx+k2NO8GFp1KcaJ90uuUod8+rJuQjo1tGhRBBpi -9RrOmfasApKavuGN1ULPHeA0ip81fGxQ0+HjCcCeFROfEyWswd7vCRcJvQuHoltl -n3Bs6/kSwqBoeUCunsf3SuFgbz8jsVkCQz9yfqk/hcwnVLsMiTqsHCO3AoGBAOad -1ju/M1ToDDUQDHI6ekxI85hhc4205taZq26Ea3EIA0TXgBC4wCOsgq9/4qPp5oMp -DftGpfA8e8uZcWHkoT4PWRojQWc98fEV3wGttMdXm/QjfckOiDPd+47n+tPv1nlG -x8J9ofQoRDtYuV/wqmmr2HfoEWgxG9YXBW826DJ7AoGBAK7dh11yi/gDaiXblOgf -cd5BdjUO5cry8gMx73lbATWdj4Q46zeeU6Q2dSEQMehuyTGs4Z6ZqwG9FhRNYflh -mt5TYwCP3G2vPrfVtL1Nz2JilvTdDFyKcqoGssXXLPUGiMXtKK5hBuImD8kR8RA1 -mIGqukgktS1g3nKKGXMZuodNAoGAKpMrEN6smFk6n7U1xwTavSieqBwdnCSwlR/L -kVyrBTQIfnYWc3/YIP3hJotCiSZOYxp6NVCGpAJWjHDoiL0Ps4hb18J4eZnrB6oV -AJdUD5Ux+nFu8GZZaNHrdieHnAbkHoGdsV9GPfANILOxjW+91kJedUyHoiemzvBz -FWQam+8CgYBsSzjikwDStwyo64vZZD5EawvxLkvodcOpZQrPKYrQQ409xwBOa74G -CFbWRnt1yZxjVQFDZdb7xKVguKA89mOZvYqoNBOCGdTYJv4eJMCiAnzWpOHGC2As -HiyrF5C22WXoxAx4ySIwv7wLGFtCevGjdmgUAPmoz7gzXZOkIbdMrw== ------END RSA PRIVATE KEY----- diff --git a/tools/tor/setup_tors.py b/tools/tor/setup_tors.py deleted file mode 100644 index 4b0bdc935..000000000 --- a/tools/tor/setup_tors.py +++ /dev/null @@ -1,45 +0,0 @@ 
-import os -import sys -import json -import ConfigParser -import ast - -from tor_fixture import ToRFixtureFactory -from physical_router_fixture import PhysicalRouterFixture -from common.contrail_test_init import ContrailTestInit - -if __name__ == "__main__": - init_obj = ContrailTestInit(sys.argv[1]) - init_obj.read_prov_file() - for (device, device_dict) in init_obj.physical_routers_data.iteritems(): - if device_dict['type'] == 'tor': - tor_obj = ToRFixtureFactory.get_tor( - device_dict['name'], - device_dict['mgmt_ip'], - vendor=device_dict['vendor'], - ssh_username=device_dict['ssh_username'], - ssh_password=device_dict['ssh_password'], - tunnel_ip=device_dict['tunnel_ip'], - ports=device_dict['ports'], - tor_ovs_port=device_dict['tor_ovs_port'], - tor_ovs_protocol=device_dict['tor_ovs_protocol'], - controller_ip=device_dict['controller_ip'], - bringup=True) - tor_obj.setUp() - if device_dict['type'] == 'router': - phy_router_obj = PhysicalRouterFixture( - device_dict['name'], device_dict['mgmt_ip'], - model=device_dict['model'], - vendor=device_dict['vendor'], - asn=device_dict['asn'], - ssh_username=device_dict['ssh_username'], - ssh_password=device_dict['ssh_password'], - mgmt_ip=device_dict['mgmt_ip'], - tunnel_ip=device_dict['tunnel_ip'], - ports=device_dict['ports'], - ) - phy_router_obj.setUp() - # end for - - - diff --git a/tools/update_testsuite_properties.py b/tools/update_testsuite_properties.py deleted file mode 100644 index af69f4454..000000000 --- a/tools/update_testsuite_properties.py +++ /dev/null @@ -1,107 +0,0 @@ -from email.mime.text import MIMEText -import smtplib -import subprocess -import ConfigParser -import xml.etree.ElementTree as ET -import sys - -def update_xml(config_file, xmlfile): - config = ConfigParser.ConfigParser() - config.read(config_file) - build_id = config.get('Test', 'Build') - timestamp = config.get('Test', 'timestamp') - report_loc = config.get('Test', 'Report') - topology = config.get('Test', 'Topology') - - result_tree 
= ET.parse(xmlfile) - ts_root = result_tree.getroot() - properties_elem = ET.Element('properties') - - try: - logs_location = config.get('Test', 'LogsLocation') - prop_elem = ET.Element('property') - prop_elem.set('name','LogsLocation') - prop_elem.set('value', logs_location) - properties_elem.append(prop_elem) - except ConfigParser.NoOptionError,e: - pass - try: - core_location = config.get('Test', 'CoreLocation') - prop_elem = ET.Element('property') - prop_elem.set('name','CoreLocation') - prop_elem.set('value', core_location) - properties_elem.append(prop_elem) - except ConfigParser.NoOptionError,e: - pass - - prop_elem = ET.Element('property') - prop_elem.set('name','Build') - prop_elem.set('value', build_id) - properties_elem.append(prop_elem) - - prop_elem = ET.Element('property') - prop_elem.set('name','Report') - prop_elem.set('value', report_loc) - properties_elem.append(prop_elem) - - prop_elem = ET.Element('property') - prop_elem.set('name','Topology') - prop_elem.set('value', topology) - properties_elem.append(prop_elem) - - try: - sm_pkg = config.get('Test', 'sm_pkg') - prop_elem = ET.Element('property') - prop_elem.set('name','sm_pkg') - prop_elem.set('value', sm_pkg) - properties_elem.append(prop_elem) - except ConfigParser.NoOptionError,e: - pass - - try: - contrail_pkg = config.get('Test', 'contrail_pkg') - prop_elem = ET.Element('property') - prop_elem.set('name','contrail_pkg') - prop_elem.set('value', contrail_pkg) - properties_elem.append(prop_elem) - except ConfigParser.NoOptionError,e: - pass - - try: - puppet_pkg = config.get('Test', 'puppet_pkg') - prop_elem = ET.Element('property') - prop_elem.set('name','puppet_pkg') - prop_elem.set('value', puppet_pkg) - properties_elem.append(prop_elem) - except ConfigParser.NoOptionError,e: - pass - - ts_root.append(properties_elem) - - try: - cores = config.get('Test', 'cores') - prop_elem = ET.Element('property') - prop_elem.set('name','cores') - prop_elem.set('value', cores) - 
properties_elem.append(prop_elem) - except ConfigParser.NoOptionError,e: - pass - try: - prop_elem = ET.Element('property') - prop_elem.set('name','BGP_STRESS_TEST_SUMMARY') - bgp_stress_test = config.get('Test', 'bgp stress test summary') - bgp_stress_test = "
".join(bgp_stress_test.split("\n")) - prop_elem.set('value', bgp_stress_test) - properties_elem.append(prop_elem) - except ConfigParser.NoOptionError,e: - pass - result_tree.write(xmlfile) -# end - - - -# end update_xml - -if __name__ == "__main__": - # accept report_details.ini, result.xml - update_xml(sys.argv[1], sys.argv[2]) diff --git a/tools/upload_to_webserver.py b/tools/upload_to_webserver.py deleted file mode 100644 index d1716a53e..000000000 --- a/tools/upload_to_webserver.py +++ /dev/null @@ -1,156 +0,0 @@ -import sys -from fabric.api import env, run , local -from fabric.operations import get, put -from fabric.context_managers import settings, hide -import os -import ConfigParser -import subprocess -from tcutils.util import read_config_option - -#monkey patch subprocess.check_output cos its not supported in 2.6 -if "check_output" not in dir( subprocess ): # duck punch it in! - def f(*popenargs, **kwargs): - if 'stdout' in kwargs: - raise ValueError('stdout argument not allowed, it will be overridden.') - process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) - output, unused_err = process.communicate() - retcode = process.poll() - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - raise subprocess.CalledProcessError(retcode, cmd) - return output - subprocess.check_output = f - -def get_os_env(var, default=''): - if var in os.environ: - return os.environ.get(var) - else: - return default -# end get_os_env - -def upload_to_webserver(config_file, report_config_file, elem): - - jenkins_trigger = get_os_env('JENKINS_TRIGGERED') - config = ConfigParser.ConfigParser() - config.read(config_file) - web_server = read_config_option(config, 'WebServer', 'host', None) - web_server_report_path = read_config_option(config, 'WebServer', - 'reportPath', None) - web_server_log_path = read_config_option(config, 'WebServer', - 'logPath', None) - web_server_username = read_config_option(config, 'WebServer', 'username', - None) - 
web_server_password = read_config_option(config, 'WebServer', 'password', - None) - http_proxy = read_config_option(config, 'proxy', 'proxy_url', None) - - if not (web_server and web_server_report_path and web_server_log_path and \ - web_server_username and web_server_password): - print "Not all webserver details are available. Skipping upload." - return False - report_config = ConfigParser.ConfigParser() - report_config.read(report_config_file) - ts = report_config.get('Test', 'timestamp') - log_scenario = report_config.get('Test', 'logScenario') - build_id = report_config.get('Test', 'build') - distro_sku = report_config.get('Test','distro_sku') - branch = get_os_env('BRANCH', 'unknown-branch') - - test_type = get_os_env('TEST_TYPE','daily') - build_folder = build_id + '_' + ts - web_server_path = web_server_log_path + '/' + build_folder + '/' - - log = 'logs' - print "Web server log path %s"%web_server_path - - try: - with hide('everything'): - with settings(host_string=web_server, - user=web_server_username, - password=web_server_password, - warn_only=True, abort_on_prompts=False): - if jenkins_trigger: - # define report path - sanity_report = '%s/%s' % ( - web_server_report_path, test_type) - # report name in format - # email_subject_line+time_stamp - report_name = '%s %s' % (distro_sku.replace('"',''), - log_scenario) - report_file = "%s-%s.html" % ( - '-'.join(report_name.split(' ')), ts) - # create report path if doesnt exist - run('mkdir -p %s' % (sanity_report)) - # create folder by release name passed from jenkins - run('cd %s; mkdir -p %s' % - (sanity_report, branch)) - # create folder by build_number and create soft - # link to original report with custom name - run('cd %s/%s; mkdir -p %s; cd %s; ln -s %s/junit-noframes.html %s' - % (sanity_report, branch, build_id, build_id, - web_server_path, report_file)) - - if http_proxy: - # Assume ssl over http-proxy and use sshpass. 
- branch = build_id.split('-')[0] - subprocess.check_output( - "sshpass -p %s ssh %s@%s mkdir -p %s" % - (web_server_password, web_server_username, - web_server, web_server_path), - shell=True) - subprocess.check_output( - "sshpass -p %s scp %s %s@%s:%s" % - (web_server_password, elem, - web_server_username, web_server, - web_server_path), shell=True) - ci_job_type = os.environ.get('TAGS', None) - if 'ci_sanity_WIP' in ci_job_type: - web_server_path_ci = web_server_log_path + '/CI_WIP_JOBS/' - else: - web_server_path_ci = web_server_log_path + '/CI_JOBS/' - web_server_path_ci_build = web_server_path_ci + branch + '/' - web_server_path = web_server_path_ci_build + build_folder + '/' - subprocess.check_output( - "sshpass -p %s ssh %s@%s mkdir -p %s" % - (web_server_password, web_server_username, - web_server, web_server_path_ci), - shell=True) - subprocess.check_output( - "sshpass -p %s ssh %s@%s mkdir -p %s" % - (web_server_password, web_server_username, - web_server, web_server_path_ci_build), - shell=True) - subprocess.check_output( - "sshpass -p %s ssh %s@%s mkdir -p %s" % - (web_server_password, web_server_username, - web_server, web_server_path), - shell=True) - subprocess.check_output( - "sshpass -p %s scp -r /root/contrail-test/logs %s %s@%s:%s" % - (web_server_password, elem, - web_server_username, web_server, - web_server_path), shell=True) - else: - run('mkdir -p %s' % (web_server_path)) - output = put(elem, web_server_path) - put('logs', web_server_path) - put('result*.xml', web_server_path) - put(report_config_file, web_server_path) - if jenkins_trigger: - #run('cd %s/%s; mkdir -p %s; cd %s; ln -s %s/junit-noframes.html %s' - run('cd %s/%s; mkdir -p %s; cd %s; cp %s/%s .' 
- % (sanity_report, branch, build_id, build_id, - web_server_path, report_config_file)) - - except Exception,e: - print 'Error occured while uploading the logs to the Web Server ',e - return False - return True - -# end - -if __name__ == "__main__": - # accept sanity_params.ini, report_details.ini, result.xml - upload_to_webserver(sys.argv[1], sys.argv[2], sys.argv[3])