diff --git a/python/avi/migrationtools/ansible/ansible_config_converter.py b/python/avi/migrationtools/ansible/ansible_config_converter.py index 629d60bec6..28af1ceece 100644 --- a/python/avi/migrationtools/ansible/ansible_config_converter.py +++ b/python/avi/migrationtools/ansible/ansible_config_converter.py @@ -232,7 +232,9 @@ def build_ansible_objects(self, obj_type, objs, ansible_dict, inuse_list): Returns Ansible dict """ + for obj in objs: + task = deepcopy(obj) # Added tag for checking object ref. if isinstance(task, str): @@ -499,6 +501,7 @@ def generate_traffic(self, ansible_dict, f5server, f5username, f5password, def write_ansible_playbook(self, f5server=None, f5user=None, f5password=None, instance_type=None): + """ Create the ansible playbook based on output json :param f5server: Ip of f5 server @@ -514,7 +517,7 @@ def write_ansible_playbook(self, f5server=None, f5user=None, % self.outdir # Get the reference object list for not_in_use tag. inuse_list = [] - if not self.not_in_use: + if 'VirtualService' in self.avi_cfg and not self.not_in_use: inuse_list = filter_for_vs(self.avi_cfg) ad = deepcopy(ansible_dict) generate_traffic_dict = deepcopy(ansible_dict) diff --git a/python/avi/migrationtools/avi_converter.py b/python/avi/migrationtools/avi_converter.py index 1be740c487..237b7dd164 100644 --- a/python/avi/migrationtools/avi_converter.py +++ b/python/avi/migrationtools/avi_converter.py @@ -24,6 +24,7 @@ class AviConverter(object): user = None password = None tenant = None + prefix = None def print_pip_and_controller_version(self): pass @@ -45,7 +46,7 @@ def process_for_utils(self, avi_config): avi_config = cp.patch() # Check if vs_filter args present then execute vs_filter.py with args if self.vs_filter: - avi_config = filter_for_vs(avi_config, self.vs_filter) + avi_config = filter_for_vs(avi_config, self.vs_filter, self.prefix) return avi_config def upload_config_to_controller(self, avi_config): diff --git 
a/python/avi/migrationtools/avi_migration_utils.py b/python/avi/migrationtools/avi_migration_utils.py index b811160cf7..657a69c7c2 100644 --- a/python/avi/migrationtools/avi_migration_utils.py +++ b/python/avi/migrationtools/avi_migration_utils.py @@ -1,13 +1,16 @@ # Copyright 2021 VMware, Inc. # SPDX-License-Identifier: Apache License 2.0 +import pandas import argparse +import ast import copy import getpass import logging import os import random import string +from functools import reduce from urllib.parse import urlparse, parse_qs from datetime import datetime from socket import gethostname @@ -16,6 +19,7 @@ import pexpect import yaml from OpenSSL import crypto +from openpyxl import load_workbook, Workbook import avi.migrationtools.f5_converter.converter_constants as conv_const @@ -27,6 +31,7 @@ warning_count = 0 error_count = 0 + def set_update_count(): global warning_count, error_count warning_count = 0 @@ -40,12 +45,13 @@ def update_count(type='warning'): elif type == 'error': error_count += 1 + def get_count(type='None'): if type == 'warning': return warning_count elif type == 'error': return error_count - return { 'warning': warning_count, 'error': error_count } + return {'warning': warning_count, 'error': error_count} class PasswordPromptAction(argparse.Action): @@ -58,6 +64,79 @@ def __call__(self, parser, args, values, option_string=None): class MigrationUtil(object): + def add_conv_status(self, **args): + pass + + def add_status_row(self, **args): + pass + + def get_conv_status(self, skipped, indirect_list, ignore_dict, input_object, + user_ignore=None, na_list=None): + """ + Update skipped list for conversion status + :param skipped: All skipped attributes after conversion + :param indirect_list: List of attrs to be mapped as indirect mapping + :param ignore_dict: Dict of default values for column skipped for defaults + :param input_object: Currant input object + :param user_ignore: List of attributes user wants not to be shown in skipped + :param 
na_list: List of attributes marked as not applicable + :return: Conversion status dict + """ + conv_status = dict() + user_ignore = [] if not user_ignore else user_ignore + na_list = [] if not na_list else na_list + + conv_status['user_ignore'] = [val for val in skipped if + val in user_ignore] + skipped = [attr for attr in skipped if attr not in user_ignore] + + conv_status['indirect'] = [val for val in skipped if + val in indirect_list] + skipped = [attr for attr in skipped if attr not in indirect_list] + + conv_status['na_list'] = [val for val in skipped if val in na_list] + skipped = [attr for attr in skipped if attr not in na_list] + + default_skip = [] + for key in ignore_dict.keys(): + val = input_object.get(key) + default_val = ignore_dict.get(key) + if key in skipped and val == default_val: + default_skip.append(key) + if default_skip: + skipped = [attr for attr in skipped if attr not in default_skip] + + conv_status['skipped'] = skipped + conv_status['default_skip'] = default_skip + if skipped: + status = conv_const.STATUS_PARTIAL + else: + status = conv_const.STATUS_SUCCESSFUL + conv_status['status'] = status + return conv_status + + def get_tenant_ref(self, name): + tenant = 'admin' + if name and name.startswith('/'): + parts = name.split('/', 2) + tenant = parts[1] + if not parts[2]: + LOG.warning('Invalid tenant ref : %s' % name) + name = parts[2] + elif name and '/' in name: + parts = name.split('/') + # Changed the index to get the tenant and name in case of + # prefixed name + tenant = parts[-2] + name = parts[-1] + if tenant.lower() == 'common': + tenant = 'admin' + if '/' in name: + name = name.split('/')[1] + if ' ' in tenant: + tenant = tenant.split(' ')[-1] + tenant.strip() + return tenant, name def create_self_signed_cert(self): # create a key pair @@ -96,6 +175,239 @@ def is_certificate_key_protected(self, key_file): except Exception as e: return False + def get_pool_skipped_list(self, avi_config, pool_group_name, csv_pool_rows, + 
csv_writer_dict_list, vs_ref, profile_csv_list): + """ + This method is used for getting pool skipped list. + :param avi_config: AVI dict + :param pool_group_name: Name of Pool group + :param csv_pool_rows: List of pool(NsxT type) csv rows + :param csv_writer_dict_list: List of nsxt csv rows + :param vs_ref: Name of VS + :param profile_csv_list: List of profile(NsxT type) csv rows + :return: + """ + pool_group_objects = list(filter(lambda pg: pg["name"] == pool_group_name, avi_config['PoolGroup'])) + pool_members = pool_group_objects[0]['members'] + skipped_setting = { + 'pools': [] + } + for pool_member in pool_members: + pool_name = self.get_name(pool_member['pool_ref']) + self.get_skipped_pool( + avi_config, pool_name, csv_pool_rows, csv_writer_dict_list, + vs_ref, profile_csv_list, skipped_setting) + if skipped_setting['pools']: + return skipped_setting + return None + + def get_skipped_pool(self, avi_config, pool_name, pool_csv_rows, + csv_writer_dict_list, vs_ref, profile_csv_list, + skipped_setting): + """ + This method get the skipped list for pool by going over the + references attached to it + :param avi_config: Converted Avi configuration + :param pool_name: name of the pool + :param pool_csv_rows: + :param csv_writer_dict_list: Result report dict + :param vs_ref: VS reference + :param profile_csv_list: + :param skipped_setting: User defined skipped settings + :return: skipped setting for pool + """ + pool_skipped_setting = {} + skipped_list = self.get_pool_skipped(pool_csv_rows, pool_name, vs_ref) + pool_object = [pool for pool in avi_config["Pool"] + if pool['name'] == pool_name] + if skipped_list: + pool_skipped_setting['pool_name'] = pool_name + pool_skipped_setting['pool_skipped_list'] = skipped_list + if pool_object: + if 'health_monitor_refs' in pool_object[0]: + health_monitor_skipped_setting = [] + for health_monitor_ref in pool_object[0]['health_monitor_refs']: + health_monitor_ref = self.get_name(health_monitor_ref) + monitor_csv_object = 
self.get_csv_object_list( + csv_writer_dict_list, ['monitor']) + skipped_list = self.get_csv_skipped_list( + monitor_csv_object, health_monitor_ref, vs_ref, + field_key='health_monitor') + if skipped_list: + health_monitor_skipped_setting.append( + {'health_monitor_name': health_monitor_ref, + 'monitor_skipped_list': skipped_list}) + if health_monitor_skipped_setting: + pool_skipped_setting['pool_name'] = pool_name + pool_skipped_setting['health_monitor'] = \ + health_monitor_skipped_setting + if pool_object[0].get('ssl_key_and_certificate_ref', None): + ssl_key_cert = self.get_name( + pool_object[0]['ssl_key_and_certificate_ref']) + LOG.debug('[SslKeyAndCertificate] certificate {}'.format(ssl_key_cert)) + sslkc_skip = self.get_csv_skipped_list( + profile_csv_list, ssl_key_cert, vs_ref, + field_key='ssl_cert_key') + if sslkc_skip: + LOG.debug('[SslKeyAndCertificate] Skipped Attribute {}'.format(sslkc_skip)) + pool_skipped_setting['pool_name'] = pool_name + pool_skipped_setting['ssl_key_and_certificate'] = sslkc_skip + else: + LOG.info('Ssl key and certificate ref is not found') + if pool_object[0].get('ssl_profile_ref', None): + name, skipped = self.get_ssl_profile_skipped( + profile_csv_list, pool_object[0]['ssl_profile_ref'], vs_ref) + if skipped: + pool_skipped_setting['pool_name'] = pool_name + pool_skipped_setting['ssl profile'] = {} + pool_skipped_setting['ssl profile']['name'] = name + pool_skipped_setting['ssl profile'][ + 'skipped_list'] = skipped + + if 'application_persistence_profile_ref' in pool_object[0] and \ + pool_object[0]['application_persistence_profile_ref']: + name, skipped = self.get_app_persistence_profile_skipped( + csv_writer_dict_list, pool_object[0], vs_ref) + if skipped: + pool_skipped_setting['pool_name'] = pool_name + pool_skipped_setting['Application Persistence profile'] = {} + pool_skipped_setting['Application Persistence profile'][ + 'name'] = name + pool_skipped_setting['Application Persistence profile'][ + 'skipped_list'] = 
skipped + + if pool_skipped_setting: + skipped_setting['pools'].append(pool_skipped_setting) + else: + LOG.debug('[PoolObject] Not Found for pool {}'.format(pool_name)) + + def get_pool_skipped(self, csv_objects, pool_name, vs_ref): + """ + This functions defines that get the skipped list of CSV row + :param csv_objects: CSV row of object from xlsx report + :param pool_name: Name of pool + :param vs_ref: Name of VS + :return: Skipped list of csv row + """ + + for csv_object in csv_objects: + avi_object = self.format_string_to_json(csv_object['Avi Object']) + if 'pools' in avi_object: + pool_object = [pool for pool in avi_object['pools'] + if pool['name'] == pool_name] + if pool_object: + return self.get_and_update_csv_row(csv_object, vs_ref) + + def get_csv_skipped_list(self, csv_objects, name_of_object, vs_ref, + field_key=None): + """ + This method is used for getting skipped list from vs. + :param csv_objects: CSV row of object from xlsx report + :param name_of_object: Name of object + :param vs_ref: Name of VS + :param field_key: Key fromm avi json which is specific for object type + :return: Return skipped attribute list + """ + for csv_object in csv_objects: + avi_objects = self.format_string_to_json(csv_object['Avi Object']) + if isinstance(avi_objects, dict): + avi_objects = [avi_objects] + if not avi_objects: + avi_objects = [] + for avi_object_json in avi_objects: + object_found = False + if field_key: + if field_key in avi_object_json and 'Duplicate' not in \ + avi_object_json[field_key] and ( + avi_object_json[field_key]['name'] == + name_of_object): + object_found = True + else: + if avi_object_json.get('name') and \ + avi_object_json['name'] == name_of_object: + object_found = True + + if object_found: + return self.get_and_update_csv_row(csv_object, vs_ref) + + def get_and_update_csv_row(self, csv_object, vs_ref): + """ + This function defines that update csv row. 
+ :param csv_object: csv object + :param vs_ref: Name of VS + :return: Skipped attribute list + """ + + if 'VS Reference' in csv_object and \ + vs_ref not in csv_object['VS Reference']: + csv_object['VS Reference'] += ',' + vs_ref + else: + csv_object['VS Reference'] = vs_ref + repls = ('[', ''), (']', '') + skipped_setting_csv = reduce( + lambda a, kv: a.replace(*kv), repls, csv_object['Skipped settings']) + if skipped_setting_csv: + return [skipped_setting_csv] + + def get_csv_object_list(self, csv_writer_dict_list, command_list): + """ + This method is used for getting csv object + :param csv_writer_dict_list: CSV row of object from xlsx report + :param command_list: List of netscaler commands + :return: List of CSV rows + """ + + csv_object = [row for row in csv_writer_dict_list if + row['Status'] in [conv_const.STATUS_PARTIAL, + conv_const.STATUS_SUCCESSFUL] and + '->' not in row['Avi Object'] and + row['NsxT type'] in command_list] + return csv_object + + def format_string_to_json(self, avi_string): + """ + This function defines that it convert string into json format to + convert into dict + :param avi_string: string to be converted + :return: Return converted string + """ + avi_string = avi_string.split('__/__')[0] + return ast.literal_eval(avi_string) + + def get_app_persistence_profile_skipped(self, csv_writer_dict_list, + pool_object, vs_ref): + """ + This functions defines that get the skipped list of CSV row + :param csv_writer_dict_list: List of csv rows + :param pool_object: object of pool + :param vs_ref: Name of VS + :return: profile name and skipped attribute list + """ + + app_persistence_profile_name = self.get_name( + pool_object['application_persistence_profile_ref']) + csv_object = self.get_csv_object_list(csv_writer_dict_list, + ['persistence']) + skipped_list = self.get_csv_skipped_list( + csv_object, app_persistence_profile_name, vs_ref, + field_key='app_per_profile') + return app_persistence_profile_name, skipped_list + + def 
get_ssl_profile_skipped(self, profile_csv_list, ssl_profile_ref, + vs_ref): + """ + This functions defines that get the skipped list of CSV row + :param profile_csv_list: List of profile(NsxT type) csv rows + :param ssl_profile_ref: Reference of ssl profile + :param vs_ref: Name of VS + :return: ssl profile name and skipped sttribute list + """ + + ssl_profile_name = self.get_name(ssl_profile_ref) + skipped_list = self.get_csv_skipped_list( + profile_csv_list, ssl_profile_name, vs_ref, field_key='ssl_profile') + return ssl_profile_name, skipped_list + def update_vs_complexity_level(self, vs_csv_row, virtual_service): """ This function defines that update complexity level of VS objects. @@ -108,9 +420,9 @@ def update_vs_complexity_level(self, vs_csv_row, virtual_service): """ if ('http_policies' in virtual_service and - virtual_service['http_policies']) or \ + virtual_service['http_policies']) or \ ('vs_datascripts' in virtual_service and - virtual_service['vs_datascripts']): + virtual_service['vs_datascripts']): vs_csv_row['Complexity Level'] = conv_const.COMPLEXITY_ADVANCED else: vs_csv_row['Complexity Level'] = conv_const.COMPLEXITY_BASIC @@ -153,10 +465,10 @@ def check_for_duplicates(self, src_obj, obj_list, obj_type, merge_object_mapping[obj_type]['no'] += 1 no = merge_object_mapping[obj_type]['no'] mid_name = ent_type and ('Merged-%s-%s-%s-%s' % (ent_type, - obj_type, ran_str, str(no))) or ( - 'Merged-%s-%s-%s' % (obj_type, ran_str, str(no))) - new_name = '%s-%s' %(prefix, mid_name) if prefix else \ - mid_name + obj_type, ran_str, str(no))) or ( + 'Merged-%s-%s-%s' % (obj_type, ran_str, str(no))) + new_name = '%s-%s' % (prefix, mid_name) if prefix else \ + mid_name tmp_obj["name"] = new_name return tmp_obj["name"], old_name return None, None @@ -217,7 +529,7 @@ def get_obj_type_from_ref(self, url): return url.split('/api/')[1].split('/')[0].split('?')[0] def get_object_ref(self, object_name, object_type, tenant='admin', - cloud_name='Default-Cloud', 
prefix=None): + cloud_name='Default-Cloud', prefix=None, cloud_tenant ="admin"): """ This function defines that to genarte object ref in the format of /api/object_type/?tenant=tenant_name&name=object_name&cloud=cloud_name @@ -233,7 +545,7 @@ def get_object_ref(self, object_name, object_type, tenant='admin', object_name = prefix + '-' + object_name cloud_supported_types = ['pool', 'poolgroup', 'vsvip', 'vrfcontext', - 'serviceenginegroup'] + 'serviceenginegroup', 'network'] if not cloud_name: cloud_name = "Default-Cloud" @@ -242,7 +554,7 @@ def get_object_ref(self, object_name, object_type, tenant='admin', if object_name not in tenants: tenants.append(object_name) elif object_type == 'cloud': - ref = '/api/%s/?tenant=admin&name=%s' % (object_type, object_name) + ref = '/api/%s/?tenant=%s&name=%s' % (object_type ,cloud_tenant, object_name) elif object_type == 'vrfcontext': ref = '/api/%s/?tenant=admin&name=%s&cloud=%s' % ( object_type, object_name, cloud_name) @@ -251,14 +563,14 @@ def get_object_ref(self, object_name, object_type, tenant='admin', object_type, tenant, object_name, cloud_name) else: ref = '/api/%s/?tenant=%s&name=%s' % ( - object_type, tenant, object_name) + object_type, tenant, object_name) # if cloud_name: # ref += '&cloud=%s' % cloud_name return ref # Print iterations progress def print_progress_bar(self, iteration, total, msg, prefix='', suffix='', - decimals=1, length=50, fill='#', printEnd = "\\r"): + decimals=1, length=50, fill='#', printEnd="\\"): """ Call in a loop to create terminal progress bar @params: @@ -294,9 +606,10 @@ def validate_value(self, entity_names, prop_name, value, limit_data, obj, valid = None new_value = value msgvar = valname and entity_names and '%s-->%s-->%s' % (valname, - '-->'.join(entity_names), prop_name) or valname and '%s-->%s' \ + '-->'.join(entity_names), + prop_name) or valname and '%s-->%s' \ % (valname, prop_name) or entity_names and '%s-->%s' % ( - '-->'.join(entity_names), prop_name) or '%s' % prop_name + 
'-->'.join(entity_names), prop_name) or '%s' % prop_name for key, val in limit_data.items(): pr = val.get(obj, {}) if not pr: @@ -309,9 +622,9 @@ def validate_value(self, entity_names, prop_name, value, limit_data, obj, break else: LOG.debug("Property '%s' is not present in generated yaml, reason " - "being the property doesn't have any attribute from the " - "list %s", msgvar, str(['default_value', 'range', - 'special_values', 'ref_type'])) + "being the property doesn't have any attribute from the " + "list %s", msgvar, str(['default_value', 'range', + 'special_values', 'ref_type'])) return None, None if new_value is not None: # commenting this since now in Python 3 strings are already in unicode format. @@ -360,7 +673,7 @@ def validate_value(self, entity_names, prop_name, value, limit_data, obj, LOG.debug("Value '%s' is fine", str(new_value)) else: LOG.debug("Type of value '%s' doesn't match with type '%s' " - "defined", str(type(new_value)), typ) + "defined", str(type(new_value)), typ) valid, new_value = None, None else: if p_key.get('required') == 'True': @@ -388,7 +701,7 @@ def get_to_prop(self, val, pr, entity_names, prop_name, limit_data): if v.get(ref): tr = v[ref] return self.get_to_prop(v, tr, entity_names, - prop_name, limit_data) + prop_name, limit_data) else: return else: @@ -432,9 +745,9 @@ def validate_prop(self, dictval, heir, limit_data, obj, valname=None): 'http_policy_set_ref', 'ssl_key_and_certificate_refs', 'vsvip_ref', 'description']: msvar = valname and heir and '%s-->%s-->%s' % (valname, - '-->'.join(heir), k) or valname and '%s-->%s' % ( - valname, k) or heir and '%s-->%s' % ('-->'.join(heir), - k) or '%s' % k + '-->'.join(heir), k) or valname and '%s-->%s' % ( + valname, k) or heir and '%s-->%s' % ('-->'.join(heir), + k) or '%s' % k LOG.debug("Skipping validation checks for '%s'", msvar) continue else: @@ -447,17 +760,17 @@ def validate_prop(self, dictval, heir, limit_data, obj, valname=None): heir and heir.pop() or None else: mgvar = 
valname and heir and '%s-->%s-->%s' % ( - valname, '-->'.join(heir), k) or valname \ + valname, '-->'.join(heir), k) or valname \ and '%s-->%s' % (valname, k) or heir and \ '%s-->%s' % ('-->'.join(heir), k) or \ '%s' % k LOG.debug("Property '%s' has value as a list %s, " - "not supported currently", mgvar, str(v)) - #valid, val = self.validate_value(heir, k, listval, - #limit_data) - #if valid is False: - #LOG.debug("Correcting the value for %s" % k) - #dictval[k] = val + "not supported currently", mgvar, str(v)) + # valid, val = self.validate_value(heir, k, listval, + # limit_data) + # if valid is False: + # LOG.debug("Correcting the value for %s" % k) + # dictval[k] = val elif isinstance(v, dict): k not in heir and heir.append(k) or None self.validate_prop(v, heir, limit_data, obj, valname) @@ -467,9 +780,9 @@ def validate_prop(self, dictval, heir, limit_data, obj, valname=None): obj, valname) if valid is False: mvar = valname and heir and '%s-->%s-->%s' % (valname, - '-->'.join(heir), k) or valname and '%s-->%s' % ( - valname, k) or heir and '%s-->%s' % ( - '-->'.join(heir), k) or '%s' % k + '-->'.join(heir), k) or valname and '%s-->%s' % ( + valname, k) or heir and '%s-->%s' % ( + '-->'.join(heir), k) or '%s' % k LOG.debug("Correcting the value for '%s' from '%s' to " "'%s'", mvar, str(v), str(val)) dictval[k] = val @@ -504,7 +817,7 @@ def make_graph(self, avi_config): """ avi_graph = nx.DiGraph() avi_graph.add_node('AVI', type='Tree') - for vs in avi_config['VirtualService']: + for vs in avi_config.get('VirtualService',[]): name = vs['name'] avi_graph.add_node(name, type='VS') avi_graph.add_edge('AVI', name) @@ -521,8 +834,10 @@ def find_and_add_ne(self, obj_dict, avi_config, avi_graph, vsname, depth): :param depth: Recursion depth to determine level in the vs reference tree """ + for key in obj_dict: - if (key.endswith('ref') and key not in ['cloud_ref', 'tenant_ref'])\ + + if (key.endswith('ref') and key not in ['cloud_ref', 'tenant_ref']) \ or key == 
'ssl_profile_name': if not obj_dict[key]: continue @@ -546,7 +861,7 @@ def find_and_add_ne(self, obj_dict, avi_config, avi_graph, vsname, depth): sername = obj_dict[key] if avi_graph.has_node(sername): node_type = [n[1]['type'] for n in list( - avi_graph.nodes().data()) if n[0] == sername] + avi_graph.nodes().data()) if n[0] == sername] node_type = '{}-{}'.format(node_type[0], 'Server') avi_graph.add_node(sername, type=node_type) avi_graph.add_edge(vsname, sername) @@ -557,7 +872,7 @@ def find_and_add_ne(self, obj_dict, avi_config, avi_graph, vsname, depth): rule_name = obj_dict[key] if avi_graph.has_node(rule_name): node_type = [n[1]['type'] for n in list( - avi_graph.nodes().data()) if n[0] == rule_name] + avi_graph.nodes().data()) if n[0] == rule_name] node_type = '{}-{}'.format(node_type[0], 'Rule') avi_graph.add_node(rule_name, type=node_type) avi_graph.add_edge(vsname, rule_name) @@ -586,7 +901,7 @@ def search_ne(self, entity, name, avi_graph, avi_config, depth, vsname): found_obj = found_obj[0] if avi_graph.has_node(name): nod_type = [node[1]['type'] for node in list( - avi_graph.nodes().data()) if node[0] == name] + avi_graph.nodes().data()) if node[0] == name] nod_type = '{}-{}'.format(nod_type[0], path_key_map[entity]) avi_graph.add_node(name, type=nod_type) avi_graph.add_edge(vsname, name) @@ -604,7 +919,9 @@ def search_ne(self, entity, name, avi_graph, avi_config, depth, vsname): entity, name)) return depth += 1 - new_name = found_obj.get('name') + new_name="" + if found_obj: + new_name = found_obj.get('name') if new_name: self.find_and_add_ne(found_obj, avi_config, avi_graph, new_name, depth) @@ -643,3 +960,458 @@ def clone_app_profile_for_vs(self, app_prof_ref, app_prof_obj, vs_name, new_ref = self.get_object_ref( new_profile_name, 'applicationprofile', tenant) return new_ref + + def add_complete_conv_status(self, output_dir, avi_config, report_name, + vs_level_status): + + global csv_writer_dict_list + global ptotal_count + for status in 
conv_const.STATUS_LIST: + status_list = [row for row in csv_writer_dict_list if + row['Status'] == status] + print('%s: %s' % (status, len(status_list))) + print("Writing Excel Sheet For Converted Configuration...") + ptotal_count = ptotal_count + len(csv_writer_dict_list) + if vs_level_status: + self.vs_per_skipped_setting_for_references(avi_config) + self.correct_vs_ref(avi_config) + else: + # Update the complexity level of VS as Basic or Advanced + self.vs_complexity_level() + self.write_status_report_and_pivot_table_in_xlsx( + output_dir, report_name, vs_level_status) + + def write_status_report_and_pivot_table_in_xlsx( + self, output_dir, report_name, vs_level_status): + """ + This function defines that add status sheet and pivot table sheet in + xlsx format + :param output_dir: Path of output directory + :param report_name: filename to write report + :param vs_level_status: Flag to include VS wise detailed status or not + :return: None + """ + global ppcount + global ptotal_count + # List of fieldnames for headers + if vs_level_status: + fieldnames = ['F5 type', 'F5 SubType', 'F5 ID', 'Status', + 'Skipped settings', 'Indirect mapping', + 'Not Applicable', 'User Ignored', + 'Skipped for defaults', 'Complexity Level', + 'VS Reference', 'Overall skipped settings', + 'Avi Object', 'Needs Review'] + else: + fieldnames = ['F5 type', 'F5 SubType', 'F5 ID', 'Status', + 'Skipped settings', 'Indirect mapping', + 'Not Applicable', + 'User Ignored', 'Skipped for defaults', + 'Complexity Level', 'Avi Object', 'Needs Review'] + + # xlsx workbook + report_path = output_dir + os.path.sep + "%s-ConversionStatus.xlsx" % \ + report_name + status_wb = Workbook(report_path) + # xlsx worksheet + status_ws = status_wb.add_worksheet("Status Sheet") + # Lock the first row of xls report. 
+ status_ws.freeze_panes(1, 0) + first_row = 0 + for header in fieldnames: + col = fieldnames.index(header) + status_ws.write(first_row, col, header) + row = 1 + for row_data in csv_writer_dict_list: + ppcount += 1 + for _key, _value in row_data.items(): + col = fieldnames.index(_key) + status_ws.write(row, col, _value) + # Added call for progress function. + msg = "excel sheet conversion started..." + self.print_progress_bar(ppcount, ptotal_count, msg, + prefix='Progress', suffix='') + row += 1 + status_wb.close() + # create dataframe for row list + df = pandas.DataFrame(csv_writer_dict_list, columns=fieldnames) + # create pivot table using pandas + pivot_table = \ + pandas.pivot_table(df, index=["Status", "F5 type", "F5 SubType"], + values=[], aggfunc=[len], fill_value=0) + # create dataframe for pivot table using pandas + pivot_df = pandas.DataFrame(pivot_table) + main_book = \ + load_workbook(report_path) + main_writer = pandas.ExcelWriter(report_path, engine='openpyxl') + main_writer.book = main_book + # Add pivot table in Pivot sheet + pivot_df.to_excel(main_writer, 'Pivot Sheet') + main_writer.save() + + def vs_complexity_level(self): + """ + This method calculate the complexity of vs. + :return: + """ + # Get the VS object list which is having status successful and partial. 
+ vs_csv_objects = [row for row in csv_writer_dict_list + if row['Status'] in [conv_const.STATUS_PARTIAL, + conv_const.STATUS_SUCCESSFUL] + and row['F5 type'] == 'virtual'] + for vs_csv_object in vs_csv_objects: + virtual_service = self.format_string_to_json( + vs_csv_object['Avi Object']) + # Update the complexity level of VS as Basic or Advanced + self.update_vs_complexity_level(vs_csv_object, virtual_service) + + def correct_vs_ref(self, avi_config): + """ + This method corrects the reference of VS to different objects + :param avi_config: avi configuration dict + :return: + """ + global csv_writer_dict_list + avi_graph = self.make_graph(avi_config) + csv_dict_sub = [row for row in csv_writer_dict_list if row[ + 'F5 type'] != 'virtual' and row['Status'] in + (conv_const.STATUS_PARTIAL, + conv_const.STATUS_SUCCESSFUL)] + for dict_row in csv_dict_sub: + obj = dict_row['Avi Object'] + vs = [] + if obj.startswith('{'): + obj = eval(obj) + for key in obj: + for objs in obj[key]: + self.add_vs_ref(objs, avi_graph, vs) + elif obj.startswith('['): + obj = eval(obj) + for objs in obj: + for key in objs: + objval = objs[key] + self.add_vs_ref(objval, avi_graph, vs) + if vs: + dict_row['VS Reference'] = str(list(set(vs))) + else: + dict_row['VS Reference'] = conv_const.STATUS_NOT_IN_USE + + def add_vs_ref(self, obj, avi_graph, vs): + """ + Helper method for adding vs ref + :param obj: object + :param avi_graph: avi graph + :param vs: VS list + :return: + """ + tmplist = [] + if isinstance(obj, str) and obj.startswith('Duplicate of'): + obj_name = None + LOG.debug("Object has merged: %s" % obj) + else: + obj_name = obj.get('name', obj.get('hostname')) + if obj_name: + if avi_graph.has_node(obj_name): + LOG.debug("Checked predecessor for %s", obj_name) + predecessor = list(avi_graph.predecessors(obj_name)) + if predecessor: + self.get_predecessor(predecessor, avi_graph, vs, tmplist) + else: + LOG.debug("Object %s may be merged or orphaned", obj_name) + + def 
get_predecessor(self, predecessor, avi_graph, vs, tmplist): + """ + This method gets the predecessor of the object + :param predecessor: predecessor list + :param avi_graph: avi graph + :param vs: VS list + :param tmplist: temporary list of objects for which predecessors + are already evaluated + :return: + """ + if len(predecessor) > 1: + for node in predecessor: + if node in tmplist: + continue + nodelist = [node] + self.get_predecessor(nodelist, avi_graph, vs, tmplist) + elif len(predecessor): + node_obj = [nod for nod in list(avi_graph.nodes().data()) if + nod[0] == predecessor[0]] + if node_obj and (node_obj[0][1]['type'] == 'VS' or 'VS' in node_obj[ + 0][1]['type']): + LOG.debug("Predecessor %s found", predecessor[0]) + vs.extend(predecessor) + else: + tmplist.extend(predecessor) + LOG.debug("Checked predecessor for %s", predecessor[0]) + nodelist = list(avi_graph.predecessors(predecessor[0])) + self.get_predecessor(nodelist, avi_graph, vs, tmplist) + else: + LOG.debug("No more predecessor") + + def vs_per_skipped_setting_for_references(self, avi_config): + """ + This functions defines that Add the skipped setting per VS CSV row + :param avi_config: this method use avi_config for checking vs skipped + :return: None + """ + + # Get the count of vs fully migrated + global fully_migrated + global ptotal_count + global ppcount + fully_migrated = 0 + # Get the VS object list which is having status successful and partial. 
+ vs_csv_objects = [row for row in csv_writer_dict_list + if row['Status'] in [conv_const.STATUS_PARTIAL, + conv_const.STATUS_SUCCESSFUL] + and row['F5 type'] == 'virtual'] + # Get the list of csv rows which has profile as F5 Type + profile_csv_list = self.get_csv_object_list( + csv_writer_dict_list, ['profile']) + ptotal_count = ptotal_count + len(vs_csv_objects) + for vs_csv_object in vs_csv_objects: + ppcount += 1 + skipped_setting = {} + virtual_service = self.format_string_to_json( + vs_csv_object['Avi Object']) + # Update the complexity level of VS as Basic or Advanced + self.update_vs_complexity_level(vs_csv_object, virtual_service) + vs_ref = virtual_service['name'] + repls = ('[', ''), (']', '') + # Get list of skipped setting attributes + skipped_setting_csv = reduce(lambda a, kv: a.replace(*kv), repls, + vs_csv_object['Skipped settings']) + if skipped_setting_csv: + skipped_setting['virtual_service'] = [skipped_setting_csv] + # Get the skipped list for ssl key and cert + if 'ssl_key_and_certificate_refs' in virtual_service: + for ssl_key_and_certificate_ref in \ + virtual_service['ssl_key_and_certificate_refs']: + ssl_key_cert = self.get_name(ssl_key_and_certificate_ref) + ssl_kc_skip = self.get_csv_skipped_list( + profile_csv_list, ssl_key_cert, vs_ref, + field_key='ssl_cert_key') + if ssl_kc_skip: + skipped_setting['ssl cert key'] = {} + skipped_setting['ssl cert key']['name'] = ssl_key_cert + skipped_setting['ssl cert key'][ + 'skipped_list'] = ssl_kc_skip + + # Get the skipped list for ssl profile name. + # Changed ssl profile name to ssl profile ref. + if 'ssl_profile_ref' in virtual_service: + name, skipped = self.get_ssl_profile_skipped( + profile_csv_list, virtual_service['ssl_profile_ref'], + vs_ref) + if skipped: + skipped_setting['ssl profile'] = {} + skipped_setting['ssl profile']['name'] = name + skipped_setting['ssl profile']['skipped_list'] = skipped + # Get the skipped list for pool group. 
+ if 'pool_group_ref' in virtual_service: + pool_group_name = self.get_name( + virtual_service['pool_group_ref']) + csv_pool_rows = self.get_csv_object_list(csv_writer_dict_list, + ['pool']) + pool_group_skipped_settings = self.get_pool_skipped_list( + avi_config, pool_group_name, csv_pool_rows, + csv_writer_dict_list, vs_ref, profile_csv_list) + if pool_group_skipped_settings: + skipped_setting['Pool Group'] = pool_group_skipped_settings + # Get the skipped list for pool. + if 'pool_ref' in virtual_service: + pool_skipped_settings = {'pools': []} + pool_name = self.get_name(virtual_service['pool_ref']) + csv_pool_rows = self.get_csv_object_list(csv_writer_dict_list, + ['pool']) + self.get_skipped_pool( + avi_config, pool_name, csv_pool_rows, csv_writer_dict_list, + vs_ref, profile_csv_list, pool_skipped_settings) + if pool_skipped_settings['pools']: + skipped_setting['Pool'] = pool_skipped_settings + # Get the skipepd list for http policy. + if 'http_policies' in virtual_service: + policy_csv_list = self.get_csv_object_list( + csv_writer_dict_list, ['policy', 'profile']) + for http_ref in virtual_service['http_policies']: + policy_set_name, skipped_list = self.get_policy_set_skipped( + policy_csv_list, http_ref['http_policy_set_ref'], + vs_ref) + if skipped_list: + skipped_setting['Httppolicy'] = {} + skipped_setting['Httppolicy']['name'] = policy_set_name + skipped_setting['Httppolicy'][ + 'skipped_list'] = skipped_list + # Get the http policy name + pool_csv_rows = \ + self.get_csv_object_list(csv_writer_dict_list, ['pool']) + for each_http_policy in avi_config['HTTPPolicySet']: + if each_http_policy['name'] == policy_set_name and 'http_request_policy' in each_http_policy: + for http_req in each_http_policy[ + 'http_request_policy']['rules']: + if http_req.get('switching_action', {}): + self.get_skip_pools_policy( + policy_set_name, http_req, + avi_config, pool_csv_rows, vs_ref, + profile_csv_list, skipped_setting) + + # # Get the skipped list for 
application_profile_ref. + if 'application_profile_ref' in virtual_service and 'admin:System' \ + not in virtual_service['application_profile_ref']: + name, skipped = self.get_application_profile_skipped( + profile_csv_list, + virtual_service['application_profile_ref'], + vs_ref) + if skipped: + skipped_setting['Application profile'] = {} + skipped_setting['Application profile'][ + 'name'] = name + skipped_setting['Application profile'][ + 'skipped_list'] = skipped + # # Get the skipped list for network profile ref. + if 'network_profile_ref' in virtual_service and 'admin:System' \ + not in virtual_service['network_profile_ref']: + name, skipped = self.get_network_profile_skipped( + profile_csv_list, virtual_service['network_profile_ref'], + vs_ref) + if skipped: + skipped_setting['Network profile'] = {} + skipped_setting['Network profile'][ + 'name'] = name + skipped_setting['Network profile'][ + 'skipped_list'] = skipped + # Update overall skipped setting of VS csv row + if skipped_setting: + vs_csv_object.update( + {'Overall skipped settings': str(skipped_setting)}) + else: + vs_csv_object.update( + {'Overall skipped settings': "FULLY MIGRATION"}) + fully_migrated += 1 + # Added call for progress function. + msg = "excel sheet conversion started..." 
+ self.print_progress_bar(ppcount, ptotal_count, msg, + prefix='Progress', suffix='') + csv_objects = [row for row in csv_writer_dict_list + if row['Status'] in [ + conv_const.STATUS_PARTIAL, + conv_const.STATUS_SUCCESSFUL] + and row['F5 type'] != 'virtual'] + + # Update the vs reference not in used if objects are not attached to + # VS directly or indirectly + for row in csv_objects: + if 'VS Reference' not in row or row['VS Reference'] == '': + row['VS Reference'] = conv_const.STATUS_NOT_IN_USE + + def get_application_profile_skipped(self, profile_csv_list, app_profile_ref, + vs_ref): + """ + This functions defines that get the skipped list of CSV row + :param profile_csv_list: List of profile(F5 type) csv rows + :param app_profile_ref: Reference of application profile + :param vs_ref: Name of VS + :return: application profile name and skipped sttribute list + """ + + app_profile_name = self.get_name(app_profile_ref) + skipped_list = self.get_csv_skipped_list( + profile_csv_list, app_profile_name, vs_ref, field_key='app_profile') + return app_profile_name, skipped_list + + def get_network_profile_skipped(self, profile_csv_list, network_profile_ref, + vs_ref): + """ + This functions defines that get the skipped list of CSV row + :param profile_csv_list: List of profile(F5 type) csv rows + :param network_profile_ref: Reference of Network profile + :param vs_ref: Name of VS + :return: network profile name and skipped sttribute list + """ + + network_profile_name = self.get_name(network_profile_ref) + skipped_list = self.get_csv_skipped_list( + profile_csv_list, network_profile_name, vs_ref, + field_key='network_profile') + return network_profile_name, skipped_list + + def get_policy_set_skipped(self, profile_csv_list, policy_set_ref, vs_ref): + """ + This functions defines that get the skipped list of CSV row + :param profile_csv_list: List of profile(F5 type) csv rows + :param policy_set_ref: Reference of policy set + :param vs_ref: Name of VS + :return: policy set 
name and skipped attribute list + """ + + policy_set_name = self.get_name(policy_set_ref) + skipped_list = self.get_csv_skipped_list( + profile_csv_list, policy_set_name, vs_ref, field_key='policy_set') + return policy_set_name, skipped_list + + def get_skip_pools_policy(self, policy_set_name, http_req, avi_config, + pool_csv_rows, vs_ref, profile_csv_list, + skipped_setting): + if http_req['switching_action'].get('pool_group_ref'): + pool_group_name = self.get_name(http_req['switching_action'] + ['pool_group_ref']) + pool_group_skipped_settings = self.get_pool_skipped_list( + avi_config, pool_group_name, pool_csv_rows, + csv_writer_dict_list, vs_ref, profile_csv_list) + if pool_group_skipped_settings: + if 'Httppolicy' not in skipped_setting: + skipped_setting['Httppolicy'] = {} + skipped_setting['Httppolicy']['name'] = policy_set_name + skipped_setting['Httppolicy']['Pool Group'] =\ + pool_group_skipped_settings + elif http_req['switching_action'].get('pool_ref'): + pool_name = self.get_name(http_req['switching_action']['pool_ref']) + pool_skipped_settings = {'pools': []} + self.get_skipped_pool(avi_config, pool_name, pool_csv_rows, + csv_writer_dict_list, vs_ref, + profile_csv_list, pool_skipped_settings) + if pool_skipped_settings['pools']: + if 'Httppolicy' not in skipped_setting: + skipped_setting['Httppolicy'] = {} + skipped_setting['Httppolicy']['name'] = policy_set_name + skipped_setting['Httppolicy']['Pool'] = pool_skipped_settings + + def update_skip_duplicates(self, obj, obj_list, obj_type, converted_objs, + name, default_profile_name, merge_object_mapping, + ent_type, prefix, syslist): + + """ + Merge duplicate profiles + :param obj: Source object to find duplicates for + :param obj_list: List of object to search duplicates in + :param obj_type: Type of object to add in converted_objs status + :param converted_objs: Converted avi object or merged object name + :param name: Name of the object + :param default_profile_name : Name of root parent default
profile + :param merge_object_mapping: merged object mappings + :param ent_type: Entity type + :param prefix: object name prefix + :param syslist: System object list + :return: + """ + dup_of = None + if isinstance(merge_object_mapping, dict): + merge_object_mapping[obj_type].update({name: name}) + # root default profiles are skipped for merging + if not name == default_profile_name or obj_type == 'ssl_profile': + dup_of, old_name = \ + self.check_for_duplicates(obj, obj_list, obj_type, + merge_object_mapping, ent_type, + prefix, syslist) + if dup_of: + converted_objs.append({obj_type: "Duplicate of %s" % dup_of}) + LOG.info( + "Duplicate profiles: %s merged in %s" % (obj['name'], dup_of)) + if isinstance(merge_object_mapping, dict): + if old_name in merge_object_mapping[obj_type].keys(): + merge_object_mapping[obj_type].update({old_name: dup_of}) + merge_object_mapping[obj_type].update({name: dup_of}) + else: + obj_list.append(obj) + converted_objs.append({obj_type: obj}) diff --git a/python/avi/migrationtools/avi_orphan_object.py b/python/avi/migrationtools/avi_orphan_object.py index 3214e6d257..3ce314a12e 100644 --- a/python/avi/migrationtools/avi_orphan_object.py +++ b/python/avi/migrationtools/avi_orphan_object.py @@ -201,10 +201,12 @@ def filter_for_vs(avi_config): global vs_ref_dict_g new_config = [] vs_ref_dict = dict() - for vs in avi_config['VirtualService']: + for vs in avi_config.get('VirtualService'): vs_flag = True if 'tenant_ref' in vs: link, tenant = get_name_and_entity(vs['tenant_ref']) + else : + tenant = 'admin' name = '%s-%s-%s' % (vs['name'], 'VirtualService', tenant) new_config.append(name) find_and_add_objects(vs, avi_config, new_config, vs_ref_dict, @@ -232,7 +234,8 @@ def wipe_out_not_in_use(avi_config): :param avi_config: :return: """ - use_obj = filter_for_vs(avi_config) + if 'VirtualService' in avi_config: + use_obj = filter_for_vs(avi_config) for key in DEFAULT_META_ORDER: if key not in avi_config: continue diff --git 
a/python/avi/migrationtools/f5_converter/conversion_util.py b/python/avi/migrationtools/f5_converter/conversion_util.py index 5a9a5212fd..e93c5d8408 100644 --- a/python/avi/migrationtools/f5_converter/conversion_util.py +++ b/python/avi/migrationtools/f5_converter/conversion_util.py @@ -30,51 +30,6 @@ class F5Util(MigrationUtil): - def get_conv_status(self, skipped, indirect_list, ignore_dict, f5_object, - user_ignore=None, na_list=None): - """ - Update skipped list for conversion status - :param skipped: All skipped attributes after conversion - :param indirect_list: List of attrs to be mapped as indirect mapping - :param ignore_dict: Dict of default values for column skipped for defaults - :param f5_object: Currant F5 object - :param user_ignore: List of attributes user wants not to be shown in skipped - :param na_list: List of attributes marked as not applicable - :return: Conversion status dict - """ - conv_status = dict() - user_ignore = [] if not user_ignore else user_ignore - na_list = [] if not na_list else na_list - - conv_status['user_ignore'] = [val for val in skipped if - val in user_ignore] - skipped = [attr for attr in skipped if attr not in user_ignore] - - conv_status['indirect'] = [val for val in skipped if - val in indirect_list] - skipped = [attr for attr in skipped if attr not in indirect_list] - - conv_status['na_list'] = [val for val in skipped if val in na_list] - skipped = [attr for attr in skipped if attr not in na_list] - - default_skip = [] - for key in ignore_dict.keys(): - f5_val = f5_object.get(key) - default_val = ignore_dict.get(key) - if key in skipped and f5_val == default_val: - default_skip.append(key) - if default_skip: - skipped = [attr for attr in skipped if attr not in default_skip] - - conv_status['skipped'] = skipped - conv_status['default_skip'] = default_skip - if skipped: - status = conv_const.STATUS_PARTIAL - else: - status = conv_const.STATUS_SUCCESSFUL - conv_status['status'] = status - return conv_status - def 
get_avi_pool_down_action(self, action): """ Maps Pool down action from F5 config to Avi Config @@ -1032,27 +987,6 @@ def add_vrf(self, avi_config, vrf, cloud_ref): vrf_obj['system_default'] = True vrf_list.append(vrf_obj) - def get_tenant_ref(self, name): - tenant = 'admin' - if name and name.startswith('/'): - parts = name.split('/', 2) - tenant = parts[1] - if not parts[2]: - LOG.warning('Invalid tenant ref : %s' % name) - name = parts[2] - elif name and '/' in name: - parts = name.split('/') - # Changed the index to get the tenant and name in case of - # prefixed name - tenant = parts[-2] - name = parts[-1] - if tenant.lower() == 'common': - tenant = 'admin' - if '/' in name: - name = name.split('/')[1] - if ' ' in tenant: - tenant = tenant.split(' ')[-1] - return tenant, name def get_app_profile_type(self, profile_name, avi_config): profiles = avi_config.get('ApplicationProfile', []) diff --git a/python/avi/migrationtools/nsxt_converter/alb_converter.py b/python/avi/migrationtools/nsxt_converter/alb_converter.py new file mode 100755 index 0000000000..0f660043bf --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/alb_converter.py @@ -0,0 +1,126 @@ +import argparse +import json +import logging +import os +import yaml +import avi.migrationtools.nsxt_converter.converter_constants as conv_const + +LOG = logging.getLogger(__name__) + +class ALBConverter: + def __init__(self, avi_config_file, output_file_path): + self.avi_config_file = avi_config_file + self.output_file_path = output_file_path + with open(os.path.dirname(__file__) + "/command_status.yaml") as stream: + nsxt_command_status = yaml.safe_load(stream) + self.nsxt_attributes = nsxt_command_status.get('NSXT') + + def convert(self): + + if not os.path.exists(self.output_file_path): + os.mkdir(self.output_file_path) + output_dir = os.path.normpath(self.output_file_path) + with open(self.avi_config_file, "r") as read_file: + avi_config = json.load(read_file) + + alb_config = 
self.convert_to_alb(avi_config) + + if not os.path.exists(output_dir): + os.makedirs(output_dir) + output_config = output_dir + os.path.sep + "alb_config.json" + with open(output_config, "w", encoding='utf-8') as text_file: + json.dump(alb_config, text_file, indent=4) + + def convert_to_alb(self, avi_config): + alb_config = dict() + for key in avi_config.keys(): + if key in self.nsxt_attributes['SUPPORTED_ALB_OBJECTS']: + config = [] + supported_obj = avi_config[key] + for obj in supported_obj: + data = self.recursive_items(obj, {}) + config.append(data) + alb_config[self.nsxt_attributes['albObjectType'].get(key.lower())] = config + return alb_config + + def recursive_items(self, obj, data): + + for k, v in obj.items(): + if k not in self.nsxt_attributes['NOT_APPLICABLE']: + if type(v) is dict: + data[k] = self.recursive_items(v, {}) + elif not k.endswith("_refs") and type(v) is list: + tmp = [] + for iter_over_obj in v: + if type(iter_over_obj) is dict: + tmp.append(self.recursive_items(iter_over_obj, {})) + else: + tmp.append(iter_over_obj) + data[k] = tmp + elif k in self.nsxt_attributes['REPLACE_KEYS']: + + if k == self.nsxt_attributes['REPLACE_KEYS'][0]: + data['display_name'] = v + data['id'] = v + if k == self.nsxt_attributes['REPLACE_KEYS'][1]: + data['cloud_name'] = v.split("name=")[1] + if k == self.nsxt_attributes['REPLACE_KEYS'][2]: + data['vrf_name'] = v.split("name=")[1].split("&")[0] + if k == self.nsxt_attributes['REPLACE_KEYS'][3]: + data['vrf_context_name'] = v.split("name=")[1].split( + "&")[0] + if k == self.nsxt_attributes['REPLACE_KEYS'][4]: + data["tier1_path"] = v + elif k.endswith("_ref"): + if (v.split('/')[2] not in + self.nsxt_attributes['albObjectType'].keys()): + continue + object_type = self.nsxt_attributes['albObjectType'][ + v.split('/')[2]] + obj_name = v.split("name=")[1] + if object_type == "alb-application-persistence-profiles": + data['application_persistence_profile_path'] = "/infra/%s/%s" % ( + object_type, obj_name) + elif 
obj_name.__contains__("&cloud"): + name = obj_name.split("&cloud")[0] + data[k.replace("_ref", "_path")] = "/infra/%s/%s" % ( + object_type, name) + else: + data[k.replace("_ref", "_path")] = "/infra/%s/%s" % ( + object_type, obj_name) + elif k.endswith("_refs"): + list_of_paths = list() + for refs in v: + list_of_paths.append("/infra/%s/%s" % ( + self.nsxt_attributes['albObjectType'][ + refs.split('/')[2]], refs.split("name=")[1])) + data[k.replace("_refs", "_paths")] = list_of_paths + else: + data[k] = v + return data + + +if __name__ == "__main__": + HELP_STR = ''' + Converts AVI Config to ALB config. + Example to convert AVI config file to ALB config json: + alb_converter.py -f tmp_exported_config.json + ''' + + parser = argparse.ArgumentParser( + formatter_class=argparse.RawTextHelpFormatter, + description=(HELP_STR)) + + # Added args for baseline profile json file + parser.add_argument('-f', '--avi_config_file', + help='absolute path for avi config file') + parser.add_argument('-o', '--output_file_path', + help='Folder path for output files to be created in', + ) + + args = parser.parse_args() + + output_file_path = args.output_file_path if args.output_file_path \ + else 'output-alb' + alb_converter = ALBConverter(args.avi_config_file, output_file_path) + alb_converter.convert() \ No newline at end of file diff --git a/python/avi/migrationtools/nsxt_converter/base_client.py b/python/avi/migrationtools/nsxt_converter/base_client.py old mode 100755 new mode 100644 index 44ba216a52..a24127b690 --- a/python/avi/migrationtools/nsxt_converter/base_client.py +++ b/python/avi/migrationtools/nsxt_converter/base_client.py @@ -10,8 +10,6 @@ LOG = logging.getLogger(__name__) -requests.packages.urllib3.disable_warnings(InsecureRequestWarning) - class DetailedHttpError(requests.exceptions.HTTPError): def __init__(self, response): diff --git a/python/avi/migrationtools/nsxt_converter/cleanup.py b/python/avi/migrationtools/nsxt_converter/cleanup.py new file mode 100755 index 
0000000000..679441ef1c --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/cleanup.py @@ -0,0 +1,107 @@ +# !/usr/bin/env python3 +import logging +import os +import json +import argparse +from datetime import datetime +from avi.migrationtools.avi_converter import AviConverter +from avi.migrationtools.avi_migration_utils import get_count +from avi.migrationtools.nsxt_converter.nsx_cleanup import NSXCleanup + +LOG = logging.getLogger(__name__) + + +class NsxtAlbCleanup(AviConverter): + def __init__(self, args): + ''' + + :param args: + ''' + self.nsxt_ip = args.nsxt_ip + self.nsxt_user = args.nsxt_user + self.nsxt_passord = args.nsxt_password + self.nsxt_port = args.nsxt_port + self.cleanup_vs_names = args.cleanup + self.output_file_path = args.output_file_path if args.output_file_path \ + else 'output' + + output_dir = os.path.normpath(self.output_file_path) + + # Load values from state file if not given on command line while executing script + if self.nsxt_ip: + output_path = output_dir + os.path.sep + self.nsxt_ip + os.path.sep + "output" + with open(output_path + os.path.sep + "state.json", 'r') as file: + data = json.load(file) + if not self.nsxt_user: + self.nsxt_user = data.get('nsxt_user') + if not self.nsxt_port: + self.nsxt_port = data.get('nsxt_port') + if not self.output_file_path: + self.output_file_path = data.get('output_file_path') + + input_path = None + self.input_data = None + if self.nsxt_ip: + input_path = output_dir + os.path.sep + self.nsxt_ip + os.path.sep + "input" + else: + input_path = output_dir + os.path.sep + "config-output" + os.path.sep + "input" + with open(input_path + os.path.sep + "config.json", 'r') as file: + self.input_data = json.load(file) + + def initiate_cleanup(self): + + if not os.path.exists(self.output_file_path): + os.mkdir(self.output_file_path) + self.init_logger_path() + + cleanup_msg = "Performing cleanup for applications" + LOG.debug(cleanup_msg) + print(cleanup_msg) + + if self.cleanup_vs_names: + nsx_c = 
NSXCleanup(self.nsxt_user, self.nsxt_passord, self.nsxt_ip, self.nsxt_port) + nsx_c.nsx_cleanup(self.cleanup_vs_names) + + if nsx_c.vs_not_found: + print_msg = "\033[93m"+"Warning: Following virtual service/s could not be found"+'\033[0m' + print(print_msg) + print(nsx_c.vs_not_found) + + print("Total Warning: ", get_count('warning')) + print("Total Errors: ", get_count('error')) + LOG.info("Total Warning: {}".format(get_count('warning'))) + LOG.info("Total Errors: {}".format(get_count('error'))) + + +if __name__ == "__main__": + HELP_STR = """ + Usage: + python nsxt_converter.py -n 192.168.100.101 -u admin -p password + """ + + parser = argparse.ArgumentParser( + formatter_class=argparse.RawTextHelpFormatter, description=HELP_STR) + + # Added command line args to take skip type for ansible playbook + parser.add_argument('--cleanup', + help='comma separated vs names that we want to clear from nsx-t side', + required=True) + parser.add_argument('-n', '--nsxt_ip', + help='Ip of NSXT', required=True) + parser.add_argument('-u', '--nsxt_user', + help='NSX-T User name') + parser.add_argument('-p', '--nsxt_password', + help='NSX-T Password', required=True) + parser.add_argument('-port', '--nsxt_port', default=443, + help='NSX-T Port') + parser.add_argument('-o', '--output_file_path', + help='Folder path for output files to be created in', + ) + + start = datetime.now() + args = parser.parse_args() + nsxtalb_cleanup = NsxtAlbCleanup(args) + nsxtalb_cleanup.initiate_cleanup() + end = datetime.now() + print("The time of execution of above program is :", + str(end - start)) diff --git a/python/avi/migrationtools/nsxt_converter/command_status.yaml b/python/avi/migrationtools/nsxt_converter/command_status.yaml new file mode 100755 index 0000000000..72fad94e77 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/command_status.yaml @@ -0,0 +1,362 @@ +NSXT: + SUPPORTED_ALB_OBJECTS : + - "AlertScriptConfig" + - "AnalyticsProfile" + - "ApplicationPersistenceProfile" + - 
"ApplicationProfile" + - "Authprofiles" + - "AutoScaleLaunchConfig" + - "DnsPolicy" + - "ErrorPageBody" + - "ErrorPageProfile" + - "HardwareSecurityModuleGroup" + - "HealthMonitor" + - "HTTPPolicySet" + - "IpAddrGroup" + - "L4PolicySet" + - "NetworkProfile" + - "NetworkSecurityPolicy" + - "ObjectAccessPolicy" + - "PingAccessAgent" + - "PKIProfile" + - "Pool" + - "PoolGroup" + - "PoolGroupDeploymentPolicy" + - "PriorityLabels" + - "ProtocolParser" + - "SecurityPolicy" + - "ServerAutoScalePolicy" + - "CertificateManagementProfile" + - "SSLKeyAndCertificate" + - "SSLProfile" + - "SSOPolicy" + - "StringGroup" + - "TrafficCloneProfile" + - "VirtualService" + - "VSDataScriptSet" + - "VsVip" + - "WafCRS" + - "WafPolicy" + - "WafPolicyPSMGroup" + - "WafProfile" + - "Webhook" + + albObjectType : + "alertscriptconfig": "alb-alert-script-configs" + "analyticsprofile": "alb-analytics-profiles" + "applicationpersistenceprofile": "alb-application-persistence-profiles" + "applicationprofile": "alb-application-profiles" + "authprofile": "alb-auth-profiles" + "autoscalelaunchconfig": "alb-auto-scale-launch-configs" + "certificatemanagementprofile": "alb-certificate-management-profiles" + "dnspolicy": "alb-dns-policies" + "errorpagebody": "alb-error-page-bodies" + "errorpageprofile": "alb-error-page-profiles" + "httppolicyset": "alb-http-policy-sets" + "hardwaresecuritymodulegroup": "alb-hardware-security-module-groups" + "healthmonitor": "alb-health-monitors" + "ipaddrgroup": "alb-ip-addr-groups" + "l4policyset": "alb-l4-policy-sets" + "networkprofile": "alb-network-profiles" + "networksecuritypolicy": "alb-network-security-policies" + "pkiprofile": "alb-pki-profiles" + "pingaccessagent": "alb-ping-access-agents" + "pool": "alb-pools" + "poolgroup": "alb-pool-groups" + "poolgroupdeploymentpolicy": "alb-pool-group-deployment-policies" + "prioritylabels": "alb-priority-labels" + "protocolparser": "alb-protocol-parsers" + "sslkeyandcertificate": "alb-ssl-key-and-certificates" + 
"sslprofile": "alb-ssl-profiles" + "ssopolicy": "alb-sso-policies" + "securitypolicy": "alb-security-policies" + "serverautoscalepolicy": "alb-server-auto-scale-policies" + "stringgroup": "alb-string-groups" + "trafficcloneprofile": "alb-traffic-clone-profiles" + "vsdatascriptset": "alb-vs-data-script-sets" + "virtualservice": "alb-virtual-services" + "vsvip": "alb-vs-vips" + "wafcrs": "alb-waf-crs" + "wafpolicy": "alb-waf-policies" + "wafpolicypsmgroup": "alb-waf-policy-psm-groups" + "wafprofile": "alb-waf-profiles" + "objectaccesspolicy": "alb-object-access-policies" + "webhook": "alb-webhooks" + + NOT_APPLICABLE : + - 'url' + - 'uuid' + - 'tenant_ref' + + REPLACE_KEYS : + - 'name' + - 'cloud_ref' + - 'vrf_ref' + - 'vrf_context_ref' + - 'tier1_lr' + + + Monitor_Supported_Types: + - "LBHttpMonitorProfile" + - "LBHttpsMonitorProfile" + - "LBIcmpMonitorProfile" + - "LBTcpMonitorProfile" + - "LBUdpMonitorProfile" + + Monitor_Supported_Attributes: + - "monitor_port" + - "interval" + - "timeout" + - "rise_count" + - "fall_count" + - "display_name" + - "resource_type" + - "id" + + Monitor_http_attr: + - "request_url" + - "request_method" + - "request_version" + - "request_body" + - "response_status_codes" + - "response_body" + - "request_headers" + + Monitor_https_attr: + - "request_url" + - "request_method" + - "request_version" + - "request_body" + - "response_status_codes" + - "response_body" + - "server_ssl_profile_binding" + - "request_headers" + + Monitor_tcp_attr: + - "send" + - "receive" + + Monitor_udp_attr: + - "send" + - "receive" + + Monitor_ping_attr: + + Monitor_icmp_ignore: + - "data_length" + + Monitor_passive_indirect: + - "max_fails" + - "timeout" + + Monitor_server_ssl_indirect_attributes: + - "server_auth" + - "certificate_chain_depth" + + Monitor_server_ssl_supported_attributes: + - "ssl_profile_path" + - "server_auth_ca_paths" + - "client_certificate_path" + - "server_auth_crl_paths" + + Pool_supported_attr: + - "display_name" + - "members" + - 
"algorithm" + - "member_group" + - "active_monitor_paths" + - "snat_translation" + - "tcp_multiplexing_enabled" + - "tcp_multiplexing_number" + - "min_active_members" + - "resource_type" + - "id" + + Pool_supported_attr_convert_servers_config: + - "display_name" + - "ip_address" + - "port" + - "admin_state" + - "weight" + - "max_concurrent_connections" + - 'backup_member' + + Pool_supported_attr_convert_member_group: + - "group_path" + - "port" + + Pool_na_list: + - "max_ip_list_size" + - "ip_revision_filter" + + Application_Http_Profile_supported_attr: + - "x_forwarded_for" + - "http_redirect_to_https" + - "idle_timeout" + - "request_header_size" + - "request_body_size" + - "description" + - "display_name" + - "resource_type" + - "server_keep_alive" + - "id" + - "ntlm" + - "response_header_size" + - "http_redirect_to" + - "response_timeout" + + http_na_list: + - "response_buffering" + + tcp_na_list: + - "close_timeout" + + Network_Profile_supported_attr: + - "display_name" + - "resource_type" + - "id" + - "idle_timeout" + - 'ha_flow_mirroring_enabled' + - 'flow_mirroring_enabled' + + VS_supported_attr: + - 'application_profile_path' + - 'destination' + - 'pool_path' + - 'persist' + - 'source-address-translation' + - 'description' + - 'translate-port' + - 'source' + - 'rate-limit' + - 'connection-limit' + - 'rules' + - "partition" + - 'enabled' + - 'ports' + - 'max_concurrent_connections' + - 'ip_address' + - 'lb_persistence_profile_path' + - 'lb_service_path' + - 'display_name' + - 'id' + - 'resource_type' + - 'client_ssl_profile_binding' + - 'server_ssl_profile_binding' + - 'default_pool_member_ports' + - 'sorry_pool_path' + + VS_client_ssl_supported_attr: + - 'ssl_profile_path' + - 'default_certificate_path' + - 'client_auth' + - 'client_auth_ca_paths' + - 'client_auth_crl_paths' + + VS_server_ssl_supported_attr: + - 'ssl_profile_path' + - 'client_certificate_path' + - 'server_auth_crl_paths' + - 'client_certificate_path' + - 'server_auth_ca_paths' + + 
VS_na_list: + - "log_significant_event_only" + + VS_indirect_aatr: + - "access_log_enabled" + + VS_client_ssl_indirect_attr: + - "certificate_chain_depth" + + VS_server_ssl_indirect_attr: + - "certificate_chain_depth" + + SSLProfile_Client_Supported_Attributes: + - "ciphers" + - "protocols" + - "session_cache_enabled" + - "session_cache_timeout" + - "display_name" + - "id" + - "resource_type" + - 'prefer_server_ciphers' + + SSLProfile_Client_Indirect_Attributes: + - "is_secure" + - "is_fips" + + SSLProfile_Server_Supported_Attributes: + - "ciphers" + - "protocols" + - "session_cache_enabled" + - "session_cache_timeout" + - "display_name" + - "id" + - "resource_type" + + SSLProfile_Server_Indirect_Attributes: + - "is_secure" + - "is_fips" + + PersistenceProfile_Supported_Attributes: + - "display_name" + - "id" + - "resource_type" + + CookiePersistenceProfile_Supported_Attributes: + - "cookie_name" + - "cookie_max_idle" + - "cookie_fallback" + - "cookie_time" + + SourcePersistenceProfile_Supported_Attributes: + - "timeout" + + SourcePersistenceProfile_NA_Attributes: + - "ha_persistence_mirroring_enabled" + + Persistence_indirect_cookie: + - "cookie_garble" + + Persistence_na_attr: + - "persistence_shared" + + HttpPolicySetRules_Supported_Attributes: + - "match_conditions" + - "actions" + - "phase" + - "match_strategy" + + HttpPolicySetRules_Skiped_List_MatchingCondition: + - "LBHttpRequestBodyCondition" + - "LBTcpHeaderCondition" + - "LBVariableCondition" + - "LBSslSniCondition" + - "LBClientCertificateIssuerDnConditionDto" + - "LBClientCertificateSubjectDnConditionDto" + + HttpPolicySetRules_Skiped_List_Actions: + - "LBSslModeSelectionAction" + - "LBVariableAssignmentAction" + - "LBJwtAuthAction" + - "LBJwtCertificateKey" + - "LBJwtSymmetricKey" + - "LBJwtPublicKey" + + + Common_Na_List: + - "path" + - "relative_path" + - "parent_path" + - "unique_id" + - "realization_id" + - "marked_for_delete" + - "overridden" + - "_create_time" + - "_create_user" + - 
"_last_modified_time" + - "_last_modified_user" + - "_system_owned" + - "_protection" + - "_revision" \ No newline at end of file diff --git a/python/avi/migrationtools/nsxt_converter/conversion_util.py b/python/avi/migrationtools/nsxt_converter/conversion_util.py new file mode 100755 index 0000000000..ccb4808b97 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/conversion_util.py @@ -0,0 +1,1249 @@ +# Copyright 2021 VMware, Inc. +# SPDX-License-Identifier: Apache License 2.0 +import copy +import logging +import os +from functools import reduce + +import pandas +import re +import random +from pkg_resources import parse_version +import avi.migrationtools.f5_converter.converter_constants as conv_const + +from xlsxwriter import Workbook +from openpyxl import load_workbook + +from avi.migrationtools.avi_migration_utils import MigrationUtil + + +LOG = logging.getLogger(__name__) +csv_writer_dict_list = [] + +# Added variable for checking progress and get overall object. +ppcount = 0 +ptotal_count = 0 +global fully_migrated +fully_migrated = 0 +used_pool_groups = {} +used_pool = {} + + +class NsxtConvUtil(MigrationUtil): + STATIC_PORT_MAP = { + "http": conv_const.HTTP_PORT, + "https": conv_const.HTTPS_PORT, + "ftp": conv_const.FTP_PORT, + "smtp": conv_const.SMTP_PORT, + "snmp": conv_const.SNMP_PORT, + "telnet": conv_const.TELNET_PORT, + "snmp-trap": conv_const.SNMP_TRAP_PORT, + "ssh": conv_const.SSH_PORT, + "xfer": conv_const.XFER_PORT, + "pcsync-https": conv_const.PCSYNC_HTTPS_PORT, + "macromedia-fcs": conv_const.MACROMEDIA_FCS_PORT, + "imap": conv_const.IMAP_PORT, + "pop3": conv_const.POP3_PORT, + "any": None + } + + def add_conv_status(self, nsxt_type, nsxt_sub_type, nsxt_id, conv_status, + avi_object=None, need_review=None): + """ + Adds as status row in conversion status csv + :param nsxt_type: Object type + :param nsxt_sub_type: Object sub type + :param nsxt_id: Name oconv_f object + :param conv_status: dict of conversion status + :param avi_object: 
Converted avi object + """ + global csv_writer_dict_list + # Added space if nsxt_sub_type None for pivot table + row = { + 'NsxT type': nsxt_type, + 'NsxT SubType': nsxt_sub_type if nsxt_sub_type else ' ', + 'NsxT ID': nsxt_id, + 'Status': conv_status.get('status', ''), + 'Skipped settings': str(conv_status.get('skipped', '')), + 'Indirect mapping': str(conv_status.get('indirect', '')), + 'Not Applicable': str(conv_status.get('na_list', '')), + 'Avi Object': str(avi_object) + } + csv_writer_dict_list.append(row) + + def add_status_row(self, nsxt_type, nsxt_sub_type, nsxt_id, status, avi_obj=None): + """ + Adds as status row in conversion status csv + :param nsxt_type: Object type + :param nsxt_sub_type: Object sub type + :param nsxt_id: Name of object + :param status: conversion status + :param avi_obj: Converted avi object + """ + global csv_writer_dict_list + # Added space if nsxt_sub_type None for pivot table + row = { + 'NsxT type': nsxt_type, + 'NsxT SubType': nsxt_sub_type if nsxt_sub_type else ' ', + 'NsxT ID': nsxt_id, + 'Status': status + } + if avi_obj: + row.update({ + 'Avi Object': str(avi_obj) + }) + csv_writer_dict_list.append(row) + + def add_complete_conv_status(self, output_dir, avi_config, report_name, + vs_level_status): + + global csv_writer_dict_list + global ptotal_count + for status in conv_const.STATUS_LIST: + status_list = [row for row in csv_writer_dict_list if + row['Status'] == status] + print('%s: %s' % ('Total '+status, len(status_list))) + print("Writing Excel Sheet For Converted Configuration...") + ptotal_count = ptotal_count + len(csv_writer_dict_list) + if vs_level_status: + self.vs_per_skipped_setting_for_references(avi_config) + self.correct_vs_ref(avi_config) + else: + # Update the complexity level of VS as Basic or Advanced + self.vs_complexity_level() + self.write_status_report_and_pivot_table_in_xlsx( + output_dir, report_name, vs_level_status) + + def get_port_by_protocol(self, protocol): + """ + Instead of
default ports for protocols nsxt config has protocol in + destination value for Avi object need to conver it to port number + :param protocol: protocol name + :return: integer value for protocol + """ + + return self.STATIC_PORT_MAP.get(protocol, None) + + def update_pool_for_service_port(self, pool_list, pool_name, hm_list, + sys_hm_list): + rem_hm = [] + pool = [obj for obj in pool_list if obj['name'] == pool_name] + if pool: + pool[0]['use_service_port'] = True + # Checking monitor ports if use_service_port is true + if pool[0].get('health_monitor_refs'): + for hm in pool[0]['health_monitor_refs']: + hm_name = self.get_name(hm) + hm_ob = [ob for ob in (hm_list + sys_hm_list) if + ob['name'] == hm_name] + if hm_ob and (not hm_ob[0].get('monitor_port')): + rem_hm.append(hm) + LOG.debug("Removing monitor reference of %s from pool" + " %s as 'use_service_port' is true but " + "monitor has no port", hm_name, + pool_name) + if rem_hm: + pool[0]['health_monitor_refs'] = [ + h_monitor for h_monitor in pool[0] + ['health_monitor_refs'] if h_monitor not in rem_hm] + + rem_hm = [self.get_name(hmonitor) for hmonitor in rem_hm] + csv_row = [cl for cl in csv_writer_dict_list if cl[ + 'NsxT type'] == 'pool' and self.get_tenant_ref( + cl['NsxT ID'])[1] == pool_name] + if csv_row: + if csv_row[0]['Skipped settings'] in ('[]', ''): + csv_row[0]['Skipped settings'] = str([{ + 'monitor': rem_hm}]) + else: + init_val = eval(csv_row[0]['Skipped settings']) + if not isinstance(init_val, list): + init_val = [init_val] + mon_val = [ + val['monitor'].extend(rem_hm) for val in + init_val if isinstance(val, dict) and + 'monitor' in val] + if bool(mon_val): + csv_row[0]['Skipped settings'] = str(init_val) + else: + init_val.append({'monitor': rem_hm}) + csv_row[0]['Skipped settings'] = str(init_val) + csv_row[0]['Status'] = conv_const.STATUS_PARTIAL + csv_row[0]['Avi Object'] = str({'pools': pool}) + + def write_status_report_and_pivot_table_in_xlsx( + self, output_dir, report_name, 
    def vs_complexity_level(self):
        """
        This method calculates the complexity of each VS row (Basic/Advanced)
        for rows with status partial/successful.
        :return: None; mutates rows in csv_writer_dict_list
        """
        # Get the VS object list which is having status successful and partial.
        # NOTE(review): filters on NsxT type 'virtual' here, but
        # vs_per_skipped_setting_for_references uses 'virtualservice' —
        # confirm which tag the rows actually carry.
        vs_csv_objects = [row for row in csv_writer_dict_list
                          if row['Status'] in [conv_const.STATUS_PARTIAL,
                                               conv_const.STATUS_SUCCESSFUL]
                          and row['NsxT type'] == 'virtual']
        for vs_csv_object in vs_csv_objects:
            virtual_service = self.format_string_to_json(
                vs_csv_object['Avi Object'])
            # Update the complexity level of VS as Basic or Advanced
            self.update_vs_complexity_level(vs_csv_object, virtual_service)

    def vs_per_skipped_setting_for_references(self, avi_config):
        """
        Adds the per-VS "Overall skipped settings" column by aggregating
        skipped attributes of the VS itself and of every object it references
        (ssl cert/key, ssl profile, pool group, pool, http policies,
        application profile, network profile).
        :param avi_config: converted avi configuration, used to resolve refs
        :return: None; mutates csv_writer_dict_list rows in place
        """

        # Get the count of vs fully migrated
        global fully_migrated
        global ptotal_count
        global ppcount
        fully_migrated = 0
        # Get the VS object list which is having status successful and partial.
        vs_csv_objects = [row for row in csv_writer_dict_list
                          if row['Status'] in [conv_const.STATUS_PARTIAL,
                                               conv_const.STATUS_SUCCESSFUL]
                          and row['NsxT type'] == 'virtualservice']
        # Get the list of csv rows which has profile as NsxT type
        profile_csv_list = self.get_csv_object_list(
            csv_writer_dict_list, ['applicationprofile'])
        ssl_profile_csv_list = self.get_csv_object_list(
            csv_writer_dict_list, ['sslprofile'])
        ssl_key_certificate_csv_list = self.get_csv_object_list(
            csv_writer_dict_list, ['ssl_key_and_certificate'])
        ptotal_count = ptotal_count + len(vs_csv_objects)
        for vs_csv_object in vs_csv_objects:
            ppcount += 1
            skipped_setting = {}
            virtual_service = self.format_string_to_json(
                vs_csv_object['Avi Object'])
            # Update the complexity level of VS as Basic or Advanced
            self.update_vs_complexity_level(vs_csv_object, virtual_service)
            vs_ref = virtual_service['name']
            repls = ('[', ''), (']', '')
            # Get list of skipped setting attributes (strip list brackets)
            skipped_setting_csv = reduce(lambda a, kv: a.replace(*kv), repls,
                                         vs_csv_object['Skipped settings'])
            if skipped_setting_csv:
                skipped_setting['virtual_service'] = [skipped_setting_csv]
            # Get the skipped list for ssl key and cert
            if 'ssl_key_and_certificate_refs' in virtual_service:
                for ssl_key_and_certificate_ref in \
                        virtual_service['ssl_key_and_certificate_refs']:
                    ssl_key_cert = self.get_name(ssl_key_and_certificate_ref)
                    ssl_kc_skip = self.get_csv_skipped_list(
                        ssl_key_certificate_csv_list, ssl_key_cert, vs_ref,
                        field_key='ssl_cert_key')
                    if ssl_kc_skip:
                        skipped_setting['ssl cert key'] = {}
                        skipped_setting['ssl cert key']['name'] = ssl_key_cert
                        skipped_setting['ssl cert key'][
                            'skipped_list'] = ssl_kc_skip

            # Get the skipped list for ssl profile name.
            # Changed ssl profile name to ssl profile ref.
            if 'ssl_profile_ref' in virtual_service:
                name, skipped = self.get_ssl_profile_skipped(
                    ssl_profile_csv_list, virtual_service['ssl_profile_ref'],
                    vs_ref)
                if skipped:
                    skipped_setting['ssl profile'] = {}
                    skipped_setting['ssl profile']['name'] = name
                    skipped_setting['ssl profile']['skipped_list'] = skipped
            # Get the skipped list for pool group.
            if 'pool_group_ref' in virtual_service:
                pool_group_name = self.get_name(
                    virtual_service['pool_group_ref'])
                csv_pool_rows = self.get_csv_object_list(csv_writer_dict_list,
                                                         ['pool'])
                pool_group_skipped_settings = self.get_pool_skipped_list(
                    avi_config, pool_group_name, csv_pool_rows,
                    csv_writer_dict_list, vs_ref, profile_csv_list)
                if pool_group_skipped_settings:
                    skipped_setting['Pool Group'] = pool_group_skipped_settings
            # Get the skipped list for pool.
            if 'pool_ref' in virtual_service:
                pool_skipped_settings = {'pools': []}
                pool_name = self.get_name(virtual_service['pool_ref'])
                csv_pool_rows = self.get_csv_object_list(csv_writer_dict_list,
                                                         ['pool'])
                self.get_skipped_pool(
                    avi_config, pool_name, csv_pool_rows, csv_writer_dict_list,
                    vs_ref, profile_csv_list, pool_skipped_settings)
                if pool_skipped_settings['pools']:
                    skipped_setting['Pool'] = pool_skipped_settings
            # Get the skipped list for http policy.
            if 'http_policies' in virtual_service:
                policy_csv_list = self.get_csv_object_list(
                    csv_writer_dict_list, ['policy', 'profile'])
                for http_ref in virtual_service['http_policies']:
                    policy_set_name, skipped_list = self.get_policy_set_skipped(
                        policy_csv_list, http_ref['http_policy_set_ref'],
                        vs_ref)
                    if skipped_list:
                        skipped_setting['Httppolicy'] = {}
                        skipped_setting['Httppolicy']['name'] = policy_set_name
                        skipped_setting['Httppolicy'][
                            'skipped_list'] = skipped_list
                    # Get the http policy name
                    pool_csv_rows = \
                        self.get_csv_object_list(csv_writer_dict_list, ['pool'])
                    for each_http_policy in avi_config['HTTPPolicySet']:
                        if each_http_policy['name'] == policy_set_name and 'http_request_policy' in each_http_policy:
                            for http_req in each_http_policy[
                                    'http_request_policy']['rules']:
                                if http_req.get('switching_action', {}):
                                    self.get_skip_pools_policy(
                                        policy_set_name, http_req,
                                        avi_config, pool_csv_rows, vs_ref,
                                        profile_csv_list, skipped_setting)

            # # Get the skipped list for application_profile_ref.
            if 'application_profile_ref' in virtual_service:
                name, skipped = self.get_application_profile_skipped(
                    profile_csv_list,
                    virtual_service['application_profile_ref'],
                    vs_ref)
                if skipped:
                    skipped_setting['Application profile'] = {}
                    skipped_setting['Application profile'][
                        'name'] = name
                    skipped_setting['Application profile'][
                        'skipped_list'] = skipped
            # # Get the skipped list for network profile ref.
            if 'network_profile_ref' in virtual_service and 'admin:System' \
                    not in virtual_service['network_profile_ref']:
                name, skipped = self.get_network_profile_skipped(
                    profile_csv_list, virtual_service['network_profile_ref'],
                    vs_ref)
                if skipped:
                    skipped_setting['Network profile'] = {}
                    skipped_setting['Network profile'][
                        'name'] = name
                    skipped_setting['Network profile'][
                        'skipped_list'] = skipped
            # Update overall skipped setting of VS csv row
            if skipped_setting:
                vs_csv_object.update(
                    {'Overall skipped settings': str(skipped_setting)})
            else:
                vs_csv_object.update(
                    {'Overall skipped settings': "FULLY MIGRATION"})
                fully_migrated += 1
            # Added call for progress function.
            msg = "excel sheet conversion started..."
            self.print_progress_bar(ppcount, ptotal_count, msg,
                                    prefix='Progress', suffix='')
        csv_objects = [row for row in csv_writer_dict_list
                       if row['Status'] in [
                           conv_const.STATUS_PARTIAL,
                           conv_const.STATUS_SUCCESSFUL]
                       and row['NsxT type'] != 'virtualservice']

        # Update the vs reference not in used if objects are not attached to
        # VS directly or indirectly
        for row in csv_objects:
            if 'VS Reference' not in row or row['VS Reference'] == '':
                row['VS Reference'] = conv_const.STATUS_NOT_IN_USE
    def correct_vs_ref(self, avi_config):
        """
        This method corrects the reference of VS to different objects.
        Builds an object graph from the converted config and, for every
        non-VS row with partial/successful status, records which VSes reach
        that object (or NOT_IN_USE when none do).
        :param avi_config: avi configuration dict
        :return: None; mutates 'VS Reference' on rows in csv_writer_dict_list
        """
        global csv_writer_dict_list
        avi_graph = self.make_graph(avi_config)
        csv_dict_sub = [row for row in csv_writer_dict_list if row[
            'NsxT type'] != 'virtualservice' and row['Status'] in
            (conv_const.STATUS_PARTIAL,
             conv_const.STATUS_SUCCESSFUL)]
        for dict_row in csv_dict_sub:
            obj = dict_row['Avi Object']
            vs = []
            # 'Avi Object' is a str()-ified dict or list produced by this
            # tool; NOTE(review): eval() assumes the CSV was not edited
            # externally — ast.literal_eval would be safer.
            if obj.startswith('{'):
                obj = eval(obj)
                for key in obj:
                    for objs in obj[key]:
                        self.add_vs_ref(objs, avi_graph, vs)
            elif obj.startswith('['):
                obj = eval(obj)
                for objs in obj:
                    for key in objs:
                        objval = objs[key]
                        self.add_vs_ref(objval, avi_graph, vs)
            if vs:
                # de-duplicate VS names before recording
                dict_row['VS Reference'] = str(list(set(vs)))
            else:
                dict_row['VS Reference'] = conv_const.STATUS_NOT_IN_USE
    def get_vs_ssl_profiles(self, profiles, avi_config, prefix,
                            merge_object_mapping, sys_dict, f5_config):
        """
        Searches for profile refs in converted profile config; if not found
        creates default profiles.
        :param profiles: profiles in f5 config assigned to VS
        :param avi_config: converted avi config
        :param prefix: prefix for objects
        :param merge_object_mapping: Merged object mappings
        :param sys_dict: System object dict
        :param f5_config: f5 configuration (currently unused in this body)
        :return: (clientside refs, serverside refs) assigned to VS in avi config
        """
        # f5_profiles = f5_config.get("profile", {})
        vs_ssl_profile_names = []
        pool_ssl_profile_names = []
        if not profiles:
            return vs_ssl_profile_names, pool_ssl_profile_names
        if isinstance(profiles, str):
            # single profile given as "name {}" string -> normalize to a dict
            profiles = profiles.replace(" {}", "")
            profiles = {profiles: None}
        for key in profiles.keys():
            # Called tenant ref to get object name.
            tenant, name = self.get_tenant_ref(key)
            if prefix:
                name = prefix + '-' + name
            ssl_profile_list = avi_config.get("SSLProfile", [])
            sys_ssl = sys_dict['SSLProfile']
            # Prefer a system profile recorded in the merge mapping; else a
            # converted profile matching by name or dup_of alias.
            ssl_profiles = [ob for ob in sys_ssl if ob['name'] ==
                            merge_object_mapping['ssl_profile'].get(name)
                            ] or [obj for obj in ssl_profile_list
                                  if (obj['name'] == name or name in
                                      obj.get("dup_of", []))]
            if ssl_profiles:
                cert_name = ssl_profiles[0].get('cert_name', None)
                if not cert_name:
                    cert_name = name
                ssl_key_cert_list = avi_config.get("SSLKeyAndCertificate", [])
                sys_key_cert = sys_dict['SSLKeyAndCertificate']
                key_cert = [ob for ob in sys_key_cert if ob['name'] ==
                            merge_object_mapping['ssl_cert_key'].get(cert_name)
                            ] or [obj for obj in ssl_key_cert_list if
                                  (obj['name'] == cert_name or obj['name'] ==
                                   '%s-%s' % (cert_name, conv_const.PLACE_HOLDER_STR) or cert_name in
                                   obj.get("dup_of", []))]
                # key_cert = key_cert[0]['name'] if key_cert else None
                if key_cert:
                    key_cert = self.get_object_ref(
                        key_cert[0]['name'], 'sslkeyandcertificate',
                        tenant=self.get_name(key_cert[0]['tenant_ref']))
                profile = profiles[key]
                context = profile.get("context") if profile else None
                if (not context) and isinstance(profile, dict):
                    if 'serverside' in profile:
                        context = 'serverside'
                    elif 'clientside' in profile:
                        context = 'clientside'
                pki_list = avi_config.get("PKIProfile", [])
                syspki = sys_dict['PKIProfile']
                pki_profiles = [ob for ob in syspki if ob['name'] ==
                                merge_object_mapping['pki_profile'].get(
                                    name)] or \
                               [obj for obj in pki_list if
                                (obj['name'] == name or
                                 name in obj.get("dup_of", []))]
                pki_profile = pki_profiles[0]['name'] if pki_profiles else None
                mode = 'SSL_CLIENT_CERTIFICATE_NONE'
                if pki_profile:
                    mode = pki_profiles[0].pop('mode',
                                               'SSL_CLIENT_CERTIFICATE_NONE')
                    pki_profile = self.get_object_ref(
                        pki_profiles[0]["name"], 'pkiprofile',
                        tenant=(pki_profiles[0]['tenant_ref'])).split('name=')[-1]
                if context == "clientside":
                    # NOTE(review): .split('name=') yields a LIST stored as
                    # "profile" (unlike the [-1] used for pki above) —
                    # confirm the consumer expects a list here.
                    ssl_prof_ref = self.get_object_ref(
                        ssl_profiles[0]["name"], 'sslprofile',
                        tenant=(ssl_profiles[0]['tenant_ref'])).split('name=')
                    vs_ssl_profile_names.append({"profile": ssl_prof_ref,
                                                 "cert": key_cert,
                                                 "pki": pki_profile,
                                                 'mode': mode})
                elif context == "serverside":
                    ssl_prof_ref = self.get_object_ref(
                        ssl_profiles[0]["name"], 'sslprofile',
                        tenant=(ssl_profiles[0]['tenant_ref'])).split('name=')
                    pool_ssl_profile_names.append(
                        {"profile": ssl_prof_ref, "cert": key_cert,
                         "pki": pki_profile, 'mode': mode})
        return vs_ssl_profile_names, pool_ssl_profile_names
    def get_service_obj(self, destination, avi_config, enable_ssl,
                        controller_version, tenant_name, cloud_name, prefix,
                        vs_name, input_vrf=None):
        """
        Checks port overlapping scenario for port value 0 in F5 config.
        :param destination: IP and Port destination of VS ("ip[:port]",
            optionally with "%vrf" appended to the ip)
        :param avi_config: Dict of avi config
        :param enable_ssl: value to put in service objects
        :param controller_version: Version of controller
        :param tenant_name: Name of tenant
        :param cloud_name: Name of cloud
        :param prefix: name prefix
        :param vs_name: Name of VS
        :param input_vrf: Vrf context name
        :return: (services_obj, ip_addr, vsvip ref, vrf ref); all None when
            the VS must be skipped (ip 'any', unsupported port, dup ip-port)
        """

        parts = destination.split(':')
        ip_addr = parts[0]
        ip_addr = ip_addr.strip()
        vrf = None
        # Removed unwanted string from ip address
        if '%' in ip_addr:
            ip_addr, vrf = ip_addr.split('%')
        # Added support to skip virtualservice with ip address any
        if ip_addr == 'any':
            LOG.debug("Skipped:VS with IP address: %s" % str(destination))
            return None, None, None, None
        # Added check for IP V4
        matches = re.findall('^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', ip_addr)
        if not matches or ip_addr == '0.0.0.0':
            LOG.warning(
                'Avi does not support IPv6 Generated random ipv4 for vs:'
                ' %s' % ip_addr)
            ip_addr = ".".join(map(str, (
                random.randint(0, 255) for _ in range(4))))
        port = parts[1] if len(parts) == 2 else conv_const.DEFAULT_PORT
        # Get the list of vs which shared the same vip
        if parse_version(controller_version) >= parse_version('17.1'):
            # vs_dup_ips = \
            #     [vs for vs in avi_config['VirtualService'] if
            #      vs['vip'][0]['ip_address']['addr'] ==
            #      ip_addr]
            vs_dup_ips = []
            for vs in avi_config['VirtualService']:
                # NOTE(review): relies on vsvip_ref names of the form
                # '...name=<ip>-vsvip...' produced by create_update_vsvip.
                vs_ip = vs['vsvip_ref'].split('name=')[1].split('-')[0]
                if ip_addr == vs_ip:
                    vs_dup_ips.append(vs)
        else:
            vs_dup_ips = \
                [vs for vs in avi_config['VirtualService'] if
                 vs['ip_address']['addr'] == ip_addr]

        if port == 'any':
            port = '0'
        if isinstance(port, str) and (not port.isdigit()):
            port = self.get_port_by_protocol(port)
        # Port is None then skip vs
        if not port:
            LOG.debug("Skipped:Port not supported %s" % str(parts[1]))
            return None, None, None, None
        if int(port) > 0:
            # Concrete port: nudge overlapping services on sibling VSes.
            for vs in vs_dup_ips:
                service_updated = self.update_service(port, vs, enable_ssl)
                if service_updated == 'duplicate_ip_port':
                    LOG.debug('Skipped: Duplicate IP-Port for vs %s', vs_name)
                    return None, None, None, None
                if service_updated:
                    break
            services_obj = [{'port': port, 'enable_ssl': enable_ssl}]
        else:
            # Port 0 / 'any': claim every port range not already used by
            # VSes sharing this VIP.
            used_ports = []
            for vs in vs_dup_ips:
                for service in vs['services']:
                    if service.get('port_range_end', None):
                        used_ports.extend(range(
                            int(service['port']),
                            int(service['port_range_end']) + 1
                        ))
                    else:
                        used_ports.append(int(service['port']))
            if used_ports and min(used_ports) == 1 and max(used_ports) == 65535:
                LOG.debug('Skipped: Duplicate IP-Port for vs %s', vs_name)
                return None, None, None, None
            if used_ports:
                services_obj = []
                if conv_const.PORT_END not in used_ports:
                    used_ports.append(conv_const.PORT_END + 1)
                used_ports = sorted(used_ports, key=int)
                start = conv_const.PORT_START
                for i in range(len(used_ports)):
                    if start == used_ports[i]:
                        start += 1
                        continue
                    end = int(used_ports[i]) - 1
                    if end < start:
                        start += 1
                        continue
                    services_obj.append({'port': start,
                                         'port_range_end': end,
                                         'enable_ssl': enable_ssl})
                    start = int(used_ports[i]) + 1
            else:
                services_obj = [
                    {'port': 1, 'port_range_end': conv_const.PORT_END,
                     'enable_ssl': enable_ssl}]
        # Getting vrf ref
        if vrf:
            self.add_vrf(avi_config, vrf, cloud_name)

        vrf_config = avi_config['VrfContext']
        vrf_ref = self.get_vrf_context_ref(destination, vrf_config,
                                           'virtual service', vs_name,
                                           cloud_name)
        if input_vrf:
            vrf_ref = self.get_object_ref(input_vrf, 'vrfcontext',
                                          cloud_name=cloud_name)
        if not vrf_ref:
            vrf_ref = self.get_object_ref('global', 'vrfcontext',
                                          cloud_name=cloud_name)

        updated_vsvip_ref = None
        if parse_version(controller_version) >= parse_version('17.1'):
            vs_vip_name = self.create_update_vsvip(
                ip_addr, avi_config['VsVip'],
                self.get_object_ref(tenant_name, 'tenant'),
                self.get_object_ref(cloud_name, 'cloud', tenant=tenant_name),
                prefix,
                vrf_ref)
            if vs_vip_name == '':
                updated_vsvip_ref = ''
            else:
                updated_vsvip_ref = self.get_object_ref(vs_vip_name, 'vsvip',
                                                        tenant_name, cloud_name)
        return services_obj, ip_addr, updated_vsvip_ref, vrf_ref

    def update_service(self, port, vs, enable_ssl):
        """
        Iterates over services of existing vs in converted list to update
        services for port overlapping scenario.
        :param port: port for current VS (str or int)
        :param vs: VS from converted config list
        :param enable_ssl: value to put in service object
        :return: True/False whether a service was updated, or the string
            'duplicate_ip_port' when the exact ip-port is already taken
        """
        service_updated = False
        vs_new_service = []
        for service in vs['services']:
            port_end = service.get('port_range_end', None)
            if not port_end and int(service['port']) == int(port):
                return 'duplicate_ip_port'
            if port_end and (service['port'] <= int(port) <= port_end):
                # NOTE(review): membership test compares possibly-str `port`
                # to int constants; '1' would not match PORT_START — confirm
                # callers' port type.
                if port not in [conv_const.PORT_START, conv_const.PORT_END]:
                    if service['port'] == int(port) == port_end:
                        return 'duplicate_ip_port'
                    elif service['port'] == int(port):
                        # shrink range from the left
                        service['port'] = int(port) + 1
                    elif service['port_range_end'] == int(port):
                        # shrink range from the right
                        service['port_range_end'] = int(port) - 1
                    else:
                        # split the range around the claimed port
                        new_port = int(port) + 1
                        new_end = service['port_range_end']
                        service['port_range_end'] = int(port) - 1
                        new_service = {'port': new_port,
                                       'port_range_end': new_end,
                                       'enable_ssl': enable_ssl}
                        vs_new_service.append(new_service)
                elif port == conv_const.PORT_START:
                    service['port'] = 2
                elif port == conv_const.PORT_END:
                    service['port_range_end'] = (conv_const.PORT_START - 1)
                service_updated = True
                break
        vs['services'].extend(vs_new_service)
        return service_updated
else: + new_port = int(port) + 1 + new_end = service['port_range_end'] + service['port_range_end'] = int(port) - 1 + new_service = {'port': new_port, + 'port_range_end': new_end, + 'enable_ssl': enable_ssl} + vs_new_service.append(new_service) + elif port == conv_const.PORT_START: + service['port'] = 2 + elif port == conv_const.PORT_END: + service['port_range_end'] = (conv_const.PORT_START - 1) + service_updated = True + break + vs['services'].extend(vs_new_service) + return service_updated + + def add_vrf(self, avi_config, vrf, cloud_ref): + vrf_name = 'vrf-%s' % vrf + vrf_list = avi_config['VrfContext'] + vrf_obj = [obj for obj in vrf_list if obj['name'] == vrf_name] + if not vrf_obj: + vrf_obj = { + "name": vrf_name, + "system_default": False, + "cloud_ref": self.get_object_ref(cloud_ref, 'cloud'), + "tenant_ref": self.get_object_ref('admin', 'tenant') + } + if vrf_name == 'global': + vrf_obj['system_default'] = True + vrf_list.append(vrf_obj) + + def create_update_vsvip(self, vip, vsvip_config, tenant_ref, cloud_ref, + prefix, vrf_ref): + """ + This functions defines that create or update VSVIP object. + :param vip: vip of VS + :param vsvip_config: List of vs object + :param tenant_ref: tenant reference + :param cloud_ref: cloud reference + :param prefix: Name prefix + :param vrf_ref: VRF reference + :return: None + """ + + name = vip + '-vsvip' + # Added prefix for objects + if prefix: + name = '%s-%s' % (prefix, name) + # Get the exsting vsvip object list if present + vsvip = [vip_obj for vip_obj in vsvip_config if vip_obj['name'] == name + and vip_obj.get('vrf_context_ref') == vrf_ref] + if vsvip: + diff_ten = [vips for vips in vsvip if vips['tenant_ref'] != + tenant_ref] + if diff_ten: + LOG.debug('VsVip %s is repeated with vrf %s but different ' + 'tenant %s', name, self.get_name(vrf_ref) if vrf_ref + else 'None', self.get_name(tenant_ref)) + name = '' + # If VSVIP object not present then create new VSVIP object. 
+ else: + vsvip_object = { + "name": name, + "tenant_ref": tenant_ref, + "cloud_ref": cloud_ref, + "vip": [ + { + "vip_id": "0", + "ip_address": { + "type": "V4", + "addr": vip + } + } + ], + } + if vrf_ref: + vsvip_object["vrf_context_ref"] = vrf_ref + vsvip_config.append(vsvip_object) + + return name + + def get_vrf_context_ref(self, f5_entity_mem, vrf_config, entity_string, + entity_name, cloud): + """ + Searches for vrf context refs in converted pool config + :param f5_entity_mem: f5 entity or object like pool + :param vrf_config: converted vrf config + :param entity_string: entity string + :param entity_name: name of f5 entity + :param cloud: name of the cloud + :return: returns list of vrf refs assigned to entity in avi config + """ + vrf_ref = None + f5_entity_mem = ':' in f5_entity_mem and f5_entity_mem.split(':')[0] \ + or f5_entity_mem if f5_entity_mem else None + vrf = 'vrf-' + f5_entity_mem.split('%')[1] \ + if f5_entity_mem and '%' in f5_entity_mem else None + vrf_obj = [obj for obj in vrf_config if vrf and obj["name"] == vrf] + if vrf_obj: + vrf_ref = self.get_object_ref( + vrf_obj[0]['name'], 'vrfcontext', cloud_name=cloud) + else: + LOG.warning("VRF not found for %s %s" % (entity_string, + entity_name)) + return vrf_ref + + def get_vs_app_profiles(self, profiles, avi_config, tenant_ref, prefix, + oc_prof, enable_ssl, merge_object_mapping, + sys_dict): + """ + Searches for profile refs in converted profile config if not found + creates default profiles + :param profiles: profiles in f5 config assigned to VS + :param avi_config: converted avi config + :param tenant_ref: Tenant referance + :param prefix: prefix for objects + :param oc_prof: one connect profile + :param enable_ssl: VS ssl enabled flag + :param merge_object_mapping: Merged object mappings + :param sys_dict: System object dict + + :return: returns list of profile refs assigned to VS in avi config + """ + app_profile_refs = [] + app_prof_conf = dict() + app_profile_list = 
    def get_vs_ntwk_profiles(self, profiles, avi_config, prefix,
                             merge_object_mapping, sys_dict):
        """
        Searches for profile refs in converted profile config if not found
        creates default profiles.
        NOTE(review): this method is defined twice in this class; at class
        creation the later definition wins — consider removing one copy.
        :param profiles: profiles in f5 config assigned to VS
        :param avi_config: converted avi config
        :param prefix: prefix for objects
        :param merge_object_mapping: merged object mappings
        :param sys_dict: System object dict
        :return: returns list of profile refs assigned to VS in avi config
        """
        network_profile_names = []
        if not profiles:
            return []
        if isinstance(profiles, str):
            profiles = profiles.replace(" {}", "")
            profiles = {profiles: None}
        for name in profiles.keys():
            # Called tenant method to get object name
            tenant, name = self.get_tenant_ref(name)
            # Added prefix for objects
            if prefix:
                name = prefix + '-' + name
            ntwk_prof_lst = avi_config.get("NetworkProfile")
            sysnw = sys_dict['NetworkProfile']
            network_profiles = [ob for ob in sysnw if
                                ob['name'] == merge_object_mapping[
                                    'network_profile'].get(name)] or \
                               [obj for obj in ntwk_prof_lst if (
                                   obj['name'] == name or name in
                                   obj.get("dup_of", []))]
            if network_profiles:
                # NOTE(review): .split('name=') stores a LIST as the ref —
                # confirm the consumer expects a list rather than a string.
                network_profile_ref = self.get_object_ref(
                    network_profiles[0]['name'], 'networkprofile',
                    tenant=(network_profiles[0]['tenant_ref'])).split('name=')
                network_profile_names.append(network_profile_ref)
        return network_profile_names
tenant=(network_profiles[0]['tenant_ref'])).split('name=') + network_profile_names.append(network_profile_ref) + return network_profile_names + + def get_application_profile_skipped(self, profile_csv_list, app_profile_ref, + vs_ref): + """ + This functions defines that get the skipped list of CSV row + :param profile_csv_list: List of profile(F5 type) csv rows + :param app_profile_ref: Reference of application profile + :param vs_ref: Name of VS + :return: application profile name and skipped sttribute list + """ + + app_profile_name = self.get_name(app_profile_ref) + skipped_list = self.get_csv_skipped_list( + profile_csv_list, app_profile_name, vs_ref, field_key='application_http_profile') + return app_profile_name, skipped_list + + def get_vs_ntwk_profiles(self, profiles, avi_config, prefix, + merge_object_mapping, sys_dict): + """ + Searches for profile refs in converted profile config if not found + creates default profiles + :param profiles: profiles in f5 config assigned to VS + :param avi_config: converted avi config + :param prefix: prefix for objects + :param merge_object_mapping: merged object mappings + :param sys_dict: System object dict + :return: returns list of profile refs assigned to VS in avi config + """ + network_profile_names = [] + if not profiles: + return [] + if isinstance(profiles, str): + profiles = profiles.replace(" {}", "") + profiles = {profiles: None} + for name in profiles.keys(): + # Called tenant method to get object name + tenant, name = self.get_tenant_ref(name) + # Added prefix for objects + if prefix: + name = prefix + '-' + name + ntwk_prof_lst = avi_config.get("NetworkProfile") + sysnw = sys_dict['NetworkProfile'] + network_profiles = [ob for ob in sysnw if + ob['name'] == merge_object_mapping[ + 'network_profile'].get(name)] or \ + [obj for obj in ntwk_prof_lst if ( + obj['name'] == name or name in + obj.get("dup_of", []))] + if network_profiles: + network_profile_ref = self.get_object_ref( + network_profiles[0]['name'], 
'networkprofile', + tenant=(network_profiles[0]['tenant_ref'])).split('name=') + network_profile_names.append(network_profile_ref) + return network_profile_names + + def get_app_persistence_profile_skipped(self, csv_writer_dict_list, + pool_object, vs_ref): + """ + This functions defines that get the skipped list of CSV row + :param csv_writer_dict_list: List of csv rows + :param pool_object: object of pool + :param vs_ref: Name of VS + :return: profile name and skipped attribute list + """ + + app_persistence_profile_name = self.get_name( + pool_object['application_persistence_profile_ref']) + csv_object = self.get_csv_object_list(csv_writer_dict_list, + ['persistence']) + skipped_list = self.get_csv_skipped_list( + csv_object, app_persistence_profile_name, vs_ref, + field_key='app_per_profile') + return app_persistence_profile_name, skipped_list + + def get_ssl_profile_skipped(self, profile_csv_list, ssl_profile_ref, + vs_ref): + """ + This functions defines that get the skipped list of CSV row + :param profile_csv_list: List of profile(F5 type) csv rows + :param ssl_profile_ref: Reference of ssl profile + :param vs_ref: Name of VS + :return: ssl profile name and skipped sttribute list + """ + + ssl_profile_name = self.get_name(ssl_profile_ref) + skipped_list = self.get_csv_skipped_list( + profile_csv_list, ssl_profile_name, vs_ref, field_key='ssl_profile') + return ssl_profile_name, skipped_list + + def get_conv_status_by_obj_name(self, obj_name): + conv_status_list = list(filter(lambda obj: obj.get("NsxT ID") == obj_name, csv_writer_dict_list)) + conv_status = dict() + conv_status["status"] = conv_status_list[0]["Status"] + conv_status['skipped'] = conv_status_list[0]["Skipped settings"] + conv_status['indirect'] = conv_status_list[0]["Indirect mapping"] + conv_status['na_list'] = conv_status_list[0]['Not Applicable'] + return conv_status + + def clone_pool_if_shared(self, ref, avi_config, vs_name, tenant, p_tenant, + persist_type, controller_version, 
app_prof_ref,is_pool_group_used, + cloud_name='Default-Cloud', prefix=None): + """ + clones pool or pool group if its shard between multiple VS or partitions + in Nsxt + :param ref: reference of pool or pool group + :param avi_config: Avi configuration cloned pool or pool groups to be + added + :param vs_name: Name of the vs to be added + :param tenant: tenant name of vs + :param p_tenant: tenant name of pool + :param persist_type: persistence profile type + :param controller_version: + :param app_prof_ref: Application profile referance + :param sysdict: + :param cloud_name: + :param prefix: + :return: + """ + is_pool_group = False + pool_group_obj = None + # Added prefix for objects + if prefix: + ref = prefix + '-' + ref + # Search the pool or pool group with name in avi config for the same + # tenant as VS + pool_obj = [pool for pool in avi_config['Pool'] if pool['name'] == ref + and pool['tenant_ref'] == self.get_object_ref(tenant, + 'tenant')] + pool_per_ref = pool_obj[0].get( + 'application_persistence_profile_ref') if pool_obj else None + pool_per_name = self.get_name(pool_per_ref) if pool_per_ref else None + pool_per_types = [obj['persistence_type'] for obj in (avi_config[ + 'ApplicationPersistenceProfile']) if obj['name'] == + pool_per_name] if pool_per_name else [] + pool_per_type = pool_per_types[0] if pool_per_types else None + if not pool_obj: + pool_group_obj = [pool for pool in avi_config['PoolGroup'] + if pool['name'] == ref and + pool['tenant_ref'] == self.get_object_ref( + tenant, 'tenant')] + if pool_group_obj: + is_pool_group = True + if p_tenant: + shared_vs = [obj for obj in avi_config['VirtualService'] + if obj.get("pool_ref", "") == self.get_object_ref( + ref, 'pool', tenant=p_tenant, cloud_name=cloud_name)] + if not shared_vs: + shared_vs = [obj for obj in avi_config['VirtualService'] + if obj.get("pool_group_ref", "") == + self.get_object_ref( + ref, 'poolgroup', tenant=p_tenant, + cloud_name=cloud_name)] + else: + shared_vs = [obj for obj 
in avi_config['VirtualService'] + if obj.get("pool_ref", "") == self.get_object_ref( + ref, 'pool', tenant=tenant, cloud_name=cloud_name)] + if not shared_vs: + shared_vs = [obj for obj in avi_config['VirtualService'] + if obj.get("pool_group_ref", "") == + self.get_object_ref( + ref, 'poolgroup', tenant=tenant, + cloud_name=cloud_name)] + if not tenant == p_tenant : + if is_pool_group: + ref = self.clone_pool_group(ref, vs_name, avi_config, True, + tenant, cloud_name=cloud_name) + else: + ref = self.clone_pool(ref, vs_name, avi_config['Pool'], + True, tenant) + if pool_obj and not shared_vs: + if pool_obj[0].get('cloud_ref'): + if self.get_name(pool_obj[0]['cloud_ref']) != cloud_name: + ref = self.clone_pool(ref, vs_name, avi_config['Pool'], + True, tenant) + + if is_pool_group and not shared_vs: + if ref in is_pool_group_used.keys(): + if pool_group_obj[0].get('members'): + if pool_group_obj[0]['members'][0]['pool_ref'].split('cloud=')[-1] != cloud_name: + shared_vs = is_pool_group_used.get(ref) + is_pool_group = True + + if shared_vs: + if is_pool_group: + ref = self.clone_pool_group(ref, vs_name, avi_config, True, + tenant, cloud_name=cloud_name) + else: + shared_appref = shared_vs[0].get('application_profile_ref') + shared_apptype = None + if shared_appref: + shared_appname = self.get_name(shared_appref) + shared_appobjs = [ob for ob in (avi_config[ + 'ApplicationProfile']) if ob['name'] == + shared_appname] + shared_appobj = shared_appobjs[0] if shared_appobjs else {} + shared_apptype = shared_appobj['type'] if shared_appobj \ + else None + app_prof_name = self.get_name(app_prof_ref) + app_prof_objs = [appob for appob in (avi_config[ + 'ApplicationProfile']) if appob['name'] == + app_prof_name] + app_prof_obj = app_prof_objs[0] if app_prof_objs else {} + app_prof_type = app_prof_obj['type'] if app_prof_obj else None + + if self.is_pool_clone_criteria( + controller_version, app_prof_type, shared_apptype, + persist_type, pool_per_type, shared_appobj, + 
app_prof_obj): + LOG.debug('Cloned the pool %s for VS %s', ref, vs_name) + ref = self.clone_pool(ref, vs_name, avi_config['Pool'], + True, tenant) + else: + LOG.debug("Shared pool %s for VS %s", ref, vs_name) + + return ref, is_pool_group + + def is_pool_clone_criteria(self, controller_version, app_prof_type, + shared_apptype, persist_type, pool_per_type, + shared_appobj, app_prof_obj): + if parse_version(controller_version) < parse_version( + '17.1.6') or app_prof_type != 'APPLICATION_PROFILE_TYPE_HTTP' \ + or shared_apptype != app_prof_type or ( + persist_type != None and persist_type != + 'PERSISTENCE_TYPE_HTTP_COOKIE') or ( + pool_per_type != None and pool_per_type != + 'PERSISTENCE_TYPE_HTTP_COOKIE') or ( + shared_appobj.get('http_profile', {}).get( + 'connection_multiplexing_enabled') != app_prof_obj.get( + 'http_profile', {}).get('connection_multiplexing_enabled') or ( + shared_appobj.get('http_profile', {}).get( + 'cache_config') != app_prof_obj.get( + 'http_profile', {}).get('cache_config'))): + return True + else: + return False + + def clone_pool_group(self, pool_group_name, clone_for, avi_config, is_vs, + tenant='admin', cloud_name='Default-Cloud'): + """ + If pool is shared with other VS pool is cloned for other VS as Avi dose + not support shared pools with new pool name as - + :param pool_group_name: Name of the pool group to be cloned + :param clone_for: Name of the object/entity for pool group to be cloned + :param avi_config: new pool to be added to avi config + :param is_vs: True if clone is called for VS + :param tenant: if nsxt pool is shared across partition then coned for + tenant + :param cloud_name: + :return: new pool group name + """ + pg_ref = None + new_pool_group = None + for pool_group in avi_config['PoolGroup']: + if pool_group["name"] == pool_group_name: + new_pool_group = copy.deepcopy(pool_group) + break + if new_pool_group: + if pool_group_name in used_pool_groups: + used_pool_groups[pool_group_name] += 1 + else: + 
used_pool_groups[pool_group_name] = 1 + LOG.debug('Cloning pool group for %s', clone_for) + new_pool_group["name"] = '{}-{}'.format( + pool_group_name, used_pool_groups[pool_group_name]) + pg_ref = new_pool_group["name"] + new_pool_group["tenant_ref"] = self.get_object_ref(tenant, 'tenant') + avi_config['PoolGroup'].append(new_pool_group) + for index, member in enumerate(new_pool_group['members']): + pool_name = self.get_name(member['pool_ref']) + if not pool_name.startswith(pool_group_name): + del new_pool_group['members'][index] + continue + pool_name = self.clone_pool(pool_name, clone_for, + avi_config['Pool'], is_vs, tenant) + member['pool_ref'] = self.get_object_ref( + pool_name, 'pool', tenant=tenant, cloud_name=cloud_name) + return pg_ref + + def clone_pool(self, pool_name, clone_for, avi_pool_list, is_vs, + tenant=None): + """ + If pool is shared with other VS pool is cloned for other VS as Avi dose + not support shared pools with new pool name as - + :param pool_name: Name of the pool to be cloned + :param clone_for: Name of the VS for pool to be cloned + :param avi_pool_list: new pool to be added to this list + :param is_vs: True if this cloning is for VS + :param tenant: if pool is shared across partition then coned for tenant + :return: new pool object + """ + LOG.debug("Cloning pool %s for %s " % (pool_name, clone_for)) + new_pool = None + for pool in avi_pool_list: + if pool["name"] == pool_name: + new_pool = copy.deepcopy(pool) + break + if new_pool: + if pool_name in used_pool: + used_pool[pool_name] += 1 + else: + used_pool[pool_name] = 1 + LOG.debug('Cloning Pool for %s', clone_for) + new_pool["name"] = '{}-{}'.format(pool_name, used_pool[pool_name]) + if tenant: + new_pool["tenant_ref"] = self.get_object_ref(tenant, 'tenant') + if is_vs: + # removing config added from VS config to pool + new_pool["application_persistence_profile_ref"] = None + new_pool["ssl_profile_ref"] = None + new_pool["ssl_key_and_certificate_ref"] = None + 
new_pool["pki_profile_ref"] = None + if new_pool.get('placement_networks'): + del new_pool['placement_networks'] + avi_pool_list.append(new_pool) + pool_ref = new_pool["name"] + LOG.debug("Cloned pool successfully %s for %s " % ( + new_pool["name"], clone_for)) + return pool_ref \ No newline at end of file diff --git a/python/avi/migrationtools/nsxt_converter/converter_constants.py b/python/avi/migrationtools/nsxt_converter/converter_constants.py new file mode 100644 index 0000000000..c4a632d77f --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/converter_constants.py @@ -0,0 +1,22 @@ +# Copyright 2021 VMware, Inc. +# SPDX-License-Identifier: Apache License 2.0 + +import yaml +import os + +STATUS_SKIPPED = 'SKIPPED' +STATUS_SUCCESSFUL = 'SUCCESSFUL' +STATUS_ERROR = 'ERROR' +HM_CUSTOM_KEY = 'healthmonitor_custom_config' +PLACE_HOLDER_STR = "auto_created" +OBJECT_TYPE_HTTP_POLICY_SET = "httppolicyset" + +def init(): + """ + This function defines that to initialize constant from yaml file + :return: None + """ + global nsxt_command_status + with open(os.path.dirname(__file__) + "/command_status.yaml") as stream: + nsxt_command_status = yaml.safe_load(stream) + return nsxt_command_status.get('NSXT') diff --git a/python/avi/migrationtools/nsxt_converter/custom_config.yaml b/python/avi/migrationtools/nsxt_converter/custom_config.yaml new file mode 100644 index 0000000000..f2336aa827 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/custom_config.yaml @@ -0,0 +1,9 @@ +healthmonitor_custom_config: + - monitor_name: prefix-default-http-lb-monitor + avi_config: + receive_timeout: 4 + failed_checks: 2 + send_interval: 10 + successful_checks: 2 + type: LBHttpMonitorProfile + diff --git a/python/avi/migrationtools/nsxt_converter/get_certificates.py b/python/avi/migrationtools/nsxt_converter/get_certificates.py new file mode 100644 index 0000000000..8034fd7957 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/get_certificates.py @@ -0,0 +1,74 @@ 
+import argparse +import parser +from requests.auth import HTTPBasicAuth + +import requests +import pprint + +pp = pprint.PrettyPrinter(indent=2) +def get_self_signed_certificate(args): + ''' + + :param args: + :return: + ''' + nsxt_user = args.nsxt_user + nsxt_password = args.nsxt_password + cert_name = args.cert_name + + print(nsxt_password,nsxt_user) + base_url = "http://127.0.0.1:7440/nsxapi/api/v1/trust-management/certificates" + headers = { + 'Content-Type': 'application/json', + 'x-nsx-username' : nsxt_user + } + + auth = HTTPBasicAuth(nsxt_user, nsxt_password) + + certificate_id = None + resp = requests.get(base_url, headers=headers, auth=auth) + print(resp) + if resp.status_code == 200: + for certificate in resp.json()['results']: + pp.pprint(certificate.get("display_name")) + if certificate.get("display_name") == cert_name: + certificate_id = certificate.get("id") + pp.pprint(certificate_id) + break + + data = {} + if certificate_id: + url = base_url + "/" + certificate_id + '/' + "?action=get_private" + resp = requests.get(url, headers=headers, auth=auth) + cert = resp.json() + data["key"] = cert['pem_encoded'] + body = { + "certificate" : cert['private_key'] + } + data['certificate'] = body + pp.pprint(data) + return data + + +def main(): + HELP_STR = """ + Usage: + python get_certificates.py -u admin -p Admin!23Admin -c test-cert + """ + + parser = argparse.ArgumentParser( + formatter_class=argparse.RawTextHelpFormatter, description=HELP_STR) + parser.add_argument('-u', '--nsxt_user', required=True, + help='NSX-T User name') + parser.add_argument('-p', '--nsxt_password', required=True, + help='NSX-T Password') + parser.add_argument('-c', '--cert_name', required=True, + help='SSL Certificate name') + args = parser.parse_args() + print(args) + get_self_signed_certificate(args) + + + +if __name__ == '__main__': + main() diff --git a/python/avi/migrationtools/nsxt_converter/monitor_converter.py 
b/python/avi/migrationtools/nsxt_converter/monitor_converter.py old mode 100644 new mode 100755 index bcfab16f46..812ae7d5b8 --- a/python/avi/migrationtools/nsxt_converter/monitor_converter.py +++ b/python/avi/migrationtools/nsxt_converter/monitor_converter.py @@ -1,68 +1,534 @@ +import time, logging + import com.vmware.nsx_policy.model_client as model_client -def get_alb_response_codes(response_codes): - if not response_codes: - return None - HttpResponseCode = model_client.ALBHealthMonitorHttp - codes = list() - for code in response_codes: - if code<200: - if HttpResponseCode.HTTP_RESPONSE_CODE_1XX not in codes: - codes.append(HttpResponseCode.HTTP_RESPONSE_CODE_1XX) - elif code>199 and code<300: - if HttpResponseCode.HTTP_RESPONSE_CODE_2XX not in codes: - codes.append(HttpResponseCode.HTTP_RESPONSE_CODE_2XX) - elif code>299 and code<400: - if HttpResponseCode.HTTP_RESPONSE_CODE_3XX not in codes: - codes.append(HttpResponseCode.HTTP_RESPONSE_CODE_3XX) - elif code>399 and code<500: - if HttpResponseCode.HTTP_RESPONSE_CODE_4XX not in codes: - codes.append(HttpResponseCode.HTTP_RESPONSE_CODE_4XX) - elif code>499 and code<600: - if HttpResponseCode.HTTP_RESPONSE_CODE_5XX not in codes: - codes.append(HttpResponseCode.HTTP_RESPONSE_CODE_5XX) - return codes - - -def update_alb_type(lb_hm, alb_hm): - - if lb_hm['resource_type'] == 'LBHttpMonitorProfile': +from avi.migrationtools.avi_migration_utils import update_count +from avi.migrationtools.nsxt_converter.conversion_util import NsxtConvUtil, csv_writer_dict_list +import avi.migrationtools.nsxt_converter.converter_constants as conv_const +from avi.migrationtools.avi_migration_utils import MigrationUtil +import avi.migrationtools.nsxt_converter.converter_constants as final + +LOG = logging.getLogger(__name__) + +conv_utils = NsxtConvUtil() +common_avi_util = MigrationUtil() + + +class MonitorConfigConv(object): + def __init__(self, nsxt_monitor_attributes, object_merge_check, merge_object_mapping, sys_dict): + """ + + 
:param nsxt_monitor_attributes: NsxT monitor attributes from yaml file. + :param prefix: prefix for objects + :param object_merge_check: flag for merge objects + """ + self.supported_types = nsxt_monitor_attributes['Monitor_Supported_Types'] + self.tup = "time until up" + self.supported_attributes = \ + nsxt_monitor_attributes['Monitor_Supported_Attributes'] + self.dest_key = "dest" + self.http_attr = nsxt_monitor_attributes['Monitor_http_attr'] + self.https_attr = nsxt_monitor_attributes['Monitor_https_attr'] + self.tcp_attr = nsxt_monitor_attributes['Monitor_tcp_attr'] + self.udp_attr = nsxt_monitor_attributes['Monitor_udp_attr'] + self.ping_attr = nsxt_monitor_attributes['Monitor_ping_attr'] + self.common_na_attr = nsxt_monitor_attributes['Common_Na_List'] + self.icmp_ignore_attr = nsxt_monitor_attributes["Monitor_icmp_ignore"] + self.passive_indirect_attr = nsxt_monitor_attributes["Monitor_passive_indirect"] + self.server_ssl_indirect_attr = nsxt_monitor_attributes["Monitor_server_ssl_indirect_attributes"] + self.server_ssl_supported_attr = nsxt_monitor_attributes["Monitor_server_ssl_supported_attributes"] + self.object_merge_check = object_merge_check + self.merge_object_mapping = merge_object_mapping + self.sys_dict = sys_dict + self.monitor_count = 0 + self.certkey_count = 0 + self.pki_count = 0 + + def get_alb_response_codes(self, response_codes): + if not response_codes: + return None + HttpResponseCode = model_client.ALBHealthMonitorHttp + codes = list() + for code in response_codes: + if code < 200: + if HttpResponseCode.HTTP_RESPONSE_CODE_1XX not in codes: + codes.append(HttpResponseCode.HTTP_RESPONSE_CODE_1XX) + elif code > 199 and code < 300: + if HttpResponseCode.HTTP_RESPONSE_CODE_2XX not in codes: + codes.append(HttpResponseCode.HTTP_RESPONSE_CODE_2XX) + elif code > 299 and code < 400: + if HttpResponseCode.HTTP_RESPONSE_CODE_3XX not in codes: + codes.append(HttpResponseCode.HTTP_RESPONSE_CODE_3XX) + elif code > 399 and code < 500: + if 
HttpResponseCode.HTTP_RESPONSE_CODE_4XX not in codes: + codes.append(HttpResponseCode.HTTP_RESPONSE_CODE_4XX) + elif code > 499 and code < 600: + if HttpResponseCode.HTTP_RESPONSE_CODE_5XX not in codes: + codes.append(HttpResponseCode.HTTP_RESPONSE_CODE_5XX) + return codes + + def update_alb_type(self, lb_hm, alb_hm, skipped): + if lb_hm['resource_type'] == 'LBHttpMonitorProfile': + alb_hm['type'] = 'HEALTH_MONITOR_HTTP' + + alb_hm['http_monitor'] = dict( + http_request=lb_hm['request_url'], + http_request_body=lb_hm.get('request_body'), + http_response=lb_hm.get('response_body'), + http_response_code=self.get_alb_response_codes(lb_hm['response_status_codes']), + ) + skipped = [key for key in skipped if key not in self.http_attr] + elif lb_hm['resource_type'] == 'LBHttpsMonitorProfile': + alb_hm['type'] = 'HEALTH_MONITOR_HTTPS' + alb_hm['https_monitor'] = dict( + http_request=lb_hm['request_url'], + http_request_body=lb_hm.get('request_body'), + http_response=lb_hm.get('response_body'), + http_response_code=self.get_alb_response_codes(lb_hm['response_status_codes']), + + ) + + skipped = [key for key in skipped if key not in self.https_attr] + + elif lb_hm['resource_type'] == 'LBIcmpMonitorProfile': + alb_hm['type'] = 'HEALTH_MONITOR_PING' + elif lb_hm['resource_type'] == 'LBTcpMonitorProfile': + alb_hm['type'] = 'HEALTH_MONITOR_TCP' + elif lb_hm['resource_type'] == 'LBUdpMonitorProfile': + alb_hm['type'] = 'HEALTH_MONITOR_UDP' + return skipped + + def convert(self, alb_config, nsx_lb_config, prefix, tenant, custom_mapping): + converted_alb_ssl_certs = list() + converted_pki_profile = list() + alb_config['HealthMonitor'] = list() + converted_objs = [] + indirect = [] + u_ignore = [] + progressbar_count = 0 + custom_config = custom_mapping.get( + conv_const.HM_CUSTOM_KEY, dict() + ) if custom_mapping else dict() + skipped_list = [] + server_ssl_indirect_list = [] + converted_alb_monitor = [] + tenant_name, name = conv_utils.get_tenant_ref(tenant) + if not tenant: + 
tenant = tenant_name + total_size = len(nsx_lb_config['LbMonitorProfiles']) + print("Converting Monitors...") + LOG.info('[MONITOR] Converting Monitors...') + for lb_hm in nsx_lb_config['LbMonitorProfiles']: + try: + LOG.info('[MONITOR] Migration started for HM {}'.format(lb_hm['display_name'])) + progressbar_count += 1 + monitor_type, name = self.get_name_type(lb_hm) + if '/' in monitor_type: + monitor_type = monitor_type.split('/')[-1] + m_tenant, m_name = conv_utils.get_tenant_ref(name) + # Check if custom cofig present for this HM + r_hm = [obj for obj in custom_config if + obj['monitor_name'] == m_name] + if r_hm: + LOG.debug( + "Found custom config for %s replacing with custom config" + % m_name) + r_hm = r_hm[0] + avi_monitor = r_hm['avi_config'] + # Added prefix for objects + if prefix: + avi_monitor['name'] = prefix + '-' + m_name + else: + avi_monitor['name'] = m_name + if tenant: + m_tenant = tenant + avi_monitor['tenant_ref'] = conv_utils.get_object_ref( + m_tenant, 'tenant') + alb_config["HealthMonitor"].append(avi_monitor) + conv_utils.add_conv_status( + 'monitor', monitor_type, m_name, { + 'status': conv_const.STATUS_SUCCESSFUL + }, [{'health_monitor': avi_monitor}]) + continue + if lb_hm['resource_type'] == 'LBPassiveMonitorProfile': + indirect = self.passive_indirect_attr + conv_status = dict() + conv_status['status'] = conv_const.STATUS_SUCCESSFUL + conv_status['indirect'] = indirect + conv_utils.add_conv_status('monitor', lb_hm['resource_type'], lb_hm['display_name'], conv_status) + continue + + monitor_type, name = self.get_name_type(lb_hm) + skipped = [val for val in lb_hm.keys() + if val not in self.supported_attributes] + na_list = [val for val in lb_hm.keys() + if val in self.common_na_attr] + if prefix: + name = prefix + '-' + name + if self.object_merge_check: + if name in self.merge_object_mapping.keys(): + name = name+"-"+lb_hm["id"] + else: + monitor_temp = list(filter(lambda hm: hm["name"] == name, alb_config['HealthMonitor'])) + if 
monitor_temp: + name = name + "-" + lb_hm["id"] + alb_hm = dict( + name=name, + failed_checks=lb_hm['fall_count'], + receive_timeout=lb_hm['timeout'], + send_interval=lb_hm['interval'], + successful_checks=lb_hm.get('rise_count', None) + ) + if lb_hm.get('monitor_port', None): + alb_hm['monitor_port'] = lb_hm.get('monitor_port', None) + + alb_hm['tenant_ref'] = conv_utils.get_object_ref(tenant,'tenant') + server_ssl_indirect = [] + if monitor_type == "LBHttpMonitorProfile": + skipped = self.convert_http(lb_hm, alb_hm, skipped) + elif monitor_type == "LBHttpsMonitorProfile": + skipped, server_ssl_indirect = self.convert_https(lb_hm, alb_hm, skipped, alb_config, prefix, + tenant, converted_objs, + converted_alb_ssl_certs, converted_pki_profile) + elif monitor_type == "LBIcmpMonitorProfile": + u_ignore = self.icmp_ignore_attr + skipped = self.convert_icmp(lb_hm, alb_hm, skipped) + elif monitor_type == "LBTcpMonitorProfile": + skipped = self.convert_tcp(lb_hm, alb_hm, skipped) + elif monitor_type == "LBUdpMonitorProfile": + skipped = self.convert_udp(lb_hm, alb_hm, skipped) + + ignore_for_defaults = {} + skipped_list.append(skipped) + server_ssl_indirect_list.append(server_ssl_indirect) + if self.object_merge_check: + common_avi_util.update_skip_duplicates(alb_hm, + alb_config['HealthMonitor'], 'health_monitor', + converted_objs, name, None, self.merge_object_mapping, + monitor_type, prefix, + self.sys_dict['HealthMonitor']) + self.monitor_count += 1 + else: + alb_config['HealthMonitor'].append(alb_hm) + val = dict( + id=lb_hm["id"], + name=name, + resource_type=lb_hm['resource_type'], + alb_hm=alb_hm + + ) + converted_alb_monitor.append(val) + msg = "Monitor conversion started..." 
+ conv_utils.print_progress_bar(progressbar_count, total_size, msg, + prefix='Progress', suffix='') + # time.sleep(1) + + LOG.info('[MONITOR] Migration completed for HM {}'.format(lb_hm['display_name'])) + except: + update_count('error') + LOG.error("[MONITOR] Failed to convert Monitor: %s" % lb_hm['display_name'], + exc_info=True) + conv_utils.add_status_row('monitor', None, lb_hm['display_name'], + conv_const.STATUS_ERROR) + + for index, skipped in enumerate(skipped_list): + conv_status = conv_utils.get_conv_status( + skipped_list[index], indirect, ignore_for_defaults, nsx_lb_config['LbMonitorProfiles'], + u_ignore, na_list) + na_list_hm = [val for val in na_list if val not in self.common_na_attr] + conv_status["na_list"] = na_list_hm + if server_ssl_indirect_list[index]: + conv_status["indirect"].append({"server_ssl": server_ssl_indirect_list[index]}) + name = converted_alb_monitor[index]['name'] + hm_id = converted_alb_monitor[index]['id'] + alb_mig_hm = converted_alb_monitor[index]['alb_hm'] + resource_type = converted_alb_monitor[index]['resource_type'] + if self.object_merge_check: + alb_mig_hm = [hm for hm in alb_config['HealthMonitor'] if + hm.get('name') == self.merge_object_mapping['health_monitor'].get(name)] + conv_utils.add_conv_status('monitor', resource_type, name, conv_status, + [{'health_monitor': alb_mig_hm[0]}]) + else: + conv_utils.add_conv_status('monitor', resource_type, name, conv_status, + [{'health_monitor': alb_mig_hm}]) + if len(conv_status['skipped']) > 0: + LOG.debug('[Monitor] Skipped Attribute {}:{}'.format(name, conv_status['skipped'])) + + for cert in converted_alb_ssl_certs: + indirect = [] + u_ignore = [] + ignore_for_defaults = {} + conv_status = conv_utils.get_conv_status( + [], indirect, ignore_for_defaults, [], + u_ignore, []) + conv_utils.add_conv_status('ssl_key_and_certificate', None, cert['name'], conv_status, + [{"ssl_cert_key": cert}]) + for pki_profile in converted_pki_profile: + indirect = [] + u_ignore = [] + 
ignore_for_defaults = {} + conv_status = conv_utils.get_conv_status( + [], indirect, ignore_for_defaults, [], + u_ignore, []) + conv_utils.add_conv_status('pki_profile', None, pki_profile['name'], conv_status, + [{"pki_profile": pki_profile}]) + + def get_name_type(self, lb_hm): + """ + + """ + return lb_hm['resource_type'], lb_hm['display_name'] + + def convert_http(self, lb_hm, alb_hm, skipped): alb_hm['type'] = 'HEALTH_MONITOR_HTTP' + http_request = self.update_http_request_for_avi(lb_hm) alb_hm['http_monitor'] = dict( - http_request=lb_hm['request_url'], + http_request=http_request, http_request_body=lb_hm.get('request_body'), http_response=lb_hm.get('response_body'), - http_response_code=get_alb_response_codes(lb_hm['response_status_codes']), + http_response_code=self.get_alb_response_codes(lb_hm['response_status_codes']), ) - elif lb_hm['resource_type'] == 'LBHttpsMonitorProfile': + + skipped = [key for key in skipped if key not in self.http_attr] + return skipped + + def convert_https(self, lb_hm, alb_hm, skipped, alb_config, prefix, tenant, converted_objs, + converted_alb_ssl_certs=None, converted_pki_profile=None): + if converted_alb_ssl_certs is None: + converted_alb_ssl_certs = [] + if converted_pki_profile is None: + converted_pki_profile = [] + indirect = [] alb_hm['type'] = 'HEALTH_MONITOR_HTTPS' + https_request = self.update_http_request_for_avi(lb_hm) alb_hm['https_monitor'] = dict( - http_request=lb_hm['request_url'], + http_request=https_request, http_request_body=lb_hm.get('request_body'), http_response=lb_hm.get('response_body'), - http_response_code=get_alb_response_codes(lb_hm['response_status_codes']), + http_response_code=self.get_alb_response_codes(lb_hm['response_status_codes']), ) - elif lb_hm['resource_type'] == 'LBIcmpMonitorProfile': + if lb_hm.get('server_ssl_profile_binding', None): + server_ssl_profile_binding = lb_hm.get('server_ssl_profile_binding', None) + ssl_profile_path = server_ssl_profile_binding["ssl_profile_path"] + 
ssl_profile_name = ssl_profile_path.split('/')[-1] + if prefix: + ssl_profile_name = prefix + '-' + ssl_profile_name + ssl_attributes = { + "ssl_profile_ref": conv_utils.get_object_ref( + ssl_profile_name, 'sslprofile', tenant=tenant) + } + + if server_ssl_profile_binding.get("client_certificate_path", None): + ca_cert_obj = self.update_ca_cert_obj(lb_hm['display_name'], alb_config, [], tenant, prefix, + cert_type='SSL_CERTIFICATE_TYPE_VIRTUALSERVICE') + ssl_attributes[ + "ssl_key_and_certificate_ref"] = "/api/sslkeyandcertificate/?tenant=%s&name=%s" % (tenant, ca_cert_obj.get( + "name")) + converted_alb_ssl_certs.append(ca_cert_obj) + + alb_hm["https_monitor"]['ssl_attributes'] = ssl_attributes + pki_profile = self.create_pki_profile(lb_hm, alb_hm["name"], tenant, alb_hm, converted_pki_profile) + if pki_profile: + pki_id = lb_hm["id"] + "-" + "pki" + pki_profile_name = pki_profile["name"] + if self.object_merge_check: + conv_utils.update_skip_duplicates(pki_profile, + alb_config['PKIProfile'], 'pki_profile', + converted_objs, pki_profile_name, None, + self.merge_object_mapping, None, prefix, + self.sys_dict['PKIProfile']) + self.pki_count += 1 + pki_profile_name = self.merge_object_mapping["pki_profile"].get(pki_profile_name) + else: + converted_objs.append({'pki_profile': pki_profile}) + alb_config['PKIProfile'].append(pki_profile) + alb_hm["pki_profile_ref"] = '/api/pkiprofile/?tenant=%s&name=%s' % (tenant, pki_profile_name) + server_ssl_skipped = [key for key in server_ssl_profile_binding.keys() + if key not in self.server_ssl_supported_attr] + server_ssl_indirect_list = self.server_ssl_indirect_attr + indirect = [val for val in server_ssl_skipped if val in server_ssl_indirect_list] + server_ssl_skipped = [val for val in server_ssl_skipped if val not in server_ssl_indirect_list] + if server_ssl_skipped: + skipped.append({"server_ssl": server_ssl_skipped}) + + skipped = [key for key in skipped if key not in self.https_attr] + return skipped, indirect + + def 
convert_icmp(self, lb_hm, alb_hm, skipped): alb_hm['type'] = 'HEALTH_MONITOR_PING' - elif lb_hm['resource_type'] == 'LBTcpMonitorProfile': + if self.ping_attr: + skipped = [key for key in skipped if key not in self.ping_attr] + + return skipped + + def convert_tcp(self, lb_hm, alb_hm, skipped): alb_hm['type'] = 'HEALTH_MONITOR_TCP' - elif lb_hm['resource_type'] == 'LbUdpMonitorProfile': + alb_hm["tcp_monitor"] = dict() + request = lb_hm.get("send", None) + # request = self.update_request_for_avi(request, False) + response = lb_hm.get("receive", None) + if response == 'none': + response = None + tcp_monitor = {"tcp_request": request, "tcp_response": response} + alb_hm["tcp_monitor"] = tcp_monitor + + # [skipped.append(key) for key in lb_hm.keys() if key not in self.tcp_attr] + skipped = [key for key in skipped if key not in self.tcp_attr] + + return skipped + + def convert_udp(self, lb_hm, alb_hm, skipped): alb_hm['type'] = 'HEALTH_MONITOR_UDP' + request = lb_hm.get("send", None) + # request = self.update_request_for_avi(request, False) + response = lb_hm.get("receive", None) + if response == 'none': + response = None + udp_monitor = {"udp_request": request, "udp_response": response} + alb_hm["udp_monitor"] = udp_monitor + # [skipped.append(key) for key in lb_hm.keys() if key not in self.udp_attr] + skipped = [key for key in skipped if key not in self.tcp_attr] + return skipped -def convert(alb_config, nsx_lb_config): - alb_config['HealthMonitor'] = list() + def update_ca_cert_obj(self, name, avi_config, converted_objs, tenant, prefix, cert_type='SSL_CERTIFICATE_TYPE_CA', + ca_cert=None): + """ + This method create the certs if certificate not present at location + it create placeholder certificate. 
+ :return: + """ - for lb_hm in nsx_lb_config['LbMonitorProfiles']: - if lb_hm['resource_type'] == 'LBPassiveMonitorProfile': - continue - alb_hm = dict( - name=lb_hm['display_name'], - failed_checks=lb_hm['fall_count'], - receive_timeout=lb_hm['timeout'], - send_interval=lb_hm['interval'], - monitor_port=lb_hm.get('monitor_port', None), - ) - update_alb_type(lb_hm, alb_hm) + cert_name = [cert['name'] for cert in avi_config.get("SSLKeyAndCertificate", []) + if cert['name'].__contains__(name) and cert['type'] == cert_type] + + if cert_name: + LOG.warning( + 'SSL ca cert is already exist') + + for cert in avi_config.get("SSLKeyAndCertificate", []): + if cert['name'].__contains__(name) and cert['type'] == cert_type: + return cert + return None + + if not ca_cert: + key, ca_cert = conv_utils.create_self_signed_cert() + name = '%s-%s' % (name, final.PLACE_HOLDER_STR) + LOG.warning('Create self cerificate and key for : %s' % name) + + ssl_kc_obj = None + + if ca_cert: + cert = {"certificate": ca_cert if type(ca_cert) == str else ca_cert.decode()} + ssl_kc_obj = { + 'name': name, + 'tenant_ref': conv_utils.get_object_ref(tenant, 'tenant'), + 'key': key if type(key) == str else key.decode(), + 'certificate': cert, + 'type': 'SSL_CERTIFICATE_TYPE_VIRTUALSERVICE' + } + LOG.info('Added new ca certificate for %s' % name) + if ssl_kc_obj and self.object_merge_check: + if final.PLACE_HOLDER_STR not in ssl_kc_obj['name']: + conv_utils.update_skip_duplicates( + ssl_kc_obj, avi_config['SSLKeyAndCertificate'], + 'ssl_cert_key', converted_objs, name, None, + self.merge_object_mapping, None, prefix, + self.sys_dict['SSLKeyAndCertificate']) + else: + converted_objs.append({'ssl_cert_key': ssl_kc_obj}) + avi_config['SSLKeyAndCertificate'].append(ssl_kc_obj) + self.certkey_count += 1 + else: + converted_objs.append({'ssl_cert_key': ssl_kc_obj}) + avi_config['SSLKeyAndCertificate'].append(ssl_kc_obj) + return ssl_kc_obj + + def update_http_request_for_avi(self, lb_hm): + if 
lb_hm["request_version"] == "HTTP_VERSION_1_0": + version = "HTTP/1.0" + else: + version = "HTTP/1.1" + http_request = lb_hm["request_method"] + " " + lb_hm["request_url"] + " " + version + if lb_hm.get("request_headers"): + for header in lb_hm["request_headers"]: + header_set = header["header_name"] + ":" + header["header_value"] + http_request = http_request + "\r\n" + header_set + if lb_hm.get("exact_request"): + if lb_hm.get("request_body"): + http_request += "\r\n" + lb_hm["request_body"] + return http_request + + def create_pki_profile(self, lb_hm, name, tenant, alb_hm, converted_pki_profile): + if lb_hm["server_ssl_profile_binding"].get("server_auth_ca_paths"): + pki_server_profile = dict() + error = False + ca = self.get_ca_cert(lb_hm["server_ssl_profile_binding"].get("server_auth_ca_paths")) + if ca: + pki_server_profile["ca_certs"] = [{'certificate': ca}] + else: + error = True + if lb_hm["server_ssl_profile_binding"].get("server_auth_crl_paths"): + crl = self.get_crl_cert(lb_hm["server_ssl_profile_binding"].get("server_auth_crl_paths")) + if crl: + pki_server_profile["crls"] = [{'body': crl}] + else: + error = True + else: + pki_server_profile['crl_check'] = False + if not error: + pki_server_profile["name"] = name + "-server-pki" + pki_server_profile["tenant_ref"] = conv_utils.get_object_ref(tenant, "tenant") + converted_pki_profile.append(pki_server_profile) + return pki_server_profile + return False + + def get_ca_cert(self, ca_url): + ca_id = """-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgIQILuJ/vBaBpdK5/W07NmI5DANBgkqhkiG9w0BAQsFADBC +MRMwEQYKCZImiZPyLGQBGRYDbGFiMRUwEwYKCZImiZPyLGQBGRYFcmVwcm8xFDAS +BgNVBAMTC3JlcHJvLUFELUNBMB4XDTIwMDQyOTEzMTgwMVoXDTI1MDQyOTEzMjgw +MVowQjETMBEGCgmSJomT8ixkARkWA2xhYjEVMBMGCgmSJomT8ixkARkWBXJlcHJv +MRQwEgYDVQQDEwtyZXByby1BRC1DQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALj3ChNORETzK1qOIgcF6QMx2KUv/pXx7NQYin0mJgEaPcVD/lH4RR5z +ToswIetCbz3NeajJShfoNV17H/ovvH5iUnvrdajVl7kXM0QmAaLmosKU4BLHgrDd 
+LKKBDMKGw2MQWjjfBHJaH92Yg8+tdtoYCzouQn6ZDHp+7sXqtpngoRIQVHFYQNH2 +8gmkdDQQwp4fveeM7at6NktAB7uMTec6i63yigWrbvhqS0b/d6Y4aTVWH8qWwyCV +nd+7CsEwQk2Y1iopb0Cli5M1bppoJ6a17eONqCaYMb8qShZQZWKygDkfAYD9B9c4 +x0UpRsSUDiVv7Bdc6MrlEPu9dI01BmkCAwEAAaNRME8wCwYDVR0PBAQDAgGGMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFGrxPxqDTdksMOqnt19O1VH0jpznMBAG +CSsGAQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBCwUAA4IBAQBU6HnUmwCShJtNEiL5 +IJFgMh55tp4Vi9E1+q3XI5RwOB700UwmfWUXmOKeeD3871gg4lhqfjDKSxNrRJ3m +CKuE4nwCSgK74BSCgWu3pTpSPjUgRED2IK/03jQCK2TuZgsTe20BUROnr+uRpORI +pVbIDevBvuggxDHfn7JYQE/SXrUaCplaZUjZz6WVHTkLEDfoPeTp5gUPA7x/V4MI +tHTkjIH8nND2pAJRCzLExl5Bf5PKqWjPOqaqyg+hDg2BXm70QOWIMqvxRt9TJAq4 +n6DBZ2ZDOhyFCejCDCSIbku76WGNeT8+0xXjCPaTNBL0AawR77uqa2KZpaCU7e84 +jhiq +-----END CERTIFICATE----- """ + + return ca_id + + def get_crl_cert(self, crl_url): + crl_id = """-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgIQILuJ/vBaBpdK5/W07NmI5DANBgkqhkiG9w0BAQsFADBC +MRMwEQYKCZImiZPyLGQBGRYDbGFiMRUwEwYKCZImiZPyLGQBGRYFcmVwcm8xFDAS +BgNVBAMTC3JlcHJvLUFELUNBMB4XDTIwMDQyOTEzMTgwMVoXDTI1MDQyOTEzMjgw +MVowQjETMBEGCgmSJomT8ixkARkWA2xhYjEVMBMGCgmSJomT8ixkARkWBXJlcHJv +MRQwEgYDVQQDEwtyZXByby1BRC1DQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALj3ChNORETzK1qOIgcF6QMx2KUv/pXx7NQYin0mJgEaPcVD/lH4RR5z +ToswIetCbz3NeajJShfoNV17H/ovvH5iUnvrdajVl7kXM0QmAaLmosKU4BLHgrDd +LKKBDMKGw2MQWjjfBHJaH92Yg8+tdtoYCzouQn6ZDHp+7sXqtpngoRIQVHFYQNH2 +8gmkdDQQwp4fveeM7at6NktAB7uMTec6i63yigWrbvhqS0b/d6Y4aTVWH8qWwyCV +nd+7CsEwQk2Y1iopb0Cli5M1bppoJ6a17eONqCaYMb8qShZQZWKygDkfAYD9B9c4 +x0UpRsSUDiVv7Bdc6MrlEPu9dI01BmkCAwEAAaNRME8wCwYDVR0PBAQDAgGGMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFGrxPxqDTdksMOqnt19O1VH0jpznMBAG +CSsGAQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBCwUAA4IBAQBU6HnUmwCShJtNEiL5 +IJFgMh55tp4Vi9E1+q3XI5RwOB700UwmfWUXmOKeeD3871gg4lhqfjDKSxNrRJ3m +CKuE4nwCSgK74BSCgWu3pTpSPjUgRED2IK/03jQCK2TuZgsTe20BUROnr+uRpORI +pVbIDevBvuggxDHfn7JYQE/SXrUaCplaZUjZz6WVHTkLEDfoPeTp5gUPA7x/V4MI +tHTkjIH8nND2pAJRCzLExl5Bf5PKqWjPOqaqyg+hDg2BXm70QOWIMqvxRt9TJAq4 
+n6DBZ2ZDOhyFCejCDCSIbku76WGNeT8+0xXjCPaTNBL0AawR77uqa2KZpaCU7e84 +jhiq +-----END CERTIFICATE----- """ - alb_config['HealthMonitor'].append(alb_hm) + return crl_id diff --git a/python/avi/migrationtools/nsxt_converter/nsx_cleanup.py b/python/avi/migrationtools/nsxt_converter/nsx_cleanup.py new file mode 100644 index 0000000000..e205512873 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/nsx_cleanup.py @@ -0,0 +1,136 @@ +import json + +from avi.migrationtools.nsxt_converter import nsxt_client as nsx_client_util +from avi.migrationtools.nsxt_converter.nsxt_util import NSXUtil + + +class NSXCleanup: + nsx_api_client = None + vs_not_found = list() + + def __init__(self, nsx_un, nsx_pw, nsx_ip, nsx_port): + self.nsx_api_client = nsx_client_util.create_nsx_policy_api_client( + nsx_un, nsx_pw, nsx_ip, nsx_port, auth_type=nsx_client_util.BASIC_AUTH) + self.nsx_util = NSXUtil(nsx_un, nsx_pw, nsx_ip, nsx_port) + self.nsx_lb_config = self.nsx_util.get_nsx_config() + + def nsx_cleanup(self, vs_names): + nsx_lb_config = self.nsx_lb_config + if vs_names and type(vs_names) == str: + virtual_services = vs_names.split(',') + elif vs_names and type(vs_names) == list: + virtual_services = vs_names + vs_attached_pools = [] + vs_attached_profiles = [] + vs_attached_persis = [] + vs_attached_client_ssl = [] + vs_attached_server_ssl = [] + pool_attached_monitor = [] + if nsx_lb_config.get("LbVirtualServers", None): + for vs_name in virtual_services: + vs_list = list(filter(lambda vs: vs["display_name"] == vs_name, nsx_lb_config['LbVirtualServers'])) + if vs_list: + for vs in vs_list: + if not vs["_system_owned"]: + if vs.get("pool_path"): + vs_attached_pools.append(vs['pool_path'].split('/')[-1]) + if vs.get("sorry_pool_path"): + vs_attached_pools.append(vs['sorry_pool_path'].split('/')[-1]) + if vs.get("lb_persistence_profile_path"): + vs_attached_persis.append(vs['lb_persistence_profile_path'].split('/')[-1]) + if vs.get("application_profile_path"): + 
vs_attached_profiles.append(vs['application_profile_path'].split('/')[-1]) + if vs.get("client_ssl_profile_binding"): + if vs['client_ssl_profile_binding'].get('ssl_profile_path'): + vs_attached_client_ssl.append( + vs['client_ssl_profile_binding']['ssl_profile_path'].split('/')[-1]) + if vs.get("server_ssl_profile_binding"): + if vs['server_ssl_profile_binding'].get('ssl_profile_path'): + vs_attached_server_ssl.append( + vs['server_ssl_profile_binding']['ssl_profile_path'].split('/')[-1]) + + self.nsx_api_client.infra.LbVirtualServers.delete(vs["id"]) + else: + self.vs_not_found.append(vs_name) + + nsx_lb_config = self.nsx_util.get_nsx_config() + + for persis_id in vs_attached_persis: + if nsx_lb_config.get("LbPersistenceProfiles", None): + persis_config = \ + list(filter(lambda pp: pp["id"] == persis_id, nsx_lb_config['LbPersistenceProfiles'])) + if not persis_config[0]["_system_owned"]: + vs_list = [vs["id"] for vs in nsx_lb_config["LbVirtualServers"] if + (vs.get("lb_persistence_profile_path") and + vs.get("lb_persistence_profile_path").split("/")[-1] == persis_id)] + if not vs_list: + self.nsx_api_client.infra.LbPersistenceProfiles.delete(persis_id) + for s_ssl_id in vs_attached_server_ssl: + if nsx_lb_config.get("LbServerSslProfiles", None): + s_ssl_config = \ + list(filter(lambda s_ssl: s_ssl["id"] == s_ssl_id, nsx_lb_config['LbServerSslProfiles'])) + if not s_ssl_config[0]["_system_owned"]: + vs_list = [vs["id"] for vs in nsx_lb_config["LbVirtualServers"] if + (vs.get("server_ssl_profile_binding") and + vs['server_ssl_profile_binding'].get('ssl_profile_path') and + vs['server_ssl_profile_binding']['ssl_profile_path'].split("/")[-1] == s_ssl_id)] + if not vs_list: + self.nsx_api_client.infra.LbServerSslProfiles.delete(s_ssl_id) + + for c_ssl_id in vs_attached_client_ssl: + if nsx_lb_config.get("LbClientSslProfiles", None): + c_ssl_config = \ + list(filter(lambda c_ssl: c_ssl["id"] == c_ssl_id, nsx_lb_config['LbClientSslProfiles'])) + if not 
c_ssl_config[0]["_system_owned"]: + vs_list = [vs["id"] for vs in nsx_lb_config["LbVirtualServers"] if + (vs.get("client_ssl_profile_binding") and + vs['client_ssl_profile_binding'].get('ssl_profile_path') and + vs['client_ssl_profile_binding']['ssl_profile_path'].split("/")[-1] == c_ssl_id)] + if not vs_list: + self.nsx_api_client.infra.LbClientSslProfiles.delete(c_ssl_id) + + for pr_id in vs_attached_profiles: + if nsx_lb_config.get("LbAppProfiles", None): + pr_config = \ + list(filter(lambda pr: pr["id"] == pr_id, nsx_lb_config['LbAppProfiles'])) + if not pr_config[0]["_system_owned"]: + vs_list = [vs["id"] for vs in nsx_lb_config["LbVirtualServers"] if + (vs.get("application_profile_path") and + vs.get("application_profile_path").split("/")[-1] == pr_id)] + if not vs_list: + self.nsx_api_client.infra.LbAppProfiles.delete(pr_id) + + for pool_id in vs_attached_pools: + if nsx_lb_config.get("LbAppProfiles", None): + pool_config = \ + list(filter(lambda pr: pr["id"] == pool_id, nsx_lb_config['LbPools'])) + if not pool_config[0]["_system_owned"]: + vs_list = [vs["id"] for vs in nsx_lb_config["LbVirtualServers"] if + (vs.get("pool_path") and + vs.get("pool_path").split("/")[-1] == pool_id)] + vs_sr_pool_list = [vs["id"] for vs in nsx_lb_config["LbVirtualServers"] if + (vs.get("sorry_pool_path") and + vs.get("sorry_pool_path").split("/")[-1] == pool_id)] + if not vs_list and not vs_sr_pool_list: + if pool_config[0].get("active_monitor_paths"): + active_monitor_list = pool_config[0].get("active_monitor_paths") + for monitor in active_monitor_list: + pool_attached_monitor.append(monitor.split('/')[-1]) + self.nsx_api_client.infra.LbPools.delete(pool_id) + nsx_lb_config = self.nsx_util.get_nsx_config() + for monitor_id in pool_attached_monitor: + if nsx_lb_config.get("LbMonitorProfiles", None): + monitor_config = \ + list(filter(lambda pr: pr["id"] == monitor_id, nsx_lb_config['LbMonitorProfiles'])) + if not monitor_config[0]["_system_owned"]: + pool_list = [] + for pool 
in nsx_lb_config["LbPools"]: + monitor_list = pool.get("active_monitor_paths", None) + if monitor_list: + pool_temp = [True for monitor in monitor_list if + monitor.split('/')[-1] == monitor_id] + if pool_temp: + pool_list.append(pool["id"]) + + if not pool_list: + self.nsx_api_client.infra.LbMonitorProfiles.delete(monitor_id) diff --git a/python/avi/migrationtools/nsxt_converter/nsxt_client.py b/python/avi/migrationtools/nsxt_converter/nsxt_client.py index 486ab5b58f..a5b67f747c 100644 --- a/python/avi/migrationtools/nsxt_converter/nsxt_client.py +++ b/python/avi/migrationtools/nsxt_converter/nsxt_client.py @@ -2,7 +2,6 @@ import requests -from com.vmware import nsx_client from com.vmware import nsx_policy_client from vmware.vapi.bindings.stub import ApiClient from vmware.vapi.bindings.stub import StubFactory @@ -22,10 +21,9 @@ def get_basic_auth_stub_config(user, password, nsx_host, tcp_port=443): session = requests.session() # Since the NSX manager default certificate is self-signed, - # we disable verification. This is dangerous and real code + # we deactivate verification. This is dangerous and real code # should verify that it is talking to a valid server. session.verify = False - requests.packages.urllib3.disable_warnings() nsx_url = 'https://%s:%s' % (nsx_host, tcp_port) connector = connect.get_requests_connector( @@ -41,7 +39,7 @@ def get_basic_auth_stub_config(user, password, nsx_host, tcp_port=443): def get_basic_auth_api_client(user, password, nsx_host, tcp_port=443): stub_config = get_basic_auth_stub_config( user, password, nsx_host, tcp_port) - stub_factory = nsx_client.StubFactory(stub_config) + stub_factory = nsx_policy_client.StubFactory(stub_config) return ApiClient(stub_factory) @@ -54,10 +52,9 @@ def get_session_auth_stub_config(user, password, nsx_host, tcp_port=443): session = requests.session() # Since the NSX manager default certificate is self-signed, - # we disable verification. 
This is dangerous and real code + # we deactivate verification. This is dangerous and real code # should verify that it is talking to a valid server. session.verify = False - requests.packages.urllib3.disable_warnings() nsx_url = 'https://%s:%s' % (nsx_host, tcp_port) resp = session.post(nsx_url + "/api/session/create", data={"j_username": user, "j_password": password}) @@ -78,7 +75,7 @@ def get_session_auth_stub_config(user, password, nsx_host, tcp_port=443): def get_session_auth_api_client(user, password, nsx_host, tcp_port=443): stub_config = get_session_auth_stub_config( user, password, nsx_host, tcp_port) - stub_factory = nsx_client.StubFactory(stub_config) + stub_factory = nsx_policy_client.StubFactory(stub_config) return ApiClient(stub_factory) @@ -96,11 +93,11 @@ def create_api_client(stub_factory_class, user, password, nsx_host, def create_nsx_api_client(user, password, nsx_host, tcp_port=443, auth_type=BASIC_AUTH): - return create_api_client(nsx_client, user, password, nsx_host, + return create_api_client(nsx_policy_client, user, password, nsx_host, tcp_port, auth_type) -def create_nsx_policy_api_client(user, password, nsx_host, tcp_port=443, - auth_type=BASIC_AUTH): - return create_api_client(nsx_policy_client, user, password, nsx_host, - tcp_port, auth_type) +def create_nsx_policy_api_client( + user, password, nsx_host, tcp_port=443, auth_type=BASIC_AUTH): + return create_api_client( + nsx_policy_client, user, password, nsx_host, tcp_port, auth_type) diff --git a/python/avi/migrationtools/nsxt_converter/nsxt_config_converter.py b/python/avi/migrationtools/nsxt_converter/nsxt_config_converter.py old mode 100644 new mode 100755 index 0bc6516cbf..cd58c67d14 --- a/python/avi/migrationtools/nsxt_converter/nsxt_config_converter.py +++ b/python/avi/migrationtools/nsxt_converter/nsxt_config_converter.py @@ -1,40 +1,200 @@ - -import sys -from avi.migrationtools.nsxt_converter import nsxt_client as nsx_client_util -from avi.migrationtools.nsxt_converter import 
monitor_converter -from vmware.vapi.bindings.struct import PrettyPrinter -from com.vmware.vapi.std.errors_client import NotFound -from com.vmware.nsx.loadbalancer_client import Pools -import com.vmware.nsx_policy.infra_client as infra_client -import com.vmware.nsx_policy.model_client as model_client -import random -from com.vmware.vapi.std.errors_client import Error +import json +import logging +import os +import avi.migrationtools.nsxt_converter.converter_constants as conv_const +from avi.migrationtools.avi_migration_utils import update_count +from avi.migrationtools.avi_orphan_object import wipe_out_not_in_use +from avi.migrationtools.nsxt_converter import conversion_util +from avi.migrationtools.nsxt_converter.conversion_util import NsxtConvUtil +from avi.migrationtools.nsxt_converter.monitor_converter \ + import MonitorConfigConv from avi.migrationtools.nsxt_converter.nsxt_util import NSXUtil import os import json +from avi.migrationtools.nsxt_converter.alb_converter import ALBConverter +import avi.migrationtools.nsxt_converter.converter_constants as conv_const + +from avi.migrationtools.nsxt_converter.vs_converter import VsConfigConv +from avi.migrationtools.nsxt_converter.persistant_converter import PersistantProfileConfigConv + +from avi.migrationtools.nsxt_converter.pools_converter import PoolConfigConv +from avi.migrationtools.nsxt_converter.profile_converter \ + import ProfileConfigConv +from avi.migrationtools.nsxt_converter.ssl_profile_converter \ + import SslProfileConfigConv +LOG = logging.getLogger(__name__) -def convert(nsx_ip, nsx_un, nsx_pw, nsx_port, output_dir): - nsx_util = NSXUtil(nsx_un, nsx_pw, nsx_ip, nsx_port) - nsx_lb_config = nsx_util.get_nsx_config() - input_path = output_dir + os.path.sep + nsx_ip + os.path.sep + "input" - if not os.path.exists(input_path): - os.makedirs(input_path) +conv_utils = NsxtConvUtil() + +merge_object_mapping = { + 'ssl_profile': {'no': 0}, + 'app_profile': {'no': 0}, + 'network_profile': {'no': 0}, + 
'app_per_profile': {'no': 0}, + 'pki_profile': {'no': 0}, + 'health_monitor': {'no': 0}, + 'ssl_cert_key': {'no': 0}, + 'ip_group': {'no': 0} +} + + +def convert(nsx_lb_config, input_path, output_path, tenant, prefix, + migrate_to, object_merge_check, controller_version, ssh_root_password, migration_input_config=None, + vs_state=False, vs_level_status=False, vrf=None, + segroup=None, not_in_use=True, custom_mapping=None, traffic_enabled=False,cloud_tenant="admin", + nsxt_ip=None, nsxt_passord=None): + + # load the yaml file attribute in nsxt_attributes. + nsxt_attributes = conv_const.init() input_config = input_path + os.path.sep + "config.json" with open(input_config, "w", encoding='utf-8') as text_file: json.dump(nsx_lb_config, text_file, indent=4) + avi_config_dict = dict() # Result Config + sys_dict = {} + + try: + merge_object_type = ['ApplicationProfile', 'NetworkProfile', + 'SSLProfile', 'PKIProfile', 'SSLKeyAndCertificate', + 'ApplicationPersistenceProfile', 'HealthMonitor', + 'IpAddrGroup'] + for key in merge_object_type: + sys_dict[key] = [] + avi_config_dict[key] = [] - alb_config = dict() # Result Config + monitor_converter = MonitorConfigConv(nsxt_attributes, object_merge_check, merge_object_mapping, sys_dict) + monitor_converter.convert(avi_config_dict, nsx_lb_config, prefix,tenant,custom_mapping) - monitor_converter.convert(alb_config, nsx_lb_config) + pool_converter = PoolConfigConv(nsxt_attributes, object_merge_check, merge_object_mapping, sys_dict) + pool_converter.convert(avi_config_dict, nsx_lb_config, prefix, tenant) + + profile_converter = ProfileConfigConv(nsxt_attributes, object_merge_check, merge_object_mapping, sys_dict) + profile_converter.convert(avi_config_dict, nsx_lb_config, prefix,tenant) + + ssl_profile_converter = SslProfileConfigConv(nsxt_attributes, object_merge_check, merge_object_mapping, sys_dict) + ssl_profile_converter.convert(avi_config_dict, nsx_lb_config, prefix,tenant) + + persist_conv = 
PersistantProfileConfigConv(nsxt_attributes, object_merge_check, merge_object_mapping, sys_dict) + persist_conv.convert(avi_config_dict, nsx_lb_config, prefix,tenant) + + vs_converter = VsConfigConv(nsxt_attributes,object_merge_check, merge_object_mapping,sys_dict, + nsxt_ip, nsxt_passord) + vs_converter.convert(avi_config_dict, nsx_lb_config, prefix, + tenant, vs_state, controller_version, traffic_enabled, cloud_tenant, ssh_root_password, + migration_input_config, + vrf, segroup) + + # Validating the aviconfig after generation + conv_utils.validation(avi_config_dict) + except Exception as e: + LOG.error(e) + update_count('warning') + LOG.error("Conversion error", exc_info=True) - output_path = output_dir + os.path.sep + nsx_ip + os.path.sep + "output" - if not os.path.exists(output_path): - os.makedirs(output_path) output_config = output_path + os.path.sep + "avi_config.json" + # with open(output_config, "w", encoding='utf-8') as text_file: + # json.dump(avi_config_dict, text_file, indent=4) + + # Add nsxt converter status report in xslx report + conv_utils.add_complete_conv_status( + output_path, avi_config_dict, "nsxt-report", vs_level_status) + + for key in avi_config_dict: + if key != 'META': + if key == 'VirtualService': + if vs_level_status: + LOG.info('Total Objects of %s : %s (%s migrated , %s full conversions)' + % (key,len(nsx_lb_config['LbVirtualServers']), len(avi_config_dict[key]), + conversion_util.fully_migrated)) + print('Total Objects of %s : %s (%s migrated , %s full conversions)' \ + % (key, len(nsx_lb_config['LbVirtualServers']), len(avi_config_dict[key]), + conversion_util.fully_migrated)) + else: + LOG.info('Total Objects of %s : %s (%s migrated)' + % (key, len(nsx_lb_config['LbVirtualServers']), len(avi_config_dict[key]))) + print('Total Objects of %s : %s (%s migrated)' \ + % (key,len(nsx_lb_config['LbVirtualServers']), len(avi_config_dict[key]))) + + continue + # Added code to print merged count. 
+ elif object_merge_check and key == 'SSLProfile': + mergedfile = len(avi_config_dict[key]) - ssl_profile_converter.ssl_profile_count + profile_merged_message = \ + 'Total Objects of %s : %s (%s/%s profile merged)' % \ + (key, len(avi_config_dict[key]), abs(mergedfile), + ssl_profile_converter.ssl_profile_count) + LOG.info(profile_merged_message) + print(profile_merged_message) + continue + elif object_merge_check and key == 'HealthMonitor': + mergedmon = len(avi_config_dict[key]) - monitor_converter.monitor_count + monitor_merged_message = \ + 'Total Objects of %s : %s (%s/%s monitor merged)' % \ + (key, len(avi_config_dict[key]), abs(mergedmon), + monitor_converter.monitor_count) + LOG.info(monitor_merged_message) + print(monitor_merged_message) + continue + elif object_merge_check and key == 'ApplicationProfile': + merged_app_pr = len(avi_config_dict[key]) - profile_converter.app_pr_count + app_profile_merged_message = \ + 'Total Objects of %s : %s (%s/%s profile merged)' % \ + (key, len(avi_config_dict[key]), abs(merged_app_pr), + profile_converter.app_pr_count) + LOG.info(app_profile_merged_message) + print(app_profile_merged_message) + continue + elif object_merge_check and key == 'NetworkProfile': + merged_np_pr = len(avi_config_dict[key]) - profile_converter.np_pr_count + merged_message = \ + 'Total Objects of %s : %s (%s/%s profile merged)' % \ + (key, len(avi_config_dict[key]), abs(merged_np_pr), + profile_converter.np_pr_count) + LOG.info(merged_message) + print(merged_message) + continue + elif object_merge_check and key == 'ApplicationPersistenceProfile': + mergedfile = len(avi_config_dict[key]) - \ + persist_conv.app_per_count + profile_merged_message = \ + 'Total Objects of %s : %s (%s/%s profile merged)' % \ + (key, len(avi_config_dict[key]), abs(mergedfile), + persist_conv.app_per_count) + LOG.info(profile_merged_message) + print(profile_merged_message) + continue + elif object_merge_check and key == 'PKIProfile': + merged_pki_pr = 
len(avi_config_dict[key]) - (vs_converter.pki_count + monitor_converter.pki_count) + merged_message = \ + 'Total Objects of %s : %s (%s/%s pki profile merged)' % \ + (key, len(avi_config_dict[key]), abs(merged_pki_pr), + vs_converter.pki_count+monitor_converter.pki_count) + LOG.info(merged_message) + print(merged_message) + continue + elif object_merge_check and key == 'SSLKeyAndCertificate': + mergedfile = len(avi_config_dict[key]) - \ + (vs_converter.certkey_count + monitor_converter.certkey_count) + certkey_merged_message = \ + 'Total Objects of %s : %s (%s/%s cert key merged)' % \ + (key, len(avi_config_dict[key]), abs(mergedfile), + vs_converter.certkey_count) + LOG.info(certkey_merged_message) + print(certkey_merged_message) + continue + LOG.info('Total Objects of %s : %s' % (key, len( + avi_config_dict[key]))) + print('Total Objects of %s : %s' % (key, len( + avi_config_dict[key]))) + + # Check if flag true then skip not in use object + if not_in_use: + avi_config_dict = wipe_out_not_in_use(avi_config_dict) with open(output_config, "w", encoding='utf-8') as text_file: - json.dump(alb_config, text_file, indent=4) + json.dump(avi_config_dict, text_file, indent=4) + if migrate_to == 'NSX': + alb_converter = ALBConverter(output_config, output_path) + alb_converter.convert() - pp = PrettyPrinter() - pp.pprint(alb_config) \ No newline at end of file + return avi_config_dict diff --git a/python/avi/migrationtools/nsxt_converter/nsxt_converter.py b/python/avi/migrationtools/nsxt_converter/nsxt_converter.py old mode 100644 new mode 100755 index f5e9d2246f..d66b8f9bde --- a/python/avi/migrationtools/nsxt_converter/nsxt_converter.py +++ b/python/avi/migrationtools/nsxt_converter/nsxt_converter.py @@ -1,39 +1,394 @@ # !/usr/bin/env python3 +import json +import logging +import os +import sys +from datetime import datetime -from avi.migrationtools.nsxt_converter import nsxt_config_converter +import copy +import yaml + +from avi.migrationtools import avi_rest_lib +from 
avi.migrationtools.ansible.ansible_config_converter import \ + AviAnsibleConverterMigration +from avi.migrationtools.avi_converter import AviConverter +from avi.migrationtools.avi_migration_utils import get_count +from avi.migrationtools.avi_orphan_object import wipe_out_not_in_use +from avi.migrationtools.nsxt_converter import nsxt_config_converter, vs_converter import argparse +from avi.migrationtools.nsxt_converter.nsxt_util import NSXUtil +from avi.migrationtools.nsxt_converter.vs_converter import vs_list_with_snat_deactivated, vs_data_path_not_work, \ + vs_with_no_cloud_configured, vs_with_lb_skipped -def conver_lb_config(args): - output_file_path = args.output_file_path if args.output_file_path else 'output' - nsxt_config_converter.convert(args.nsxt_ip, args.nsxt_user, args.nsxt_passord, args.nsxt_port, output_file_path) +ARG_CHOICES = { + 'option': ['cli-upload', 'auto-upload'], + 'migrate_option': ['Avi', 'NSX'], + 'vs_state': ['enable', 'deactivate'] +} +LOG = logging.getLogger(__name__) +class NsxtConverter(AviConverter): + def __init__(self, args): + ''' -if __name__ == "__main__": + :param args: + ''' + self.nsxt_ip = args.nsxt_ip + self.nsxt_user = args.nsxt_user + self.nsxt_passord = args.nsxt_password + self.nsxt_port = args.nsxt_port + self.prefix = args.prefix + self.controller_ip = args.alb_controller_ip + self.user = args.alb_controller_user + self.password = args.alb_controller_password + self.tenant = args.alb_controller_tenant + self.not_in_use = args.not_in_use + self.ansible_skip_types = args.ansible_skip_types + self.controller_version = args.alb_controller_version + self.ansible_filter_types = args.ansible_filter_types + self.vs_level_status = args.vs_level_status + self.output_file_path = args.output_file_path if args.output_file_path \ + else 'output' + self.option = args.option + self.ansible = args.ansible + self.object_merge_check = args.no_object_merge + self.vs_state = args.vs_state + self.vs_filter = args.vs_filter + self.segroup = 
args.segroup + self.patch = args.patch + self.traffic_enabled = args.traffic_enabled + self.default_params_file = args.default_params_file + self.cloud_tenant = args.cloud_tenant + self.ssh_root_password = args.ssh_root_password + + def conver_lb_config(self, args): + + if not os.path.exists(self.output_file_path): + os.mkdir(self.output_file_path) + self.init_logger_path() + output_dir = os.path.normpath(self.output_file_path) + + is_download_from_host = False + args_copy = copy.deepcopy(args) + vars(args_copy).pop('nsxt_password') + vars(args_copy).pop('alb_controller_password') + output_path = None + if self.nsxt_ip: + output_path = output_dir + os.path.sep + self.nsxt_ip + os.path.sep + "output" + if not os.path.exists(output_path): + os.makedirs(output_path) + input_path = output_dir + os.path.sep + self.nsxt_ip + os.path.sep + "input" + if not os.path.exists(input_path): + os.makedirs(input_path) + is_download_from_host = True + else: + output_path = output_dir + os.path.sep + "config-output" + os.path.sep + "output" + if not os.path.exists(output_path): + os.makedirs(output_path) + input_path = output_dir + os.path.sep + "config-output" + os.path.sep + "input" + if not os.path.exists(input_path): + os.makedirs(input_path) + with open(output_path + os.path.sep + "state.json", 'w') as f: + f.write("%s" % json.dumps(vars(args_copy))) + nsx_lb_config = None + if is_download_from_host: + LOG.debug("Copying files from host") + print("Copying Files from Host...") + nsx_util = NSXUtil(self.nsxt_user, self.nsxt_passord, self.nsxt_ip, self.nsxt_port \ + , self.controller_ip, self.user, self.password, self.controller_version) + nsx_util.get_inventory() + nsx_util.get_pool_details() + nsx_util.write_output( + output_path, args.nsxt_ip) + nsx_lb_config = nsx_util.get_nsx_config() + LOG.debug("Copied input files") + + migration_input_config = None + if self.default_params_file: + try: + default_params_file = open(self.default_params_file, "r") + migration_input_config = 
default_params_file.read() + migration_input_config = json.loads(migration_input_config) + except: + print("\033[93m" + "Warning: Default parameter config file not found" + "\033[0m") + sys.exit() + + if not nsx_lb_config: + print('Not found NSX configuration file') + return + + if not self.cloud_tenant: + self.cloud_tenant = "admin" + alb_config = nsxt_config_converter.convert( + nsx_lb_config, input_path, output_path, self.tenant, + self.prefix, None, self.object_merge_check, self.controller_version, self.ssh_root_password, + migration_input_config, + self.vs_state, + self.vs_level_status, None, self.segroup, self.not_in_use, None, + self.traffic_enabled, self.cloud_tenant, + self.nsxt_ip, self.nsxt_passord) + + avi_config = self.process_for_utils(alb_config) + # Check if flag true then skip not in use object + # if self.not_in_use: + # avi_config = wipe_out_not_in_use(avi_config) + # output_path = (output_dir + os.path.sep + self.nsxt_ip + os.path.sep + + # "output") + self.write_output(avi_config, output_path, 'avi_config.json') + if self.ansible: + self.convert(avi_config, output_path) + if self.option == 'auto-upload': + self.upload_config_to_controller(avi_config) + if self.vs_filter: + filtered_vs_list=[] + virtual_services=[] + if self.vs_filter and type(self.vs_filter) == str: + virtual_services = self.vs_filter.split(',') + elif self.vs_filter and type(self.vs_filter) == list: + virtual_services = self.vs_filter + for vs_name in virtual_services: + if self.prefix: + if not vs_name.startswith(self.prefix): + vs_name = self.prefix + "-" + vs_name + filtered_vs_list.append(vs_name) + else: + filtered_vs_list = virtual_services + if vs_with_lb_skipped: + print_msg = "\033[93m"+"Warning: For following virtual service/s load balancer are skipped due to " \ + "unsupported LB configuration"+'\033[0m' + if self.vs_filter: + if list(set(vs_with_lb_skipped).intersection(set(filtered_vs_list))): + print(print_msg) + 
print(list(set(vs_with_lb_skipped).intersection(set(filtered_vs_list)))) + else: + print(print_msg) + print(vs_with_lb_skipped) + if vs_with_no_cloud_configured: + print_msg = "\033[93m"+"Warning: Following virtual service/s cloud is not configured"+'\033[0m' + if self.vs_filter: + if list(set(vs_with_no_cloud_configured).intersection(set(filtered_vs_list))): + print(print_msg) + print(list(set(vs_with_no_cloud_configured).intersection(set(filtered_vs_list)))) + else: + print(print_msg) + print(vs_with_no_cloud_configured) + if vs_list_with_snat_deactivated: + print_msg = '\033[93m' + "Warning: for following virtual service/s please follow steps giving in KB: " \ + "https://avinetworks.com/docs/21.1/migrating-nsx-transparent-lb-to-nsx-alb/" + \ + '\033[0m' + if self.vs_filter: + if list(set(vs_list_with_snat_deactivated).intersection(set(filtered_vs_list))): + print(print_msg) + print(list(set(vs_list_with_snat_deactivated).intersection(set(filtered_vs_list)))) + else: + print(print_msg) + print(vs_list_with_snat_deactivated) + if vs_data_path_not_work: + print_msg = "\033[93m"+"For following virtual service/s Data path won't work"+'\033[0m' + if self.vs_filter: + if list(set(vs_data_path_not_work).intersection(set(filtered_vs_list))): + print(print_msg) + print(list(set(vs_data_path_not_work).intersection(set(filtered_vs_list)))) + else: + print(print_msg) + print(vs_data_path_not_work) + print("Total Warning: ", get_count('warning')) + print("Total Errors: ", get_count('error')) + LOG.info("Total Warning: {}".format(get_count('warning'))) + LOG.info("Total Errors: {}".format(get_count('error'))) + + def upload_config_to_controller(self, alb_config): + avi_rest_lib.upload_config_to_controller( + alb_config, self.controller_ip, self.user, self.password, + self.tenant, self.controller_version) + + def convert(self, alb_config, output_path): + # output_path = (self.output_file_path + os.path.sep + self.nsxt_ip + + # os.path.sep + "output") + avi_traffic = 
AviAnsibleConverterMigration( + alb_config, output_path, self.prefix, self.not_in_use, + skip_types=self.ansible_skip_types, + controller_version=self.controller_version, + filter_types=self.ansible_filter_types) + avi_traffic.write_ansible_playbook( + self.nsxt_ip, self.nsxt_user, self.nsxt_passord, 'nsxt') + + +if __name__ == "__main__": HELP_STR = """ Usage: - python nsxt_converter.py -n 192.168.100.101 -u admin -p password + + Example to use -O or --option to auto upload config to controller after conversion: + nsxt_converter.py --option auto-upload + + Example to use -s or --vs_state option: + nsxt_converter.py -s enable + Usecase: Traffic enabled state of a VS after conversion to AVI (default value is disable). + + Example to use --alb_controller_version option: + nsxt_converter.py --alb_controller_version 21.1.4 + Usecase: To provide the version of controller for getting output in respective controller format. + + Example to use no object merge option: + nsxt_converter.py --no_object_merge + Usecase: When we don't need to merge two same object (based on their attribute values except name) + + Example to patch the config after conversion: + nsxt_converter.py --patch test/patch.yaml where patch.yaml file contains + : + - match_name: + patch: + name: + Usecase: Sample file test/patch.yaml + + Example to export a single VS: + nsxt_converter.py --vs_filter test_vs + + Example to skip avi object during playbook creation + nsxt_converter.py --ansible --ansible_skip_types DebugController + Usecase: + Comma separated list of Avi Object types to skip during conversion. + Eg. DebugController, ServiceEngineGroup will skip debugcontroller and + serviceengine objects + + Example to filter ansible object + nsxt_converter.py --ansible --ansible_filter_types virtualservice, pool + Usecase: + Comma separated list of Avi Objects types to include during conversion. + Eg. 
VirtualService , Pool will do ansible conversion only for + Virtualservice and Pool objects + + Example to use ansible option: + nsxt_converter.py --ansible + Usecase: To generate the ansible playbook for the avi configuration + which can be used for upload to controller + + Example to add the prefix to avi object name: + nsxt_converter.py --prefix abc + Usecase: When two configuration is to be uploaded to same controller then + in order to differentiate between the objects that will be uploaded in + second time. + + Example to use not_in_use option: + nsxt_converter.py --not_in_use + Usecase: Dangling object which are not referenced by any avi object will be removed + + Example to use vs level status option: + nsxt_converter.py --vs_level_status + Usecase: To get the vs level status for the avi objects in excel sheet + + Example to use segroup flag + nsxt_converter.py --segroup segroup_name + UseCase: To add / change segroup reference of vs + + Example to default param files + nsxt_converter.py --default_params_file test/default_params.json + UseCase: To set default parameters for migration. Sample file test/default_params.json + """ parser = argparse.ArgumentParser( - formatter_class=argparse.RawTextHelpFormatter, - description=HELP_STR) - - parser.add_argument('-n', '--nsxt_ip', required=True, - help='Ip of NSXT') - parser.add_argument('-u', '--nsxt_user', required=True, - help='NSX-T User name') - parser.add_argument('-p', '--nsxt_passord', required=True, + formatter_class=argparse.RawTextHelpFormatter, description=HELP_STR) + + # Create Ansible Script based on Flag + parser.add_argument('--ansible', + help='Flag for create ansible file', + action='store_true') + # Added command line args to take skip type for ansible playbook + parser.add_argument('--ansible_skip_types', + help='Comma separated list of Avi Object types to skip ' + 'during conversion.\n Eg. 
-s DebugController,' + 'ServiceEngineGroup will skip debugcontroller and ' + 'serviceengine objects') + # Added command line args to take skip type for ansible playbook + parser.add_argument('--ansible_filter_types', + help='Comma separated list of Avi Objects types to ' + 'include during conversion.\n Eg. -f ' + 'VirtualService, Pool will do ansible conversion ' + 'only for Virtualservice and Pool objects') + parser.add_argument('-c', '--alb_controller_ip', + help='controller ip for auto upload', required=True) + parser.add_argument('--alb_controller_version', + help='Target Avi controller version', default='21.1.4') + parser.add_argument('--alb_controller_user', + help='controller username for auto upload', required=True) + parser.add_argument('--alb_controller_password', + help='controller password for auto upload. Input ' + 'prompt will appear if no value provided') + parser.add_argument('-t', '--alb_controller_tenant', help='tenant name for auto upload', + default="admin") + parser.add_argument('--cloud_tenant', help="tenant for cloud ref") + parser.add_argument('-i', '--default_params_file', + help='absolute path for nsx-t default params file') + parser.add_argument('-n', '--nsxt_ip', + help='Ip of NSXT', required=True) + parser.add_argument('-u', '--nsxt_user', + help='NSX-T User name', required=True) + parser.add_argument('-p', '--nsxt_password', help='NSX-T Password') parser.add_argument('-port', '--nsxt_port', default=443, help='NSX-T Port') + parser.add_argument( '--ssh_root_password', + help='ssh root Password') + # Added not in use flag + parser.add_argument('--not_in_use', + help='Flag for skipping not in use object', + action="store_false") + parser.add_argument('--no_object_merge', + help='Flag for object merge', action='store_false') parser.add_argument('-o', '--output_file_path', help='Folder path for output files to be created in', ) + parser.add_argument('-O', '--option', choices=ARG_CHOICES['option'], + help='Upload option cli-upload generates 
Avi config ' + + 'file auto upload will upload config to ' + + 'controller') + # json file location and patch location + parser.add_argument('--patch', help='Run config_patch please provide ' + 'location of patch.yaml') + parser.add_argument('--prefix', help='Prefix for objects') + parser.add_argument('--segroup', + help='Update the available segroup ref with the ' + 'custom ref') + parser.add_argument('--traffic_enabled', + help='Traffic Enabled on all migrated VS VIPs', + action="store_true") + # Added command line args to execute vs_filter.py with vs_name. + parser.add_argument('--vs_filter', + help='comma separated names of virtualservices.\n' + 'Note: If patch data is supplied, vs_name should match ' + 'the new name given in it' + ) + parser.add_argument('--vs_level_status', action='store_true', + help='Add columns of vs reference and overall skipped ' + 'settings in status excel sheet') + parser.add_argument('-s', '--vs_state', choices=ARG_CHOICES['vs_state'], + help='State of created VS') - args = parser.parse_args() - conver_lb_config(args) + start = datetime.now() + args = parser.parse_args() + if not args.nsxt_password: + if os.environ.get('nsxt_password'): + args.nsxt_password = os.environ.get('nsxt_password') + else: + print("\033[91m"+'ERROR: please provide nsxt password either through environment variable or as a script parameter'+"\033[0m") + exit() + if not args.alb_controller_password: + if os.environ.get('alb_controller_password'): + args.alb_controller_password= os.environ.get('alb_controller_password') + else: + print('\033[91m'+'ERROR: please provide alb_controller_password either through environment variable or as a script parameter'+"\033[0m") + exit() + if not args.ssh_root_password: + if os.environ.get('ssh_root_password'): + args.ssh_root_password = os.environ.get('ssh_root_password') + nsxt_converter = NsxtConverter(args) + nsxt_converter.conver_lb_config(args) + end = datetime.now() + print("The time of execution of above program is :", 
+ str(end - start)) diff --git a/python/avi/migrationtools/nsxt_converter/nsxt_util.py b/python/avi/migrationtools/nsxt_converter/nsxt_util.py old mode 100644 new mode 100755 index 2f87478867..a6a8b67dbf --- a/python/avi/migrationtools/nsxt_converter/nsxt_util.py +++ b/python/avi/migrationtools/nsxt_converter/nsxt_util.py @@ -1,22 +1,746 @@ +import ipaddress +import os +from datetime import datetime +import random + +import copy +import xlsxwriter +import logging from avi.migrationtools.nsxt_converter import nsxt_client as nsx_client_util +import pprint +from avi.sdk.avi_api import ApiSession -class NSXUtil(): +pp = pprint.PrettyPrinter(indent=4) +vs_details = {} +controller_details = {} + +LOG = logging.getLogger(__name__) + + +def is_segment_configured_with_subnet(vs_id, cloud_name): + vs_config = vs_details[vs_id] + network_type = vs_config["Network"] + if network_type == "Vlan": + segment_list = vs_config.get("Segments") + if segment_list: + for vs_segment in segment_list: + seg_id = vs_segment["name"] + session = ApiSession.get_session(controller_details.get("ip"), controller_details.get("username"), + controller_details.get("password"), tenant="admin", + api_version=controller_details.get("version")) + cloud = session.get("cloud/").json()["results"] + cloud_id = [cl.get("uuid") for cl in cloud if cl.get("name") == cloud_name] + segment_list = session.get("network/?&cloud_ref.uuid=" + cloud_id[0]).json()["results"] + segment = [seg for seg in segment_list if seg.get("name") == seg_id] + if segment and segment[0].get("configured_subnets"): + if segment[0].get("configured_subnets")[0].get("prefix"): + if segment[0].get("configured_subnets")[0].get("static_ip_ranges"): + return True, segment[0], network_type, "Both are configured" + else: + return False, segment[0], network_type, "static ip pool is not configured" + else: + return False, segment[0], network_type, "ip subnet is not configured" + return False, None, network_type, "overlay" + + +def 
is_vlan_configured_with_bgp(cloud_name, tenant, vlan_segment): + session = ApiSession.get_session(controller_details.get("ip"), controller_details.get("username"), + controller_details.get("password"), tenant="admin", + api_version=controller_details.get("version")) + cloud = session.get("cloud/").json()["results"] + cloud_id = [cl.get("uuid") for cl in cloud if cl.get("name") == cloud_name] + """ + Check if VLAN network is configured as a BGP peer + https:///api/vrfcontext/?exclude=name.in&name.in=management&cloud_ref.uuid= + """ + network_info = session. \ + get_object_by_name('vrfcontext', 'global', + params={"exclude": "name.in", + "name.in": "management", + "cloud_ref.uuid": cloud_id[0]}) + # LOG.info("ALB Plugin : vlan_configured_with_bgp : {}".format(network_info)) + if network_info: + if network_info.get("bgp_profile"): + if network_info["bgp_profile"].get("peers"): + bgp_peers = network_info["bgp_profile"].get("peers") + is_vlan_bgp_peer = [peer for peer in bgp_peers if peer.get("network_ref") == vlan_segment.get("url")] + if is_vlan_bgp_peer: + return True + return False + + +def get_name_and_entity(url): + """ + Parses reference string to extract object type and + :param url: reference url to be parsed + :return: entity and object name + """ + if url: + parsed = url.split('/') + return parsed[-2], parsed[-1] + + return '', '' + + +def get_vs_cloud_name(vs_id): + if vs_details.get(vs_id): + return vs_details[vs_id]["Cloud"] + return None + +def get_vs_cloud_type(vs_id): + if vs_details.get(vs_id): + return vs_details[vs_id]["Network"] + return None + +def get_lb_service_name(vs_id): + if vs_details.get(vs_id): + return vs_details[vs_id]["lb_name"] + return None + + +def get_lb_skip_reason(vs_id): + if vs_details.get(vs_id): + return vs_details.get(vs_id).get("lb_skip_reason") + return None + +def get_object_segments(vs_id, obj_ip): + vs = vs_details.get(vs_id, None) + if not vs: + return None + segments = [] + if vs.get("Segments"): + seg_list = 
vs.get("Segments") + for seg in seg_list: + seg_name = seg["name"] + for subnet in seg["subnet"]: + if subnet.get("network_range"): + network_range = subnet["network_range"] + a_network = ipaddress.ip_network(network_range, False) + address_in_network = ipaddress.ip_address(obj_ip) in a_network + if address_in_network: + return [dict( + seg_name=seg_name, + subnets=subnet)] + return None + + +def get_certificate_data(certificate_ref, nsxt_ip, ssh_root_password): + import paramiko + import json + + ssh = paramiko.SSHClient() + ssh.load_system_host_keys() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(nsxt_ip, username='root', password=ssh_root_password, allow_agent=False, look_for_keys=False) + + data = None + cmd = "curl --header 'Content-Type: application/json' --header 'x-nsx-username: admin' " \ + "http://'admin':'{}'@127.0.0.1:7440/nsxapi/api/v1/trust-management/certificates".\ + format(ssh_root_password) + stdin, stdout, stderr = ssh.exec_command(cmd) + output_dict = '' + for line in stdout.read().splitlines(): + output_dict += line.decode() + + output_dict = json.loads(output_dict) + + LOG.info("output_dict for certificate_ref {}".format(certificate_ref)) + for cert_data in output_dict['results']: + if 'tags' in cert_data.keys(): + cert_id = cert_data['tags'][0]['tag'].split('/')[-1] + else: + cert_id = cert_data['id'] + + if cert_id == certificate_ref: + cert_command = cmd + "/" + cert_data['id'] + '/' + "?action=get_private" + cert_stdin, cert_stdout, cert_stderr = ssh.exec_command(cert_command) + cert_dict = '' + for line in cert_stdout.read().splitlines(): + cert_dict += line.decode() + + cert_dict = json.loads(cert_dict) + LOG.debug("cert_dict for certificate_ref {}".format(certificate_ref)) + if 'private_key' in cert_dict: + return cert_dict['private_key'], cert_dict['pem_encoded'] + + ssh.close() + stdin.close() + return data + + +class NSXUtil(): nsx_api_client = None + nsxt_ip = None + nsxt_pw = None - def __init__(self, 
nsx_un, nsx_pw, nsx_ip, nsx_port): + def __init__(self, nsx_un, nsx_pw, nsx_ip, nsx_port, c_ip=None, c_un=None, c_pw=None, c_vr=None): self.nsx_api_client = nsx_client_util.create_nsx_policy_api_client( - nsx_un, nsx_pw, nsx_ip, nsx_port, auth_type=nsx_client_util.SESSION_AUTH) + nsx_un, nsx_pw, nsx_ip, nsx_port, auth_type=nsx_client_util.BASIC_AUTH) + if c_ip and c_un and c_pw and c_vr: + self.session = ApiSession.get_session(c_ip, c_un, c_pw, tenant="admin", api_version=c_vr) + controller_details["ip"] = c_ip + controller_details["password"] = c_pw + controller_details["username"] = c_un + controller_details["version"] = c_vr + controller_details["session"] = self.session + + self.nsxt_ip = nsx_ip + self.nsxt_pw = nsx_pw + + self.cloud = self.session.get("cloud/").json()["results"] + self.avi_vs_object = [] + self.avi_object_temp = {} + self.avi_pool_object = [] + self.enabled_pool_list = [] + self.lb_services = {} def get_nsx_config(self): nsx_lb_config = dict() nsx_lb_config["LBServices"] = self.nsx_api_client.infra.LbServices.list().to_dict().get("results", []) - nsx_lb_config["LbMonitorProfiles"] = self.nsx_api_client.infra.LbMonitorProfiles.list().to_dict().get("results", []) + nsx_lb_config["LbMonitorProfiles"] = self.nsx_api_client.infra.LbMonitorProfiles.list().to_dict().get("results", + []) nsx_lb_config["LbPools"] = self.nsx_api_client.infra.LbPools.list().to_dict().get("results", []) + nsx_lb_config["LbAppProfiles"] = self.nsx_api_client.infra.LbAppProfiles.list().to_dict().get("results", []) + nsx_lb_config["LbClientSslProfiles"] = self.nsx_api_client.infra.LbClientSslProfiles.list().to_dict()["results"] + nsx_lb_config["LbServerSslProfiles"] = self.nsx_api_client.infra.LbServerSslProfiles.list().to_dict()["results"] + nsx_lb_config["LbPersistenceProfiles"] = self.nsx_api_client.infra.LbPersistenceProfiles.list().to_dict()[ + "results"] + nsx_lb_config['LbVirtualServers'] = self.nsx_api_client.infra.LbVirtualServers.list().to_dict().get('results', + 
[]) return nsx_lb_config -if __name__ == "__main__": - nsx_util = NSXUtil('admin', 'Admin!23Admin', '10.168.204.70', '443') - nsx_lb_config = nsx_util.get_nsx_config() - print(nsx_lb_config) \ No newline at end of file + def nsx_cleanup(self): + nsx_lb_config = self.get_nsx_config() + if nsx_lb_config.get("LbVirtualServers", None): + for vs in nsx_lb_config["LbVirtualServers"]: + if not vs["_system_owned"]: + self.nsx_api_client.infra.LbVirtualServers.delete(vs["id"]) + + if nsx_lb_config.get("LbPersistenceProfiles", None): + for persis in nsx_lb_config["LbPersistenceProfiles"]: + if not persis["_system_owned"]: + self.nsx_api_client.infra.LbPersistenceProfiles.delete(persis["id"]) + if nsx_lb_config.get("LbServerSslProfiles", None): + for server_ssl in nsx_lb_config["LbServerSslProfiles"]: + if not server_ssl["_system_owned"]: + self.nsx_api_client.infra.LbServerSslProfiles.delete(server_ssl["id"]) + if nsx_lb_config.get("LbClientSslProfiles", None): + for client_ssl in nsx_lb_config["LbClientSslProfiles"]: + if not client_ssl["_system_owned"]: + self.nsx_api_client.infra.LbClientSslProfiles.delete(client_ssl["id"]) + if nsx_lb_config.get("LbAppProfiles", None): + for app in nsx_lb_config["LbAppProfiles"]: + if not app["_system_owned"]: + self.nsx_api_client.infra.LbAppProfiles.delete(app["id"]) + + if nsx_lb_config.get("LbPools", None): + for pool in nsx_lb_config["LbPools"]: + if not pool["_system_owned"]: + self.nsx_api_client.infra.LbPools.delete(pool["id"]) + + if nsx_lb_config.get("LbMonitorProfiles", None): + for monitor in nsx_lb_config["LbMonitorProfiles"]: + if not monitor["_system_owned"]: + self.nsx_api_client.infra.LbMonitorProfiles.delete(monitor["id"]) + + def cutover_vs(self, vs_list): + virtual_service = self.get_all_virtual_service() + + # Get list of all ALB VS's + self.alb_vs_list = dict() + self.alb_all_vs_list = self.session.get("virtualservice/").json()["results"] + for vs in self.alb_all_vs_list: + self.alb_vs_list[vs["name"]] = vs + + for 
nsxt_vs in virtual_service: + vs_body = self.nsx_api_client.infra.LbVirtualServers.get(nsxt_vs["id"]) + if (vs_list and nsxt_vs['display_name'] in vs_list) or (not vs_list) and not nsxt_vs["system_owned"]: + vs_body.lb_service_path = None + vs_body.enabled = False + self.nsx_api_client.infra.LbVirtualServers.update(nsxt_vs["id"], vs_body) + + for alb_vs in self.alb_vs_list: + if alb_vs == nsxt_vs["display_name"]: + vs_obj = self.alb_vs_list[alb_vs] + vs_obj["traffic_enabled"] = True + self.session.put("virtualservice/{}".format(vs_obj.get("uuid")), vs_obj) + break + + def rollback_vs(self, vs_list, input_data): + virtual_service = self.get_all_virtual_service() + + # Get list of all ALB VS's + self.alb_vs_list = dict() + self.alb_all_vs_list = self.session.get("virtualservice/").json()["results"] + for vs in self.alb_all_vs_list: + self.alb_vs_list[vs["name"]] = vs + + vs_lb_mapping_list = dict() + nsxt_vs_list = input_data['LbVirtualServers'] + for vs in nsxt_vs_list: + vs_lb_mapping_list['{}_{}'.format(vs["id"], vs["display_name"])] \ + = vs['lb_service_path'] + + for nsxt_vs in virtual_service: + vs_body = self.nsx_api_client.infra.LbVirtualServers.get(nsxt_vs["id"]) + if (vs_list and nsxt_vs["display_name"] in vs_list) or (not vs_list) and not nsxt_vs["system_owned"]: + lb_service_path = vs_lb_mapping_list.get("{}_{}".format(nsxt_vs["id"], + nsxt_vs["display_name"])) + vs_body.lb_service_path = lb_service_path + vs_body.enabled = True + self.nsx_api_client.infra.LbVirtualServers.update(nsxt_vs["id"], vs_body) + + for alb_vs in self.alb_vs_list: + if alb_vs == nsxt_vs["display_name"]: + vs_obj = self.alb_vs_list[alb_vs] + vs_obj["traffic_enabled"] = False + self.session.put("virtualservice/{}".format(vs_obj.get("uuid")), vs_obj) + break + + def get_cloud_type(self, avi_cloud_list, tz_id, seg_id): + is_seg_present = False + for cl in avi_cloud_list: + if cl.get("vtype") == "CLOUD_NSXT": + if cl.get("nsxt_configuration"): + if 
cl["nsxt_configuration"].get("transport_zone"): + tz = cl["nsxt_configuration"].get("transport_zone") + elif cl["nsxt_configuration"].get("data_network_config"): + tz = cl["nsxt_configuration"]["data_network_config"].get("transport_zone") + if cl["nsxt_configuration"]["data_network_config"].get("tz_type") == "OVERLAY": + tz_type = "OVERLAY" + data_netwrk = cl["nsxt_configuration"]["data_network_config"] + if data_netwrk.get("tier1_segment_config"): + if data_netwrk["tier1_segment_config"].get("manual"): + tier1_lrs = data_netwrk["tier1_segment_config"]["manual"].get("tier1_lrs") + if tier1_lrs: + is_seg_present = [True for tier in tier1_lrs if + get_name_and_entity(tier.get("segment_id"))[-1] == seg_id] + elif cl["nsxt_configuration"]["data_network_config"].get("tz_type") == "VLAN": + tz_type = "VLAN" + data_netwrk = cl["nsxt_configuration"]["data_network_config"] + vlan_seg = data_netwrk.get("vlan_segments") + is_seg_present = [True for seg in vlan_seg if get_name_and_entity(seg)[-1] == seg_id] + if tz.find("/") != -1: + tz = tz.split("/")[-1] + if tz == tz_id and is_seg_present: + return cl.get("name") + + return "Cloud Not Found" + + def get_lb_services_details(self): + lb_services = self.nsx_api_client.infra.LbServices.list().to_dict().get('results', []) + for lb in lb_services: + if not lb.get("connectivity_path"): + continue + tier = get_name_and_entity(lb["connectivity_path"])[-1] + ls_id = self.nsx_api_client.infra.tier_1s.LocaleServices.list(tier).results[0].id + interface_list = self.nsx_api_client.infra.tier_1s.locale_services.Interfaces.list(tier, ls_id).results + network = None + tz_id = None + cloud_name = None + lb_details = [] + if len(interface_list): + interface = interface_list[0].id + segment_id = get_name_and_entity(interface_list[0].segment_path)[-1] + segment = self.nsx_api_client.infra.Segments.get(segment_id) + tz_path = segment.transport_zone_path + tz_id = get_name_and_entity(tz_path)[-1] + cloud_name = self.get_cloud_type(self.cloud, 
tz_id, segment_id) + if hasattr(segment, "vlan_ids") and segment.vlan_ids: + network = "Vlan" + else: + network = "Overlay" + + if network == "Overlay" and len(interface_list) > 0: + self.lb_services[lb["id"]] = { + "lb_name": lb["id"], + "lb_skip_reason": "Overlay Network having Service Interfaces is not supported" + } + continue + + for intrf in interface_list: + segment_id = get_name_and_entity(intrf.segment_path)[-1] + subnets = [] + for subnet in intrf.subnets: + subnets.append({ + "network_range": (str(subnet.ip_addresses[0]) + "/" + str(subnet.prefix_len)) + }) + segments = { + "name": segment_id, + "subnet": subnets} + lb_details.append(segments) + + else: + segment_list = self.nsx_api_client.infra.Segments.list().to_dict().get('results', []) + for seg in segment_list: + if seg.get("connectivity_path"): + gateway_name = get_name_and_entity(seg["connectivity_path"])[-1] + if gateway_name == tier: + tz_path = seg.get("transport_zone_path") + tz_id = get_name_and_entity(tz_path)[-1] + cloud_name = self.get_cloud_type(self.cloud, tz_id, seg.get("id")) + if seg.get("vlan_ids"): + network = "Vlan" + else: + network = "Overlay" + if seg.get("subnets"): + subnets = [] + for subnet in seg["subnets"]: + subnets.append({ + "network_range": subnet["network"] + }) + segments = { + "name": seg.get("id"), + "subnet": subnets} + lb_details.append(segments) + if cloud_name == "Cloud Not Found": + continue + break + + if not (network and cloud_name): + self.lb_services[lb["id"]] = { + "lb_name": lb["id"], + "lb_skip_reason": "No segments or service interfaces configured" + } + continue + + self.lb_services[lb["id"]] = { + "lb_name": lb["id"], + "Network": network, + "Cloud": cloud_name, + } + if lb_details: + self.lb_services[lb["id"]]["Segments"] = lb_details + + def get_all_virtual_service(self): + """ + :return:list of virtual server objects + """ + virtual_services = self.nsx_api_client.infra.LbVirtualServers.list().to_dict().get('results', []) + return virtual_services 
+ + def get_all_pool(self): + """ + returns the list of all pools + """ + pool = self.nsx_api_client.infra.LbPools.list().to_dict().get("results", []) + return pool + + def get_inventory(self): + self.get_lb_services_details() + # lb_vs_config = lb_vs_config["LbVirtualServers"] + virtual_service = self.get_all_virtual_service() + vs_stats = dict() + vs_with_rules = 0 + normal_vs = 0 + enab_vs = 0 + disab_vs = 0 + vs_stats["vs_count"] = len(virtual_service) + for vs in virtual_service: + vs_object = { + 'name': vs["display_name"], + 'id': vs["id"] + } + if vs.get("lb_service_path"): + lb = get_name_and_entity(vs["lb_service_path"])[-1] + lb_details = self.lb_services.get(lb) + if lb_details: + vs_object["Network"] = lb_details.get("Network") + vs_object["Cloud"] = lb_details.get("Cloud") + vs_object['Segments'] = lb_details.get('Segments') + vs_object["Cloud_type"] = lb_details.get("Cloud_type") + vs_object['lb_name'] = lb + vs_object['lb_skip_reason'] = lb_details.get("lb_skip_reason") + # lb_details["vs_name"] = vs["display_name"] + vs_details[vs["id"]] = vs_object + if vs["enabled"]: + vs_object["enabled"] = True + else: + vs_object["enabled"] = False + if vs.get('pool_path'): + pool = vs.get("pool_path") + pool_partition, pool_name = get_name_and_entity(pool) + if pool_name: + vs_object['pool'] = { + 'name': pool_name + } + self.enabled_pool_list.append(pool_name) + pool_obj = self.nsx_api_client.infra.LbPools.get(pool_name) + vs_object["pool"]["pool_id"] = pool_obj.id + if pool_obj.active_monitor_paths: + health_monitors = [ + get_name_and_entity(monitors)[1] + for monitors in pool_obj.active_monitor_paths + if monitors + ] + if health_monitors: + vs_object['pool']['health_monitors'] = \ + health_monitors + if pool_obj.members: + members = [ + { + 'name': pool_member.display_name, + 'address': pool_member.ip_address, + 'state': pool_member.admin_state + } + for pool_member in + pool_obj.members if pool_member + ] + if members: + vs_object['pool']['members'] = 
members + if vs_object["enabled"]: + vs_object['pool']["vs_enabled"] = vs_object["enabled"] + if vs.get("application_profile_path"): + profile_name = get_name_and_entity(vs["application_profile_path"])[1] + vs_object["profiles"] = profile_name + prof_obj_list = self.nsx_api_client.infra.LbAppProfiles.list().to_dict().get("results", []) + prof_obj = [prof for prof in prof_obj_list if prof["display_name"] == profile_name] + prof_type = prof_obj[0].get("resource_type") + if prof_type == "LBHttpProfile": + vs_type = "L7" + else: + vs_type = "L4" + vs_object["vs_type"] = vs_type + + if vs.get('rules'): + vs_object["rules"] = True + vs_with_rules += 1 + else: + vs_object["rules"] = False + normal_vs += 1 + if vs.get("enabled"): + enab_vs += 1 + else: + disab_vs += 1 + self.avi_object_temp[vs_object['id']] = vs_object + self.avi_vs_object.append(self.avi_object_temp) + vs_stats["complex_vs"] = vs_with_rules + vs_stats["normal_vs"] = normal_vs + vs_stats["enabled_vs"] = enab_vs + vs_stats["disabled_vs"] = disab_vs + + def get_pool_details(self): + temp_pool_list = {} + pool_list = self.get_all_pool() + for pool in pool_list: + pool_obj = { + 'name': pool["display_name"], + 'id': pool["id"] + } + if pool["display_name"] in self.enabled_pool_list: + pool_obj["enabled"] = "connected" + else: + pool_obj["disabled"] = "disconnected" + temp_pool_list[pool_obj["name"]] = pool_obj + self.avi_pool_object.append(temp_pool_list) + + def write_output(self, path, nsx_ip): + # Print the Summary + workbook = xlsxwriter.Workbook( + path + os.sep + '{}_discovery_data.xlsx'.format(nsx_ip)) + + bold = workbook.add_format({'bold': True}) + deactivated = workbook.add_format({'font_color': 'red'}) + enabled = workbook.add_format({'font_color': 'green'}) + + large_heading = workbook.add_format({'bold': True, 'font_size': '20'}) + large_heading.set_align('center') + + worksheet_summary = workbook.add_worksheet('Summary') + worksheet_summary.merge_range(3, 4, 3, 7, 'Summary', large_heading) + 
worksheet_summary.set_row(3, 40) + worksheet_summary.set_column(5, 6, width=24) + + worksheet_summary.write(5, 5, "Ip Address", bold) + worksheet_summary.write(5, 6, str(nsx_ip)) + + worksheet_summary.write(6, 5, "Created on", bold) + worksheet_summary.write(6, 6, str(datetime.now()).split('.')[0]) + + total_vs = total_pools = total_enabled_vs = total_enabled_pools = total_complex_vs = 0 + total_disabled_pools = 0 + total_disabled_vs = 0 + total_vs_in_vlan = 0 + total_vs_in_overlay = 0 + total_l4_vs = 0 + total_l7_vs = 0 + + obj_data = self.avi_vs_object[0] + total_input = self.avi_vs_object + pool_obj_data = self.avi_pool_object[0] + pool_list = [] + vs_list = [] + + for vs_id in obj_data.keys(): + total_vs = total_vs + 1 + vsval = obj_data[vs_id] + if vsval.get("rules"): + total_complex_vs += 1 + if vsval.get("vs_type") == "L4": + total_l4_vs += 1 + else: + total_l7_vs += 1 + if vsval.get('pool'): + if vsval['pool'].get('members'): + pool_details = vsval['pool']['members'][0] + pool_list.append({ + 'name': vsval["pool"]['name'], + 'status': pool_details.get('state'), + 'vs_enabled': vsval["enabled"], + "id": vsval["pool"]["pool_id"] + }) + else: + pool_list.append({ + 'name': vsval["pool"]['name'], + 'status': vsval["enabled"], + 'vs_enabled': vsval["enabled"], + "id": vsval["pool"]["pool_id"] + }) + + worksheet = workbook.add_worksheet('VS') + worksheet_pool = workbook.add_worksheet('Pools') + + # writing pools + row = 0 + col = 1 + worksheet_pool.write('A1', 'Name', bold) + worksheet_pool.write('B1', "Enabled", bold) + worksheet_pool.write('C1', 'Status', bold) + for pool in pool_obj_data: + total_pools += 1 + pool_val = pool_obj_data[pool] + row = row + 1 + worksheet_pool.write(row, 0, pool_val['name'], bold) + if pool_val.get("enabled"): + worksheet_pool.write(row, 1, pool_val['enabled'], enabled) + elif pool_val.get("disabled"): + worksheet_pool.write(row, 1, pool_val['disabled'], deactivated) + pool_status = 
self.nsx_api_client.infra.realized_state.RealizedEntities. \ + list(intent_path="/infra/lb-pools/" + pool_val["id"]).to_dict()["results"][0]["runtime_status"] + if pool_status == "UP": + worksheet_pool.write(row, 2, pool_status, enabled) + else: + worksheet_pool.write(row, 2, pool_status, deactivated) + if pool_status == "UP" and pool_val.get("enabled"): + total_enabled_pools += 1 + else: + total_disabled_pools += 1 + col += 1 + + row, col = 0, 1 + + # write vs details + worksheet.write('A1', 'Name', bold) + worksheet.write('B1', 'Enabled', bold) + worksheet.write('C1', "Type", bold) + worksheet.write('D1', "Complexity", bold) + worksheet.write('E1', 'Status', bold) + worksheet.write("F1", "Network", bold) + worksheet.write("G1", "Cloud", bold) + init = 0 + for vs_id in obj_data.keys(): + row += 1 + vsval = obj_data[vs_id] + vs_id = vsval["id"] + vs_name = vsval["name"] + worksheet.write(row, 0, vs_name, bold) + status = vsval["enabled"] + v = "N" + if status: + v = "Y" + worksheet.write(row, 1, v, enabled) + else: + worksheet.write(row, 1, v, deactivated) + worksheet.write(row, 2, vsval["vs_type"]) + complexity = "Basic" + if vsval.get("rules"): + complexity = "Advanced" + worksheet.write(row, 3, complexity) + vs_status = self.nsx_api_client.infra.realized_state.RealizedEntities. 
\ + list(intent_path="/infra/lb-virtual-servers/" + vs_id).to_dict()["results"][0]["runtime_status"] + if vs_status == "UP": + worksheet.write(row, 4, vs_status, enabled) + elif vs_status == "DISABLED": + worksheet.write(row, 4, "DEACTIVATED", deactivated) + else: + worksheet.write(row, 4, vs_status, deactivated) + if vs_status == "UP" and v == "Y": + total_enabled_vs += 1 + else: + total_disabled_vs += 1 + network = vsval.get("Network") + worksheet.write(row, 5, network) + if network == "Vlan": + total_vs_in_vlan += 1 + if network == "Overlay": + total_vs_in_overlay += 1 + cloud = vsval.get("Cloud") + worksheet.write(row, 6, cloud) + + # adding some more summary + worksheet_summary.write(9, 5, "Total vs", bold) + worksheet_summary.write(9, 6, str(total_vs)) + + worksheet_summary.write(10, 5, "Total vs UP", bold) + worksheet_summary.write(10, 6, str(total_enabled_vs)) + + worksheet_summary.write(11, 5, "Total pools", bold) + worksheet_summary.write(11, 6, str(total_pools)) + + worksheet_summary.write(12, 5, "Total pools UP", bold) + worksheet_summary.write(12, 6, str(total_enabled_pools)) + + worksheet_summary.write(13, 5, "Total complex vs", bold) + worksheet_summary.write(13, 6, str(total_complex_vs)) + + worksheet_summary.write(14, 5, "Total l4 vs", bold) + worksheet_summary.write(14, 6, str(total_l4_vs)) + + worksheet_summary.write(15, 5, "Total l7 vs", bold) + worksheet_summary.write(15, 6, str(total_l7_vs)) + + worksheet_summary.write(16, 5, "Total vs in VLAN", bold) + worksheet_summary.write(16, 6, str(total_vs_in_vlan)) + + worksheet_summary.write(17, 5, "Total vs in OVERLAY", bold) + worksheet_summary.write(17, 6, str(total_vs_in_overlay)) + + print("====================") + print(" Summary") + print("====================") + print("Total vs: ", total_vs) + print("Total vs UP: ", total_enabled_vs) + print("Total pools: ", total_pools) + print("Total pools UP: ", total_enabled_pools) + print("Total complex vs: ", total_complex_vs) + print("Total l4 vs: ", 
total_l4_vs) + print("Total l7 vs: ", total_l7_vs) + print("Total vs in VLAN", total_vs_in_vlan) + print("Total vs in OVERLAY", total_vs_in_overlay) + + print("--------------------") + + workbook.close() + + def upload_alb_config(self, alb_config): + if alb_config.get("alb-health-monitors"): + self.upload_monitor_alb_config(alb_config.get("alb-health-monitors")) + + def upload_monitor_alb_config(self, alb_hm_config): + + for hm in alb_hm_config: + is_create_hm = False + try: + hm_obj = self.nsx_api_client.infra.AlbHealthMonitors.get(hm["id"]) + print(hm_obj) + except Exception as e: + print(e) + is_create_hm = True + if is_create_hm: + try: + alb_hm_obj = self.nsx_api_client.infra.AlbHealthMonitors.update(hm["id"], hm) + print(alb_hm_obj) + except Exception as e: + print(e) diff --git a/python/avi/migrationtools/nsxt_converter/persistant_converter.py b/python/avi/migrationtools/nsxt_converter/persistant_converter.py new file mode 100644 index 0000000000..80e9070b3e --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/persistant_converter.py @@ -0,0 +1,203 @@ +import logging +import os + +from avi.migrationtools.avi_migration_utils import update_count +from avi.migrationtools.nsxt_converter.conversion_util import NsxtConvUtil +import avi.migrationtools.nsxt_converter.converter_constants as conv_const +from avi.migrationtools.avi_migration_utils import MigrationUtil + +LOG = logging.getLogger(__name__) + +conv_utils = NsxtConvUtil() +common_avi_util = MigrationUtil() + + +class PersistantProfileConfigConv(object): + def __init__(self, nsxt_profile_attributes, object_merge_check, merge_object_mapping, sys_dict): + """ + + """ + self.supported_attr = nsxt_profile_attributes['PersistenceProfile_Supported_Attributes'] + self.supported_attr_cookie = nsxt_profile_attributes['CookiePersistenceProfile_Supported_Attributes'] + self.supported_attr_source = nsxt_profile_attributes['SourcePersistenceProfile_Supported_Attributes'] + self.common_na_attr = 
nsxt_profile_attributes['Common_Na_List'] + self.na_attr_source = nsxt_profile_attributes["SourcePersistenceProfile_NA_Attributes"] + self.indirect_attr_cookie = nsxt_profile_attributes["Persistence_indirect_cookie"] + self.persistence_na_attr = nsxt_profile_attributes["Persistence_na_attr"] + self.object_merge_check = object_merge_check + self.merge_object_mapping = merge_object_mapping + self.sys_dict = sys_dict + self.app_per_count = 0 + + def convert(self, alb_config, nsx_lb_config, prefix, tenant): + alb_config["ApplicationPersistenceProfile"] = list() + converted_objs = [] + skipped_list = [] + converted_alb_pp = [] + na_list = [] + indirect = [] + # Added variable to get total object count. + progressbar_count = 0 + total_size = len(nsx_lb_config['LbPersistenceProfiles']) + print("\nConverting Persistence Profile ...") + LOG.info('[ApplicationPersistenceProfile] Converting Profiles...') + for lb_pp in nsx_lb_config["LbPersistenceProfiles"]: + try: + LOG.info('[ApplicationPersistenceProfile] Migration started for {}'.format(lb_pp['display_name'])) + progressbar_count += 1 + if lb_pp['resource_type'] == 'LBGenericPersistenceProfile': + conv_utils.add_status_row('persistence', lb_pp['resource_type'], lb_pp['display_name'], + conv_const.STATUS_SKIPPED) + continue + tenant_name, name = conv_utils.get_tenant_ref(tenant) + if not tenant: + tenant = tenant_name + pp_type, name = self.get_name_type(lb_pp) + + if prefix: + name = prefix + '-' + name + if self.object_merge_check: + if name in self.merge_object_mapping['app_per_profile'].keys(): + name = name+"-"+lb_pp["id"] + else: + pp_temp = list(filter(lambda pp: pp["name"] == name, alb_config['ApplicationPersistenceProfile'])) + if pp_temp: + name = name + "-" + lb_pp["id"] + alb_pp = dict( + name=name + ) + skipped = [val for val in lb_pp.keys() + if val not in self.supported_attr] + + cookie_skipped_list, source_skipped_list = [], [] + if pp_type == "LBCookiePersistenceProfile": + na_attrs = [val for val in 
lb_pp.keys() + if val in self.common_na_attr or val in self.persistence_na_attr] + na_list.append(na_attrs) + skipped, cookie_skipped_list = self.convert_cookie(lb_pp, alb_pp, skipped, tenant) + elif pp_type == "LBSourceIpPersistenceProfile": + na_attrs = [val for val in lb_pp.keys() + if val in self.common_na_attr or val in self.na_attr_source + or val in self.persistence_na_attr] + na_list.append(na_attrs) + skipped = self.convert_source(lb_pp, alb_pp, skipped, tenant) + indirect = self.indirect_attr_cookie + + if cookie_skipped_list: + skipped.append(cookie_skipped_list) + if source_skipped_list: + skipped.append(source_skipped_list) + + skipped_list.append(skipped) + ## + if self.object_merge_check: + common_avi_util.update_skip_duplicates(alb_pp, + alb_config['ApplicationPersistenceProfile'], + 'app_per_profile', + converted_objs, name, None, self.merge_object_mapping, + pp_type, prefix, + self.sys_dict['ApplicationPersistenceProfile']) + self.app_per_count += 1 + else: + alb_config['ApplicationPersistenceProfile'].append(alb_pp) + + val = dict( + id = lb_pp["id"], + name=name, + resource_type=lb_pp['resource_type'], + alb_pp=alb_pp + + ) + converted_alb_pp.append(val) + ### + + msg = "ApplicationPersistenceProfile conversion started..." 
+ conv_utils.print_progress_bar(progressbar_count, total_size, msg, + prefix='Progress', suffix='') + + LOG.info('[ApplicationPersistenceProfile] Migration completed for HM {}'.format(lb_pp['display_name'])) + + except Exception as e: + LOG.error( + "[ApplicationPersistenceProfile] Failed to convert ApplicationPersistenceProfile: {}".format(e)) + update_count('error') + LOG.error("[ApplicationPersistenceProfile] Failed to convert ApplicationPersistenceProfile: %s" % lb_pp[ + 'display_name'], + exc_info=True) + conv_utils.add_status_row('persistence', None, lb_pp['display_name'], + conv_const.STATUS_ERROR) + + u_ignore = [] + ignore_for_defaults = {} + for index, skipped in enumerate(skipped_list): + conv_status = conv_utils.get_conv_status( + skipped_list[index], indirect, ignore_for_defaults, nsx_lb_config['LbPersistenceProfiles'], + u_ignore, na_list[index]) + app_per_na_list = [val for val in na_list[index] if val not in self.common_na_attr] + conv_status["na_list"] = app_per_na_list + name = converted_alb_pp[index]['name'] + pp_id = converted_alb_pp[index]['id'] + alb_mig_pp = converted_alb_pp[index]['alb_pp'] + resource_type = converted_alb_pp[index]['resource_type'] + if self.object_merge_check: + alb_mig_pp = [pp for pp in alb_config['ApplicationPersistenceProfile'] if + pp.get('name') == self.merge_object_mapping['app_per_profile'].get(name)] + conv_utils.add_conv_status('persistence', resource_type, name, conv_status, + [{'app_per_profile': alb_mig_pp[0]}]) + else: + conv_utils.add_conv_status('persistence', resource_type, name, conv_status, + [{'app_per_profile': alb_mig_pp}]) + if len(conv_status['skipped']) > 0: + LOG.debug('[ApplicationPersistenceProfile] Skipped Attribute {}:{}'.format(name, + conv_status['skipped'])) + + def get_name_type(self, lb_pp): + """ + + """ + return lb_pp['resource_type'], lb_pp['display_name'] + + def convert_cookie(self, lb_pp, alb_pp, skipped, tenant): + http_cookie_persistence_profile = {} + skipped_list = [] + 
final_skiped_attr = [] + if lb_pp.get("cookie_name"): + http_cookie_persistence_profile["cookie_name"] = lb_pp.get("cookie_name") + + if lb_pp.get("cookie_time", None): + http_cookie_persistence_profile["timeout"] = lb_pp.get("cookie_time")['cookie_max_idle'] + for index, i in enumerate(skipped): + if i == "cookie_time": + del skipped[index] + _skipped = [key for key in lb_pp.get("cookie_time").keys() + if key not in self.supported_attr_cookie] + for keys in _skipped: + final_skiped_attr.append(keys) + + alb_pp['http_cookie_persistence_profile'] = http_cookie_persistence_profile + alb_pp['tenant_ref'] = conv_utils.get_object_ref( + tenant, 'tenant') + alb_pp['persistence_type'] = "PERSISTENCE_TYPE_HTTP_COOKIE" + if lb_pp.get("cookie_fallback"): + alb_pp["server_hm_down_recovery"] = "HM_DOWN_PICK_NEW_SERVER" + else: + alb_pp["server_hm_down_recovery"] = "HM_DOWN_CONTINUE_PERSISTENT_SERVER" + if final_skiped_attr: + skipped_list.append({lb_pp['display_name']: final_skiped_attr}) + skipped = [key for key in skipped if key not in self.supported_attr_cookie] + if lb_pp.get("cookie_mode", None) == "INSERT": + skipped.remove("cookie_mode") + return skipped, skipped_list + + def convert_source(self, lb_pp, alb_pp, skipped, tenant): + ip_persistence_profile = {} + if lb_pp.get("timeout"): + ip_persistence_profile["ip_persistent_timeout"] = lb_pp.get("timeout") + + alb_pp['ip_persistence_profile'] = ip_persistence_profile + alb_pp['tenant_ref'] = conv_utils.get_object_ref( + tenant, 'tenant') + alb_pp['persistence_type'] = "PERSISTENCE_TYPE_CLIENT_IP_ADDRESS" + + skipped = [key for key in skipped if key not in self.supported_attr_source] + return skipped diff --git a/python/avi/migrationtools/nsxt_converter/policy_converter.py b/python/avi/migrationtools/nsxt_converter/policy_converter.py new file mode 100644 index 0000000000..1945a15a22 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/policy_converter.py @@ -0,0 +1,423 @@ +import copy +import logging + +from 
avi.migrationtools.avi_migration_utils import MigrationUtil +from avi.migrationtools.nsxt_converter.conversion_util import NsxtConvUtil +from avi.migrationtools.avi_migration_utils import update_count +import avi.migrationtools.nsxt_converter.converter_constants as conv_const +import avi.migrationtools.nsxt_converter.converter_constants as final + +LOG = logging.getLogger(__name__) + +conv_utils = NsxtConvUtil() +common_avi_util = MigrationUtil() + +class PolicyConfigConverter(object): + def __init__(self, nsxt_profile_attributes, object_merge_check, merge_object_mapping, sys_dict): + """ + + """ + self.supported_attr = nsxt_profile_attributes['VS_supported_attr'] + self.server_ssl_attr = nsxt_profile_attributes['VS_server_ssl_supported_attr'] + self.client_ssl_attr = nsxt_profile_attributes['VS_client_ssl_supported_attr'] + self.common_na_attr = nsxt_profile_attributes['Common_Na_List'] + self.VS_na_attr = nsxt_profile_attributes["VS_na_list"] + self.rule_match_na = nsxt_profile_attributes["HttpPolicySetRules_Skiped_List_MatchingCondition"] + self.rules_actions_na = nsxt_profile_attributes["HttpPolicySetRules_Skiped_List_Actions"] + self.supported_attr_httppolicyset = nsxt_profile_attributes["HttpPolicySetRules_Supported_Attributes"] + self.object_merge_check = object_merge_check + self.merge_object_mapping = merge_object_mapping + self.sys_dict = sys_dict + + + def convert(self, lb_vs_config, alb_config, cloud_name, prefix, tenant="admin"): + ''' + + ''' + self.lb_vs_config = lb_vs_config + self.alb_config = alb_config + + policy_set_name = lb_vs_config.get("display_name") + "-" + cloud_name + "-HTTP-Policy-Set" + if prefix: + policy_set_name = prefix + '-' + policy_set_name + + policy_obj = { + 'name': policy_set_name, + 'tenant_ref': conv_utils.get_object_ref(tenant, 'tenant'), + } + http_request_policy = { + 'rules': [] + } + http_security_policy = { + 'rules': [] + } + http_response_policy = { + 'rules': [] + } + + http_rules = [] + rsp_rules = [] + sec_rules 
= [] + + skipped_rule = [] + status_rule_list = [] + rules = lb_vs_config.get("rules") + print("\n") + for index, policy in enumerate(rules): + actions = policy.get("actions") + na_action_list = list(filter(lambda x: x["type"] in self.rules_actions_na, actions)) + + #If na_action_list is empty then we can mapped this rule otherwise skipped this rule + if len(na_action_list) > 0: + skipped_rule.append(policy) + status_rule_list.append('[VS-RULES: {}] SKIPPING RULE Actions Not supported {}'.format(policy_set_name, [action['type'] for action in na_action_list])) + LOG.info('[VS-RULES: {}] SKIPPING RULE Actions Not supported {}'.format(policy_set_name, [action['type'] for action in na_action_list])) + #print('[VS-RULES: {}] SKIPPING RULE Actions Not supported {}'.format(policy_set_name, [action['type'] for action in na_action_list])) + continue + if not len(na_action_list): + match_conditions = policy.get("match_conditions") + match_strategy = policy.get("match_strategy") + phase = policy.get("phase") + actions = policy.get("actions") + # if check all type of matches if any one not supported then check match_strategy is ALL + # then skip other migrate whaterver supported + if match_strategy == "ALL": + na_match_list = list(filter(lambda x: x["type"] in self.rule_match_na, match_conditions)) + if len(na_match_list) > 0: + LOG.info('[VS-RULES: {}] SKIPPING RULE One of Match Conditions is Not supported {}'.format( + policy_set_name, + [match['type'] for match in na_match_list])) + status_rule_list.append('[VS-RULES: {}] SKIPPING RULE One of Match Conditions is Not supported ' + '{}'.format(policy_set_name, + [match['type'] for match in na_match_list])) + + skipped_rule.append(policy) + continue + if not len(na_match_list): + rule_dict = dict(name="Rule {}", + index=0, + enable=True) + match = {} + for match_condition in match_conditions: + if match_condition['type'] == "LBHttpSslCondition": + # TODO Silent Skip add loggers + #GR: If action allow -> add it to the SSL 
profile; otherwise skip + continue + match = self.convert_match_conditions_to_match(match, match_condition) + rule_dict['match'] = match + rule_dict = self.convert_actions_to_avi_actions(rule_dict, actions, prefix, cloud_name) + if phase == "HTTP_REQUEST_REWRITE" or phase == "TRANSPORT": + http_rules.append(rule_dict) + elif phase == "HTTP_RESPONSE_REWRITE": + rsp_rules.append(rule_dict) + elif phase == "HTTP_ACCESS": + sec_rules.append(rule_dict) + elif phase == "HTTP_FORWARDING": + if len(actions) == 1 and actions[0]['type'] == "LBConnectionDropAction": + sec_rules.append(rule_dict) + elif rule_dict.__contains__('redirect_action') and rule_dict.__contains__('switching_action'): + redirect_action = copy.deepcopy(rule_dict) + redirect_action.pop('switching_action') + http_rules.append(redirect_action) + + switching_action = copy.deepcopy(rule_dict) + switching_action.pop('redirect_action') + http_rules.append(switching_action) + if match_strategy == "ANY": + for match_condition in match_conditions: + if match_condition["type"] in self.rule_match_na: + LOG.info('[VS-RULES: {}] SKIPPING RULE Match Condition is Not supported {}'.format( + policy_set_name, + [match_condition['type']])) + status_rule_list.append('[VS-RULES: {}] SKIPPING RULE Match Condition is Not supported {}'.format( + policy_set_name, + [match_condition['type']])) + continue + rule_dict = dict(name="Rule {}", + index=0, + enable=True) + match = {} + + if match_condition['type'] == "LBHttpSslCondition": + # TODO Silent Skip add loggers + # GR: If action allow -> add it to the SSL profile; otherwise skip + continue + match = self.convert_match_conditions_to_match(match, match_condition) + rule_dict['match'] = match + rule_dict = self.convert_actions_to_avi_actions(rule_dict, actions, prefix, cloud_name) + if phase == "HTTP_REQUEST_REWRITE" or phase == "TRANSPORT": + http_rules.append(rule_dict) + elif phase == "HTTP_RESPONSE_REWRITE": + rsp_rules.append(rule_dict) + elif phase == "HTTP_ACCESS": + 
sec_rules.append(rule_dict) + elif phase == "HTTP_FORWARDING": + if len(actions) == 1 and actions[0]['type'] == "LBConnectionDropAction": + sec_rules.append(rule_dict) + elif rule_dict.__contains__('redirect_action') and rule_dict.__contains__( + 'switching_action'): + redirect_action = copy.deepcopy(rule_dict) + redirect_action.pop('switching_action') + http_rules.append(redirect_action) + + switching_action = copy.deepcopy(rule_dict) + switching_action.pop('redirect_action') + http_rules.append(switching_action) + + for index, rule in enumerate(http_rules): + counter = index + 1 + rule['name'] = rule.get("name").format(counter) + rule['index'] = counter + + for index, rule in enumerate(sec_rules): + counter = index + 1 + rule['name'] = rule.get("name").format(counter) + rule['index'] = counter + + for index, rule in enumerate(rsp_rules): + counter = index + 1 + rule['name'] = rule.get("name").format(counter) + rule['index'] = counter + + indirect = [] + u_ignore = [] + ignore_for_defaults = {} + + conv_status = conv_utils.get_conv_status( + [], indirect, ignore_for_defaults, [], + u_ignore, []) + + + for skipped in status_rule_list: + print(skipped) + if http_rules or sec_rules or rsp_rules: + if http_rules: + http_request_policy['rules'] = http_rules + policy_obj['http_request_policy'] = http_request_policy + if sec_rules: + http_security_policy['rules'] = sec_rules + policy_obj['http_security_policy'] = http_security_policy + if rsp_rules: + http_response_policy['rules'] = rsp_rules + policy_obj['http_response_policy'] = http_response_policy + + conv_status["skipped"] = skipped_rule + conv_status["na_list"] = [] + conv_status["status"] = "PARTIAL" + conv_utils.add_conv_status('policy', None, policy_set_name, conv_status, + [{"policy_set": policy_obj}]) + + return policy_obj, status_rule_list + else: + conv_utils.add_status_row('policy', [], policy_set_name, + conv_const.STATUS_SKIPPED) + return None, status_rule_list + + + def 
convert_match_conditions_to_match(self, match, match_condition): + if match_condition.get("type") == "LBHttpResponseHeaderCondition": + hdrs = dict(value=[match_condition.get("header_value")], + match_case="SENSITIVE" if match_condition.get("case_sensitive") else "INSENSITIVE") + if match_condition.get("match_type"): + match_criteria = match_condition.get("match_type") + if match_condition.get("match_type") == "EQUALS": + match_criteria = "HDR_EQUALS" + elif match_condition.get("match_type") == "STARTS_WITH": + match_criteria = "HDR_BEGINS_WITH" + elif match_condition.get("match_type") == "ENDS_WITH": + match_criteria = "HDR_ENDS_WITH" + elif match_condition.get("match_type") == "CONTAINS" or match_condition.get( + "match_type") == "REGEX": + match_criteria = "HDR_CONTAINS" + hdrs["match_criteria"] = match_criteria + if match_condition.get("header_name"): + hdrs["hdr"] = match_condition.get("header_name") + match['rsp_hdrs'] = [hdrs] + if match_condition.get("type") == "LBHttpRequestUriCondition": + request_uri = dict(match_str=[match_condition.get("uri")], + match_case="SENSITIVE" if match_condition.get("case_sensitive") else "INSENSITIVE") + if match_condition.get("match_type"): + match_criteria = match_condition.get("match_type") + if match_condition.get("match_type") == "EQUALS": + match_criteria = "EQUALS" + elif match_condition.get("match_type") == "STARTS_WITH": + match_criteria = "BEGINS_WITH" + elif match_condition.get("match_type") == "ENDS_WITH": + match_criteria = "HDR_ENDS_WITH" + elif match_condition.get("match_type") == "CONTAINS" or match_condition.get("match_type") == "REGEX": + match_criteria = "HDR_CONTAINS" + request_uri['match_criteria'] = match_criteria + match["path"] = request_uri + if match_condition.get("type") == "LBHttpRequestHeaderCondition": + hdrs = dict(value=[match_condition.get("header_value")], + match_case="SENSITIVE" if match_condition.get("case_sensitive") else "INSENSITIVE") + if match_condition.get("match_type"): + 
match_criteria = match_condition.get("match_type") + if match_condition.get("match_type") == "EQUALS": + match_criteria = "HDR_EQUALS" + elif match_condition.get("match_type") == "STARTS_WITH": + match_criteria = "HDR_BEGINS_WITH" + elif match_condition.get("match_type") == "ENDS_WITH": + match_criteria = "HDR_ENDS_WITH" + elif match_condition.get("match_type") == "CONTAINS" or match_condition.get("match_type") == "REGEX": + match_criteria = "HDR_CONTAINS" + hdrs["match_criteria"] = match_criteria + if match_condition.get("header_name"): + hdrs["hdr"] = match_condition.get("header_name") + match['hdrs'] = [hdrs] + if match_condition.get("type") == "LBHttpRequestMethodCondition": + method = dict(methods=["HTTP_METHOD_" + match_condition.get("method")], match_criteria="IS_IN") + match["method"] = method + if match_condition.get("type") == "LBHttpRequestUriArgumentsCondition": + query = dict(match_str=[match_condition.get("uri_arguments")], + match_case="SENSITIVE" if match_condition.get("case_sensitive") else "INSENSITIVE", + match_criteria="QUERY_MATCH_CONTAINS") + match["query"] = query + if match_condition.get("type") == "LBHttpRequestVersionCondition": + version = dict(versions=["ONE_ONE" if match_condition.get("version") == "HTTP_VERSION_1_1" else "ONE_ZERO"], + match_criteria="IS_IN") + match["version"] = version + if match_condition.get("type") == "LBHttpRequestCookieCondition": + cookie = dict(name=match_condition.get("cookie_name"), + value=match_condition.get("cookie_value"), + match_case="SENSITIVE" if match_condition.get("case_sensitive") else "INSENSITIVE") + if match_condition.get("match_type"): + match_criteria = match_condition.get("match_type") + if match_condition.get("match_type") == "EQUALS": + match_criteria = "HDR_EQUALS" + elif match_condition.get("match_type") == "STARTS_WITH": + match_criteria = "HDR_BEGINS_WITH" + elif match_condition.get("match_type") == "ENDS_WITH": + match_criteria = "HDR_ENDS_WITH" + elif 
match_condition.get("match_type") == "CONTAINS" or match_condition.get("match_type") == "REGEX": + match_criteria = "HDR_CONTAINS" + cookie["match_criteria"] = match_criteria + match["cookie"] = cookie + if match_condition.get("type") == "LBIpHeaderCondition": + if match_condition.get("source_address"): + client_ip = { + "match_criteria": "IS_IN", + "addrs": [{"addr": match_condition.get("source_address"), "type": "V4"}] + } + match['client_ip'] = client_ip + elif match_condition.get("group_path"): + # TODO Need to discuss + type = match_condition.get("type") + return match + + def convert_actions_to_avi_actions(self, rule_dict, actions, prefix, cloud_name): + rule_dict['hdr_action'] = [] + for action in actions: + if action["type"] == "LBVariablePersistenceLearnAction" or action[ + 'type'] == 'LBVariablePersistenceOnAction': + # Gr: Create a new Pool with the persistent profile and same members and context switch + if self.lb_vs_config.get('pool_path'): + pool_ref = self.lb_vs_config.get('pool_path') + pool_name = pool_ref.split('/')[-1] + if prefix: + pool_name = prefix + '-' + pool_name + for pool in self.alb_config['Pool']: + if pool.get('name') == pool_name: + new_pool = copy.deepcopy(pool) + new_pool['name'] = '%s-%s' % (pool_name, final.PLACE_HOLDER_STR) + if prefix: + persistence_name = prefix + '-' + action.get('persistence_profile_path').split('/')[ + -1] + else: + persistence_name = action.get('persistence_profile_path').split('/')[-1] + new_pool[ + 'persistence_profile_ref'] = '/api/applicationpersistenceprofile/?tenant=admin&name=' + persistence_name + self.alb_config['Pool'].append(new_pool) + rule_dict['switching_action'] = {'action': 'HTTP_SWITCHING_SELECT_POOL', + "pool_ref": conv_utils.get_object_ref( + new_pool['name'], 'pool', tenant="admin", + cloud_name=cloud_name)} + if action["type"] == "LBHttpRequestUriRewriteAction": + rule_dict['rewrite_url_action'] = {} + path = {"type": "URI_PARAM_TYPE_TOKENIZED", + "tokens": [{'type': 
'URI_TOKEN_TYPE_STRING', 'str_value': action["uri"]}]} + rule_dict['rewrite_url_action']['path'] = path + if action.get("uri_arguments", None): + query = {'keep_query': True, 'add_string': action.get("uri_arguments", None)} + rule_dict['rewrite_url_action']['query'] = query + if action['type'] == "LBHttpRequestHeaderRewriteAction": + hdr_action = {'action': 'HTTP_REPLACE_HDR', 'hdr': + {'name': action.get("header_name"), 'value': {'val': action.get("header_value")}}} + rule_dict['hdr_action'].append(hdr_action) + if action['type'] == "LBHttpRequestHeaderDeleteAction": + hdr_action = {'action': 'HTTP_REMOVE_HDR', 'hdr': {'name': action.get("header_name")}} + rule_dict['hdr_action'].append(hdr_action) + if action["type"] == "LBHttpResponseHeaderRewriteAction": + hdr_action = {'action': 'HTTP_REPLACE_HDR', 'hdr': + {'name': action.get("header_name"), 'value': {'val': action.get("header_value")}}} + rule_dict['hdr_action'].append(hdr_action) + if action["type"] == "LBHttpResponseHeaderDeleteAction": + hdr_action = {'action': 'HTTP_REMOVE_HDR', 'hdr': + {'name': action.get("header_name")}} + rule_dict['hdr_action'].append(hdr_action) + if action["type"] == "LBSelectPoolAction": + pool_ref = action.get('pool_id') + pool_name = pool_ref.split('/')[-1] + if prefix: + pool_name = prefix + '-' + pool_name + rule_dict['switching_action'] = {'action': 'HTTP_SWITCHING_SELECT_POOL', + "pool_ref": conv_utils.get_object_ref( + pool_name, 'pool', tenant="admin", cloud_name=cloud_name)} + if action["type"] == "LBConnectionDropAction": + rule_dict['action'] = {'action': 'HTTP_SECURITY_ACTION_CLOSE_CONN'} + if action["type"] == "LBHttpRedirectAction" and action.get("redirect_url").__contains__("http"): + redirect_url = action.get("redirect_url") + host_protocol = redirect_url.split("://") + + protocol = host_protocol[0].upper() + host_path = host_protocol[1].split("/") + + port = 80 if protocol == "HTTP" else 443 + + redirect_action = { + "protocol": protocol, + "port": port, + 
"status_code": "HTTP_REDIRECT_STATUS_CODE_{}".format(action.get("redirect_status")), + "host": { + "type": "URI_PARAM_TYPE_TOKENIZED", + "tokens": [ + { + "type": "URI_TOKEN_TYPE_STRING", + "str_value": host_path[0] + } + ] + }, + } + if len(host_path) > 1: + redirect_action["path"] = { + "type": "URI_PARAM_TYPE_TOKENIZED", + "tokens": [ + { + "type": "URI_TOKEN_TYPE_STRING", + "str_value": host_path[1] + } + ] + } + + rule_dict['redirect_action'] = redirect_action + if action['type'] == "LBHttpRejectAction": + # TODO need to discuss + continue + security_policy_counter = security_policy_counter + 1 + rule_dict = dict(name="Rule {}".format(security_policy_counter), + index=security_policy_counter, + enable=True) + rule_dict['action'] = {'action': 'HTTP_SECURITY_ACTION_SEND_RESPONSE', + 'status_code': 'HTTP_LOCAL_RESPONSE_STATUS_CODE_{}'.format( + action.get("reply_status"))} + match_conditions = policy.get("match_conditions") + match = {} + for match_condition in match_conditions: + match = self.convert_match_conditions_to_match(match, match_condition) + if match: rule_dict["match"] = match + httppolicyset['http_security_policy']['rules'].append(rule_dict) + + if not rule_dict['hdr_action']: + rule_dict.pop('hdr_action') + return rule_dict + diff --git a/python/avi/migrationtools/nsxt_converter/pools_converter.py b/python/avi/migrationtools/nsxt_converter/pools_converter.py new file mode 100755 index 0000000000..77807b417f --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/pools_converter.py @@ -0,0 +1,429 @@ +import copy +import logging +import random +import time +from random import randint + +from avi.migrationtools.avi_migration_utils import update_count +from avi.migrationtools.nsxt_converter.conversion_util import NsxtConvUtil +import avi.migrationtools.nsxt_converter.converter_constants as conv_const +from avi.migrationtools.nsxt_converter.nsxt_util import get_object_segments, get_lb_service_name + +LOG = logging.getLogger(__name__) + +conv_utils = 
NsxtConvUtil() +skipped_pools_list = [] +vs_pool_segment_list = dict() +vs_sorry_pool_segment_list = dict() + + +class PoolConfigConv(object): + def __init__(self, nsxt_pool_attributes, object_merge_check, merge_object_mapping, sys_dict): + """ + :param nsxt_pool_attributes: Supported attributes for pool migration + """ + self.supported_attr = nsxt_pool_attributes['Pool_supported_attr'] + self.server_attributes = nsxt_pool_attributes[ + 'Pool_supported_attr_convert_servers_config'] + self.member_group_attr = nsxt_pool_attributes[ + 'Pool_supported_attr_convert_member_group'] + self.common_na_attr = nsxt_pool_attributes['Common_Na_List'] + self.pool_na_attr = nsxt_pool_attributes['Pool_na_list'] + self.object_merge_check = object_merge_check + self.merge_object_mapping = merge_object_mapping + self.sys_dict = sys_dict + + def convert(self, alb_config, nsx_lb_config, prefix, tenant): + ''' + LBPool to Avi Config pool converter + ''' + alb_config['Pool'] = list() + alb_config['PoolGroup'] = list() + progressbar_count = 0 + pool_list =[] + total_size = len(nsx_lb_config['LbPools']) + print("\nConverting Pools ...") + LOG.info('[POOL] Converting Pools...') + for lb_pl in nsx_lb_config['LbPools']: + try: + LOG.info('[POOL] Migration started for Pool {}'.format(lb_pl['display_name'])) + progressbar_count += 1 + tenant_name, name = conv_utils.get_tenant_ref(tenant) + if not tenant: + tenant = tenant_name + + lb_type, name = self.get_name_type(lb_pl) + alb_pl = { + 'lb_algorithm': lb_type, + } + vs_list = [vs["id"] for vs in nsx_lb_config["LbVirtualServers"] if + (vs.get("pool_path") and vs.get("pool_path").split("/")[-1] == lb_pl.get("id"))] + vs_list_for_sorry_pool = [vs["id"] for vs in nsx_lb_config["LbVirtualServers"] if + (vs.get("sorry_pool_path") and vs.get("sorry_pool_path").split("/")[-1] == lb_pl.get("id"))] + if prefix: + name = prefix+"-"+name + pool_temp = list(filter(lambda pl: pl["name"] == name, alb_config['Pool'])) + if pool_temp: + name = name + "-" + 
lb_pl["id"] + pool_skip = True + pool_count = 0 + if lb_pl.get("members") : + if vs_list: + for member in lb_pl.get("members"): + lb_list = {} + for vs_id in vs_list: + if vs_id in vs_pool_segment_list.keys(): + pool_skip = False + continue + lb = get_lb_service_name(vs_id) + if not lb: + continue + pool_segment = get_object_segments(vs_id, + member["ip_address"]) + if pool_segment: + pool_skip = False + if lb in lb_list.keys(): + # pool_list = lb_list[lb] + # pool_list["name"] = pool_list["name"]+"-"+vs_id + vs_pool_segment_list[vs_id] = lb_list[lb] + continue + + if pool_count == 0: + vs_pool_segment_list[vs_id] = { + "pool_name": name, + "pool_segment": pool_segment + } + lb_list[lb] = vs_pool_segment_list[vs_id] + + else: + new_pool_name = '%s-%s' % (name, pool_segment[0]["subnets"]["network_range"]) + new_pool_name = new_pool_name.replace('/', '-') + vs_pool_segment_list[vs_id] = { + "pool_name": new_pool_name, + "pool_segment": pool_segment + } + lb_list[lb] = vs_pool_segment_list[vs_id] + pool_count += 1 + for vs_id in vs_list_for_sorry_pool: + if vs_id in vs_sorry_pool_segment_list.keys(): + pool_skip = False + continue + lb = get_lb_service_name(vs_id) + pool_segment = get_object_segments(vs_id, + member["ip_address"]) + if pool_segment: + pool_skip = False + if lb in lb_list.keys(): + vs_sorry_pool_segment_list[vs_id] = lb_list[lb] + continue + + if pool_count == 0: + vs_sorry_pool_segment_list[vs_id] = { + "pool_name": name, + "pool_segment": pool_segment + } + lb_list[lb] = vs_sorry_pool_segment_list[vs_id] + + else: + vs_sorry_pool_segment_list[vs_id] = { + "pool_name": '%s-%s' % (name, pool_segment[0]["subnets"]["network_range"]), + "pool_segment": pool_segment + } + lb_list[lb] = vs_sorry_pool_segment_list[vs_id] + pool_count += 1 + + if pool_skip: + skipped_pools_list.append(name) + skip_msg='Member ip not falling in segment rnge' + conv_utils.add_status_row('pool', None, lb_pl['display_name'], + conv_const.STATUS_SKIPPED, skip_msg) + 
LOG.warning("POOL {} not migrated. Reason: {}".format(name, + skip_msg)) + continue + else: + skipped_pools_list.append(name) + skip_msg = 'Pool does not contains members' + conv_utils.add_status_row('pool', None, lb_pl['display_name'], + conv_const.STATUS_SKIPPED, skip_msg) + LOG.warning("POOL {} not migrated. Reason: {}".format(name, + skip_msg)) + continue + + na_list = [val for val in lb_pl.keys() + if val in self.common_na_attr or val in self.pool_na_attr] + servers, member_skipped_config, skipped_servers, limits = \ + self.convert_servers_config(lb_pl.get("members", [])) + alb_pl["name"] = name + alb_pl["servers"] = servers + + if any(server.get("port") == None for server in servers): + alb_pl.update({"use_service_port": "true"}) + alb_pl['tenant_ref'] = conv_utils.get_object_ref( + tenant, 'tenant') + + if lb_pl.get("tcp_multiplexing_enabled"): + # TO-DO - HANDLE In APPLICATION PROFILE + # Need to set in Application profile + LOG.info('[POOL] tcp_multiplexing_enabled Needs to Handle in Application Profile.') + pass + + if lb_pl.get("tcp_multiplexing_number"): + alb_pl['conn_pool_properties'] = { + 'upstream_connpool_server_max_cache': lb_pl.get( + 'tcp_multiplexing_number') + } + if lb_pl.get('min_active_members'): + alb_pl['min_servers_up'] = lb_pl.get('min_active_members') + if limits.get('connection_limit', 0) > 0: + alb_pl['max_concurrent_connections_per_server'] = \ + limits['connection_limit'] + + skipped_list_mg = [] + if lb_pl.get('member_group'): + skipped_mg = [val for val in + lb_pl.get('member_group').keys() + if val not in self.member_group_attr] + skipped_list_mg.append({"skipped_mg": skipped_mg}) + if lb_pl['member_group'].get('group_path'): + alb_pl['nsx_securitygroup'] = [ + lb_pl.get('member_group').get('group_path') + ] + if lb_pl['member_group'].get("port", None): + alb_pl['default_server_port'] = lb_pl[ + 'member_group'].get("port") + if lb_pl.get("snat_translation"): + # TO-DO - HANDLE In APPLICATION PROFILE + # Need to set in 
Application profile + LOG.info('[POOL] snat_translation Needs to Handle in Application Profile.') + pass + + active_monitor_paths = lb_pl.get("active_monitor_paths", None) + if active_monitor_paths: + monitor_refs = [] + for lb_hm_path in active_monitor_paths: + ref = lb_hm_path.split("/lb-monitor-profiles/")[1] + hm_config = list( + filter(lambda pr: pr["id"] == ref, nsx_lb_config["LbMonitorProfiles"])) + hm_name = hm_config[0]["display_name"] + if prefix: + hm_name = prefix + "-" + hm_name + if hm_name in [monitor_obj.get('name') for monitor_obj in alb_config['HealthMonitor']]: + hm_name = hm_name + elif self.object_merge_check: + if hm_name in self.merge_object_mapping['health_monitor'].keys(): + hm_name = self.merge_object_mapping['health_monitor'].get(hm_name) + else: + continue + monitor_refs.append( + "/api/healthmonitor/?tenant=%s&name=%s" % (tenant, hm_name)) + + alb_pl["health_monitor_refs"] = list(set(monitor_refs)) + skipped = [val for val in lb_pl.keys() + if val not in self.supported_attr] + ## + if vs_list_for_sorry_pool: + is_pg, pg_dict = self.check_for_pool_group(servers,sorry_pool=True) + else: + is_pg, pg_dict = self.check_for_pool_group(servers) + converted_objs = dict() + if is_pg: + converted_objs = self.convert_for_pg(pg_dict, + alb_pl, name, + tenant, alb_config) + else: + converted_objs['pools'] = [alb_pl] + pool_list += converted_objs['pools'] + if 'pg_obj' in converted_objs: + alb_config['PoolGroup'].extend(converted_objs['pg_obj']) + + ## + indirect = [] + u_ignore = [] + ignore_for_defaults = {} + if skipped_servers: + skipped.append({"server": skipped_servers}) + if member_skipped_config: + skipped.append(member_skipped_config) + if skipped_list_mg: + skipped.append(skipped_list_mg) + + conv_status = conv_utils.get_conv_status( + skipped, indirect, ignore_for_defaults, + nsx_lb_config['LbPools'], u_ignore, na_list) + na_list = [val for val in na_list if val not in self.common_na_attr] + conv_status["na_list"] = na_list + 
conv_utils.add_conv_status('pool',None,alb_pl['name'],conv_status,converted_objs) +# conv_utils.add_conv_status( +# 'pool', None, alb_pl['name'], conv_status, + # {'pools': [alb_pl]}) + msg = "Pools conversion started..." + conv_utils.print_progress_bar( + progressbar_count, total_size, msg, prefix='Progress', + suffix='') + +# alb_config['Pool'].append(alb_pl) + # time.sleep(0.1) + + if len(conv_status['skipped']) > 0: + LOG.debug('[POOL] Skipped Attribute {}:{}'.format(lb_pl['display_name'], conv_status['skipped'])) + LOG.info('[POOL] Migration completed for Pool {}'.format(lb_pl['display_name'])) + except: + update_count('error') + LOG.error("[POOL] Failed to convert pool: %s" % lb_pl['display_name'], + exc_info=True) + conv_utils.add_status_row('pool', None, lb_pl['display_name'], + conv_const.STATUS_ERROR) + alb_config['Pool'] = pool_list + + def get_name_type(self, lb_pl): + type = "" + if lb_pl['algorithm'] in ['ROUND_ROBIN', 'WEIGHTED_ROUND_ROBIN']: + type = 'LB_ALGORITHM_ROUND_ROBIN' + elif lb_pl['algorithm'] in ['LEAST_CONNECTION', + 'WEIGHTED_LEAST_CONNECTION']: + type = 'LB_ALGORITHM_LEAST_CONNECTION' + elif lb_pl['algorithm'] == 'IP_HASH': + type = 'LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS' + return type, lb_pl['display_name'] + + def convert_servers_config(self, servers_config): + server_list = [] + skipped_list = [] + server_skipped = [] + connection_limit = [] + pg_obj =[] + for member in servers_config: + server_obj = { + 'ip': { + 'addr': member['ip_address'], + 'type': 'V4' + }, + 'description': member.get("display_name"), + } + if member["backup_member"]: + server_obj['backup_member'] = member["backup_member"] + if member.get("port", ""): + server_obj['port'] = int(member.get("port")) + else: + server_skipped.append(member.get("display_name")) + + if member.get("weight"): + server_obj['ratio'] = member.get('weight') + + server_obj["enabled"] = False + if member.get("admin_state") == "ENABLED": + server_obj['enabled'] = True + if 
member.get("max_concurrent_connections"): + c_lim = int(member.get("max_concurrent_connections", '0')) + if c_lim > 0: + connection_limit.append(c_lim) + server_list.append(server_obj) + + skipped = [key for key in member.keys() + if key not in self.server_attributes] + if skipped: + skipped_list.append({member['display_name']: skipped}) + limits = dict() + if connection_limit: + limits['connection_limit'] = min(connection_limit) + return server_list, skipped_list, server_skipped, limits + + # + def check_for_pool_group(self, servers, sorry_pool=False): + """ + Check if the priority group for the server exist + :param servers: List of servers to check server priority + :return: if priority exist returns true and priority wise + dict of servers + """ + # + + pool_bmd = [] + pool_bme = [] + pg_dict={} + is_pool_group= False + for member in servers: + if member.get("backup_member"): + pool_bme.append(member) + else: + pool_bmd.append(member) + if pool_bme and pool_bmd: + is_pool_group = True + if not sorry_pool: + bmd_priority = "3" + bme_priority = "2" + else: + bmd_priority = "1" + bme_priority = "0" + priority_list = pg_dict.get(bmd_priority, []) + priority_list=pool_bmd + pg_dict[bmd_priority] = priority_list + pg_dict[bmd_priority] = pg_dict[bmd_priority][0] + priority_list = pg_dict.get(bme_priority, []) + priority_list=pool_bme + pg_dict[bme_priority]=priority_list + pg_dict[bme_priority] = pg_dict[bme_priority][0] + + elif pool_bme: + is_pool_group = True + if not sorry_pool: + priority = "2" + else: + priority = "0" + priority_list = pg_dict.get(priority, []) + priority_list = pool_bme + pg_dict[priority]=priority_list + pg_dict[priority] = pg_dict[priority][0] + + else: + if sorry_pool: + is_pool_group = True + priority = "1" + priority_list = pg_dict.get(priority, []) + priority_list=pool_bmd + pg_dict[priority]=priority_list + pg_dict[priority]=pg_dict[priority][0] + + return is_pool_group, pg_dict + + def convert_for_pg(self, pg_dict, pool_obj, name, 
tenant, avi_config, + ): + """ + Creates a pool group object + :param pg_dict: priority wise sorted dict of pools + :param pool_obj: Converted f5 pool object + :param name: name of the pool + :param tenant: tenant name for tenant reference + :param avi_config: Avi config to add temporary labels + :return: + """ + pg_members = [] + pools = [] + for priority in pg_dict: + priority_pool = copy.deepcopy(pool_obj) + priority_pool['servers'] = [pg_dict[priority]] + priority_pool_ref = '%s-%s' % (name, priority) + # Added prefix for objects + priority_pool['name'] = priority_pool_ref + pools.append(priority_pool) + if priority_pool_ref: + member = { + 'pool_ref': conv_utils.get_object_ref(priority_pool_ref,'pool',tenant=tenant), + 'priority_label': priority + } + pg_members.append(member) + + pg_obj = { + 'name': name, + 'members': pg_members, + } + + pg_obj['tenant_ref'] = conv_utils.get_object_ref(tenant, 'tenant') + converted_objs = { + 'pools': pools, + 'pg_obj': [pg_obj] + } + return converted_objs + + # + diff --git a/python/avi/migrationtools/nsxt_converter/profile_converter.py b/python/avi/migrationtools/nsxt_converter/profile_converter.py new file mode 100755 index 0000000000..8ca26e38b5 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/profile_converter.py @@ -0,0 +1,239 @@ +import time, logging + +from avi.migrationtools.avi_migration_utils import update_count +from avi.migrationtools.nsxt_converter.conversion_util import NsxtConvUtil +import avi.migrationtools.nsxt_converter.converter_constants as conv_const +from avi.migrationtools.avi_migration_utils import MigrationUtil + +LOG = logging.getLogger(__name__) + +conv_utils = NsxtConvUtil() +common_avi_util = MigrationUtil() + + +class ProfileConfigConv(object): + def __init__(self, nsxt_profile_attributes, object_merge_check, merge_object_mapping, sys_dict): + """ + + """ + self.ap_http_supported_attributes = nsxt_profile_attributes['Application_Http_Profile_supported_attr'] + self.common_na_attr = 
nsxt_profile_attributes['Common_Na_List'] + self.http_na_attr = nsxt_profile_attributes["http_na_list"] + self.tcp_na_attr = nsxt_profile_attributes["tcp_na_list"] + self.np_supported_attributes = nsxt_profile_attributes['Network_Profile_supported_attr'] + self.object_merge_check = object_merge_check + self.merge_object_mapping = merge_object_mapping + self.sys_dict = sys_dict + self.app_pr_count = 0 + self.np_pr_count = 0 + + def convert(self, alb_config, nsx_lb_config, prefix, tenant): + alb_config['ApplicationProfile'] = list() + alb_config['NetworkProfile'] = list() + skipped_ap = [] + skipped_np = [] + na_ap = [] + na_np = [] + attr_ap = [] + attr_np = [] + progressbar_count = 0 + converted_objs = [] + total_size = len(nsx_lb_config['LbAppProfiles']) + print("\nConverting Profiles ...") + LOG.info('[APPLICATION-PROFILE] Converting Profiles...') + for lb_pr in nsx_lb_config['LbAppProfiles']: + try: + LOG.info('[APPLICATION-PROFILE] Migration started for AP {}'.format(lb_pr['display_name'])) + progressbar_count += 1 + name = lb_pr.get('display_name') + if prefix: + name = prefix + '-' + name + tenant_name, t_name = conv_utils.get_tenant_ref(tenant) + if not tenant: + tenant = tenant_name + + alb_pr = dict( + name=name, + tenant_ref=conv_utils.get_object_ref(tenant, 'tenant') + ) + + if lb_pr['resource_type'] == 'LBHttpProfile': + na_list = [val for val in lb_pr.keys() + if val in self.common_na_attr or val in self.http_na_attr] + self.convert_http(alb_pr, lb_pr) + if lb_pr['resource_type'] == 'LBFastUdpProfile': + self.convert_udp(alb_pr, lb_pr) + na_list = [val for val in lb_pr.keys() + if val in self.common_na_attr] + if lb_pr['resource_type'] == 'LBFastTcpProfile': + na_list = [val for val in lb_pr.keys() + if val in self.common_na_attr or val in self.tcp_na_attr] + self.convert_tcp(alb_pr, lb_pr) + + indirect = [] + u_ignore = [] + ignore_for_defaults = {} + + if lb_pr['resource_type'] == 'LBHttpProfile': + if self.object_merge_check: + if name in 
self.merge_object_mapping['app_profile'].keys(): + name = name + "-" + lb_pr["id"] + else: + profile_temp = list(filter(lambda pr: pr["name"] == name, alb_config['ApplicationProfile'])) + if profile_temp: + name = name + "-" + lb_pr["id"] + alb_pr["name"] = name + skipped = [val for val in lb_pr.keys() + if val not in self.ap_http_supported_attributes] + if lb_pr.get("description"): + alb_pr["description"] = lb_pr['description'] + + if self.object_merge_check: + common_avi_util.update_skip_duplicates(alb_pr, + alb_config['ApplicationProfile'], 'app_profile', + converted_objs, name, None, + self.merge_object_mapping, + lb_pr['resource_type'], prefix, + self.sys_dict['ApplicationProfile']) + self.app_pr_count += 1 + else: + alb_config['ApplicationProfile'].append(alb_pr) + skipped_ap.append(skipped) + val = dict( + id=lb_pr["id"], + name=alb_pr['name'], + resource_type=lb_pr['resource_type'], + alb_pr=alb_pr) + attr_ap.append(val) + na_ap.append(na_list) + + else: + if self.object_merge_check: + if name in self.merge_object_mapping['network_profile'].keys(): + name = name + "-" + lb_pr["id"] + else: + profile_temp = list(filter(lambda pr: pr["name"] == name, alb_config['NetworkProfile'])) + if profile_temp: + name = name + "-" + lb_pr["id"] + alb_pr["name"] = name + skipped = [val for val in lb_pr.keys() + if val not in self.np_supported_attributes] + + if self.object_merge_check: + common_avi_util.update_skip_duplicates(alb_pr, + alb_config['NetworkProfile'], 'network_profile', + converted_objs, name, None, + self.merge_object_mapping, + lb_pr['resource_type'], prefix, + self.sys_dict['NetworkProfile']) + self.np_pr_count += 1 + else: + alb_config['NetworkProfile'].append(alb_pr) + skipped_np.append(skipped) + val = dict( + id=lb_pr["id"], + name=alb_pr['name'], + resource_type=lb_pr['resource_type'], + alb_pr=alb_pr) + attr_np.append(val) + na_np.append(na_list) + + msg = "Profile conversion started..." 
+ conv_utils.print_progress_bar(progressbar_count, total_size, msg, + prefix='Progress', suffix='') + # time.sleep(1) + + LOG.info('[APPLICATION-PROFILE] Migration completed for AP {}'.format(lb_pr['display_name'])) + except: + update_count('error') + LOG.error("[APPLICATION-PROFILE] Failed to convert ApplicationProfile: %s" % lb_pr['display_name'], + exc_info=True) + conv_utils.add_status_row('applicationprofile', None, lb_pr['display_name'], + conv_const.STATUS_ERROR) + + if len(skipped_ap): + for index, skipped in enumerate(skipped_ap): + conv_status = conv_utils.get_conv_status( + skipped, indirect, ignore_for_defaults, nsx_lb_config['LbAppProfiles'], + u_ignore, na_ap[index]) + na_list = [val for val in na_ap[index] if val not in self.common_na_attr] + conv_status["na_list"] = na_list + name = attr_ap[index]['name'] + profile_id = attr_ap[index]['id'] + alb_mig_app_pr = attr_ap[index]['alb_pr'] + if self.object_merge_check: + alb_mig_app_pr = [app_pr for app_pr in alb_config['ApplicationProfile'] if + app_pr.get('name') == self.merge_object_mapping['app_profile'].get(name)] + conv_utils.add_conv_status('applicationprofile', attr_ap[index]['resource_type'], + attr_ap[index]['name'], conv_status, + [{'application_http_profile': alb_mig_app_pr[0]}]) + else: + conv_utils.add_conv_status('applicationprofile', attr_ap[index]['resource_type'], + attr_ap[index]['name'], conv_status, + [{'application_http_profile': alb_mig_app_pr}]) + if len(conv_status['skipped']) > 0: + LOG.debug('[APPLICATION-PROFILE] Skipped Attribute {}:{}'.format(attr_ap[index]['name'], + conv_status['skipped'])) + + if len(skipped_np): + for index, skipped in enumerate(skipped_np): + conv_status = conv_utils.get_conv_status( + skipped, indirect, ignore_for_defaults, nsx_lb_config['LbAppProfiles'], + u_ignore, na_np[index]) + na_list = [val for val in na_np[index] if val not in self.common_na_attr] + conv_status["na_list"] = na_list + name = attr_np[index]['name'] + profile_id = 
attr_np[index]['id'] + alb_mig_np_pr = attr_np[index]['alb_pr'] + if self.object_merge_check: + alb_mig_np_pr = [np_pr for np_pr in alb_config['NetworkProfile'] if + np_pr.get('name') == self.merge_object_mapping['network_profile'].get( + name)] + conv_utils.add_conv_status('applicationprofile', attr_np[index]['resource_type'], + attr_np[index]['name'], conv_status, + [{'network_profile': alb_mig_np_pr[0]}]) + else: + conv_utils.add_conv_status('applicationprofile', attr_np[index]['resource_type'], + attr_np[index]['name'], conv_status, + [{'network_profile': alb_mig_np_pr}]) + if len(conv_status['skipped']) > 0: + LOG.debug('[APPLICATION-PROFILE] Skipped Attribute {}:{}'.format(attr_np[index]['name'], + conv_status['skipped'])) + + def convert_http(self, alb_pr, lb_pr): + alb_pr['type'] = 'APPLICATION_PROFILE_TYPE_HTTP' + alb_pr['http_profile'] = dict( + xff_enabled=lb_pr.get('xForwardedFor', False), + http_to_https=lb_pr.get('httpRedirectToHttps', False), + keepalive_timeout=lb_pr.get('idle_timeout'), + client_max_header_size=lb_pr.get('request_header_size'), + keepalive_header=lb_pr.get('server_keep_alive'), + max_response_headers_size=lb_pr.get("response_header_size"), + detect_ntlm_app=lb_pr.get("ntlm") + ) + if lb_pr.get('request_body_size', None): + alb_pr['http_profile']['client_max_body_size'] = lb_pr.get('request_body_size', None) + alb_pr["preserve_client_ip"] = False + if lb_pr.get("http_redirect_to"): + # TODO + print("http_redirect_to") + + def convert_udp(self, alb_pr, lb_pr): + alb_pr['profile'] = dict( + type='PROTOCOL_TYPE_UDP_FAST_PATH', + udp_fast_path_profile=self.fast_profile_path(lb_pr) + ) + alb_pr["connection_mirror"] = lb_pr.get("flow_mirroring_enabled") + + def convert_tcp(self, alb_pr, lb_pr): + alb_pr['profile'] = dict( + type='PROTOCOL_TYPE_TCP_FAST_PATH', + tcp_fast_path_profile=self.fast_profile_path(lb_pr) + ) + alb_pr["connection_mirror"] = lb_pr.get("ha_flow_mirroring_enabled") + + def fast_profile_path(self, lb_pr): + path = 
dict( + session_idle_timeout=lb_pr.get('idle_timeout') + ) + return path diff --git a/python/avi/migrationtools/nsxt_converter/rollback.py b/python/avi/migrationtools/nsxt_converter/rollback.py new file mode 100755 index 0000000000..36897b0e54 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/rollback.py @@ -0,0 +1,127 @@ +# !/usr/bin/env python3 +import logging +import os +import json +import argparse +from datetime import datetime +from avi.migrationtools.avi_converter import AviConverter +from avi.migrationtools.avi_migration_utils import get_count +from avi.migrationtools.nsxt_converter.nsxt_util import NSXUtil + +LOG = logging.getLogger(__name__) + + +class NsxtAlbRollback(AviConverter): + def __init__(self, args): + ''' + + :param args: + ''' + self.nsxt_ip = args.nsxt_ip + self.nsxt_user = args.nsxt_user + self.nsxt_passord = args.nsxt_password + self.nsxt_port = args.nsxt_port + + self.controller_ip = args.alb_controller_ip + self.controller_version = args.alb_controller_version + self.user = args.alb_controller_user + self.password = args.alb_controller_password + self.rollback_vs = None + if args.rollback: + self.rollback_vs = \ + (set(args.rollback) if type(args.rollback) == list + else set(args.rollback.split(','))) + self.output_file_path = args.output_file_path if args.output_file_path \ + else 'output' + + output_dir = os.path.normpath(self.output_file_path) + + # Load values from state file if not given on command line while executing script + if self.nsxt_ip: + output_path = output_dir + os.path.sep + self.nsxt_ip + os.path.sep + "output" + with open(output_path + os.path.sep + "state.json", 'r') as file: + data = json.load(file) + if not self.nsxt_user: + self.nsxt_user = data.get('nsxt_user') + if not self.nsxt_port: + self.nsxt_port = data.get('nsxt_port') + if not self.controller_ip: + self.controller_ip = data.get('alb_controller_ip') + if not self.controller_version: + self.controller_version = data.get('alb_controller_version') + 
if not self.user: + self.user = data.get('alb_controller_user') + if not self.password: + self.password = data.get('alb_controller_password') + if not self.output_file_path: + self.output_file_path = data.get('output_file_path') + + input_path = None + self.input_data = None + if self.nsxt_ip: + input_path = output_dir + os.path.sep + self.nsxt_ip + os.path.sep + "input" + else: + input_path = output_dir + os.path.sep + "config-output" + os.path.sep + "input" + with open(input_path + os.path.sep + "config.json", 'r') as file: + self.input_data = json.load(file) + + def initiate_rollback(self): + + if not os.path.exists(self.output_file_path): + os.mkdir(self.output_file_path) + self.init_logger_path() + + cutover_msg = "Performing rollback for applications" + LOG.debug(cutover_msg) + print(cutover_msg) + nsx_util = NSXUtil(self.nsxt_user, self.nsxt_passord, self.nsxt_ip, self.nsxt_port \ + , self.controller_ip, self.user, self.password, self.controller_version) + nsx_util.rollback_vs(self.rollback_vs, self.input_data) + + print("Total Warning: ", get_count('warning')) + print("Total Errors: ", get_count('error')) + LOG.info("Total Warning: {}".format(get_count('warning'))) + LOG.info("Total Errors: {}".format(get_count('error'))) + + +if __name__ == "__main__": + HELP_STR = """ + Usage: + python nsxt_converter.py -n 192.168.100.101 -u admin -p password + """ + + parser = argparse.ArgumentParser( + formatter_class=argparse.RawTextHelpFormatter, description=HELP_STR) + + parser.add_argument('-c', '--alb_controller_ip', + help='controller ip for auto upload') + parser.add_argument('--alb_controller_version', + help='Target Avi controller version') + parser.add_argument('--alb_controller_user', + help='controller username') + parser.add_argument('--alb_controller_password', + help='controller password. 
Input ' + 'prompt will appear if no value provided', required=True) + parser.add_argument('-n', '--nsxt_ip', + help='Ip of NSXT', required=True) + parser.add_argument('-u', '--nsxt_user', + help='NSX-T User name') + parser.add_argument('-p', '--nsxt_password', + help='NSX-T Password', required=True) + parser.add_argument('-port', '--nsxt_port', default=443, + help='NSX-T Port') + parser.add_argument('-o', '--output_file_path', + help='Folder path for output files to be created in', + ) + # Added command line args to take skip type for ansible playbook + parser.add_argument('--rollback', + help='comma separated names of virtualservices for cutover.\n', + required=True) + + start = datetime.now() + args = parser.parse_args() + nsxtalb_rollback = NsxtAlbRollback(args) + nsxtalb_rollback.initiate_rollback() + end = datetime.now() + print("The time of execution of above program is :", + str(end - start)) diff --git a/python/avi/migrationtools/nsxt_converter/ssl_profile_converter.py b/python/avi/migrationtools/nsxt_converter/ssl_profile_converter.py new file mode 100755 index 0000000000..c2d296b412 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/ssl_profile_converter.py @@ -0,0 +1,266 @@ +import logging +import os + +from avi.migrationtools.avi_migration_utils import update_count, MigrationUtil +from avi.migrationtools.nsxt_converter.conversion_util import NsxtConvUtil +import avi.migrationtools.nsxt_converter.converter_constants as final +import avi.migrationtools.nsxt_converter.converter_constants as conv_const + +LOG = logging.getLogger(__name__) + +conv_utils = NsxtConvUtil() +common_avi_util = MigrationUtil() + + +class SslProfileConfigConv(object): + def __init__(self, nsxt_profile_attributes, object_merge_check, merge_object_mapping, sys_dict): + """ + + """ + self.supported_client_ssl_attributes = nsxt_profile_attributes['SSLProfile_Client_Supported_Attributes'] + self.supported_server_ssl_attributes = 
nsxt_profile_attributes['SSLProfile_Server_Supported_Attributes'] + self.common_na_attr = nsxt_profile_attributes['Common_Na_List'] + self.indirect_client_ssl_attr = nsxt_profile_attributes["SSLProfile_Client_Indirect_Attributes"] + self.indirect_server_ssl_attr = nsxt_profile_attributes["SSLProfile_Server_Indirect_Attributes"] + self.object_merge_check = object_merge_check + self.merge_object_mapping = merge_object_mapping + self.sys_dict = sys_dict + self.ssl_profile_count = 0 + + def convert(self, alb_config, nsx_lb_config, prefix, tenant): + alb_config["SSLProfile"] = [] + tenant_name, name = conv_utils.get_tenant_ref(tenant) + if not tenant: + tenant = tenant_name + if nsx_lb_config.get('LbClientSslProfiles'): + converted_objs = [] + skipped_list = [] + converted_alb_ssl = [] + na_list = [] + progressbar_count = 0 + total_size = len(nsx_lb_config['LbClientSslProfiles']) + print("\nConverting Client SSL Profile ...") + LOG.info('[SSL-PROFILE] Converting Profiles...') + + for lb_ssl in nsx_lb_config["LbClientSslProfiles"]: + try: + LOG.info('[SSL-PROFILE] Migration started for AP {}'.format(lb_ssl['display_name'])) + skipped = [val for val in lb_ssl.keys() + if val not in self.supported_client_ssl_attributes] + na_attr = [val for val in lb_ssl.keys() + if val in self.common_na_attr] + na_list.append(na_attr) + progressbar_count += 1 + name = lb_ssl.get('display_name') + if prefix: + name = prefix + '-' + name + if self.object_merge_check: + if name in self.merge_object_mapping['ssl_profile'].keys(): + name = name + "-" + lb_ssl["id"] + else: + c_ssl_temp = list(filter(lambda c_ssl: c_ssl["name"] == name, alb_config['SSLProfile'])) + if c_ssl_temp: + name = name + "-" + lb_ssl["id"] + alb_ssl = dict( + name=name, + tenant_ref=conv_utils.get_object_ref(tenant, 'tenant'), + ) + if lb_ssl.get("session_cache_enabled"): + alb_ssl['enable_ssl_session_reuse'] = lb_ssl['session_cache_enabled'] + if lb_ssl.get("session_cache_timeout"): + alb_ssl['ssl_session_timeout'] = 
lb_ssl['session_cache_timeout'] + + if lb_ssl.get("ciphers"): + converted_ciphers = self.convert_ciphers_to_valid_format(":".join(lb_ssl['ciphers'])) + alb_ssl['accepted_ciphers'] = converted_ciphers + + if lb_ssl.get("protocols"): + self.convert_protocols(lb_ssl['protocols'], alb_ssl) + if lb_ssl.get("prefer_server_ciphers"): + alb_ssl["prefer_client_cipher_ordering"] = not lb_ssl["prefer_server_ciphers"] + + skipped_list.append(skipped) + ## + if self.object_merge_check: + common_avi_util.update_skip_duplicates(alb_ssl, + alb_config['SSLProfile'], + 'ssl_profile', + converted_objs, name, None, + self.merge_object_mapping, + lb_ssl['resource_type'], prefix, + self.sys_dict['SSLProfile']) + self.ssl_profile_count += 1 + else: + alb_config['SSLProfile'].append(alb_ssl) + + val = dict( + id=lb_ssl["id"], + name=name, + resource_type=lb_ssl['resource_type'], + alb_ssl=alb_ssl + + ) + converted_alb_ssl.append(val) + + msg = "SSLProfile conversion started..." + conv_utils.print_progress_bar(progressbar_count, total_size, msg, + prefix='Progress', suffix='') + + LOG.info('[SSL-PROFILE] Migration completed for HM {}'.format(lb_ssl['display_name'])) + except: + update_count('error') + LOG.error("[SSL-PROFILE] Failed to convert Client SSLProfile: %s" % lb_ssl['display_name'], + exc_info=True) + conv_utils.add_status_row('sslprofile', None, lb_ssl['display_name'], + conv_const.STATUS_ERROR) + + indirect = self.indirect_client_ssl_attr + u_ignore = [] + ignore_for_defaults = {} + for index, skipped in enumerate(skipped_list): + conv_status = conv_utils.get_conv_status( + skipped_list[index], indirect, ignore_for_defaults, nsx_lb_config['LbClientSslProfiles'], + u_ignore, na_list[index]) + ssl_na = [val for val in na_list[index] if val not in self.common_na_attr] + conv_status["na_list"] = ssl_na + name = converted_alb_ssl[index]['name'] + ssl_id = converted_alb_ssl[index]['id'] + alb_mig_ssl = converted_alb_ssl[index]['alb_ssl'] + resource_type = 
converted_alb_ssl[index]['resource_type'] + if self.object_merge_check: + alb_mig_ssl = [pp for pp in alb_config['SSLProfile'] if + pp.get('name') == self.merge_object_mapping['ssl_profile'].get(name)] + conv_utils.add_conv_status('sslprofile', resource_type, name, conv_status, + [{'ssl_profile': alb_mig_ssl[0]}]) + else: + conv_utils.add_conv_status('sslprofile', resource_type, name, conv_status, + [{'ssl_profile': alb_mig_ssl}]) + if len(conv_status['skipped']) > 0: + LOG.debug( + '[SSL-PROFILE] Skipped Attribute {}:{}'.format(name, + conv_status['skipped'])) + + if nsx_lb_config.get('LbServerSslProfiles'): + converted_objs = [] + skipped_list = [] + converted_alb_ssl = [] + na_list = [] + progressbar_count = 0 + total_size = len(nsx_lb_config['LbServerSslProfiles']) + print("\nConverting Server SSL Profile ...") + LOG.info('[SSL-PROFILE] Converting Client SSL Profiles...') + + for lb_ssl in nsx_lb_config["LbServerSslProfiles"]: + try: + LOG.info('[SSL-PROFILE] Migration started for AP {}'.format(lb_ssl['display_name'])) + skipped = [val for val in lb_ssl.keys() + if val not in self.supported_client_ssl_attributes] + na_attr = [val for val in lb_ssl.keys() + if val in self.common_na_attr] + na_list.append(na_attr) + progressbar_count += 1 + name = lb_ssl.get('display_name') + if prefix: + name = prefix + '-' + name + if self.object_merge_check: + if name in self.merge_object_mapping['ssl_profile'].keys(): + name = name + "-" + lb_ssl["id"] + else: + s_ssl_temp = list(filter(lambda ssl: ssl["name"] == name, alb_config['SSLProfile'])) + if s_ssl_temp: + name = name + "-" + lb_ssl["id"] + alb_ssl = dict( + name=name, + tenant_ref=conv_utils.get_object_ref(tenant, 'tenant'), + ) + if lb_ssl.get("ciphers"): + converted_ciphers = self.convert_ciphers_to_valid_format(":".join(lb_ssl['ciphers'])) + alb_ssl['accepted_ciphers'] = converted_ciphers + + if lb_ssl.get("protocols"): + self.convert_protocols(lb_ssl['protocols'], alb_ssl) + + skipped_list.append(skipped) + ## + 
if self.object_merge_check: + common_avi_util.update_skip_duplicates(alb_ssl, + alb_config['SSLProfile'], + 'ssl_profile', + converted_objs, name, None, + self.merge_object_mapping, + lb_ssl['resource_type'], prefix, + self.sys_dict['SSLProfile']) + self.ssl_profile_count += 1 + else: + alb_config['SSLProfile'].append(alb_ssl) + + val = dict( + id=lb_ssl["id"], + name=name, + resource_type=lb_ssl['resource_type'], + alb_ssl=alb_ssl + + ) + converted_alb_ssl.append(val) + + msg = "SSLProfile conversion started..." + conv_utils.print_progress_bar(progressbar_count, total_size, msg, + prefix='Progress', suffix='') + + LOG.info('[SSL-PROFILE] Migration completed for HM {}'.format(lb_ssl['display_name'])) + except: + update_count('error') + LOG.error("[SSL-PROFILE] Failed to convert Server Side SSLProfile: %s" % lb_ssl['display_name'], + exc_info=True) + conv_utils.add_status_row('sslprofile', None, lb_ssl['display_name'], + conv_const.STATUS_ERROR) + + indirect = self.indirect_server_ssl_attr + u_ignore = [] + ignore_for_defaults = {} + for index, skipped in enumerate(skipped_list): + conv_status = conv_utils.get_conv_status( + skipped_list[index], indirect, ignore_for_defaults, nsx_lb_config['LbServerSslProfiles'], + u_ignore, na_list[index]) + ssl_na = [val for val in na_list[index] if val not in self.common_na_attr] + conv_status["na_list"] = ssl_na + name = converted_alb_ssl[index]['name'] + ssl_id = converted_alb_ssl[index]['id'] + alb_mig_ssl = converted_alb_ssl[index]['alb_ssl'] + resource_type = converted_alb_ssl[index]['resource_type'] + if self.object_merge_check: + alb_mig_ssl = [pp for pp in alb_config['SSLProfile'] if + pp.get('name') == self.merge_object_mapping['ssl_profile'].get(name)] + conv_utils.add_conv_status('sslprofile', resource_type, name, conv_status, + [{'ssl_profile': alb_mig_ssl[0]}]) + else: + conv_utils.add_conv_status('sslprofile', resource_type, name, conv_status, + [{'ssl_profile': alb_mig_ssl}]) + if len(conv_status['skipped']) > 0: + 
LOG.debug( + '[SSL-PROFILE] Skipped Attribute {}:{}'.format(name, + conv_status['skipped'])) + + def convert_protocols(self, protocols, alb_ssl): + accepted_version = dict( + SSL_V2="", + SSL_V3="SSL_VERSION_SSLV3", + TLS_V1="SSL_VERSION_TLS1", + TLS_V1_1="SSL_VERSION_TLS1_1", + TLS_V1_2="SSL_VERSION_TLS1_2" + + ) + alb_ssl['accepted_versions'] = [] + + for acc_ver in protocols: + acc_version = dict( + type=accepted_version[acc_ver] + ) + alb_ssl['accepted_versions'].append(acc_version) + + def convert_ciphers_to_valid_format(self, cipher_str): + cipher_str = cipher_str.replace('TLS_', '') + cipher_str = cipher_str.replace('_', '-') + cipher_str = cipher_str.replace('WITH-AES-128', 'AES128') + cipher_str = cipher_str.replace('WITH-AES-256', 'AES256') + return cipher_str \ No newline at end of file diff --git a/python/avi/migrationtools/nsxt_converter/test/avi_config.json b/python/avi/migrationtools/nsxt_converter/test/avi_config.json new file mode 100644 index 0000000000..c060d71851 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/test/avi_config.json @@ -0,0 +1,234 @@ +{ + "ApplicationProfile": [ + { + "name": "prefix-default-http-lb-app-profile", + "tenant_ref": "/api/tenant/?name=admin", + "type": "APPLICATION_PROFILE_TYPE_HTTP", + "http_profile": { + "xff_enabled": true, + "http_to_https": false, + "keepalive_timeout": 15, + "client_max_header_size": 64, + "keepalive_header": false, + "max_response_headers_size": 256, + "detect_ntlm_app": false + } + } + ], + "NetworkProfile": [ + { + "name": "prefix-default-tcp-lb-app-profile", + "profile": { + "type": "PROTOCOL_TYPE_TCP_FAST_PATH", + "tcp_fast_path_profile": { + "session_idle_timeout": 1800 + } + } + } + ], + "SSLProfile": [], + "PKIProfile": [], + "SSLKeyAndCertificate": [ + { + "name": "prefix-l7-vs-auto_created", + "tenant_ref": "/api/tenant/?name=admin", + "key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCroPmLSsz9HyR8\nCT6hpN7QU5LoQtD02Fwqb4hnl0Xvbee42bdEpKAd1c7pzXa8deSZ8KzUTMiBJmLb\n82EM/n8a7gS3AOtVrtr7yl7fpNTBTJz8wZ2wtx/n5Fj3iM+lWBVuvocEpdQxDAB7\nv8J1BypPCE+jCj1DpNIl38BDjL/SRWdaVO2Hiook7h733oYlsSe/y6r3kCl22KMH\ndEeawGNe5QL2g5qlGGBiiWQonuW8vbfCIY2MKjL4TGuOq69loASBqEiu8jEKN7UW\nD0QvGx7mx+4ktFQRuYl9WtLnnDkpPK7J18IeINUCeSOmQsBckXNDZelwYpfYF1WJ\ndIIWNXm1AgMBAAECggEALQEUDot5vzIIXiIKc1aJOrV/gznqExoc6N/AM/G92+/E\n4HrujKhieBBVpodDCF4dqKQsnarYtMz9BVIHc+HUMJqTT+2EyD4GnBnP0PMm+tz9\nGWYn1GvgiO0eyyHr2R6E2DpLq1NUldvg0JJTCl7roDe8sjMAJ5mzpUzL3KqurhMo\n9ccbe9sR7M+wQ+CI0IKfqpaX7KfRcGytfTEVQkl/iwriTdS618PjTcanfEj461gz\nIDpTsviM0v9Jj40QxjCL2UffhNQZFzZI1an5U3dfA9bLR2uVrnwkQREue1futXE0\n1QIP64mGjwb2zqWp92QBb4yN+TDcfCSQoQ+pjCaqDQKBgQDWrzphSv+Rkr3lKHiX\nCXyH/AjEpqjK8FhAH1SCw1QqCUg56H13m+sa+qNgK0wx8o/KR9FXRuQdQmPbKCQF\npddMUQU3vFIAN0FmBMMjYzTJEQSxWlFjC9oXWQNe/PqFrlbbDBvOuEh2uxW17IZt\ntz6dN+VHrPVaTIawLfKHKBUK1wKBgQDMqIpbzs4wLqeAribWDaFK/85czP9KvSuz\nKELE/CWSRk6hzmJ8hyW/S7bsVZ+666paVirK5AJpfxY+dMMJWjp4YAM+C2eZbJYa\n7goqZfdPdMIS4geUFNEefmpg2xaUdaP29CsYoXSHcfSVq15Bf7W1Sy+hdFAQBVCq\n/nViJvv6UwKBgCDYoHsASYfRRsKH5aNeLTJ1CjZkNVEPYsE0ye5Tyk3AK8/IVWH2\ndgsjq9lr5nWB4iQVxt6ulvovDS4bvx8qF32yq7SyWG12BsiAFAcXB513wBsOcYNQ\n7eZClTR0SjQnz1wmRx7irRuVViYGcS7aKXdOdQL5YHSXXsgXz5KEPGvzAoGBAJJ2\nR1X+gTagHMWNXEOxy2019kQZfNXK3ZxAQtBgNq8g26MWZQsDWPM2xzkI6d/GScqO\ngd9jH4rKJWwa+X0W8mM1KvzbApAc0quBEQtYn41gXIdQ9OV3pFRzWcPo5pbmPao9\nAR6GZTONTOFTm4e6UprkrLiZ2bYtZ5aHtg/BIHD7AoGAM+DGveHOQh6/cDMJPnqt\n1YR21oDD57i5oYRvxQY/PsemwOLmsbaPJd9uRgb+dZ6IfwX5+fAn0P3VCAQr5dPl\nQX4+6QlJzWqLbd6t++pXZ2XNz5kohKenTRMKuBriSM4q+OxMlu2suVHHlRuafM0u\nI6sWkM2rr0MGMqa0/KrC1vI=\n-----END PRIVATE KEY-----\n", + "certificate": { + "certificate": "-----BEGIN 
CERTIFICATE-----\nMIIDAzCCAesCAgPoMA0GCSqGSIb3DQEBCwUAMEcxCzAJBgNVBAYTAlVTMRUwEwYD\nVQQKDAxBdmkgTmV0d29ya3MxITAfBgNVBAMMGGFnYXJ3YWxtZS1hMDEudm13YXJl\nLmNvbTAeFw0yMjAyMDcwNTM4NDlaFw0zMjAyMDUwNTM4NDlaMEcxCzAJBgNVBAYT\nAlVTMRUwEwYDVQQKDAxBdmkgTmV0d29ya3MxITAfBgNVBAMMGGFnYXJ3YWxtZS1h\nMDEudm13YXJlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKug\n+YtKzP0fJHwJPqGk3tBTkuhC0PTYXCpviGeXRe9t57jZt0SkoB3VzunNdrx15Jnw\nrNRMyIEmYtvzYQz+fxruBLcA61Wu2vvKXt+k1MFMnPzBnbC3H+fkWPeIz6VYFW6+\nhwSl1DEMAHu/wnUHKk8IT6MKPUOk0iXfwEOMv9JFZ1pU7YeKiiTuHvfehiWxJ7/L\nqveQKXbYowd0R5rAY17lAvaDmqUYYGKJZCie5by9t8IhjYwqMvhMa46rr2WgBIGo\nSK7yMQo3tRYPRC8bHubH7iS0VBG5iX1a0uecOSk8rsnXwh4g1QJ5I6ZCwFyRc0Nl\n6XBil9gXVYl0ghY1ebUCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAgBkPt4G+3Xdl\nATstXLEYquukchQ1a2D87En7W63SZOeIutGF3cOJN78YlBAw4hddhvHsspEucoyo\ngSvdnKUbMeIMZK858fEU7k2iUYcY1BUnUyQwHSUGAafr2XR3ddEpplGB0ec6oVjL\nNnnwD/PKiQ/9L6dQEPmFA3X1D+/Dpof+znn37VbIYIwxLDsMtgbkwT0xA0iu21Ay\nhsc25kmwZz6lATZMPWvZnkHbE0frjg4i172sgZU2YHUGSViv0hbzSN3xSwNaNpGL\nA50AgRfa7K8brIAQVT4+bhQ8bwg9hjgyb8ogUEKLGvGfcvb8LPXM0psvbCvimRy5\nmLCYenKYcA==\n-----END CERTIFICATE-----\n" + }, + "type": "SSL_CERTIFICATE_TYPE_VIRTUALSERVICE" + }, + { + "name": "prefix-l7-pool-auto_created", + "tenant_ref": "/api/tenant/?name=admin", + "key": "-----BEGIN PRIVATE 
KEY-----\nMIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQCsX2fN6wYZ6iZg\nNJ/vqznKfA44fYU9YflRmvwY81cLn1m0f1L9MEy7vIYeH3qklfuD8A71soiENpuz\nas8LmySeP+Vqj5SHuMnDWwacMjufTvrQN7g1dZBQJ5B4weKLwNfVoSNPCy5urifW\nkihX1dk+ZwKBZawM8gCa7AQe8ab3HWJfUqK46kk7vbG54wo0pGudnSTzsPO8iFHa\n0a6JkxCbPlCojMXYBuHyddfRcOio6jhsNM4mxESVbUev8ws3ew4aQnLTccvJerVp\nZiTPSL4x8eKP+939XGf/x6TBIub0q7E05miNsfLOMlTd5JsIFm5RxXM5dXwNkPwR\n2w8Qb3I7AgMBAAECggEBAJkLLFM8NPKVXvVURBy73lfVxZqKd96EOqUmAmb0x3he\nR3tnAPwkAj09qabdNDsln2cJoX6swWECS0QFtUcMvHp1KPBwAuRuQVXeYAQW5bEQ\nosh0cG2msDc6qZ9OJiMHWZsnsv3fY56Jqn1sz70M9hHdy++vog7sX3Ut0IrY6/HD\nBCFYP/vJ9FlqDR90cLm4I7QqdoKpAUCM9S2qWfz7pzNfUJM/AWz/hYqzrZmtqT3x\nTc+u3snczXpVeUmw5FSJNkjNV3PJ0m6EcPWjLY3JlClywKedkbAc1ayog+HvbyoH\nOwDhA9adSu2CKcu0yKR6wgIXy/vYy+ASslqjFFZUTMECgYEA1PkHNcXf/CBJ8YqF\n2k4ihRRQLegcS3sArrSphcqBl0mtXoN4GX26UCf21hAYAYtbk7+ThF5/PBe+F0Xq\n8tfIVxRDg6CKgoTQXct4e6eDziTbt8h3BUPtpia8n6+UiNcCg0HPbM6EisX+4XMq\n77Uu3qnfO7apydWExytOGr3KMokCgYEAzzKJb4A9NkSEAA9CFc98OkjUQ+ivjC4b\n/+ExHj5CpGu0uR2aeAiRuA6hdnFHbr75QPoBigMjH911PXnFuUjrWdUcIOiZOo4A\nc8PIBZyW6PaeWYt23jChm6K+QR7Ge7WgtdMCIF7Akp/x4h5WiZUqBLKAxacP3181\ntb+L/bJJ3aMCgYEAp1F+eKCc+Fflv/40au438JWk6GwSzro0Lw2AobwXU+RucHKZ\nSG2ekprTMhoywgPaTpmUK8qR64Q7s80aaO9eVgt1f1QBfckaapYQchWJV8tdU2YL\nG/fiHUcRqeMnrto8/yIU9EbLmrszI6x3bxvJVuXem+tGOWumhVNgaWHyGQECgYEA\njhUcs85fq3AOiK/t5GD00k8Q6ESO5PTVlIFLGm8dafzF4E9RltBhhLLbvAnwLxxc\ndlc6aPa/xmEpvW9czDMZ2O9Pq611Lfckjj4KUvbPcY90hPkjGTiGCeDu0F8XQswJ\nGcZJ5gKz0ZhG2YVuWEhmmPQDZAas1sNGZIympeD10DsCgYEA0NQVQwKRlG+0pwOl\nz35PhS29YGIffvoc3vXKdDU/A1CeI+9UNKGsT01r5NM/sk6Cxugv1bH7s64hyZwG\n+LzjIV4C/OlRREXZZzn+ZiAgp1y5qHkAFgYW9k+q1aXp5wEwD467NtyrsMOKKb9d\nc9NcY7fVJiNcCpumuCH7Gz52VuE=\n-----END PRIVATE KEY-----\n", + "certificate": { + "certificate": "-----BEGIN 
CERTIFICATE-----\nMIIDAzCCAesCAgPoMA0GCSqGSIb3DQEBCwUAMEcxCzAJBgNVBAYTAlVTMRUwEwYD\nVQQKDAxBdmkgTmV0d29ya3MxITAfBgNVBAMMGGFnYXJ3YWxtZS1hMDEudm13YXJl\nLmNvbTAeFw0yMjAyMDcwNTM4NDlaFw0zMjAyMDUwNTM4NDlaMEcxCzAJBgNVBAYT\nAlVTMRUwEwYDVQQKDAxBdmkgTmV0d29ya3MxITAfBgNVBAMMGGFnYXJ3YWxtZS1h\nMDEudm13YXJlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKxf\nZ83rBhnqJmA0n++rOcp8Djh9hT1h+VGa/BjzVwufWbR/Uv0wTLu8hh4feqSV+4Pw\nDvWyiIQ2m7NqzwubJJ4/5WqPlIe4ycNbBpwyO59O+tA3uDV1kFAnkHjB4ovA19Wh\nI08LLm6uJ9aSKFfV2T5nAoFlrAzyAJrsBB7xpvcdYl9SorjqSTu9sbnjCjSka52d\nJPOw87yIUdrRromTEJs+UKiMxdgG4fJ119Fw6KjqOGw0zibERJVtR6/zCzd7DhpC\nctNxy8l6tWlmJM9IvjHx4o/73f1cZ//HpMEi5vSrsTTmaI2x8s4yVN3kmwgWblHF\nczl1fA2Q/BHbDxBvcjsCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAiuu4mCFiKfX6\nRPSew6jyOB0uK4PvrIeOcyKxbHWf0irbVfopMf4S2b2aaMHREQ6rj46DljLzvWhA\no3pZaZLbuxcFuf8X184ogwYmy0BkDMB0dG6tr/ZhxRgJB+7zeA/2dkBaFMQgR+oX\n4A93vL+8bJqyIiGodj67Nbjy+Vts8KZwSGREsop872HBhjNx+UaNMNXqrs5Y5R6m\n/IJkCqucPq7JW5JTewhdNfZzs1RKDprB6GNNZO1gwvYMdGkKGs5SurX0RO35v94x\nEFax9nXh2Zj92DENWyRv33W9c5iPEVBuZhNKPJnyNqeJ1YrbGLgOgB6MJmvtYzsH\nG6NYTBBshA==\n-----END CERTIFICATE-----\n" + }, + "type": "SSL_CERTIFICATE_TYPE_VIRTUALSERVICE" + } + ], + "ApplicationPersistenceProfile": [ + { + "name": "prefix-default-source-ip-lb-persistence-profile", + "ip_persistence_profile": { + "ip_persistent_timeout": 300 + }, + "tenant_ref": "/api/tenant/?name=admin", + "persistence_type": "PERSISTENCE_TYPE_CLIENT_IP_ADDRESS" + } + ], + "HealthMonitor": [ + { + "name": "prefix-default-http-lb-monitor", + "failed_checks": 3, + "receive_timeout": 5, + "send_interval": 5, + "successful_checks": 3, + "monitor_port": 80, + "tenant_ref": "/api/tenant/?name=admin", + "type": "HEALTH_MONITOR_HTTP", + "http_monitor": { + "http_request": "/", + "http_request_body": null, + "http_response": null, + "http_response_code": [ + "HTTP_2XX", + "HTTP_3XX" + ] + } + }, + { + "name": "prefix-default-https-lb-monitor", + "failed_checks": 3, + "receive_timeout": 5, + "send_interval": 5, 
+ "successful_checks": 3, + "monitor_port": 443, + "tenant_ref": "/api/tenant/?name=admin", + "type": "HEALTH_MONITOR_HTTPS", + "https_monitor": { + "http_request": "/", + "http_request_body": null, + "http_response": null, + "http_response_code": [ + "HTTP_2XX", + "HTTP_3XX" + ] + } + } + ], + "IpAddrGroup": [], + "Pool": [ + { + "name": "prefix-l4-pool", + "servers": [ + { + "ip": { + "addr": "192.168.0.1", + "type": "V4" + }, + "description": "server", + "port": 80, + "ratio": 1, + "enabled": true + } + ], + "lb_algorithm": "LB_ALGORITHM_ROUND_ROBIN", + "cloud_ref": "/api/cloud/?tenant=admin&name=nsxt_cloud_overlay", + "tenant_ref": "/api/tenant/?name=admin", + "conn_pool_properties": { + "upstream_connpool_server_max_cache": 6 + }, + "min_servers_up": 1, + "health_monitor_refs": [ + "/api/healthmonitor/?tenant=admin&name=prefix-default-http-lb-monitor" + ], + "persistence_profile_ref": "/api/applicationpersistenceprofile/?tenant=admin&name=prefix-default-source-ip-lb-persistence-profile", + "default_port": 80, + "tier1_lr": "/infra/tier-1s/London_Tier1Gateway1" + }, + { + "name": "prefix-l7-pool", + "servers": [ + { + "ip": { + "addr": "192.168.0.2", + "type": "V4" + }, + "description": "server", + "port": 80, + "ratio": 1, + "enabled": true + } + ], + "lb_algorithm": "LB_ALGORITHM_ROUND_ROBIN", + "cloud_ref": "/api/cloud/?tenant=admin&name=nsxt_cloud_overlay", + "tenant_ref": "/api/tenant/?name=admin", + "conn_pool_properties": { + "upstream_connpool_server_max_cache": 6 + }, + "min_servers_up": 1, + "health_monitor_refs": [ + "/api/healthmonitor/?tenant=admin&name=prefix-default-https-lb-monitor" + ], + "ssl_key_and_certificate_ref": "/api/sslkeyandcertificate/?tenant=admin&name=prefix-l7-pool-auto_created", + "persistence_profile_ref": "/api/applicationpersistenceprofile/?tenant=admin&name=prefix-default-source-ip-lb-persistence-profile", + "default_port": 8080, + "tier1_lr": "/infra/tier-1s/London_Tier1Gateway1" + } + ], + "VirtualService": [ + { + "name": 
"prefix-l4-vs", + "enabled": true, + "cloud_ref": "/api/cloud/?tenant=admin&name=nsxt_cloud_overlay", + "tenant_ref": "/api/tenant/?name=admin", + "vsvip_ref": "/api/vsvip/?name=prefix-l4-vs-vsvip&cloud=nsxt_cloud_overlay", + "services": [ + { + "port": 80, + "enable_ssl": false + } + ], + "network_profile_ref": "/api/networkprofile/?tenant=admin&name=prefix-default-tcp-lb-app-profile", + "application_profile_ref": "/api/applicationprofile/?tenant=admin&name=System-L4-Application", + "performance_limits": { + "max_concurrent_connections": 1000 + }, + "pool_ref": "/api/pool/?tenant=admin&name=prefix-l4-pool&cloud=nsxt_cloud_overlay" + }, + { + "name": "prefix-l7-vs", + "enabled": true, + "cloud_ref": "/api/cloud/?tenant=admin&name=nsxt_cloud_overlay", + "tenant_ref": "/api/tenant/?name=admin", + "vsvip_ref": "/api/vsvip/?name=prefix-l7-vs-vsvip&cloud=nsxt_cloud_overlay", + "services": [ + { + "port": 80, + "enable_ssl": true + } + ], + "application_profile_ref": "/api/applicationprofile/?tenant=admin&name=prefix-default-http-lb-app-profile", + "network_profile_ref": "/api/networkprofile/?tenant=admin&name=System-TCP-Proxy", + "performance_limits": { + "max_concurrent_connections": 100 + }, + "ssl_key_and_certificate_refs": [ + "/api/sslkeyandcertificate/?tenant=admin&name=prefix-l7-vs-auto_created" + ], + "pool_ref": "/api/pool/?tenant=admin&name=prefix-l7-pool&cloud=nsxt_cloud_overlay" + } + ], + "VsVip": [ + { + "name": "prefix-l4-vs-vsvip", + "tier1_lr": "/infra/tier-1s/London_Tier1Gateway1", + "cloud_ref": "/api/cloud/?tenant=admin&name=nsxt_cloud_overlay", + "vip": [ + { + "vip_id": "1", + "ip_address": { + "addr": "10.10.0.1", + "type": "V4" + } + } + ] + }, + { + "name": "prefix-l7-vs-vsvip", + "tier1_lr": "/infra/tier-1s/London_Tier1Gateway1", + "cloud_ref": "/api/cloud/?tenant=admin&name=nsxt_cloud_overlay", + "vip": [ + { + "vip_id": "1", + "ip_address": { + "addr": "10.10.0.2", + "type": "V4" + } + } + ] + } + ] +} \ No newline at end of file diff --git 
a/python/avi/migrationtools/nsxt_converter/test/config.json b/python/avi/migrationtools/nsxt_converter/test/config.json new file mode 100644 index 0000000000..fac37c1597 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/test/config.json @@ -0,0 +1,1587 @@ +{ + "LBServices": [ + { + "_revision": 1, + "_create_time": 1643797106577, + "_create_user": "admin", + "_last_modified_time": 1643972011268, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "description": "Load balancers", + "display_name": "lb", + "id": "lb", + "resource_type": "LBService", + "tags": [ + { + "scope": "NLB-Lb-ID", + "tag": "lb" + } + ], + "parent_path": "/infra", + "path": "/infra/lb-services/lb", + "realization_id": "d9a2630e-eefd-4d66-b4a3-7285370a8cb1", + "relative_path": "lb", + "unique_id": "d9a2630e-eefd-4d66-b4a3-7285370a8cb1", + "marked_for_delete": false, + "overridden": false, + "connectivity_path": "/infra/tier-1s/London_Tier1Gateway1", + "enabled": true, + "error_log_level": "INFO", + "relax_scale_validation": false, + "size": "SMALL" + } + ], + "LbMonitorProfiles": [ + { + "request_url": "/", + "request_method": "GET", + "request_version": "HTTP_VERSION_1_1", + "response_status_codes": [ + 200, + 300, + 301, + 302, + 304, + 307 + ], + "resource_type": "LBHttpMonitorProfile", + "id": "default-http-lb-monitor", + "display_name": "default-http-lb-monitor", + "path": "/infra/lb-monitor-profiles/default-http-lb-monitor", + "relative_path": "default-http-lb-monitor", + "parent_path": "/infra", + "unique_id": "c5d5fc24-656f-44c9-93e1-5bcdd02b9a5d", + "realization_id": "c5d5fc24-656f-44c9-93e1-5bcdd02b9a5d", + "marked_for_delete": false, + "overridden": false, + "monitor_port": 80, + "interval": 5, + "timeout": 5, + "rise_count": 3, + "fall_count": 3, + "_create_time": 1643751877517, + "_create_user": "system", + "_last_modified_time": 1643751877517, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": 
"NOT_PROTECTED", + "_revision": 0 + }, + { + "request_url": "/", + "request_method": "GET", + "request_version": "HTTP_VERSION_1_1", + "response_status_codes": [ + 200, + 300, + 301, + 302, + 304, + 307 + ], + "resource_type": "LBHttpsMonitorProfile", + "id": "default-https-lb-monitor", + "display_name": "default-https-lb-monitor", + "path": "/infra/lb-monitor-profiles/default-https-lb-monitor", + "relative_path": "default-https-lb-monitor", + "parent_path": "/infra", + "unique_id": "ae2eabe3-8bdd-4801-ba7e-471a19411f79", + "realization_id": "ae2eabe3-8bdd-4801-ba7e-471a19411f79", + "marked_for_delete": false, + "overridden": false, + "monitor_port": 443, + "interval": 5, + "timeout": 5, + "rise_count": 3, + "fall_count": 3, + "_create_time": 1643751877599, + "_create_user": "system", + "_last_modified_time": 1643751877599, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "data_length": 56, + "resource_type": "LBIcmpMonitorProfile", + "id": "default-icmp-lb-monitor", + "display_name": "default-icmp-lb-monitor", + "path": "/infra/lb-monitor-profiles/default-icmp-lb-monitor", + "relative_path": "default-icmp-lb-monitor", + "parent_path": "/infra", + "unique_id": "11f257c0-c2e6-4a12-a0bf-7b2cce6556be", + "realization_id": "11f257c0-c2e6-4a12-a0bf-7b2cce6556be", + "marked_for_delete": false, + "overridden": false, + "interval": 5, + "timeout": 5, + "rise_count": 3, + "fall_count": 3, + "_create_time": 1643751877661, + "_create_user": "system", + "_last_modified_time": 1643751877661, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "max_fails": 5, + "timeout": 5, + "resource_type": "LBPassiveMonitorProfile", + "id": "default-passive-lb-monitor", + "display_name": "default-passive-lb-monitor", + "path": "/infra/lb-monitor-profiles/default-passive-lb-monitor", + "relative_path": "default-passive-lb-monitor", + "parent_path": 
"/infra", + "unique_id": "0b9d4539-5cdd-4a9e-9067-d17e79544163", + "realization_id": "0b9d4539-5cdd-4a9e-9067-d17e79544163", + "marked_for_delete": false, + "overridden": false, + "_create_time": 1643751877715, + "_create_user": "system", + "_last_modified_time": 1643751877715, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "resource_type": "LBTcpMonitorProfile", + "id": "default-tcp-lb-monitor", + "display_name": "default-tcp-lb-monitor", + "path": "/infra/lb-monitor-profiles/default-tcp-lb-monitor", + "relative_path": "default-tcp-lb-monitor", + "parent_path": "/infra", + "unique_id": "6c80bf44-1ec8-4d53-8d9d-dafaccf14d7f", + "realization_id": "6c80bf44-1ec8-4d53-8d9d-dafaccf14d7f", + "marked_for_delete": false, + "overridden": false, + "interval": 5, + "timeout": 5, + "rise_count": 3, + "fall_count": 3, + "_create_time": 1643751877343, + "_create_user": "system", + "_last_modified_time": 1643751877343, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "send": "10", + "receive": "10", + "resource_type": "LBUdpMonitorProfile", + "id": "udp-lb-monitor", + "display_name": "udp-lb-monitor", + "path": "/infra/lb-monitor-profiles/udp-lb-monitor", + "relative_path": "udp-lb-monitor", + "parent_path": "/infra", + "unique_id": "941ad999-f7dd-48c0-84d2-31582e06871c", + "realization_id": "941ad999-f7dd-48c0-84d2-31582e06871c", + "marked_for_delete": false, + "overridden": false, + "monitor_port": 80, + "interval": 5, + "timeout": 5, + "rise_count": 3, + "fall_count": 3, + "_create_time": 1644228103820, + "_create_user": "admin", + "_last_modified_time": 1644228167871, + "_last_modified_user": "admin", + "_system_owned": false, + "_protection": "NOT_PROTECTED", + "_revision": 1 + } + ], + "LbPools": [ + { + "_revision": 4, + "_create_time": 0, + "_create_user": "unknown", + "_last_modified_time": 1644475153831, + 
"_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "description": "Server Pools", + "display_name": "l4-pool", + "id": "l4-pool", + "resource_type": "LBPool", + "tags": [ + { + "scope": "NLB-Lb-ID", + "tag": "lb" + } + ], + "parent_path": "/infra", + "path": "/infra/lb-pools/l4-pool", + "realization_id": "971e5a98-6032-4cb5-bf61-b6692bfc92aa", + "relative_path": "l4-pool", + "unique_id": "971e5a98-6032-4cb5-bf61-b6692bfc92aa", + "marked_for_delete": false, + "overridden": false, + "active_monitor_paths": [ + "/infra/lb-monitor-profiles/default-http-lb-monitor" + ], + "algorithm": "ROUND_ROBIN", + "members": [ + { + "admin_state": "ENABLED", + "backup_member": false, + "display_name": "server", + "ip_address": "192.168.0.1", + "port": "80", + "weight": 1 + } + ], + "min_active_members": 1, + "passive_monitor_path": "/infra/lb-monitor-profiles/default-passive-lb-monitor", + "snat_translation": { + "type": "LBSnatDisabled" + }, + "tcp_multiplexing_enabled": true, + "tcp_multiplexing_number": 6 + }, + { + "_revision": 3, + "_create_time": 0, + "_create_user": "unknown", + "_last_modified_time": 1644567710183, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "description": "Server pools", + "display_name": "l7-pool", + "id": "l7-pool", + "resource_type": "LBPool", + "tags": [ + { + "scope": "NLB-Lb-ID", + "tag": "lb" + } + ], + "parent_path": "/infra", + "path": "/infra/lb-pools/l7-pool", + "realization_id": "6d833ee5-493a-4f90-a768-3f7e2914dd0c", + "relative_path": "l7-pool", + "unique_id": "6d833ee5-493a-4f90-a768-3f7e2914dd0c", + "marked_for_delete": false, + "overridden": false, + "active_monitor_paths": [ + "/infra/lb-monitor-profiles/default-http-lb-monitor" + ], + "algorithm": "ROUND_ROBIN", + "members": [ + { + "admin_state": "ENABLED", + "backup_member": false, + "display_name": "server", + "ip_address": "192.168.0.2", + "port": "80", + "weight": 1 + } + ], + 
"min_active_members": 1, + "passive_monitor_path": "/infra/lb-monitor-profiles/default-passive-lb-monitor", + "snat_translation": { + "ip_addresses": [ + { + "ip_address": "10.10.0.13" + } + ], + "type": "LBSnatIpPool" + }, + "tcp_multiplexing_enabled": true, + "tcp_multiplexing_number": 6 + }, + { + "_revision": 3, + "_create_time": 0, + "_create_user": "unknown", + "_last_modified_time": 1644839765572, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "display_name": "pool1", + "id": "pool1", + "resource_type": "LBPool", + "parent_path": "/infra", + "path": "/infra/lb-pools/pool1", + "realization_id": "2bec0b52-7e37-4d19-ae8e-a9d19b86fd8d", + "relative_path": "pool1", + "unique_id": "2bec0b52-7e37-4d19-ae8e-a9d19b86fd8d", + "marked_for_delete": false, + "overridden": false, + "active_monitor_paths": [ + "/infra/lb-monitor-profiles/default-icmp-lb-monitor" + ], + "algorithm": "ROUND_ROBIN", + "member_group": { + "group_path": "/infra/domains/default/groups/Testing-Groups", + "ip_revision_filter": "IPV4" + }, + "min_active_members": 1, + "snat_translation": { + "type": "LBSnatDisabled" + }, + "tcp_multiplexing_enabled": true, + "tcp_multiplexing_number": 6 + }, + { + "_revision": 0, + "_create_time": 1644228721462, + "_create_user": "admin", + "_last_modified_time": 1644228721462, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "display_name": "sorry-pool-l4", + "id": "sorry-pool-l4", + "resource_type": "LBPool", + "parent_path": "/infra", + "path": "/infra/lb-pools/sorry-pool-l4", + "realization_id": "85b99cb1-9f5a-42e1-a928-fac73d41651d", + "relative_path": "sorry-pool-l4", + "unique_id": "85b99cb1-9f5a-42e1-a928-fac73d41651d", + "marked_for_delete": false, + "overridden": false, + "algorithm": "ROUND_ROBIN", + "members": [ + { + "admin_state": "ENABLED", + "backup_member": true, + "display_name": "l4-pool-member", + "ip_address": "10.10.0.8", + "port": "80", + "weight": 
1 + } + ], + "min_active_members": 1, + "snat_translation": { + "type": "LBSnatAutoMap" + }, + "tcp_multiplexing_enabled": false, + "tcp_multiplexing_number": 6 + }, + { + "_revision": 1, + "_create_time": 0, + "_create_user": "unknown", + "_last_modified_time": 1644229951170, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "display_name": "sorry-pool-l7", + "id": "sorry-pool-l7", + "resource_type": "LBPool", + "parent_path": "/infra", + "path": "/infra/lb-pools/sorry-pool-l7", + "realization_id": "4e7a7600-0932-44fe-b00c-4c2b5cf2e808", + "relative_path": "sorry-pool-l7", + "unique_id": "4e7a7600-0932-44fe-b00c-4c2b5cf2e808", + "marked_for_delete": false, + "overridden": false, + "active_monitor_paths": [ + "/infra/lb-monitor-profiles/default-https-lb-monitor" + ], + "algorithm": "ROUND_ROBIN", + "member_group": { + "group_path": "/infra/domains/default/groups/NSX-POOL-GROUP", + "ip_revision_filter": "IPV4" + }, + "min_active_members": 1, + "snat_translation": { + "type": "LBSnatAutoMap" + }, + "tcp_multiplexing_enabled": false, + "tcp_multiplexing_number": 6 + }, + { + "_revision": 0, + "_create_time": 1644228785019, + "_create_user": "admin", + "_last_modified_time": 1644228785019, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "display_name": "sorry-pool-udp", + "id": "sorry-pool-udp", + "resource_type": "LBPool", + "parent_path": "/infra", + "path": "/infra/lb-pools/sorry-pool-udp", + "realization_id": "6b3670c0-6015-4c58-935d-46dbc3af1cb9", + "relative_path": "sorry-pool-udp", + "unique_id": "6b3670c0-6015-4c58-935d-46dbc3af1cb9", + "marked_for_delete": false, + "overridden": false, + "active_monitor_paths": [ + "/infra/lb-monitor-profiles/udp-lb-monitor" + ], + "algorithm": "ROUND_ROBIN", + "member_group": { + "group_path": "/infra/domains/default/groups/Testing-Groups", + "ip_revision_filter": "IPV4" + }, + "min_active_members": 1, + "snat_translation": { + 
"type": "LBSnatAutoMap" + }, + "tcp_multiplexing_enabled": false, + "tcp_multiplexing_number": 6 + }, + { + "_revision": 1, + "_create_time": 0, + "_create_user": "unknown", + "_last_modified_time": 1644229968196, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "display_name": "sorrypool-l7-1", + "id": "sorrypool-l7-1", + "resource_type": "LBPool", + "parent_path": "/infra", + "path": "/infra/lb-pools/sorrypool-l7-1", + "realization_id": "cde6a5dd-7ebf-4255-9536-8539da5d5ca4", + "relative_path": "sorrypool-l7-1", + "unique_id": "cde6a5dd-7ebf-4255-9536-8539da5d5ca4", + "marked_for_delete": false, + "overridden": false, + "active_monitor_paths": [ + "/infra/lb-monitor-profiles/default-http-lb-monitor" + ], + "algorithm": "ROUND_ROBIN", + "member_group": { + "group_path": "/infra/domains/default/groups/NSX-POOL-GROUP", + "ip_revision_filter": "IPV4" + }, + "min_active_members": 1, + "snat_translation": { + "type": "LBSnatAutoMap" + }, + "tcp_multiplexing_enabled": false, + "tcp_multiplexing_number": 6 + }, + { + "_revision": 1, + "_create_time": 0, + "_create_user": "unknown", + "_last_modified_time": 1644228178994, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "display_name": "udp-pool", + "id": "udp-pool", + "resource_type": "LBPool", + "parent_path": "/infra", + "path": "/infra/lb-pools/udp-pool", + "realization_id": "3d8b3327-1344-4806-98a3-5c819ce50422", + "relative_path": "udp-pool", + "unique_id": "3d8b3327-1344-4806-98a3-5c819ce50422", + "marked_for_delete": false, + "overridden": false, + "active_monitor_paths": [ + "/infra/lb-monitor-profiles/udp-lb-monitor" + ], + "algorithm": "ROUND_ROBIN", + "member_group": { + "group_path": "/infra/domains/default/groups/Testing-Groups", + "ip_revision_filter": "IPV4" + }, + "min_active_members": 1, + "snat_translation": { + "type": "LBSnatAutoMap" + }, + "tcp_multiplexing_enabled": false, + "tcp_multiplexing_number": 6 + } 
+ ], + "LbAppProfiles": [ + { + "x_forwarded_for": "INSERT", + "http_redirect_to_https": false, + "ntlm": false, + "idle_timeout": 15, + "request_header_size": 1024, + "response_timeout": 60, + "response_header_size": 4096, + "response_buffering": false, + "server_keep_alive": false, + "resource_type": "LBHttpProfile", + "id": "Http-app1", + "display_name": "Http-app1", + "path": "/infra/lb-app-profiles/Http-app1", + "relative_path": "Http-app1", + "parent_path": "/infra", + "unique_id": "ffbe0c58-8158-4b1f-861c-5020b5a12dcb", + "realization_id": "ffbe0c58-8158-4b1f-861c-5020b5a12dcb", + "marked_for_delete": false, + "overridden": false, + "_create_time": 1644474224082, + "_create_user": "admin", + "_last_modified_time": 1644815073141, + "_last_modified_user": "admin", + "_system_owned": false, + "_protection": "NOT_PROTECTED", + "_revision": 1 + }, + { + "http_redirect_to_https": false, + "ntlm": false, + "idle_timeout": 15, + "request_header_size": 1024, + "response_timeout": 60, + "response_header_size": 4096, + "response_buffering": false, + "server_keep_alive": false, + "resource_type": "LBHttpProfile", + "id": "default-http-lb-app-profile", + "display_name": "default-http-lb-app-profile", + "path": "/infra/lb-app-profiles/default-http-lb-app-profile", + "relative_path": "default-http-lb-app-profile", + "parent_path": "/infra", + "unique_id": "9b5c76e0-c653-41c3-bb92-2b31b7140d9e", + "realization_id": "9b5c76e0-c653-41c3-bb92-2b31b7140d9e", + "marked_for_delete": false, + "overridden": false, + "_create_time": 1643751878205, + "_create_user": "system", + "_last_modified_time": 1643751878205, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "idle_timeout": 1800, + "close_timeout": 8, + "ha_flow_mirroring_enabled": false, + "resource_type": "LBFastTcpProfile", + "id": "default-tcp-lb-app-profile", + "display_name": "default-tcp-lb-app-profile", + "path": 
"/infra/lb-app-profiles/default-tcp-lb-app-profile", + "relative_path": "default-tcp-lb-app-profile", + "parent_path": "/infra", + "unique_id": "91633ac2-a2f4-4c88-8d1a-63f9240cd055", + "realization_id": "91633ac2-a2f4-4c88-8d1a-63f9240cd055", + "marked_for_delete": false, + "overridden": false, + "_create_time": 1643751878275, + "_create_user": "system", + "_last_modified_time": 1643751878275, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "idle_timeout": 300, + "flow_mirroring_enabled": false, + "resource_type": "LBFastUdpProfile", + "id": "default-udp-lb-app-profile", + "display_name": "default-udp-lb-app-profile", + "path": "/infra/lb-app-profiles/default-udp-lb-app-profile", + "relative_path": "default-udp-lb-app-profile", + "parent_path": "/infra", + "unique_id": "fbc5c4f8-1834-4062-806b-b917883bc640", + "realization_id": "fbc5c4f8-1834-4062-806b-b917883bc640", + "marked_for_delete": false, + "overridden": false, + "_create_time": 1643751878320, + "_create_user": "system", + "_last_modified_time": 1643751878320, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + } + ], + "LbClientSslProfiles": [ + { + "_revision": 0, + "_create_time": 1643751878612, + "_create_user": "system", + "_last_modified_time": 1643751878612, + "_last_modified_user": "system", + "_protection": "NOT_PROTECTED", + "_system_owned": true, + "display_name": "default-balanced-client-ssl-profile", + "id": "default-balanced-client-ssl-profile", + "resource_type": "LBClientSslProfile", + "parent_path": "/infra", + "path": "/infra/lb-client-ssl-profiles/default-balanced-client-ssl-profile", + "realization_id": "fbd9b058-8d86-4be2-85ca-5df582adc065", + "relative_path": "default-balanced-client-ssl-profile", + "unique_id": "fbd9b058-8d86-4be2-85ca-5df582adc065", + "marked_for_delete": false, + "overridden": false, + "cipher_group_label": "BALANCED", + "ciphers": 
[ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_128_CBC_SHA256", + "TLS_RSA_WITH_AES_128_CBC_SHA" + ], + "is_fips": true, + "is_secure": true, + "prefer_server_ciphers": true, + "protocols": [ + "TLS_V1_1", + "TLS_V1_2" + ], + "session_cache_enabled": true, + "session_cache_timeout": 300 + }, + { + "_revision": 0, + "_create_time": 1643751878578, + "_create_user": "system", + "_last_modified_time": 1643751878578, + "_last_modified_user": "system", + "_protection": "NOT_PROTECTED", + "_system_owned": true, + "display_name": "default-high-compatibility-client-ssl-profile", + "id": "default-high-compatibility-client-ssl-profile", + "resource_type": "LBClientSslProfile", + "parent_path": "/infra", + "path": "/infra/lb-client-ssl-profiles/default-high-compatibility-client-ssl-profile", + "realization_id": "3e8a841e-38bf-4ca6-a071-60db52f76099", + "relative_path": "default-high-compatibility-client-ssl-profile", + "unique_id": "3e8a841e-38bf-4ca6-a071-60db52f76099", + "marked_for_delete": false, + "overridden": false, + "cipher_group_label": "HIGH_COMPATIBILITY", + "ciphers": [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_128_CBC_SHA256", + "TLS_RSA_WITH_AES_128_CBC_SHA", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA" + ], + "is_fips": true, + "is_secure": false, + "prefer_server_ciphers": true, + "protocols": [ + "TLS_V1_1", + "TLS_V1_2" + ], + "session_cache_enabled": true, + "session_cache_timeout": 300 + }, + { + "_revision": 0, + "_create_time": 
1643751878632, + "_create_user": "system", + "_last_modified_time": 1643751878632, + "_last_modified_user": "system", + "_protection": "NOT_PROTECTED", + "_system_owned": true, + "display_name": "default-high-security-client-ssl-profile", + "id": "default-high-security-client-ssl-profile", + "resource_type": "LBClientSslProfile", + "parent_path": "/infra", + "path": "/infra/lb-client-ssl-profiles/default-high-security-client-ssl-profile", + "realization_id": "c72ca854-667b-4c2e-9be0-4d07478f9300", + "relative_path": "default-high-security-client-ssl-profile", + "unique_id": "c72ca854-667b-4c2e-9be0-4d07478f9300", + "marked_for_delete": false, + "overridden": false, + "cipher_group_label": "HIGH_SECURITY", + "ciphers": [ + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ], + "is_fips": true, + "is_secure": true, + "prefer_server_ciphers": true, + "protocols": [ + "TLS_V1_2" + ], + "session_cache_enabled": true, + "session_cache_timeout": 300 + } + ], + "LbServerSslProfiles": [ + { + "_revision": 0, + "_create_time": 1643751878860, + "_create_user": "system", + "_last_modified_time": 1643751878860, + "_last_modified_user": "system", + "_protection": "NOT_PROTECTED", + "_system_owned": true, + "display_name": "default-balanced-server-ssl-profile", + "id": "default-balanced-server-ssl-profile", + "resource_type": "LBServerSslProfile", + "parent_path": "/infra", + "path": "/infra/lb-server-ssl-profiles/default-balanced-server-ssl-profile", + "realization_id": "ac822bda-8bcc-4942-b7af-7d217cde5dd5", + "relative_path": "default-balanced-server-ssl-profile", + "unique_id": "ac822bda-8bcc-4942-b7af-7d217cde5dd5", + "marked_for_delete": false, + "overridden": false, + "cipher_group_label": "BALANCED", + "ciphers": [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + 
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_128_CBC_SHA256", + "TLS_RSA_WITH_AES_128_CBC_SHA" + ], + "is_fips": true, + "is_secure": true, + "protocols": [ + "TLS_V1_1", + "TLS_V1_2" + ], + "session_cache_enabled": true + }, + { + "_revision": 0, + "_create_time": 1643751878803, + "_create_user": "system", + "_last_modified_time": 1643751878803, + "_last_modified_user": "system", + "_protection": "NOT_PROTECTED", + "_system_owned": true, + "display_name": "default-high-compatibility-server-ssl-profile", + "id": "default-high-compatibility-server-ssl-profile", + "resource_type": "LBServerSslProfile", + "parent_path": "/infra", + "path": "/infra/lb-server-ssl-profiles/default-high-compatibility-server-ssl-profile", + "realization_id": "abcae212-bb06-45d1-af42-c736b0fc0a8e", + "relative_path": "default-high-compatibility-server-ssl-profile", + "unique_id": "abcae212-bb06-45d1-af42-c736b0fc0a8e", + "marked_for_delete": false, + "overridden": false, + "cipher_group_label": "HIGH_COMPATIBILITY", + "ciphers": [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_128_CBC_SHA256", + "TLS_RSA_WITH_AES_128_CBC_SHA", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA" + ], + "is_fips": true, + "is_secure": false, + "protocols": [ + "TLS_V1_1", + "TLS_V1_2" + ], + "session_cache_enabled": true + }, + { + "_revision": 0, + "_create_time": 1643751878871, + "_create_user": "system", + "_last_modified_time": 1643751878871, + "_last_modified_user": "system", + "_protection": "NOT_PROTECTED", + "_system_owned": true, + "display_name": 
"default-high-security-server-ssl-profile", + "id": "default-high-security-server-ssl-profile", + "resource_type": "LBServerSslProfile", + "parent_path": "/infra", + "path": "/infra/lb-server-ssl-profiles/default-high-security-server-ssl-profile", + "realization_id": "6a943643-9a17-4105-9280-b2dbcdf48512", + "relative_path": "default-high-security-server-ssl-profile", + "unique_id": "6a943643-9a17-4105-9280-b2dbcdf48512", + "marked_for_delete": false, + "overridden": false, + "cipher_group_label": "HIGH_SECURITY", + "ciphers": [ + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ], + "is_fips": true, + "is_secure": true, + "protocols": [ + "TLS_V1_2" + ], + "session_cache_enabled": true + } + ], + "LbPersistenceProfiles": [ + { + "cookie_name": "NSXLB", + "cookie_mode": "INSERT", + "cookie_fallback": true, + "cookie_garble": true, + "cookie_httponly": false, + "cookie_secure": false, + "resource_type": "LBCookiePersistenceProfile", + "id": "default-cookie-lb-persistence-profile", + "display_name": "default-cookie-lb-persistence-profile", + "path": "/infra/lb-persistence-profiles/default-cookie-lb-persistence-profile", + "relative_path": "default-cookie-lb-persistence-profile", + "parent_path": "/infra", + "unique_id": "8d25f0d4-f460-4f6c-b548-2bc1ea819464", + "realization_id": "8d25f0d4-f460-4f6c-b548-2bc1ea819464", + "marked_for_delete": false, + "overridden": false, + "persistence_shared": false, + "_create_time": 1643751878139, + "_create_user": "system", + "_last_modified_time": 1643751878139, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "timeout": 300, + "ha_persistence_mirroring_enabled": false, + "resource_type": "LBGenericPersistenceProfile", + "id": "default-generic-lb-persistence-profile", + "display_name": 
"default-generic-lb-persistence-profile", + "path": "/infra/lb-persistence-profiles/default-generic-lb-persistence-profile", + "relative_path": "default-generic-lb-persistence-profile", + "parent_path": "/infra", + "unique_id": "463b8a94-6d67-4565-8314-952058216b12", + "realization_id": "463b8a94-6d67-4565-8314-952058216b12", + "marked_for_delete": false, + "overridden": false, + "persistence_shared": false, + "_create_time": 1643751878090, + "_create_user": "system", + "_last_modified_time": 1643751878090, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "purge": "FULL", + "timeout": 300, + "ha_persistence_mirroring_enabled": false, + "resource_type": "LBSourceIpPersistenceProfile", + "id": "default-source-ip-lb-persistence-profile", + "display_name": "default-source-ip-lb-persistence-profile", + "path": "/infra/lb-persistence-profiles/default-source-ip-lb-persistence-profile", + "relative_path": "default-source-ip-lb-persistence-profile", + "parent_path": "/infra", + "unique_id": "0ef8de23-c86e-4dee-929a-7967c726290e", + "realization_id": "0ef8de23-c86e-4dee-929a-7967c726290e", + "marked_for_delete": false, + "overridden": false, + "persistence_shared": false, + "_create_time": 1643751877923, + "_create_user": "system", + "_last_modified_time": 1643751877923, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + } + ], + "LbVirtualServers": [ + { + "_revision": 1, + "_create_time": 0, + "_create_user": "unknown", + "_last_modified_time": 1644228847670, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "display_name": "Vs-http-1", + "id": "Vs-http-1", + "resource_type": "LBVirtualServer", + "parent_path": "/infra", + "path": "/infra/lb-virtual-servers/Vs-http-1", + "realization_id": "4806a36a-692d-4428-81a1-d65462cf894a", + "relative_path": "Vs-http-1", + "unique_id": 
"4806a36a-692d-4428-81a1-d65462cf894a", + "marked_for_delete": false, + "overridden": false, + "access_log_enabled": false, + "application_profile_path": "/infra/lb-app-profiles/default-http-lb-app-profile", + "enabled": true, + "ip_address": "10.10.0.8", + "lb_persistence_profile_path": "/infra/lb-persistence-profiles/default-cookie-lb-persistence-profile", + "lb_service_path": "/infra/lb-services/lb", + "log_significant_event_only": false, + "pool_path": "/infra/lb-pools/pool1", + "ports": [ + "443" + ], + "sorry_pool_path": "/infra/lb-pools/sorrypool-l7-1" + }, + { + "_revision": 3, + "_create_time": 0, + "_create_user": "unknown", + "_last_modified_time": 1644228787408, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "display_name": "l4-udp", + "id": "l4-udp", + "resource_type": "LBVirtualServer", + "parent_path": "/infra", + "path": "/infra/lb-virtual-servers/l4-udp", + "realization_id": "1dd0a31f-605b-479c-b7c9-a3e1eceed682", + "relative_path": "l4-udp", + "unique_id": "1dd0a31f-605b-479c-b7c9-a3e1eceed682", + "marked_for_delete": false, + "overridden": false, + "access_log_enabled": false, + "application_profile_path": "/infra/lb-app-profiles/default-udp-lb-app-profile", + "enabled": true, + "ip_address": "10.10.0.3", + "lb_service_path": "/infra/lb-services/lb", + "log_significant_event_only": false, + "pool_path": "/infra/lb-pools/udp-pool", + "ports": [ + "80" + ], + "sorry_pool_path": "/infra/lb-pools/sorry-pool-udp" + }, + { + "_revision": 2, + "_create_time": 0, + "_create_user": "unknown", + "_last_modified_time": 1644228729104, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "description": "Virtual Service", + "display_name": "l4-vs", + "id": "l4-vs", + "resource_type": "LBVirtualServer", + "tags": [ + { + "scope": "NLB-Lb-ID", + "tag": "lb" + } + ], + "parent_path": "/infra", + "path": "/infra/lb-virtual-servers/l4-vs", + "realization_id": 
"1774a525-0c34-41dc-9ef3-9faab126f5a7", + "relative_path": "l4-vs", + "unique_id": "1774a525-0c34-41dc-9ef3-9faab126f5a7", + "marked_for_delete": false, + "overridden": false, + "access_list_control": { + "action": "ALLOW", + "enabled": true, + "group_path": "/infra/domains/default/groups/03335585-9574-4d44-a001-818ca9a9c282" + }, + "access_log_enabled": true, + "application_profile_path": "/infra/lb-app-profiles/default-tcp-lb-app-profile", + "default_pool_member_ports": [ + "80" + ], + "enabled": true, + "ip_address": "10.10.0.1", + "lb_persistence_profile_path": "/infra/lb-persistence-profiles/default-source-ip-lb-persistence-profile", + "lb_service_path": "/infra/lb-services/lb", + "log_significant_event_only": true, + "max_concurrent_connections": 1000, + "max_new_connection_rate": 200, + "pool_path": "/infra/lb-pools/l4-pool", + "ports": [ + "80" + ], + "sorry_pool_path": "/infra/lb-pools/sorry-pool-l4" + }, + { + "_revision": 17, + "_create_time": 0, + "_create_user": "unknown", + "_last_modified_time": 1644829386585, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "display_name": "l7-vs", + "id": "l7-vs", + "resource_type": "LBVirtualServer", + "parent_path": "/infra", + "path": "/infra/lb-virtual-servers/l7-vs", + "realization_id": "c619359e-e318-4fce-a923-4e0d7396deaf", + "relative_path": "l7-vs", + "unique_id": "c619359e-e318-4fce-a923-4e0d7396deaf", + "marked_for_delete": false, + "overridden": false, + "access_log_enabled": false, + "application_profile_path": "/infra/lb-app-profiles/default-http-lb-app-profile", + "client_ssl_profile_binding": { + "certificate_chain_depth": 3, + "client_auth": "IGNORE", + "client_auth_ca_paths": [ + "/infra/certificates/51ea59bb-c485-461f-8970-43a7b2722db8" + ], + "default_certificate_path": "/infra/certificates/a1246209-dc8e-48c3-a1cc-42ab657d452d" + }, + "default_pool_member_ports": [ + "8080" + ], + "enabled": true, + "ip_address": "10.10.0.2", + "lb_service_path": 
"/infra/lb-services/lb", + "log_significant_event_only": false, + "max_concurrent_connections": 100, + "max_new_connection_rate": 100, + "pool_path": "/infra/lb-pools/l7-pool", + "ports": [ + "80" + ], + "rules": [ + { + "actions": [ + { + "variable_name": "avi", + "variable_value": "avi", + "type": "LBVariableAssignmentAction" + } + ], + "match_conditions": [ + { + "source_port": "8080", + "type": "LBTcpHeaderCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_ACCESS" + }, + { + "actions": [ + { + "type": "LBConnectionDropAction" + } + ], + "match_conditions": [ + { + "source_port": "8080", + "type": "LBTcpHeaderCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_ACCESS" + }, + { + "actions": [ + { + "pool_id": "/infra/lb-pools/l4-pool", + "type": "LBSelectPoolAction" + }, + { + "ssl_mode": "SSL_PASSTHROUGH", + "type": "LBSslModeSelectionAction" + } + ], + "match_conditions": [ + { + "sni": "google", + "match_type": "STARTS_WITH", + "case_sensitive": false, + "type": "LBSslSniCondition", + "inverse": false + }, + { + "sni": "google", + "match_type": "EQUALS", + "case_sensitive": false, + "type": "LBSslSniCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "TRANSPORT" + }, + { + "actions": [ + { + "ssl_mode": "SSL_PASSTHROUGH", + "type": "LBSslModeSelectionAction" + } + ], + "match_conditions": [ + { + "sni": "google", + "match_type": "STARTS_WITH", + "case_sensitive": false, + "type": "LBSslSniCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "TRANSPORT" + }, + { + "actions": [ + { + "uri": "avi", + "type": "LBHttpRequestUriRewriteAction" + } + ], + "match_conditions": [ + { + "body_value": "xyz", + "match_type": "CONTAINS", + "case_sensitive": false, + "type": "LBHttpRequestBodyCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_REQUEST_REWRITE" + }, + { + "actions": [ + { + "uri": "avi", + "uri_arguments": "avi", + 
"type": "LBHttpRequestUriRewriteAction" + }, + { + "header_name": "avi", + "header_value": "AVI", + "type": "LBHttpRequestHeaderRewriteAction" + }, + { + "header_name": "AVI", + "type": "LBHttpRequestHeaderDeleteAction" + }, + { + "variable_name": "AVI", + "variable_value": "AVI", + "type": "LBVariableAssignmentAction" + } + ], + "match_conditions": [ + { + "cookie_name": "avi", + "cookie_value": "avi", + "match_type": "STARTS_WITH", + "case_sensitive": false, + "type": "LBHttpRequestCookieCondition", + "inverse": false + }, + { + "cookie_name": "avi", + "cookie_value": "avi", + "match_type": "ENDS_WITH", + "case_sensitive": false, + "type": "LBHttpRequestCookieCondition", + "inverse": false + }, + { + "cookie_name": "avi", + "cookie_value": "avi", + "match_type": "EQUALS", + "case_sensitive": false, + "type": "LBHttpRequestCookieCondition", + "inverse": false + }, + { + "cookie_name": "avi", + "cookie_value": "avi", + "match_type": "CONTAINS", + "case_sensitive": false, + "type": "LBHttpRequestCookieCondition", + "inverse": false + }, + { + "cookie_name": "avi", + "cookie_value": "avi", + "match_type": "REGEX", + "case_sensitive": false, + "type": "LBHttpRequestCookieCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_REQUEST_REWRITE" + }, + { + "actions": [ + { + "uri": "google.com", + "type": "LBHttpRequestUriRewriteAction" + } + ], + "match_conditions": [ + { + "body_value": "test", + "match_type": "CONTAINS", + "case_sensitive": false, + "type": "LBHttpRequestBodyCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_REQUEST_REWRITE" + }, + { + "actions": [ + { + "uri": "google.com", + "type": "LBHttpRequestUriRewriteAction" + } + ], + "match_conditions": [ + { + "cookie_name": "avi.com", + "cookie_value": "avi.com", + "match_type": "EQUALS", + "case_sensitive": false, + "type": "LBHttpRequestCookieCondition", + "inverse": false + }, + { + "version": "HTTP_VERSION_1_1", + "type": 
"LBHttpRequestVersionCondition", + "inverse": false + }, + { + "header_value": "avi", + "header_name": "avi", + "match_type": "REGEX", + "case_sensitive": false, + "type": "LBHttpRequestHeaderCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_REQUEST_REWRITE" + }, + { + "actions": [ + { + "uri": "google.com", + "type": "LBHttpRequestUriRewriteAction" + } + ], + "match_conditions": [ + { + "version": "HTTP_VERSION_1_1", + "type": "LBHttpRequestVersionCondition", + "inverse": false + }, + { + "version": "HTTP_VERSION_1_0", + "type": "LBHttpRequestVersionCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_REQUEST_REWRITE" + }, + { + "actions": [ + { + "uri": "google.com", + "type": "LBHttpRequestUriRewriteAction" + } + ], + "match_conditions": [ + { + "header_value": "avi", + "header_name": "avi.com", + "match_type": "EQUALS", + "case_sensitive": false, + "type": "LBHttpRequestHeaderCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_REQUEST_REWRITE" + }, + { + "actions": [ + { + "uri": "google.com", + "type": "LBHttpRequestUriRewriteAction" + } + ], + "match_conditions": [ + { + "uri_arguments": "google", + "match_type": "EQUALS", + "case_sensitive": false, + "type": "LBHttpRequestUriArgumentsCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_REQUEST_REWRITE" + }, + { + "actions": [ + { + "uri": "google.com", + "uri_arguments": "hhh", + "type": "LBHttpRequestUriRewriteAction" + } + ], + "match_conditions": [ + { + "method": "GET", + "type": "LBHttpRequestMethodCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_REQUEST_REWRITE" + }, + { + "actions": [ + { + "uri": "google.com", + "type": "LBHttpRequestUriRewriteAction" + } + ], + "match_conditions": [ + { + "uri": "google.com", + "match_type": "STARTS_WITH", + "case_sensitive": false, + "type": "LBHttpRequestUriCondition", + "inverse": false + } + ], + 
"match_strategy": "ALL", + "phase": "HTTP_REQUEST_REWRITE" + }, + { + "actions": [ + { + "reply_status": "200", + "reply_message": "ok", + "type": "LBHttpRejectAction" + }, + { + "redirect_status": "302", + "redirect_url": "https://avi.com/users", + "type": "LBHttpRedirectAction" + }, + { + "pool_id": "/infra/lb-pools/l4-pool", + "type": "LBSelectPoolAction" + } + ], + "match_conditions": [ + { + "uri": "avi", + "match_type": "EQUALS", + "case_sensitive": false, + "type": "LBHttpRequestUriCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_FORWARDING" + }, + { + "actions": [ + { + "type": "LBConnectionDropAction" + } + ], + "match_conditions": [ + { + "uri": "google.com", + "match_type": "STARTS_WITH", + "case_sensitive": false, + "type": "LBHttpRequestUriCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_FORWARDING" + }, + { + "actions": [ + { + "header_name": "avi_hdr", + "header_value": "avi_val", + "type": "LBHttpResponseHeaderRewriteAction" + }, + { + "header_name": "avi", + "type": "LBHttpResponseHeaderDeleteAction" + }, + { + "persistence_profile_path": "/infra/lb-persistence-profiles/default-generic-lb-persistence-profile", + "variable_name": "abc", + "variable_hash_enabled": false, + "type": "LBVariablePersistenceLearnAction" + } + ], + "match_conditions": [ + { + "header_name": "avi", + "header_value": "avi", + "match_type": "STARTS_WITH", + "case_sensitive": false, + "type": "LBHttpResponseHeaderCondition", + "inverse": false + }, + { + "method": "GET", + "type": "LBHttpRequestMethodCondition", + "inverse": false + }, + { + "uri": "uri", + "match_type": "EQUALS", + "case_sensitive": false, + "type": "LBHttpRequestUriCondition", + "inverse": false + }, + { + "uri_arguments": "zvi", + "match_type": "EQUALS", + "case_sensitive": false, + "type": "LBHttpRequestUriArgumentsCondition", + "inverse": false + }, + { + "version": "HTTP_VERSION_1_1", + "type": "LBHttpRequestVersionCondition", + 
"inverse": false + }, + { + "header_value": "avi", + "header_name": "avi", + "match_type": "EQUALS", + "case_sensitive": false, + "type": "LBHttpRequestHeaderCondition", + "inverse": false + }, + { + "cookie_name": "avi", + "cookie_value": "avi", + "match_type": "EQUALS", + "case_sensitive": false, + "type": "LBHttpRequestCookieCondition", + "inverse": false + }, + { + "client_certificate_issuer_dn": { + "issuer_dn": "avi", + "match_type": "STARTS_WITH", + "case_sensitive": false + }, + "session_reused": "IGNORE", + "type": "LBHttpSslCondition", + "inverse": false + }, + { + "source_port": "8080", + "type": "LBTcpHeaderCondition", + "inverse": false + }, + { + "source_address": "10.10.10.2", + "type": "LBIpHeaderCondition", + "inverse": false + }, + { + "variable_name": "avi", + "variable_value": "avi", + "match_type": "STARTS_WITH", + "case_sensitive": false, + "type": "LBVariableCondition", + "inverse": false + }, + { + "group_path": "/infra/domains/default/groups/03335585-9574-4d44-a001-818ca9a9c282", + "type": "LBIpHeaderCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_RESPONSE_REWRITE" + }, + { + "actions": [ + { + "header_name": "avi-version", + "header_value": "21.1.3", + "type": "LBHttpResponseHeaderRewriteAction" + }, + { + "header_name": "avi", + "type": "LBHttpResponseHeaderDeleteAction" + }, + { + "persistence_profile_path": "/infra/lb-persistence-profiles/default-generic-lb-persistence-profile", + "variable_name": "avi", + "variable_hash_enabled": false, + "type": "LBVariablePersistenceLearnAction" + } + ], + "match_conditions": [ + { + "header_name": "avi-version", + "header_value": "21.1.3", + "match_type": "STARTS_WITH", + "case_sensitive": false, + "type": "LBHttpResponseHeaderCondition", + "inverse": false + }, + { + "header_name": "avi", + "header_value": "21.1.3", + "match_type": "ENDS_WITH", + "case_sensitive": false, + "type": "LBHttpResponseHeaderCondition", + "inverse": false + }, + { + "header_name": 
"avi", + "header_value": "21.1.3", + "match_type": "STARTS_WITH", + "case_sensitive": false, + "type": "LBHttpResponseHeaderCondition", + "inverse": false + } + ], + "match_strategy": "ALL", + "phase": "HTTP_RESPONSE_REWRITE" + } + ], + "server_ssl_profile_binding": { + "certificate_chain_depth": 3, + "client_certificate_path": "/infra/certificates/a1246209-dc8e-48c3-a1cc-42ab657d452d", + "server_auth": "IGNORE", + "server_auth_ca_paths": [ + "/infra/certificates/51ea59bb-c485-461f-8970-43a7b2722db8" + ] + }, + "sorry_pool_path": "/infra/lb-pools/l4-pool" + } + ] +} \ No newline at end of file diff --git a/python/avi/migrationtools/nsxt_converter/test/conftest.py b/python/avi/migrationtools/nsxt_converter/test/conftest.py new file mode 100644 index 0000000000..10ebc2a837 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/test/conftest.py @@ -0,0 +1,15 @@ +import pytest + + +def pytest_addoption(parser): + parser.addoption('--nsx_lb_config', + action="store" + ) + parser.addoption('--conv_excel',action="store") + parser.addoption("--config",action="store") + parser.addoption( '--avi_config_file', + help='absolute path for avi config file') + +def pytest_configure(config): + global option + option=config.option diff --git a/python/avi/migrationtools/nsxt_converter/test/default_params.json b/python/avi/migrationtools/nsxt_converter/test/default_params.json new file mode 100644 index 0000000000..8966784411 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/test/default_params.json @@ -0,0 +1,3 @@ +{ + "bgp_peer_configured_for_vlan": true +} diff --git a/python/avi/migrationtools/nsxt_converter/test/excel_reader.py b/python/avi/migrationtools/nsxt_converter/test/excel_reader.py new file mode 100644 index 0000000000..ba3f02ae93 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/test/excel_reader.py @@ -0,0 +1,111 @@ +import json + +import pandas +from avi.migrationtools.test.common import excel_reader + + +class ExcelReader(): + + def 
percentage_success(self, path_to_excel): + # Percentage Success from Excel Reports + # find the status column + path = path_to_excel + s = pandas.read_excel(path, engine='openpyxl', sheet_name='Status Sheet') + if "NsxT type" in s: + type_str = "NsxT type" + else: + pass + report_dict = dict() + for row in range(s.index.size): + # taking col_type_val column for type and col_status_val for status + val = s[type_str][row] + state = s['Status'][row] + fail = 1 + suc = 0 + if state == "PARTIAL" or state == "SUCCESSFUL": + fail = 0 + suc = 1 + if val not in report_dict: + report_dict.update({val: {'success': suc, 'fail': fail}}) + else: + report_dict[val]['success'] += suc + report_dict[val]['fail'] += fail + # break + for key in report_dict.keys(): + if report_dict[key]['success'] + report_dict[key]['fail'] != 0: + percent = float(report_dict[key]['success'] * 100 / + (report_dict[key]['success'] + report_dict[key]['fail'])) + report_dict[key].update({'percent': percent}) + else: + report_dict[key].update({'percent': 100.0}) + for key in report_dict.keys(): + print(key, " -> ", report_dict[key]['percent'], "%") + + def output_vs_level_status(self, path_to_excel): + excel_reader.output_vs_level_status(path_to_excel) + + def output_sanitization(self, path_to_excel, path_to_out_json=None, path_to_log=None): + ''' Find the Success percentage of each output report ''' + path = path_to_excel + + out_obj = [] + excel_obj = [] + + # Output Sanitization + s = pandas.read_excel(path, engine='openpyxl', sheet_name='Status Sheet') + cols = 0 + cols_id = None + cols_status = None + for row in range(s.index.size): + if 'NsxT ID' in s and s['Status'][row] in ['SUCCESSFUL', 'PARTIAL']: + if s['NsxT ID'][row] in ['hash', 'oneconnect'] or \ + s['NsxT type'][row] == 'route' or \ + s['NsxT SubType'][row] in ['oneconnect', 'one-connect'] or \ + "Indirectly mapped" in s['Avi Object'][row]: + value = None + else: + value = s['NsxT ID'][row] + if s['NsxT type'][row] in ['pool', 'policy']: + 
value = s['NsxT ID'][row].split('/')[-1] + if value: + print(value+"----------------------------") + excel_obj.append(value) + + with open(path_to_out_json, 'r') as file_strem: + file_strem = json.load(file_strem) + for entity in file_strem: + print(entity) + print("----") + if entity != 'META' and entity != 'VsVip' and entity != \ + "OneConnect" and entity != "hash_algorithm": + for obj in file_strem[entity]: + out_obj.append(obj.get('name')) + excel_obj.sort() + out_obj.sort() + log_obj = {} + if path_to_log: + with open(path_to_log, 'r') as file_strem: + a = file_strem.readlines() + try: + b = str(a).split('$$$$$$')[-2].replace('\'', '"') + print(b) + log_obj = eval(b) + except: + pass + + obj_list = list() + + # comparing excel objects with json out objects + obj_list = list(set(excel_obj) - set(out_obj)) + + # If object read from log is dict compare + if isinstance(log_obj, dict): + for key in log_obj.keys(): + obj_list = list(set(obj_list) - set(log_obj[key].keys())) + + print("Object Difference between Excel sheet and output is %s" % len(obj_list)) + if obj_list: + print("Object not Common in Both Excel and Output %s", obj_list) + return False + print("Excel sheet matches with Output.json") + return True diff --git a/python/avi/migrationtools/nsxt_converter/test/patch.yaml b/python/avi/migrationtools/nsxt_converter/test/patch.yaml new file mode 100644 index 0000000000..5205335dff --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/test/patch.yaml @@ -0,0 +1,17 @@ +VirtualService: #Object to match + - match_name: ".*" #Wildcard to match all names + patch: + enabled: True #Enable Virtual Service + +ApplicationProfile: #Object to match + - match_name_in_list: #Application profiles to match + - prefix-default-http-lb-app-profile + patch: + http_profile: + xff_enabled: true #Enable X-Forwarded-For header insertion + +Pool: + - match_name_regex: ".*10" + patch: + lb_algorithm: LB_ALGORITHM_ROUND_ROBIN + diff --git 
a/python/avi/migrationtools/nsxt_converter/test/test_monitor_converter.conf b/python/avi/migrationtools/nsxt_converter/test/test_monitor_converter.conf new file mode 100644 index 0000000000..4aebcb0452 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/test/test_monitor_converter.conf @@ -0,0 +1,210 @@ +{ +"LbMonitorProfiles": [ + { + "request_url": "/", + "request_method": "GET", + "request_version": "HTTP_VERSION_1_1", + "response_status_codes": [ + 200, + 300, + 301, + 302, + 304, + 307 + ], + "resource_type": "LBHttpMonitorProfile", + "id": "default-http-lb-monitor", + "display_name": "default-http-lb-monitor", + "path": "/infra/lb-monitor-profiles/default-http-lb-monitor", + "relative_path": "default-http-lb-monitor", + "parent_path": "/infra", + "unique_id": "01ec5e4a-aa87-4100-a9d3-b82644f2792f", + "realization_id": "01ec5e4a-aa87-4100-a9d3-b82644f2792f", + "marked_for_delete": false, + "overridden": false, + "monitor_port": 80, + "interval": 5, + "timeout": 5, + "rise_count": 3, + "fall_count": 3, + "_create_time": 1636616860626, + "_create_user": "system", + "_last_modified_time": 1636616860626, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "request_url": "/", + "request_method": "GET", + "request_version": "HTTP_VERSION_1_1", + "response_status_codes": [ + 200, + 300, + 301, + 302, + 304, + 307 + ], + "resource_type": "LBHttpsMonitorProfile", + "id": "default-https-lb-monitor", + "display_name": "default-https-lb-monitor", + "path": "/infra/lb-monitor-profiles/default-https-lb-monitor", + "relative_path": "default-https-lb-monitor", + "parent_path": "/infra", + "unique_id": "7c33b107-f88d-45fe-9dac-e2573020ccd3", + "realization_id": "7c33b107-f88d-45fe-9dac-e2573020ccd3", + "marked_for_delete": false, + "overridden": false, + "monitor_port": 443, + "interval": 5, + "timeout": 5, + "rise_count": 3, + "fall_count": 3, + "_create_time": 1636616860655, + "_create_user": 
"system", + "_last_modified_time": 1636616860655, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "data_length": 56, + "resource_type": "LBIcmpMonitorProfile", + "id": "default-icmp-lb-monitor", + "display_name": "default-icmp-lb-monitor", + "path": "/infra/lb-monitor-profiles/default-icmp-lb-monitor", + "relative_path": "default-icmp-lb-monitor", + "parent_path": "/infra", + "unique_id": "8653fe08-4986-4da7-9815-15c2881bd126", + "realization_id": "8653fe08-4986-4da7-9815-15c2881bd126", + "marked_for_delete": false, + "overridden": false, + "interval": 5, + "timeout": 5, + "rise_count": 3, + "fall_count": 3, + "_create_time": 1636616860681, + "_create_user": "system", + "_last_modified_time": 1636616860681, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "max_fails": 5, + "timeout": 5, + "resource_type": "LBPassiveMonitorProfile", + "id": "default-passive-lb-monitor", + "display_name": "default-passive-lb-monitor", + "path": "/infra/lb-monitor-profiles/default-passive-lb-monitor", + "relative_path": "default-passive-lb-monitor", + "parent_path": "/infra", + "unique_id": "b8820e84-5368-4970-92cf-27cd14499850", + "realization_id": "b8820e84-5368-4970-92cf-27cd14499850", + "marked_for_delete": false, + "overridden": false, + "_create_time": 1636616860693, + "_create_user": "system", + "_last_modified_time": 1636616860693, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "resource_type": "LBTcpMonitorProfile", + "id": "default-tcp-lb-monitor", + "display_name": "default-tcp-lb-monitor", + "path": "/infra/lb-monitor-profiles/default-tcp-lb-monitor", + "relative_path": "default-tcp-lb-monitor", + "parent_path": "/infra", + "unique_id": "1054f253-c845-4c55-8137-6de24f83a4fc", + "realization_id": "1054f253-c845-4c55-8137-6de24f83a4fc", + 
"marked_for_delete": false, + "overridden": false, + "interval": 5, + "timeout": 5, + "rise_count": 3, + "fall_count": 3, + "_create_time": 1636616860585, + "_create_user": "system", + "_last_modified_time": 1636616860585, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "request_url": "https://goole.com", + "request_method": "GET", + "request_version": "HTTP_VERSION_1_0", + "request_headers": [ + { + "header_name": "content-type", + "header_value": "Application/Json" + } + ], + "response_status_codes": [ + 200, + 300, + 301, + 302, + 304, + 307 + ], + "response_body": "200", + "resource_type": "LBHttpMonitorProfile", + "id": "test-http", + "display_name": "test-http", + "path": "/infra/lb-monitor-profiles/test-http", + "relative_path": "test-http", + "parent_path": "/infra", + "unique_id": "4d945f07-33bf-4342-87f2-3d9f36386106", + "realization_id": "4d945f07-33bf-4342-87f2-3d9f36386106", + "marked_for_delete": false, + "overridden": false, + "monitor_port": 8080, + "interval": 5, + "timeout": 5, + "rise_count": 3, + "fall_count": 3, + "_create_time": 1637180948566, + "_create_user": "admin", + "_last_modified_time": 1637180948566, + "_last_modified_user": "admin", + "_system_owned": false, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "send": "http", + "receive": "https", + "resource_type": "LBUdpMonitorProfile", + "id": "test-udp", + "display_name": "test-udp", + "path": "/infra/lb-monitor-profiles/test-udp", + "relative_path": "test-udp", + "parent_path": "/infra", + "unique_id": "a5bc471a-05de-4f14-81b0-e2ec0dc52b1d", + "realization_id": "a5bc471a-05de-4f14-81b0-e2ec0dc52b1d", + "marked_for_delete": false, + "overridden": false, + "monitor_port": 8080, + "interval": 5, + "timeout": 5, + "rise_count": 3, + "fall_count": 3, + "_create_time": 1637176669608, + "_create_user": "admin", + "_last_modified_time": 1637176669608, + "_last_modified_user": "admin", + "_system_owned": 
false, + "_protection": "NOT_PROTECTED", + "_revision": 0 + } + ] + } \ No newline at end of file diff --git a/python/avi/migrationtools/nsxt_converter/test/test_monitor_converter.py b/python/avi/migrationtools/nsxt_converter/test/test_monitor_converter.py new file mode 100644 index 0000000000..c2e951b600 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/test/test_monitor_converter.py @@ -0,0 +1,52 @@ +import logging +import os +import unittest +import json +import copy + +gSAMPLE_CONFIG = None +LOG = logging.getLogger(__name__) + +from avi.migrationtools.nsxt_converter.monitor_converter import MonitorConfigConv +import avi.migrationtools.nsxt_converter.converter_constants as conv_const +import avi.migrationtools.nsxt_converter.conversion_util as conv_util + + +def setUpModule(): + LOG.setLevel(logging.DEBUG) + formatter = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + dir_path = os.path.abspath(os.path.dirname(__file__)) + dir_path = dir_path.rsplit(os.path.sep, 1)[0] + dir_path = dir_path + os.path.sep + "output" + logging.basicConfig(filename=os.path.join(dir_path, 'converter.log'), + level=logging.DEBUG, format=formatter) + cfg_file = open('test_monitor_converter.conf', 'r') + cfg = cfg_file.read() + global gSAMPLE_CONFIG + gSAMPLE_CONFIG = json.loads(cfg) + global nsxt_attributes + nsxt_attributes = conv_const.init() + + +class Test(unittest.TestCase): + + def test_monitor_conversion(self): + + avi_config = dict() + monitor_converter = MonitorConfigConv(nsxt_attributes) + monitor_converter.convert(avi_config, gSAMPLE_CONFIG,'') + monitor_config=gSAMPLE_CONFIG['LbMonitorProfiles'] + avi_monitor_config=avi_config.get('HealthMonitor',None) + + assert avi_monitor_config + non_passive_monitor_count=0 + for monitor in monitor_config: + if monitor.get('resource_type') not in ['LBPassiveMonitorProfile']: + non_passive_monitor_count += 1 + + assert non_passive_monitor_count == len(avi_monitor_config) + + + + + diff --git 
a/python/avi/migrationtools/nsxt_converter/test/test_nsxt_converter.py b/python/avi/migrationtools/nsxt_converter/test/test_nsxt_converter.py new file mode 100755 index 0000000000..396a84481f --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/test/test_nsxt_converter.py @@ -0,0 +1,643 @@ +import argparse +import logging +import os +import unittest +import json +import copy +import pytest +import sys +import pandas as pd +import yaml + +from avi.migrationtools.nsxt_converter import nsxt_config_converter +from avi.migrationtools.nsxt_converter.alb_converter import ALBConverter +from avi.migrationtools.nsxt_converter.nsxt_converter import NsxtConverter +from avi.migrationtools.nsxt_converter.monitor_converter import MonitorConfigConv +from avi.migrationtools.nsxt_converter.persistant_converter import PersistantProfileConfigConv +from avi.migrationtools.nsxt_converter.profile_converter import ProfileConfigConv +from avi.migrationtools.nsxt_converter.ssl_profile_converter import SslProfileConfigConv +from avi.migrationtools.nsxt_converter.vs_converter import VsConfigConv +from avi.migrationtools.avi_migration_utils import get_count, set_update_count +from avi.migrationtools.nsxt_converter.test.excel_reader import ExcelReader +from avi.migrationtools.nsxt_converter.vs_converter import vs_data_path_not_work +gSAMPLE_CONFIG = None +LOG = logging.getLogger(__name__) + +from avi.migrationtools.nsxt_converter.pools_converter import PoolConfigConv +import avi.migrationtools.nsxt_converter.converter_constants as conv_const +import avi.migrationtools.nsxt_converter.conversion_util as conv_util + +from conftest import option + +avi_cfg_file = open(option.avi_config_file, 'r') +avi_cfg = avi_cfg_file.read() +avi_config_file = json.loads(avi_cfg) +output_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), + 'output')) +if not os.path.exists(output_dir): + os.mkdir(output_dir) +input_path = output_dir + os.path.sep + "input" +if not os.path.exists(input_path): + 
os.makedirs(input_path) +output_path = output_dir + os.path.sep + "output" +if not os.path.exists(output_path): + os.makedirs(output_path) +object_merge = True +no_object_merge = False +avi_config = dict() +input_role_config_file = os.path.abspath(os.path.join( + os.path.dirname(__file__), 'custom_config.yaml')) +merge_object_mapping = { + + 'ssl_profile': {'no': 0}, + 'app_profile': {'no': 0}, + 'network_profile': {'no': 0}, + 'health_monitor': {'no': 0}, + 'ssl_cert_key': {'no': 0} +} +sys_dict = {} +sys_dict = {} +merge_object_type = ['ApplicationProfile', 'NetworkProfile', + 'SSLProfile', 'PKIProfile', 'SSLKeyAndCertificate', + 'ApplicationPersistenceProfile', 'HealthMonitor', + 'IpAddrGroup'] +for key in merge_object_type: + sys_dict[key] = [] + + +def setUpModule(): + cfg_file = open(option.nsx_lb_config, 'r') + cfg = cfg_file.read() + global nsx_config + nsx_config = json.loads(cfg) + global nsxt_attributes + nsxt_attributes = conv_const.init() + + +class Test(unittest.TestCase, ExcelReader): + + @pytest.fixture + def cleanup(self): + import avi.migrationtools.f5_converter.conversion_util as conv + import shutil + conv.csv_writer_dict_list = list() + if os.path.exists(output_dir): + for each_file in os.listdir(output_dir): + file_path = os.path.join(output_dir, each_file) + try: + if os.path.isfile(file_path): + if file_path.endswith('.log'): + open('converter.log', 'w').close() + else: + os.unlink(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + except Exception as e: + print(e) + + def test_output_sanitization(self): + + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="", + migrate_to="", + object_merge_check=no_object_merge, + controller_version="") + + self.excel_path = os.path.abspath(os.path.join( + output_path, 'nsxt-report-ConversionStatus.xlsx')) + self.json_path = os.path.abspath(os.path.join( + output_path, 'avi_config.json')) + assert 
self.excel_path + assert self.json_path + + def test_prefix(self): + """ + prefix added + """ + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="prefix", + migrate_to="", + object_merge_check=no_object_merge, + controller_version="") + + def test_health_monitor_conversion(self): + tenant = "admin" + monitor_converter = MonitorConfigConv(nsxt_monitor_attributes=nsxt_attributes, + object_merge_check=no_object_merge, + merge_object_mapping=merge_object_mapping, + sys_dict=sys_dict) + monitor_converter.convert(alb_config=avi_config, + nsx_lb_config=nsx_config, + prefix='', + tenant=tenant, + custom_mapping=False) + + def test_passive_health_monitor(self): + data_path = option.conv_excel + data = pd.read_excel(data_path) + for k, row in data.iterrows(): + if row['NsxT SubType'] in ["LBPassiveMonitorProfile"]: + assert row["Status"] == "SUCCESSFUL" + + def test_pool_conversion(self): + + pool_converter = PoolConfigConv(nsxt_pool_attributes=nsxt_attributes, + object_merge_check=no_object_merge, + merge_object_mapping=merge_object_mapping, + sys_dict=sys_dict) + pool_converter.convert(alb_config=avi_config, + nsx_lb_config=nsx_config, + prefix='', + tenant="admin") + + def test_profile_conversion(self): + + profile_converter = ProfileConfigConv(nsxt_profile_attributes=nsxt_attributes, + object_merge_check=no_object_merge, + merge_object_mapping=merge_object_mapping, + sys_dict=sys_dict) + profile_converter.convert(alb_config=avi_config, + nsx_lb_config=nsx_config, + prefix='', + tenant='admin') + + def test_config_hm_http(self): + + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="prefix", + migrate_to="", + object_merge_check=no_object_merge, + controller_version="", + not_in_use=False + ) + o_file = "%s/%s" % (output_path, "avi_config.json") + with open(o_file) as json_file: + data = 
json.load(json_file) + avi_monitor_config = data['HealthMonitor'] + for alb_hm in avi_monitor_config: + if alb_hm["type"] == "HEALTH_MONITOR_HTTP": + assert alb_hm['http_monitor']['http_request'] + assert alb_hm['http_monitor']['http_response_code'] + + def test_config_hm_https(self): + + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="prefix", + migrate_to="", + object_merge_check=no_object_merge, + controller_version="", + not_in_use=False + ) + o_file = "%s/%s" % (output_path, "avi_config.json") + with open(o_file) as json_file: + data = json.load(json_file) + avi_monitor_config = data['HealthMonitor'] + for alb_hm in avi_monitor_config: + if alb_hm["type"] == "HEALTH_MONITOR_HTTPS": + assert alb_hm['https_monitor']['http_request'] + assert alb_hm['https_monitor']['http_response_code'] + + def test_config_hm_udp(self): + + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="prefix", + migrate_to="", + object_merge_check=no_object_merge, + controller_version="", + not_in_use=False + ) + o_file = "%s/%s" % (output_path, "avi_config.json") + with open(o_file) as json_file: + data = json.load(json_file) + avi_monitor_config = data['HealthMonitor'] + for alb_hm in avi_monitor_config: + if alb_hm["type"] == "HEALTH_MONITOR_UDP": + assert alb_hm["udp_monitor"] + + def test_config_hm_tcp(self): + + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="prefix", + migrate_to="", + object_merge_check=no_object_merge, + controller_version="", + not_in_use=False + ) + o_file = "%s/%s" % (output_path, "avi_config.json") + with open(o_file) as json_file: + data = json.load(json_file) + avi_monitor_config = data['HealthMonitor'] + for alb_hm in avi_monitor_config: + if alb_hm["type"] == "HEALTH_MONITOR_TCP": + assert 
alb_hm["tcp_monitor"] + + def test_converted_status(self): + + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="", + migrate_to="", + object_merge_check=no_object_merge, + controller_version="") + + excel_path = os.path.abspath(os.path.join( + output_path, 'nsxt-report-ConversionStatus.xlsx')) + data = pd.read_excel(excel_path) + for k, row in data.iterrows(): + assert row['Status'] != "ERROR" + + def test_healthmonitor_prefix(self): + """ + for testing prefix in HealthMonitors + """ + + prefix = "AVI" + monitor_converter = MonitorConfigConv(nsxt_monitor_attributes=nsxt_attributes, + object_merge_check=no_object_merge, + merge_object_mapping=merge_object_mapping, + sys_dict=sys_dict) + monitor_converter.convert(alb_config=avi_config, + nsx_lb_config=nsx_config, + prefix=prefix, + tenant="admin", + custom_mapping=False) + avi_monitor_config = avi_config.get('HealthMonitor', None) + assert avi_monitor_config + + for hm in avi_monitor_config: + assert hm['name'].startswith(prefix) + + def test_pool_prefix(self): + """ + for testing prefix in Pool + """ + + prefix = "AVI" + pool_converter = PoolConfigConv(nsxt_pool_attributes=nsxt_attributes, + object_merge_check=no_object_merge, + merge_object_mapping=merge_object_mapping, + sys_dict=sys_dict) + pool_converter.convert(alb_config=avi_config, + nsx_lb_config=nsx_config, + prefix=prefix, + tenant="admin") + avi_pool_config = avi_config['Pool'] + + assert avi_pool_config + for pools in avi_pool_config: + assert pools['name'].startswith(prefix) + if pools.get('health_monitor_refs'): + for pool_hm in pools['health_monitor_refs']: + assert pool_hm.split('name=')[1].startswith(prefix) + + def test_profile_prefix(self): + """ + for testing prefix in ApplicationProfiles + """ + + prefix = "AVI" + profile_converter = ProfileConfigConv(nsxt_profile_attributes=nsxt_attributes, + object_merge_check=no_object_merge, + 
merge_object_mapping=merge_object_mapping, + sys_dict=sys_dict) + profile_converter.convert(alb_config=avi_config, + nsx_lb_config=nsx_config, + prefix=prefix, + tenant="admin") + avi_app_profile_config = avi_config.get('ApplicationProfile', None) + avi_network_profile_confg = avi_config.get('NetworkProfile', None) + + assert avi_app_profile_config + for app_pr in avi_app_profile_config: + assert app_pr['name'].startswith(prefix) + assert avi_network_profile_confg + for np_pr in avi_network_profile_confg: + assert np_pr['name'].startswith(prefix) + + def test_vs_prefix(self): + """ + for testing prefix in virtual service + """ + + prefix = "AVI" + vs_state = True + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix=prefix, + migrate_to='', + object_merge_check=no_object_merge, + controller_version="") + o_file = "%s/%s" % (output_path, "avi_config.json") + with open(o_file) as json_file: + data = json.load(json_file) + avi_vs_config = data.get('VirtualService', None) + assert avi_vs_config + + for hm in avi_vs_config: + assert hm['name'].startswith(prefix) + + def test_migrate_to(self): + """ + added migrate_to + """ + migrate_to = 'NSX' + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="", + migrate_to=migrate_to, + object_merge_check=no_object_merge, + controller_version="") + + def test_skipped_object(self): + """ + test case for skipped object + """ + + data_path = option.conv_excel + data = pd.read_excel(data_path) + for k, row in data.iterrows(): + if row['NsxT SubType'] in ["LBGenericPersistenceProfile"]: + assert row['Status'] == 'SKIPPED' + + def test_object_merge(self): + """ + testing with object merge flag + """ + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="", + migrate_to="", + 
object_merge_check=object_merge, + controller_version="") + + def test_vs_state_level_true(self): + """ + testing when vs level status is true + """ + controller_version = "20.1.7" + vs_state = True + vs_level_status = True + migrate_to = 'NSX' + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="", + migrate_to=migrate_to, + object_merge_check=no_object_merge, + controller_version=controller_version, + vs_state=vs_state, + vs_level_status=vs_level_status) + excel_path = os.path.abspath( + os.path.join( + output_path, 'nsxt-report-ConversionStatus.xlsx' + ) + ) + self.output_vs_level_status(excel_path) + + def test_vs_level_status_false(self): + """ + testing when vs level status is false + """ + + vs_state = True + vs_level_status = False + migrate_to = 'NSX' + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="", + migrate_to=migrate_to, + object_merge_check=no_object_merge, + controller_version="", + vs_state=vs_state, + vs_level_status=vs_level_status) + + def test_error_and_warning_count(self): + + set_update_count() + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="", + migrate_to="", + object_merge_check=no_object_merge, + controller_version="", + ) + + assert get_count('error') == 0 + + def test_ssl_prefix(self): + """ + for testing prefix in ssl profile + """ + + prefix = "AVI" + ssl_converter = SslProfileConfigConv(nsxt_profile_attributes=nsxt_attributes, + object_merge_check=no_object_merge, + merge_object_mapping=merge_object_mapping, + sys_dict=sys_dict) + ssl_converter.convert(alb_config=avi_config, + nsx_lb_config=nsx_config, + prefix=prefix, + tenant="admin") + avi_ssl_config = avi_config.get('SSLProfile', None) + assert avi_ssl_config + + for hm in avi_ssl_config: + assert 
hm['name'].startswith(prefix) + + def test_persistance_prefix(self): + """ + for testing prefix in persistance profile + """ + + prefix = "AVI" + persistance_converter = PersistantProfileConfigConv(nsxt_profile_attributes=nsxt_attributes, + object_merge_check=no_object_merge, + merge_object_mapping=merge_object_mapping, + sys_dict=sys_dict) + persistance_converter.convert(alb_config=avi_config, + nsx_lb_config=nsx_config, + prefix=prefix, + tenant="admin") + avi_persis_config = avi_config.get('ApplicationPersistenceProfile', None) + assert avi_persis_config + + for hm in avi_persis_config: + assert hm['name'].startswith(prefix) + + def test_persistence_conversion(self): + + persistance_converter = PersistantProfileConfigConv(nsxt_profile_attributes=nsxt_attributes, + object_merge_check=no_object_merge, + merge_object_mapping=merge_object_mapping, + sys_dict=sys_dict) + persistance_converter.convert(alb_config=avi_config, + nsx_lb_config=nsx_config, + prefix="", + tenant="admin") + + def test_ssl_profile_conversion(self): + + ssl_converter = SslProfileConfigConv(nsxt_profile_attributes=nsxt_attributes, + object_merge_check=no_object_merge, + merge_object_mapping=merge_object_mapping, + sys_dict=sys_dict) + + ssl_converter.convert(alb_config=avi_config, + nsx_lb_config=nsx_config, + prefix="", + tenant="admin") + + def test_excel_report(self): + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="", + migrate_to="", + object_merge_check=no_object_merge, + controller_version="") + + self.percentage_success(os.path.join(output_path, + 'nsxt-report-ConversionStatus.xlsx')) + + def test_check_health_monitor_request_url(self): + + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="", + migrate_to="", + object_merge_check=no_object_merge, + controller_version="") + + o_file = "%s/%s" % (output_path, 
"avi_config.json") + with open(o_file) as json_file: + data = json.load(json_file) + hm_object = data['HealthMonitor'] + monitor_urls = [] + for monitor in hm_object: + if 'https_monitor' in monitor: + monitor_urls.append(monitor['https_monitor'][ + 'http_request']) + elif 'http_monitor' in monitor: + monitor_urls.append(monitor['http_monitor']['http_request']) + for eachUrl in monitor_urls: + request = eachUrl.split('\r\n')[0] + assert (request.endswith('HTTP/1.1') or + request.endswith('HTTP/1.0')) + + def test_no_profile_merge(self): + """ + No_profile_merge Flag Reset + """ + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="", + migrate_to="", + object_merge_check=no_object_merge, + controller_version="") + + def test_pki_profile(self): + + pki_obj = avi_config_file.get("PKIProfile", None) + if pki_obj: + for pki in pki_obj: + assert pki["tenant_ref"] + assert pki["name"] + assert pki["ca_certs"] + assert pki["crl_check"] + + def test_tier1_lr_in_pools(self): + vs_object = avi_config_file.get("VirtualService", None) + pool_object = avi_config_file.get("Pool", None) + for vs in vs_object: + if vs.get("pool_ref"): + pool_name = vs["pool_ref"].split("name=")[-1] + pool_name = pool_name.split("&cloud")[0] + print(pool_name) + pool = [pool for pool in pool_object if pool["name"] == pool_name] + print(pool) + assert pool[0]["tier1_lr"] + + def test_tier1_lr_in_vsvip(self): + vs_object = avi_config_file.get("VirtualService", None) + vsvip_object = avi_config_file.get("VsVip", None) + for vs in vs_object: + if vs.get("vsvip_ref"): + vsvip_name = vs["vsvip_ref"].split("name=")[-1] + vsvip_name = vsvip_name.split("&cloud")[0] + vsvip = [vsvip for vsvip in vsvip_object if vsvip["name"] == vsvip_name] + assert vsvip[0]["tier1_lr"] + + def test_cmd_profile(self): + app_object = avi_config_file.get("ApplicationProfile", None) + for app in app_object: + if app["name"].endswith("cmd"): + 
assert not app["connection_multiplexing_enabled"] + assert app["preserve_client_ip"] + + def test_not_in_use(self): + """ + testing migration of not in use objects + """ + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="", + migrate_to="", + object_merge_check=no_object_merge, + controller_version="", + not_in_use=False) + + def test_is_bgp_configured_for_vlan_vs(self): + nsxt_config_converter.convert(nsx_lb_config=nsx_config, + input_path=input_path, + output_path=output_path, + tenant="admin", + prefix="", + migrate_to="", + object_merge_check=no_object_merge, + controller_version="", + not_in_use=False) + o_file = "%s/%s" % (output_path, "avi_config.json") + with open(o_file) as json_file: + data = json.load(json_file) + avi_vs_config = data.get('VirtualService', None) + for vs in avi_vs_config: + if vs in vs_data_path_not_work: + assert not vs.get("enable_rhi", None) \ No newline at end of file diff --git a/python/avi/migrationtools/nsxt_converter/test/test_pool_converter.conf b/python/avi/migrationtools/nsxt_converter/test/test_pool_converter.conf new file mode 100644 index 0000000000..8ce197b286 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/test/test_pool_converter.conf @@ -0,0 +1,133 @@ +{ + "LbPools": [ + { + "_revision": 4, + "_create_time": 0, + "_create_user": "unknown", + "_last_modified_time": 1637746909273, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "display_name": "nsx-pool", + "id": "nsx-pool", + "resource_type": "LBPool", + "parent_path": "/infra", + "path": "/infra/lb-pools/nsx-pool", + "realization_id": "b7ab75ac-7c5e-42e6-9b26-f5291351707f", + "relative_path": "nsx-pool", + "unique_id": "b7ab75ac-7c5e-42e6-9b26-f5291351707f", + "marked_for_delete": false, + "overridden": false, + "active_monitor_paths": [ + "/infra/lb-monitor-profiles/test-http" + ], + "algorithm": "ROUND_ROBIN", + "members": 
[ + { + "admin_state": "ENABLED", + "backup_member": false, + "display_name": "server1", + "ip_address": "10.10.10.2", + "max_concurrent_connections": 11, + "weight": 1 + }, + { + "admin_state": "ENABLED", + "backup_member": false, + "display_name": "server", + "ip_address": "10.10.10.1", + "max_concurrent_connections": 100, + "port": "8080", + "weight": 1 + } + ], + "min_active_members": 1, + "snat_translation": { + "type": "LBSnatAutoMap" + }, + "tcp_multiplexing_enabled": false, + "tcp_multiplexing_number": 6 + }, + { + "_revision": 3, + "_create_time": 0, + "_create_user": "unknown", + "_last_modified_time": 1637569737206, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "display_name": "nsx-pool-1", + "id": "nsx-poo-1", + "resource_type": "LBPool", + "parent_path": "/infra", + "path": "/infra/lb-pools/nsx-poo-1", + "realization_id": "848cfcbc-4021-4b77-94c1-7ec9273544f5", + "relative_path": "nsx-poo-1", + "unique_id": "848cfcbc-4021-4b77-94c1-7ec9273544f5", + "marked_for_delete": false, + "overridden": false, + "active_monitor_paths": [ + "/infra/lb-monitor-profiles/test-http" + ], + "algorithm": "ROUND_ROBIN", + "member_group": { + "group_path": "/infra/domains/default/groups/nsx-alb-testing", + "ip_revision_filter": "IPV4", + "port": 8080 + }, + "min_active_members": 1, + "snat_translation": { + "type": "LBSnatAutoMap" + }, + "tcp_multiplexing_enabled": false, + "tcp_multiplexing_number": 6 + }, + { + "_revision": 0, + "_create_time": 1637756753362, + "_create_user": "admin", + "_last_modified_time": 1637756753362, + "_last_modified_user": "admin", + "_protection": "NOT_PROTECTED", + "_system_owned": false, + "display_name": "nsx-pool-2", + "id": "nsx-pool-2", + "resource_type": "LBPool", + "parent_path": "/infra", + "path": "/infra/lb-pools/nsx-pool-2", + "realization_id": "ec92be7f-5648-4d6e-96ad-22d2b4b0d257", + "relative_path": "nsx-pool-2", + "unique_id": "ec92be7f-5648-4d6e-96ad-22d2b4b0d257", + 
"marked_for_delete": false, + "overridden": false, + "active_monitor_paths": [ + "/infra/lb-monitor-profiles/default-http-lb-monitor", + "/infra/lb-monitor-profiles/default-https-lb-monitor", + "/infra/lb-monitor-profiles/default-icmp-lb-monitor", + "/infra/lb-monitor-profiles/default-tcp-lb-monitor", + "/infra/lb-monitor-profiles/test-http", + "/infra/lb-monitor-profiles/test-udp" + ], + "algorithm": "ROUND_ROBIN", + "members": [ + { + "admin_state": "ENABLED", + "backup_member": false, + "display_name": "server_1", + "ip_address": "192.168.0.11", + "max_concurrent_connections": 100, + "port": "8080", + "weight": 1 + } + ], + "min_active_members": 1, + "passive_monitor_path": "/infra/lb-monitor-profiles/default-passive-lb-monitor", + "snat_translation": { + "type": "LBSnatAutoMap" + }, + "tcp_multiplexing_enabled": true, + "tcp_multiplexing_number": 6 + } + ] + +} \ No newline at end of file diff --git a/python/avi/migrationtools/nsxt_converter/test/test_pool_converter.py b/python/avi/migrationtools/nsxt_converter/test/test_pool_converter.py new file mode 100644 index 0000000000..813613c320 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/test/test_pool_converter.py @@ -0,0 +1,54 @@ +import logging +import os +import unittest +import json +import copy + +gSAMPLE_CONFIG = None +LOG = logging.getLogger(__name__) + +from avi.migrationtools.nsxt_converter.pools_converter import PoolConfigConv +import avi.migrationtools.nsxt_converter.converter_constants as conv_const +import avi.migrationtools.nsxt_converter.conversion_util as conv_util + + +def setUpModule(): + LOG.setLevel(logging.DEBUG) + formatter = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + dir_path = os.path.abspath(os.path.dirname(__file__)) + dir_path = dir_path.rsplit(os.path.sep, 1)[0] + dir_path = dir_path + os.path.sep + "output" + logging.basicConfig(filename=os.path.join(dir_path, 'converter.log'), + level=logging.DEBUG, format=formatter) + cfg_file = 
open('test_pool_converter.conf', 'r') + cfg = cfg_file.read() + global gSAMPLE_CONFIG + gSAMPLE_CONFIG = json.loads(cfg) + global nsxt_attributes + nsxt_attributes= conv_const.init() + +class Test(unittest.TestCase): + + def test_pool_conversion(self): + + avi_config = dict() + pool_converter = PoolConfigConv(nsxt_attributes) + pool_converter.convert(avi_config, gSAMPLE_CONFIG, '', {}) + pool_config=gSAMPLE_CONFIG['LbPools'] + avi_pool_config=avi_config.get('Pool',None) + + assert avi_pool_config + + for index,pool in enumerate(avi_pool_config): + assert pool.get('name') + assert pool.get('lb_algorithm') + if pool_config[index].get('member'): + assert pool.get('servers') + + + assert len(pool_config) == len(avi_pool_config) + + + + + diff --git a/python/avi/migrationtools/nsxt_converter/test/test_profile_converter.conf b/python/avi/migrationtools/nsxt_converter/test/test_profile_converter.conf new file mode 100644 index 0000000000..d9ca7344e3 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/test/test_profile_converter.conf @@ -0,0 +1,106 @@ +{ + "LbAppProfiles": [ + { + "http_redirect_to_https": false, + "ntlm": false, + "idle_timeout": 15, + "request_header_size": 1024, + "response_timeout": 60, + "response_header_size": 4096, + "response_buffering": false, + "server_keep_alive": false, + "resource_type": "LBHttpProfile", + "id": "default-http-lb-app-profile", + "display_name": "default-http-lb-app-profile", + "path": "/infra/lb-app-profiles/default-http-lb-app-profile", + "relative_path": "default-http-lb-app-profile", + "parent_path": "/infra", + "unique_id": "12098bcd-b79a-42d3-a3e3-5920a9780d6b", + "realization_id": "12098bcd-b79a-42d3-a3e3-5920a9780d6b", + "marked_for_delete": false, + "overridden": false, + "_create_time": 1636616861099, + "_create_user": "system", + "_last_modified_time": 1636616861099, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "idle_timeout": 1800, 
+ "close_timeout": 8, + "ha_flow_mirroring_enabled": false, + "resource_type": "LBFastTcpProfile", + "id": "default-tcp-lb-app-profile", + "display_name": "default-tcp-lb-app-profile", + "path": "/infra/lb-app-profiles/default-tcp-lb-app-profile", + "relative_path": "default-tcp-lb-app-profile", + "parent_path": "/infra", + "unique_id": "5648f78a-4feb-4272-8756-e35586cb386d", + "realization_id": "5648f78a-4feb-4272-8756-e35586cb386d", + "marked_for_delete": false, + "overridden": false, + "_create_time": 1636616861181, + "_create_user": "system", + "_last_modified_time": 1636616861181, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "idle_timeout": 300, + "flow_mirroring_enabled": false, + "resource_type": "LBFastUdpProfile", + "id": "default-udp-lb-app-profile", + "display_name": "default-udp-lb-app-profile", + "path": "/infra/lb-app-profiles/default-udp-lb-app-profile", + "relative_path": "default-udp-lb-app-profile", + "parent_path": "/infra", + "unique_id": "290d8ffd-c09e-4f96-9d18-19575646498b", + "realization_id": "290d8ffd-c09e-4f96-9d18-19575646498b", + "marked_for_delete": false, + "overridden": false, + "_create_time": 1636616861251, + "_create_user": "system", + "_last_modified_time": 1636616861251, + "_last_modified_user": "system", + "_system_owned": true, + "_protection": "NOT_PROTECTED", + "_revision": 0 + }, + { + "x_forwarded_for": "INSERT", + "http_redirect_to": "https://google.com", + "http_redirect_to_https": false, + "ntlm": true, + "idle_timeout": 15, + "request_header_size": 1024, + "request_body_size": 30, + "response_timeout": 60, + "response_header_size": 4096, + "response_buffering": true, + "server_keep_alive": true, + "resource_type": "LBHttpProfile", + "id": "test", + "display_name": "test", + "description": "eee", + "path": "/infra/lb-app-profiles/test", + "relative_path": "test", + "parent_path": "/infra", + "unique_id": "28b88a5b-99c7-45ec-8f69-b0969471bac9", 
+ "realization_id": "28b88a5b-99c7-45ec-8f69-b0969471bac9", + "marked_for_delete": false, + "overridden": false, + "_create_time": 1637149243432, + "_create_user": "admin", + "_last_modified_time": 1638185008289, + "_last_modified_user": "admin", + "_system_owned": false, + "_protection": "NOT_PROTECTED", + "_revision": 4 + } + ] + +} \ No newline at end of file diff --git a/python/avi/migrationtools/nsxt_converter/test/test_profile_converter.py b/python/avi/migrationtools/nsxt_converter/test/test_profile_converter.py new file mode 100644 index 0000000000..c3a5009b33 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/test/test_profile_converter.py @@ -0,0 +1,51 @@ +import logging +import os +import unittest +import json +import copy + +gSAMPLE_CONFIG = None +LOG = logging.getLogger(__name__) + +from avi.migrationtools.nsxt_converter.profile_converter import ProfileConfigConv +import avi.migrationtools.nsxt_converter.converter_constants as conv_const +import avi.migrationtools.nsxt_converter.conversion_util as conv_util + + +def setUpModule(): + LOG.setLevel(logging.DEBUG) + formatter = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + dir_path = os.path.abspath(os.path.dirname(__file__)) + dir_path = dir_path.rsplit(os.path.sep, 1)[0] + dir_path = dir_path + os.path.sep + "output" + logging.basicConfig(filename=os.path.join(dir_path, 'converter.log'), + level=logging.DEBUG, format=formatter) + cfg_file = open('test_profile_converter.conf', 'r') + cfg = cfg_file.read() + global gSAMPLE_CONFIG + gSAMPLE_CONFIG = json.loads(cfg) + global nsxt_attributes + nsxt_attributes= conv_const.init() + global profile_converter + profile_converter=ProfileConfigConv(nsxt_attributes) + + +class Test(unittest.TestCase): + + def test_profile_conversion(self): + + avi_config = dict() + # profile_converter = ProfileConfigConv(nsxt_attributes) + profile_converter.convert(avi_config, gSAMPLE_CONFIG, '') + profile_config=gSAMPLE_CONFIG['LbAppProfiles'] + 
avi_app_profile_config=avi_config.get('ApplicationProfile',None) + avi_network_profile_confg=avi_config.get('NetworkProfile',None) + + assert avi_app_profile_config + assert avi_network_profile_confg + assert len(profile_config) == len(avi_app_profile_config) + len(avi_network_profile_confg) + + + + + diff --git a/python/avi/migrationtools/nsxt_converter/traffic_cutover.py b/python/avi/migrationtools/nsxt_converter/traffic_cutover.py new file mode 100755 index 0000000000..04ea749747 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/traffic_cutover.py @@ -0,0 +1,117 @@ +# !/usr/bin/env python3 +import logging +import os +import json +import argparse +from datetime import datetime +from avi.migrationtools.avi_converter import AviConverter +from avi.migrationtools.avi_migration_utils import get_count +from avi.migrationtools.nsxt_converter.nsxt_util import NSXUtil + +LOG = logging.getLogger(__name__) + + +class TrafficCutover(AviConverter): + def __init__(self, args): + ''' + + :param args: + ''' + self.nsxt_ip = args.nsxt_ip + self.nsxt_user = args.nsxt_user + self.nsxt_passord = args.nsxt_password + self.nsxt_port = args.nsxt_port + + self.controller_ip = args.alb_controller_ip + self.controller_version = args.alb_controller_version + self.user = args.alb_controller_user + self.password = args.alb_controller_password + self.cutover_vs = None + if args.cutover: + self.cutover_vs = \ + (set(args.cutover) if type(args.cutover) == list + else set(args.cutover.split(','))) + self.output_file_path = args.output_file_path if args.output_file_path \ + else 'output' + + output_dir = os.path.normpath(self.output_file_path) + output_path = output_dir + os.path.sep + self.nsxt_ip + os.path.sep + "output" + + # Load values from state file if not given on command line while executing script + if self.nsxt_ip: + output_path = output_dir + os.path.sep + self.nsxt_ip + os.path.sep + "output" + with open(output_path + os.path.sep + "state.json", 'r') as file: + data = 
json.load(file) + if not self.nsxt_user: + self.nsxt_user = data.get('nsxt_user') + if not self.nsxt_port: + self.nsxt_port = data.get('nsxt_port') + if not self.controller_ip: + self.controller_ip = data.get('alb_controller_ip') + if not self.controller_version: + self.controller_version = data.get('alb_controller_version') + if not self.user: + self.user = data.get('alb_controller_user') + if not self.password: + self.password = data.get('alb_controller_password') + + def initiate_cutover_vs(self): + + if not os.path.exists(self.output_file_path): + os.mkdir(self.output_file_path) + self.init_logger_path() + + cutover_msg = "Performing cutover for applications" + LOG.debug(cutover_msg) + print(cutover_msg) + nsx_util = NSXUtil(self.nsxt_user, self.nsxt_passord, self.nsxt_ip, self.nsxt_port \ + , self.controller_ip, self.user, self.password, self.controller_version) + nsx_util.cutover_vs(self.cutover_vs) + + print("Total Warning: ", get_count('warning')) + print("Total Errors: ", get_count('error')) + LOG.info("Total Warning: {}".format(get_count('warning'))) + LOG.info("Total Errors: {}".format(get_count('error'))) + + +if __name__ == "__main__": + HELP_STR = """ + Usage: + python nsxt_converter.py -n 192.168.100.101 -u admin -p password + """ + + parser = argparse.ArgumentParser( + formatter_class=argparse.RawTextHelpFormatter, description=HELP_STR) + + parser.add_argument('-c', '--alb_controller_ip', + help='controller ip for auto upload') + parser.add_argument('--alb_controller_version', + help='Target Avi controller version') + parser.add_argument('--alb_controller_user', + help='controller username') + parser.add_argument('--alb_controller_password', + help='controller password. 
Input ' + 'prompt will appear if no value provided', required=True) + # Added command line args to take skip type for ansible playbook + parser.add_argument('--cutover', + help='comma separated names of virtualservices for cutover.\n', + required=True) + parser.add_argument('-n', '--nsxt_ip', + help='Ip of NSXT', required=True) + parser.add_argument('-u', '--nsxt_user', + help='NSX-T User name') + parser.add_argument('-p', '--nsxt_password', + help='NSX-T Password', required=True) + parser.add_argument('-port', '--nsxt_port', default=443, + help='NSX-T Port') + parser.add_argument('-o', '--output_file_path', + help='Folder path for output files to be created in', + ) + + start = datetime.now() + args = parser.parse_args() + traffic_cutover = TrafficCutover(args) + traffic_cutover.initiate_cutover_vs() + end = datetime.now() + print("The time of execution of above program is :", + str(end - start)) diff --git a/python/avi/migrationtools/nsxt_converter/v_client.py b/python/avi/migrationtools/nsxt_converter/v_client.py index ac42d8f5ae..f6f528c10c 100755 --- a/python/avi/migrationtools/nsxt_converter/v_client.py +++ b/python/avi/migrationtools/nsxt_converter/v_client.py @@ -2,8 +2,6 @@ from avi.migrationtools.nsxt_converter import base_client -requests.packages.urllib3.disable_warnings() - class NSXVClient(base_client.NSXClient): def get_list_results(self, endpoint, params=None): diff --git a/python/avi/migrationtools/nsxt_converter/vs_converter.py b/python/avi/migrationtools/nsxt_converter/vs_converter.py new file mode 100644 index 0000000000..6c34272179 --- /dev/null +++ b/python/avi/migrationtools/nsxt_converter/vs_converter.py @@ -0,0 +1,1261 @@ +import copy +import logging + +from avi.migrationtools.avi_migration_utils import MigrationUtil +from avi.migrationtools.nsxt_converter.nsxt_util import is_vlan_configured_with_bgp, \ + is_segment_configured_with_subnet, get_vs_cloud_type, get_lb_skip_reason +from avi.migrationtools.nsxt_converter.conversion_util import 
NsxtConvUtil +from avi.migrationtools.avi_migration_utils import update_count +from avi.migrationtools.nsxt_converter.nsxt_util import get_vs_cloud_name, get_object_segments, get_certificate_data +from avi.migrationtools.nsxt_converter.policy_converter import PolicyConfigConverter +import avi.migrationtools.nsxt_converter.converter_constants as conv_const +import avi.migrationtools.nsxt_converter.converter_constants as final +import random + +from avi.migrationtools.nsxt_converter.pools_converter import skipped_pools_list, vs_pool_segment_list, \ + vs_sorry_pool_segment_list + +LOG = logging.getLogger(__name__) + +conv_utils = NsxtConvUtil() +common_avi_util = MigrationUtil() +vs_list_with_snat_deactivated = [] +vs_data_path_not_work = [] +pool_attached_with_poolgroup = [] +pool_attached_with_vs_poolref = [] +vs_with_no_cloud_configured = [] +vs_with_lb_skipped = [] +is_pool_group_used={} + +class VsConfigConv(object): + def __init__(self, nsxt_profile_attributes, object_merge_check, merge_object_mapping, sys_dict, + nsxt_ip, nsxt_password): + """ + + """ + self.nsxt_profile_attributes = nsxt_profile_attributes + self.supported_attr = nsxt_profile_attributes['VS_supported_attr'] + self.server_ssl_attr = nsxt_profile_attributes['VS_server_ssl_supported_attr'] + self.client_ssl_attr = nsxt_profile_attributes['VS_client_ssl_supported_attr'] + self.common_na_attr = nsxt_profile_attributes['Common_Na_List'] + self.VS_na_attr = nsxt_profile_attributes["VS_na_list"] + self.rule_match_na = nsxt_profile_attributes["HttpPolicySetRules_Skiped_List_MatchingCondition"] + self.rules_actions_na = nsxt_profile_attributes["HttpPolicySetRules_Skiped_List_Actions"] + self.VS_client_ssl_indirect_attr = nsxt_profile_attributes["VS_client_ssl_indirect_attr"] + self.VS_server_ssl_indirect_attr = nsxt_profile_attributes["VS_server_ssl_indirect_attr"] + self.vs_indirect_attr = nsxt_profile_attributes["VS_indirect_aatr"] + self.supported_attr_httppolicyset = 
nsxt_profile_attributes["HttpPolicySetRules_Supported_Attributes"] + self.object_merge_check = object_merge_check + self.merge_object_mapping = merge_object_mapping + self.sys_dict = sys_dict + self.certkey_count = 0 + self.pki_count = 0 + self.nsxt_ip = nsxt_ip + self.nsxt_password = nsxt_password + + def convert(self, alb_config, nsx_lb_config, prefix, tenant, vs_state, controller_version, traffic_enabled, + cloud_tenant, ssh_root_password, migration_input_config=None, vrf=None, segroup=None): + ''' + LBVirtualServer to Avi Config vs converter + ''' + converted_alb_ssl_certs = list() + converted_http_policy_sets = list() + converted_http_policy_na_list = list() + converted_http_policy_skipped = list() + indirect_client_ssl = [] + indirect_server_ssl = [] + alb_config['VirtualService'] = list() + alb_config['VsVip'] = list() + alb_config["HTTPPolicySet"] = list() + converted_objs = [] + progressbar_count = 0 + total_size = len(nsx_lb_config['LbVirtualServers']) + print("\nConverting Virtual Services ...") + LOG.info('[Virtual Service ] Converting Services...') + policy_converter = PolicyConfigConverter(self.nsxt_profile_attributes, self.object_merge_check, + self.merge_object_mapping, self.sys_dict) + + for lb_vs in nsx_lb_config['LbVirtualServers']: + try: + progressbar_count += 1 + LOG.info('[Virtual Service] Migration started for VS {}'.format(lb_vs['display_name'])) + # vs_name = lb_vs['name'] + cloud_name = get_vs_cloud_name(lb_vs["id"]) + cloud_type = get_vs_cloud_type(lb_vs["id"]) + if get_lb_skip_reason(lb_vs["id"]): + skip_reason = get_lb_skip_reason(lb_vs["id"]) + conv_utils.add_status_row('virtualservice', None, lb_vs["display_name"], + conv_const.STATUS_SKIPPED, skip_reason) + LOG.warning("VS {} not migrated. 
Reason: {}".format(lb_vs["display_name"], + skip_reason)) + vs_with_lb_skipped.append(lb_vs["display_name"]) + conv_utils.print_progress_bar(progressbar_count, total_size, '', + prefix='Progress', suffix='') + continue + elif cloud_name == 'Cloud Not Found' or not cloud_name: + conv_utils.add_status_row('virtualservice', None, lb_vs["display_name"], + conv_const.STATUS_SKIPPED, cloud_name) + LOG.warning("cloud is not configured for %s" % lb_vs["display_name"]) + vs_with_no_cloud_configured.append(lb_vs["display_name"]) + conv_utils.print_progress_bar(progressbar_count, total_size, '', + prefix='Progress', suffix='') + continue + tenant_name, name = conv_utils.get_tenant_ref(tenant) + if not tenant: + tenant = tenant_name + if not cloud_tenant: + cloud_tenant = "admin" + if not ssh_root_password: + ssh_root_password = self.nsxt_password + name = lb_vs.get('display_name') + if prefix: + name = prefix + '-' + name + vs_temp = list(filter(lambda vs: vs["name"] == name, alb_config['VirtualService'])) + if vs_temp: + name = name + "-" + lb_vs["id"] + enabled = lb_vs.get('enabled') + if enabled and vs_state: + enabled = (vs_state == 'enable') + alb_vs = dict( + name=name, + traffic_enabled=(enabled and traffic_enabled), + enabled=enabled, + cloud_ref=conv_utils.get_object_ref(cloud_name, 'cloud', cloud_tenant=cloud_tenant), + tenant_ref=conv_utils.get_object_ref(tenant, 'tenant') + ) + tier1_lr = '' + for ref in nsx_lb_config['LBServices']: + if lb_vs['lb_service_path'] == ref['path']: + tier1_lr = ref.get('connectivity_path', None) + + if lb_vs.get('ip_address'): + vip = dict( + name=name + '-vsvip', + tier1_lr=tier1_lr, + cloud_ref=conv_utils.get_object_ref(cloud_name, 'cloud', cloud_tenant=cloud_tenant), + tenant_ref=conv_utils.get_object_ref(tenant, 'tenant'), + vip=[ + dict( + vip_id="1", + ip_address=dict( + addr=lb_vs.get('ip_address'), + type='V4' + ) + ) + ] + ) + + if cloud_type == "Vlan": + vip_segment = get_object_segments(lb_vs["id"], lb_vs['ip_address']) + 
if vip_segment: + self.add_placement_network_to_vip(vip['vip'], vip_segment, tenant, cloud_name) + else: + conv_utils.add_status_row('virtualservice', None, lb_vs["display_name"], + conv_const.STATUS_SKIPPED) + LOG.warning("vip segment is not found for %s" % lb_vs["display_name"]) + continue + alb_config['VsVip'].append(vip) + vsvip_ref = conv_utils.get_object_ref( + name + '-vsvip', 'vsvip', tenant=tenant, cloud_name=cloud_name) + alb_vs['vsvip_ref'] = vsvip_ref + alb_vs['services'] = [ + dict( + port=int(lb_vs.get('ports')[0]), + enable_ssl=False + )] + skipped = [val for val in lb_vs.keys() + if val not in self.supported_attr] + na_list = [val for val in lb_vs.keys() + if val in self.common_na_attr or val in self.VS_na_attr] + if segroup: + segroup_ref = conv_utils.get_object_ref( + segroup, 'serviceenginegroup', tenant, + cloud_name=cloud_name) + alb_vs['se_group_ref'] = segroup_ref + client_pki = False + server_pki = False + + if lb_vs.get("client_ssl_profile_binding"): + if lb_vs["client_ssl_profile_binding"].get("client_auth_ca_paths"): + pki_client_profile = dict() + error = False + ca = self.get_ca_cert(lb_vs["client_ssl_profile_binding"].get("client_auth_ca_paths"), + self.nsxt_ip, ssh_root_password) + if ca: + pki_client_profile["ca_certs"] = [{'certificate': ca}] + else: + error = True + if lb_vs["client_ssl_profile_binding"].get("client_auth_crl_paths"): + crl = self.get_crl_cert(lb_vs["client_ssl_profile_binding"].get("client_auth_crl_paths")) + if crl: + pki_client_profile["crls"] = [{'body': crl}] + else: + error = True + else: + pki_client_profile['crl_check'] = False + if not error: + pki_client_profile["name"] = name + "-client-pki" + pki_client_profile["tenant_ref"] = conv_utils.get_object_ref(tenant, "tenant") + client_pki = True + if self.object_merge_check: + conv_utils.update_skip_duplicates(pki_client_profile, + alb_config['PKIProfile'], 'pki_profile', + converted_objs, pki_client_profile["name"], None, + self.merge_object_mapping, None, 
prefix, + self.sys_dict['PKIProfile']) + self.pki_count += 1 + else: + converted_objs.append({'pki_profile': pki_client_profile}) + alb_config['PKIProfile'].append(pki_client_profile) + indirect = [] + u_ignore = [] + ignore_for_defaults = {} + conv_status = conv_utils.get_conv_status( + [], indirect, ignore_for_defaults, [], + u_ignore, []) + conv_utils.add_conv_status('pki_profile', None, pki_client_profile["name"], conv_status, + [{"pki_profile": pki_client_profile}]) + if lb_vs.get("server_ssl_profile_binding"): + if lb_vs["server_ssl_profile_binding"].get("server_auth_ca_paths"): + pki_server_profile = dict() + error = False + ca = self.get_ca_cert(lb_vs["server_ssl_profile_binding"].get("server_auth_ca_paths"), + self.nsxt_ip, ssh_root_password) + if ca: + pki_server_profile["ca_certs"] = [{'certificate': ca}] + else: + error = True + if lb_vs["server_ssl_profile_binding"].get("server_auth_crl_paths"): + crl = self.get_crl_cert(lb_vs["server_ssl_profile_binding"].get("server_auth_crl_paths")) + if crl: + pki_server_profile["crls"] = [{'body': crl}] + else: + error = True + else: + pki_server_profile['crl_check'] = False + if not error: + pki_server_profile["name"] = name + "-server-pki" + pki_server_profile["tenant_ref"] = conv_utils.get_object_ref(tenant, "tenant") + server_pki = True + if self.object_merge_check: + conv_utils.update_skip_duplicates(pki_server_profile, + alb_config['PKIProfile'], 'pki_profile', + converted_objs, pki_server_profile["name"], None, + self.merge_object_mapping, None, prefix, + self.sys_dict['PKIProfile']) + self.pki_count += 1 + else: + converted_objs.append({'pki_profile': pki_server_profile}) + alb_config['PKIProfile'].append(pki_server_profile) + indirect = [] + u_ignore = [] + ignore_for_defaults = {} + conv_status = conv_utils.get_conv_status( + [], indirect, ignore_for_defaults, [], + u_ignore, []) + conv_utils.add_conv_status('pki_profile', None, pki_server_profile["name"], conv_status, + [{"pki_profile": 
pki_server_profile}]) + + if lb_vs.get('application_profile_path'): + profile_path = lb_vs.get('application_profile_path') + profile_id = profile_path.split('/')[-1] + profile_config = list(filter(lambda pr: pr["id"] == profile_id, nsx_lb_config["LbAppProfiles"])) + profile_name = profile_config[0]["display_name"] + if prefix: + profile_name = prefix + "-" + profile_name + profile_type = "network" + merge_profile_name = profile_name + if self.object_merge_check: + merge_profile_name = self.merge_object_mapping["app_profile"].get(profile_name) + for profile in alb_config["ApplicationProfile"]: + if profile["name"] == profile_name or profile["name"] == merge_profile_name: + profile_type = profile["type"] + + app_profile_ref = self.get_vs_app_profile_ref(alb_config['ApplicationProfile'], + profile_name, self.object_merge_check, + self.merge_object_mapping, profile_type, tenant) + if app_profile_ref.__contains__("networkprofile"): + alb_vs['network_profile_ref'] = app_profile_ref + alb_vs['application_profile_ref'] = conv_utils.get_object_ref("System-L4-Application", + 'applicationprofile', + tenant="admin") + else: + alb_vs['application_profile_ref'] = app_profile_ref + alb_vs['network_profile_ref'] = conv_utils.get_object_ref("System-TCP-Proxy", 'networkprofile', + tenant="admin") + + if client_pki: + pki_client_profile_name = pki_client_profile["name"] + self.update_app_with_pki(merge_profile_name, alb_config["ApplicationProfile"], + pki_client_profile_name, tenant) + if lb_vs.get('max_concurrent_connections'): + alb_vs['performance_limits'] = dict( + max_concurrent_connections=lb_vs.get('max_concurrent_connections') + ) + if lb_vs.get('client_ssl_profile_binding'): + client_ssl = lb_vs.get('client_ssl_profile_binding') + ssl_key_cert_refs = [] + if client_ssl.get('ssl_profile_path'): + ssl_ref_id = client_ssl['ssl_profile_path'].split('/')[-1] + client_ssl_config = list( + filter(lambda c_ssl: c_ssl["id"] == ssl_ref_id, nsx_lb_config["LbClientSslProfiles"])) + if 
client_ssl_config: + ssl_name = client_ssl_config[0]["display_name"] + if prefix: + ssl_name = prefix + '-' + ssl_name + if self.object_merge_check: + ssl_name = self.merge_object_mapping['ssl_profile'].get(ssl_name) + alb_vs['ssl_profile_ref'] = conv_utils.get_object_ref(ssl_name, 'sslprofile', tenant=tenant) + if client_ssl.get('default_certificate_path', None): + alb_vs['services'][0]["enable_ssl"] = True + cert_name = name + "-" + str(random.randint(0, 20)) + ca_cert_obj = self.update_ca_cert_obj(cert_name, alb_config, [], tenant, prefix, + ssl_type='client_ssl', ssl_data=client_ssl, + nsxt_ip=self.nsxt_ip, ssh_root_password=ssh_root_password) + + ssl_key_cert_refs.append( + "/api/sslkeyandcertificate/?tenant=%s&name=%s" % (tenant, ca_cert_obj.get("name"))) + converted_alb_ssl_certs.append(ca_cert_obj) + if client_ssl.get('sni_certificate_paths', None): + # TODO need to revisit to fix some issues + alb_vs['services'][0]["enable_ssl"] = True + sni_cert_list = client_ssl.get('sni_certificate_paths', None) + for cert in sni_cert_list: + cert_name = name + "-" + str(random.randint(0, 20)) + ca_cert_obj = self.update_ca_cert_obj(cert_name, alb_config, [], tenant, prefix, + ssl_type='client_ssl', ssl_data=client_ssl, + nsxt_ip=self.nsxt_ip, + ssh_root_password=ssh_root_password) + + ssl_key_cert_refs.append( + "/api/sslkeyandcertificate/?tenant=%s&name=%s" % (tenant, ca_cert_obj.get("name"))) + converted_alb_ssl_certs.append(ca_cert_obj) + alb_vs["vh_type"] = "VS_TYPE_VH_SNI" + alb_vs["type"] = "VS_TYPE_VH_PARENT" + if client_ssl.get("client_auth"): + for profile in alb_config["ApplicationProfile"]: + if merge_profile_name == profile["name"]: + if client_ssl["client_auth"] == 'IGNORE': + profile["ssl_client_certificate_mode"] = 'SSL_CLIENT_CERTIFICATE_NONE' + if ssl_key_cert_refs: + alb_vs["ssl_key_and_certificate_refs"] = list(set(ssl_key_cert_refs)) + skipped_client_ssl = [val for val in client_ssl.keys() + if val not in self.client_ssl_attr] + indirect_client_attr 
= self.VS_client_ssl_indirect_attr + + indirect_client_ssl = [val for val in skipped_client_ssl if + val in indirect_client_attr] + skipped_client_ssl = [attr for attr in skipped_client_ssl if attr not in indirect_client_ssl] + if skipped_client_ssl: + skipped.append({"client_ssl ": skipped_client_ssl}) + + lb_pl_config = nsx_lb_config['LbPools'] + sry_pool_present = False + sorry_pool_ref = None + if lb_vs.get("sorry_pool_path"): + pool_ref = lb_vs.get("sorry_pool_path") + sry_pl_id = lb_vs.get("sorry_pool_path").split("/")[-1] + sry_pl_config = list(filter(lambda pr: pr["id"] == sry_pl_id, nsx_lb_config["LbPools"])) + sry_pl = sry_pl_config[0]["display_name"] + sorry_pool_name = sry_pl + if lb_vs["id"] in vs_sorry_pool_segment_list.keys(): + pool_segment = vs_sorry_pool_segment_list[lb_vs["id"]].get("pool_segment") + pl_name = sry_pl + is_sry_pool_group = False + if pool_ref: + p_tenant, pool_ref = conv_utils.get_tenant_ref(pool_ref) + if tenant: + p_tenant = tenant + pool_ref = pl_name + persist_ref = self.get_persist_ref(lb_vs) + avi_persistence = alb_config['ApplicationPersistenceProfile'] + persist_type = None + if persist_ref: + # Called tenant ref to get object name + persist_ref = conv_utils.get_tenant_ref(persist_ref)[1] + if prefix: + persist_ref = '{}-{}'.format(prefix, persist_ref) + persist_profile_objs = ( + [ob for ob in avi_persistence if ob['name'] == + self.merge_object_mapping['app_per_profile'].get( + persist_ref)] or + [obj for obj in avi_persistence if + (obj["name"] == persist_ref or persist_ref in obj.get( + "dup_of", []))]) + persist_type = (persist_profile_objs[0]['persistence_type'] if + persist_profile_objs else None) + # Pool cloned if controller version < 17.1.6 or VS has non http + # cookie persistence or app profile type is different and poolgroup + # cloned + pool_ref, is_sry_pool_group = conv_utils.clone_pool_if_shared( + pool_ref, alb_config, name, tenant, p_tenant, persist_type, + controller_version, 
alb_vs['application_profile_ref'],is_pool_group_used, + cloud_name=cloud_name, prefix=prefix) + sry_pool_present = is_sry_pool_group + sorry_pool_ref = pool_ref + is_pool_group_used[pool_ref] = alb_vs['name'] + if cloud_type == 'Vlan': + if is_sry_pool_group: + self.add_placement_network_to_pool_group(pool_ref, pool_segment, + alb_config, cloud_name, tenant) + + else: + self.add_placement_network_to_pool(alb_config['Pool'], + pool_ref, pool_segment, cloud_name, tenant) + + is_pg_created = False + main_pool_ref = None + pool_present = False + if lb_vs.get('pool_path'): + pool_ref = lb_vs.get('pool_path') + pl_id = pool_ref.split('/')[-1] + pl_config = list(filter(lambda pr: pr["id"] == pl_id, nsx_lb_config["LbPools"])) + pl_name = pl_config[0]["display_name"] + pool_name = pl_name + if lb_vs["id"] in vs_pool_segment_list.keys(): + pool_segment = vs_pool_segment_list[lb_vs["id"]].get("pool_segment") + vs_app_name = profile_name + if pl_config[0].get("snat_translation"): + if pl_config[0]["snat_translation"].get("type") == "LBSnatDisabled": + vs_app_name = self.update_app_with_snat(profile_name, profile_type, + alb_config["ApplicationProfile"], + self.object_merge_check, + self.merge_object_mapping) + if vs_app_name != profile_name: + alb_vs['application_profile_ref'] = conv_utils.get_object_ref \ + (vs_app_name, 'applicationprofile', tenant=tenant) + + vs_list_with_snat_deactivated.append(alb_vs["name"]) + + if pl_config[0]["snat_translation"].get("type") == "LBSnatIpPool": + alb_vs["snat_ip"] = [] + snat_ip_pool = pl_config[0]["snat_translation"]["ip_addresses"] + for ip_pool in snat_ip_pool: + snat_ip = dict( + addr=ip_pool["ip_address"], + type="V4" + ) + alb_vs["snat_ip"].append(snat_ip) + + is_pool_group = False + if pool_ref: + p_tenant, pool_ref = conv_utils.get_tenant_ref(pool_ref) + if tenant: + p_tenant = tenant + pool_ref = pl_name + persist_ref = self.get_persist_ref(lb_vs) + avi_persistence = alb_config['ApplicationPersistenceProfile'] + persist_type = 
None + if persist_ref: + # Called tenant ref to get object name + persist_ref = conv_utils.get_tenant_ref(persist_ref)[1] + if prefix: + persist_ref = '{}-{}'.format(prefix, persist_ref) + persist_profile_objs = ( + [ob for ob in avi_persistence if ob['name'] == + self.merge_object_mapping['app_per_profile'].get( + persist_ref)] or + [obj for obj in avi_persistence if + (obj["name"] == persist_ref or persist_ref in obj.get( + "dup_of", []))]) + persist_type = (persist_profile_objs[0]['persistence_type'] if + persist_profile_objs else None) + # cookie persistence or app profile type is different and poolgroup + # cloned + pool_ref, is_pool_group = conv_utils.clone_pool_if_shared( + pool_ref, alb_config, name, tenant, p_tenant, persist_type, + controller_version, alb_vs['application_profile_ref'],is_pool_group_used, + cloud_name=cloud_name, prefix=prefix) + is_pg_created = is_pool_group + main_pool_ref = pool_ref + if is_pool_group: + is_pool_group_used[pool_ref] = alb_vs['name'] + else: + pool_present = True + if cloud_type == 'Vlan': + if is_pool_group: + self.add_placement_network_to_pool_group(pool_ref, pool_segment, + alb_config, cloud_name, tenant) + + else: + self.add_placement_network_to_pool(alb_config['Pool'], + pool_ref, pool_segment, cloud_name, tenant) + + if persist_ref: + if is_pool_group: + self.add_poolgroup_with_persistence(alb_config, nsx_lb_config, lb_vs, + pool_ref, prefix, cloud_name, tenant) + else: + self.add_pool_with_persistence(alb_config, nsx_lb_config, lb_vs, + pool_ref, prefix, cloud_name, tenant) + + if server_pki: + pki_server_profile_name = pki_server_profile["name"] + if is_pool_group: + self.add_pki_to_pool_group(alb_config, pool_ref, pki_server_profile_name, + tenant) + else: + self.add_pki_to_pool(alb_config, pool_ref, pki_server_profile_name, + tenant) + if is_pool_group: + if lb_vs.get('default_pool_member_ports'): + self.add_port_to_pool_group(pool_ref, alb_config, lb_vs) + + else: + if lb_vs.get('default_pool_member_ports'): + 
self.add_port_to_pool(pool_ref, alb_config, lb_vs) + + persist_type = None + if sry_pool_present: + if is_pg_created: + self.add_sorry_pool_member_to_poolgroup(alb_config, main_pool_ref, sorry_pool_ref) + else: + self.attach_pool_to_sry_pool_group(alb_config, main_pool_ref, + sorry_pool_ref, tenant, cloud_name) + main_pool_ref = sorry_pool_ref + is_pg_created = True + + if is_pg_created: + self.add_teir_to_poolgroup(main_pool_ref, alb_config, tier1_lr) + self.update_poolgroup_with_cloud(main_pool_ref, alb_config, cloud_name, tenant, cloud_tenant) + alb_vs['pool_group_ref'] = conv_utils.get_object_ref( + main_pool_ref, 'poolgroup', tenant=tenant, cloud_name=cloud_name) + elif pool_present: + self.add_tier_to_pool(main_pool_ref, alb_config, tier1_lr) + self.update_pool_with_cloud(main_pool_ref, alb_config, cloud_name, tenant, cloud_tenant) + alb_vs['pool_ref'] = conv_utils.get_object_ref( + main_pool_ref, 'pool',tenant=tenant,cloud_name=cloud_name) + + if lb_vs.get('server_ssl_profile_binding'): + # if lb_vs["server_ssl_profile_binding"] + server_ssl = lb_vs.get('server_ssl_profile_binding') + if is_pg_created: + self.update_poolgroup_with_ssl(alb_config, nsx_lb_config, lb_vs, main_pool_ref, + prefix, tenant, converted_alb_ssl_certs,ssh_root_password) + else: + self.add_ssl_to_pool(alb_config, nsx_lb_config, lb_vs, main_pool_ref, + prefix, tenant, converted_alb_ssl_certs,ssh_root_password) + + skipped_server_ssl = [val for val in server_ssl.keys() + if val not in self.server_ssl_attr] + indirect_server_attr = self.VS_server_ssl_indirect_attr + + indirect_server_ssl = [val for val in skipped_server_ssl if + val in indirect_server_attr] + skipped_server_ssl = [attr for attr in skipped_server_ssl if + attr not in indirect_server_ssl] + if skipped_server_ssl: + skipped.append({"server_ssl ": skipped_server_ssl}) + + if lb_vs.get('rules'): + + policy, skipped_rules = policy_converter.convert(lb_vs, alb_config, cloud_name, prefix, tenant) + 
converted_http_policy_sets.append(skipped_rules) + if policy: + updated_http_policy_ref = conv_utils.get_object_ref( + policy['name'], conv_const.OBJECT_TYPE_HTTP_POLICY_SET, + tenant) + http_policies = { + 'index': 11, + 'http_policy_set_ref': updated_http_policy_ref + } + alb_vs['http_policies'] = [] + alb_vs['http_policies'].append(http_policies) + alb_config['HTTPPolicySet'].append(policy) + + # If vlan VS then check if VLAN network is configured as a BGP peer, + # if yes, then advertise bgp otherwise don't advertise + is_vlan_configured, vlan_segment, network_type, return_mesg = is_segment_configured_with_subnet \ + (lb_vs["id"], cloud_name) + if network_type == "Vlan": + if is_vlan_configured: + LOG.info("%s is configured with subnet" % lb_vs["display_name"]) + is_bgp_configured = is_vlan_configured_with_bgp \ + (cloud_name=cloud_name, tenant=tenant, vlan_segment=vlan_segment) + if is_bgp_configured: + if migration_input_config and migration_input_config.get('bgp_peer_configured_for_vlan'): + alb_vs['enable_rhi'] = True + LOG.info("ALB Plugin : vlan_configured_with_bgp : {}".format(vlan_segment)) + else: + LOG.warning("%s vlan is not configured with bgp" % lb_vs["display_name"]) + else: + LOG.warning("%s data path won't work as %s" % (lb_vs["display_name"], return_mesg)) + vs_data_path_not_work.append(name) + + indirect = self.vs_indirect_attr + u_ignore = [] + ignore_for_defaults = {} + conv_status = conv_utils.get_conv_status( + skipped, indirect, ignore_for_defaults, nsx_lb_config['LbVirtualServers'], + u_ignore, na_list) + if indirect_client_ssl: + conv_status["indirect"].append({"client_ssl": indirect_client_ssl}) + if indirect_server_ssl: + conv_status["indirect"].append({"server_ssl": indirect_server_ssl}) + na_list = [val for val in na_list if val not in self.common_na_attr] + conv_status["na_list"] = na_list + conv_utils.add_conv_status('virtualservice', None, alb_vs['name'], conv_status, + alb_vs) + alb_config['VirtualService'].append(alb_vs) + + 
msg = "Server conversion started..." + conv_utils.print_progress_bar(progressbar_count, total_size, msg, + prefix='Progress', suffix='') + if len(conv_status['skipped']) > 0: + LOG.debug('[VirtualService] Skipped Attribute {}:{}'.format(lb_vs['display_name'], + conv_status['skipped'])) + + LOG.info('[VirtualService] Migration completed for HM {}'.format(lb_vs['display_name'])) + except Exception as e: + LOG.error("[VirtualService] Failed to convert VirtualService: {}".format(e)) + update_count('error') + LOG.error("[VirtualServer] Failed to convert Monitor: %s" % lb_vs['display_name'], + exc_info=True) + conv_utils.add_status_row('virtualservice', None, lb_vs['display_name'], + conv_const.STATUS_ERROR) + for cert in converted_alb_ssl_certs: + indirect = [] + u_ignore = [] + ignore_for_defaults = {} + conv_status = conv_utils.get_conv_status( + [], indirect, ignore_for_defaults, [], + u_ignore, []) + conv_utils.add_conv_status('ssl_key_and_certificate', None, cert['name'], conv_status, + [{"ssl_cert_key": cert}]) + if self.object_merge_check: + self.update_ssl_key_refernce(alb_config) + self.update_pki_refernce(alb_config) + + + def update_pool_with_ssl(self, alb_config, nsx_lb_config, lb_vs, pool_name, object_merge_check, + merge_object_mapping, + prefix, tenant, + converted_alb_ssl_certs,ssh_root_password): + for pool in alb_config['Pool']: + if pool.get('name') == pool_name: + server_ssl = lb_vs['server_ssl_profile_binding'] + if server_ssl.get('ssl_profile_path'): + ssl_ref_id = server_ssl['ssl_profile_path'].split('/')[-1] + ssl_config = list( + filter(lambda ssl: ssl["id"] == ssl_ref_id, nsx_lb_config["LbServerSslProfiles"])) + ssl_name = ssl_config[0]["display_name"] + if prefix: + ssl_name = prefix + '-' + ssl_name + if object_merge_check: + ssl_merge_name = merge_object_mapping['ssl_profile'].get(ssl_name) + if ssl_merge_name: + ssl_name = ssl_merge_name + pool['ssl_profile_ref'] = conv_utils.get_object_ref(ssl_name, "sslprofile", tenant=tenant) + if 
server_ssl.get('client_certificate_path', None): + ca_cert_obj = self.update_ca_cert_obj(pool_name, alb_config, [], tenant, prefix, + ssl_type='server_ssl', ssl_data=server_ssl, + nsxt_ip=self.nsxt_ip, ssh_root_password= ssh_root_password) + + pool[ + "ssl_key_and_certificate_ref"] = conv_utils.get_object_ref \ + (ca_cert_obj.get("name"), "sslkeyandcertificate", tenant=tenant) + + converted_alb_ssl_certs.append(ca_cert_obj) + + def update_pool_with_persistence(self, alb_pool_config, nsx_lb_config, lb_vs, pool_name, + prefix, tenant): + persis_id = lb_vs.get('lb_persistence_profile_path').split('/')[-1] + persis_config = list( + filter(lambda pp: pp["id"] == persis_id, nsx_lb_config["LbPersistenceProfiles"])) + if persis_config: + persis_name = persis_config[0]["display_name"] + if prefix: + persis_name = prefix + "-" + persis_name + if self.object_merge_check: + persis_name = self.merge_object_mapping['app_per_profile'].get(persis_name) + for pool in alb_pool_config: + if pool.get('name') == pool_name: + pool['application_persistence_profile_ref'] = conv_utils.get_object_ref \ + (persis_name, 'applicationpersistenceprofile', tenant=tenant) + + def get_vs_app_profile_ref(self, alb_profile_config, profile_name, object_merge_check, + merge_object_mapping, profile_type, tenant): + if object_merge_check: + app_profile_merge_name = merge_object_mapping['app_profile'].get(profile_name) + if app_profile_merge_name: + profile_name = app_profile_merge_name + return '/api/applicationprofile/?tenant=%s&name=%s' % (tenant, profile_name) + np_prodile_merge_name = merge_object_mapping['network_profile'].get(profile_name) + if np_prodile_merge_name: + profile_name = np_prodile_merge_name + return '/api/networkprofile/?tenant=%s&name=%s' % (tenant, profile_name) + if profile_type == "network": + return '/api/networkprofile/?tenant=%s&name=%s' % (tenant, profile_name) + return '/api/applicationprofile/?tenant=%s&name=%s' % (tenant, profile_name) + + def update_ca_cert_obj(self, 
    def update_ca_cert_obj(self, name, avi_config, converted_objs, tenant, prefix,
                           cert_type='SSL_CERTIFICATE_TYPE_CA', ca_cert=None,
                           ssl_type=None, ssl_data=None, nsxt_ip=None, ssh_root_password=None):
        """
        This method create the certs if certificate not present at location
        it create placeholder certificate.

        Returns an existing SSLKeyAndCertificate dict whose name contains *name*,
        or builds a new one from NSX-T certificate data (falling back to a
        self-signed placeholder) and records it in avi_config.
        :return: the ssl_cert_key dict, an existing cert dict, or None
        """
        # Reuse any previously converted cert of the same type whose name contains `name`.
        cert_name = [cert['name'] for cert in avi_config.get("SSLKeyAndCertificate", [])
                     if cert['name'].__contains__(name) and cert['type'] == cert_type]

        if cert_name:
            LOG.warning(
                'SSL ca cert is already exist')

            for cert in avi_config.get("SSLKeyAndCertificate", []):
                if cert['name'].__contains__(name) and cert['type'] == cert_type:
                    return cert
            # NOTE(review): unreachable in practice — the comprehension above already
            # proved a matching cert exists, so the loop always returns first.
            return None

        if not ca_cert:
            # Pick the NSX-T certificate path for the relevant side of the SSL binding.
            if ssl_type == 'client_ssl':
                certificate_ref = ssl_data.get("default_certificate_path", None)
            elif ssl_type == 'server_ssl':
                certificate_ref = ssl_data.get("client_certificate_path", None)
            # NOTE(review): if ssl_type is neither 'client_ssl' nor 'server_ssl',
            # certificate_ref is unbound here — assumes callers always pass one of
            # the two; confirm.
            if certificate_ref:
                certificate_ref = certificate_ref.split('/')[-1]

                key, ca_cert = get_certificate_data(certificate_ref, nsxt_ip, ssh_root_password)

                LOG.debug("Fetched data for certificate_ref {}".format(certificate_ref))
        if not ca_cert:
            # Fall back to a placeholder self-signed cert; the name is marked with
            # PLACE_HOLDER_STR so it can be recognised later.
            key, ca_cert = conv_utils.create_self_signed_cert()
            name = '%s-%s' % (name, final.PLACE_HOLDER_STR)
            LOG.warning('Create self certificate and key for : %s' % name)

        ssl_kc_obj = None

        if ca_cert:
            # NOTE(review): when ca_cert is passed in by the caller, `key` is never
            # assigned and the dict build below would raise — assumes current call
            # sites always pass ca_cert=None; verify.
            cert = {"certificate": ca_cert if type(ca_cert) == str else ca_cert.decode()}
            ssl_kc_obj = {
                'name': name,
                'tenant_ref': conv_utils.get_object_ref(tenant, 'tenant'),
                'key': key if type(key) == str else key.decode(),
                'certificate': cert,
                'type': 'SSL_CERTIFICATE_TYPE_VIRTUALSERVICE'
            }
            LOG.info('Added new ca certificate for %s' % name)
        if ssl_kc_obj and self.object_merge_check:
            if final.PLACE_HOLDER_STR not in ssl_kc_obj['name']:
                # Deduplicate against already-converted certs via the merge mapping.
                conv_utils.update_skip_duplicates(
                    ssl_kc_obj, avi_config['SSLKeyAndCertificate'],
                    'ssl_cert_key', converted_objs, name, None,
                    self.merge_object_mapping, None, prefix,
                    self.sys_dict['SSLKeyAndCertificate'])
            else:
                # Placeholder certs are never merged; record them directly.
                converted_objs.append({'ssl_cert_key': ssl_kc_obj})
                avi_config['SSLKeyAndCertificate'].append(ssl_kc_obj)
            self.certkey_count += 1
        else:
            # NOTE(review): this branch can append ssl_kc_obj even when it is None
            # (no ca_cert at all) — confirm whether that is intended.
            converted_objs.append({'ssl_cert_key': ssl_kc_obj})
            avi_config['SSLKeyAndCertificate'].append(ssl_kc_obj)
        return ssl_kc_obj
    def update_app_with_snat(self, profile_name, profile_type, alb_app_config, object_merge_check,
                             merge_object_mapping):
        """
        Return the application-profile name a SNAT-disabled VS should use.

        If the (possibly merged) profile has connection multiplexing disabled,
        a '-cmd' clone of the profile with preserve_client_ip=True is created
        (or an existing clone reused) and its name is returned; otherwise the
        original profile_name is returned unchanged.
        """
        app_prof_obj = [obj for obj in alb_app_config if obj['name'] == profile_name]
        if object_merge_check:
            # Under merging the profile may exist only under its merged name.
            app_profile_merge_name = merge_object_mapping['app_profile'].get(profile_name)
            app_prof_obj = [obj for obj in alb_app_config if obj['name'] == app_profile_merge_name]
        cme = True
        if profile_type == 'APPLICATION_PROFILE_TYPE_HTTP':
            # NOTE(review): raises IndexError if no profile matched above — assumes
            # an HTTP profile always exists in alb_app_config; confirm.
            cme = app_prof_obj[0]['http_profile'].get(
                'connection_multiplexing_enabled', False)
        app_name = profile_name
        if app_prof_obj and not cme:
            # Check if already cloned profile present
            app_prof_cmd = [obj for obj in alb_app_config if
                            obj['name'] == '%s-cmd' % profile_name]
            if app_prof_cmd:
                app_name = app_prof_cmd[0]['name']
                # NOTE(review): app_obj_ref is computed but never used or returned.
                app_obj_ref = conv_utils.get_object_ref(
                    app_name, 'applicationprofile',
                    tenant=conv_utils.get_name(app_prof_cmd[0]['tenant_ref']))
            else:
                # Clone the profile, disable multiplexing and preserve client IP so
                # the backend sees the real source address.
                app_prof_cmd = copy.deepcopy(app_prof_obj[0])
                app_prof_cmd['name'] = '%s-cmd' % app_prof_cmd['name']
                if 'http_profile' in app_prof_cmd:
                    app_prof_cmd['http_profile'][
                        'connection_multiplexing_enabled'] = False
                app_prof_cmd["preserve_client_ip"] = True
                alb_app_config.append(app_prof_cmd)
                app_name = app_prof_cmd['name']
                app_obj_ref = conv_utils.get_object_ref(
                    app_name, 'applicationprofile',
                    tenant=conv_utils.get_name(app_prof_cmd['tenant_ref']))
                # Record the clone in the conversion status report.
                conv_status = conv_utils.get_conv_status_by_obj_name(profile_name)
                conv_utils.add_conv_status('applicationprofile', "LBHttpProfile",
                                           app_prof_cmd['name'], conv_status,
                                           [{'application_http_profile': app_prof_cmd}])
        return app_name
get_ca_cert(self, ca_url, nsxt_ip, ssh_root_password): + if ca_url: + certificate_ref = ca_url[0].split('/')[-1] + + key, ca_cert = get_certificate_data(certificate_ref, nsxt_ip, ssh_root_password) + LOG.debug("Fetched ca cert data for certificate_ref".format(certificate_ref)) + if not ca_cert: + key, ca_cert = conv_utils.create_self_signed_cert() + + return ca_cert + + def get_crl_cert(self, crl_url): + crl_id = crl_url[0].split("/")[-1] + "-CRL-Certificates" + return crl_id + + def update_app_with_pki(self, profile_name, app_config, pki_name, tenant): + for profile in app_config: + if profile_name == profile["name"]: + profile["pki_profile_ref"] = '/api/pkiprofile/?tenant=%s&name=%s' % (tenant, pki_name) + + def update_pool_with_pki(self, pool_config, pool_name, pki_name, tenant): + for pool in pool_config: + if pool_name == pool["name"]: + pool["pki_profile_ref"] = '/api/pkiprofile/?tenant=%s&name=%s' % (tenant, pki_name) + + def update_pool_with_app_attr(self, profile_name, pool_name, alb_config): + profile = [profile for profile in alb_config["ApplicationProfile"] + if profile["name"] == profile_name] + if profile: + for pool in alb_config["Pool"]: + if pool["name"] == pool_name: + pool["server_timeout"] = profile[0].get("response_timeout", 0) + if profile[0].get("http_redirect_to"): + pool["fail_action"] = dict( + type="FAIL_ACTION_HTTP_REDIRECT", + redirect=dict( + host=profile[0]["http_redirect_to"] + ) + ) + break + + def update_pool_with_subnets(self, pool_name, pool_segment, alb_pl, old_pool_name, cloud_name, cloud_type, tenant): + + pool_present = False + for pool in alb_pl: + if pool["name"] == pool_name: + pool_obj = pool + pool_present = True + elif pool["name"] == old_pool_name: + pool_obj = copy.deepcopy(pool) + pool_obj["name"] = pool_name + else: + continue + if cloud_type == "Vlan": + pool_obj["placement_networks"] = list() + for sub in pool_segment: + ip_addreses = dict( + addr=sub["subnets"]["network_range"].split("/")[0], + type="V4" + ) + 
subnets = dict( + subnet={ + "ip_addr": ip_addreses, + "mask": sub["subnets"]["network_range"].split("/")[-1] + }, + network_ref=conv_utils.get_object_ref( + sub["seg_name"], 'network', tenant="admin", cloud_name=cloud_name) + ) + pool_obj["placement_networks"].append(subnets) + if not pool_present: + alb_pl.append(pool_obj) + conv_status = conv_utils.get_conv_status_by_obj_name(old_pool_name) + conv_utils.add_conv_status( + 'pool', None, pool_obj["name"], conv_status, + {'pools': [pool_obj]}) + break + + def create_pool_group(self, cloud_name, pg_obj, alb_config, lb_pool, vs_name, backup_pool=None, sorry_pool=None, + sry_pool_present=False, tenant="admin"): + + if backup_pool: + alb_pool_config = [pl for pl in alb_config["Pool"] if pl["name"] == backup_pool] + suffix = "backup_pool" + elif sorry_pool: + alb_pool_config = [pl for pl in alb_config["Pool"] if pl["name"] == sorry_pool] + suffix = "sorry_pool" + new_pool_config = [] + if alb_pool_config[0]["name"] in pool_attached_with_vs_poolref: + new_pool_config = copy.deepcopy(alb_pool_config[0]) + pool_bmd = [] + pool_bme = [] + for member in alb_pool_config[0].get("servers"): + if member.get("backup_member"): + pool_bme.append(member) + else: + pool_bmd.append(member) + if pool_bme and pool_bmd: + new_bme_pool = copy.deepcopy(alb_pool_config[0]) + new_bme_pool["name"] = new_bme_pool["name"] + "-" + suffix + new_bme_pool["servers"] = pool_bme + conv_status = conv_utils.get_conv_status_by_obj_name(alb_pool_config[0]["name"]) + conv_utils.add_conv_status( + 'pool', None, new_bme_pool['name'], conv_status, + {'pools': [new_bme_pool]}) + alb_config["Pool"].append(new_bme_pool) + pool_attached_with_poolgroup.append(new_bme_pool['name']) + if suffix == "backup_pool": + bmd_priority = "3" + bme_priority = "2" + else: + bmd_priority = "1" + bme_priority = "0" + pg_obj["members"].append(dict( + ratio="1", + priority_label=bme_priority, + pool_ref=conv_utils.get_object_ref( + new_bme_pool["name"], 'pool', tenant=tenant, 
cloud_name=cloud_name) + )) + if new_pool_config: + new_pool_config["name"] = new_pool_config["name"] + "-" + vs_name + new_pool_config["servers"] = pool_bmd + conv_utils.add_conv_status( + 'pool', None, new_pool_config['name'], conv_status, + {'pools': [new_pool_config]}) + alb_config["Pool"].append(new_pool_config) + pg_obj["members"].append(dict( + ratio="1", + priority_label=bmd_priority, + pool_ref=conv_utils.get_object_ref( + new_pool_config["name"], 'pool', tenant=tenant, cloud_name=cloud_name) + ) + ) + pool_attached_with_poolgroup.append(new_pool_config["name"]) + else: + alb_pool_config[0]["servers"] = pool_bmd + pg_obj["members"].append(dict( + ratio="1", + priority_label=bmd_priority, + pool_ref=conv_utils.get_object_ref( + alb_pool_config[0]["name"], 'pool', tenant=tenant, cloud_name=cloud_name) + ) + ) + pool_attached_with_poolgroup.append(alb_pool_config[0]["name"]) + + elif pool_bme: + if suffix == "backup_pool": + priority = "2" + else: + priority = "0" + if new_pool_config: + new_pool_config["name"] = new_pool_config["name"] + "-" + vs_name + new_pool_config["servers"] = pool_bme + conv_status = conv_utils.get_conv_status_by_obj_name(alb_pool_config[0]["name"]) + conv_utils.add_conv_status( + 'pool', None, new_pool_config['name'], conv_status, + {'pools': [new_pool_config]}) + alb_config["Pool"].append(new_pool_config) + pg_obj["members"].append(dict( + ratio="1", + priority_label=priority, + pool_ref=conv_utils.get_object_ref( + new_pool_config["name"], 'pool', tenant=tenant, cloud_name=cloud_name) + )) + pool_attached_with_poolgroup.append(new_pool_config["name"]) + else: + alb_pool_config[0]["servers"] = pool_bme + pg_obj["members"].append(dict( + ratio="1", + priority_label=priority, + pool_ref=conv_utils.get_object_ref( + alb_pool_config[0]["name"], 'pool', tenant=tenant, cloud_name=cloud_name) + )) + pool_attached_with_poolgroup.append(alb_pool_config[0]["name"]) + else: + if sry_pool_present: + if suffix == "backup_pool": + priority = "3" + 
else: + priority = "1" + if new_pool_config: + new_pool_config["name"] = new_pool_config["name"] + "-" + vs_name + new_pool_config["servers"] = pool_bmd + conv_status = conv_utils.get_conv_status_by_obj_name(alb_pool_config[0]["name"]) + conv_utils.add_conv_status( + 'pool', None, new_pool_config['name'], conv_status, + {'pools': [new_pool_config]}) + alb_config["Pool"].append(new_pool_config) + pg_obj["members"].append(dict( + ratio="1", + priority_label=priority, + pool_ref=conv_utils.get_object_ref( + new_pool_config["name"], 'pool', tenant=tenant, cloud_name=cloud_name) + )) + pool_attached_with_poolgroup.append(new_pool_config["name"]) + else: + alb_pool_config[0]["servers"] = pool_bmd + pg_obj["members"].append(dict( + ratio="1", + priority_label=priority, + pool_ref=conv_utils.get_object_ref( + alb_pool_config[0]["name"], 'pool', tenant=tenant, cloud_name=cloud_name) + )) + pool_attached_with_poolgroup.append(alb_pool_config[0]["name"]) + + def add_placement_network_to_vip(self, vip_config, vip_segments, tenant, cloud_name): + vip_config[0]['placement_networks'] = list() + for sub in vip_segments: + ip_addreses = dict( + addr=sub["subnets"]["network_range"].split("/")[0], + type="V4" + ) + subnets = dict( + subnet={ + "ip_addr": ip_addreses, + "mask": sub["subnets"]["network_range"].split("/")[-1] + }, + network_ref=conv_utils.get_object_ref( + sub["seg_name"], 'network', tenant="admin", cloud_name=cloud_name) + ) + vip_config[0]['placement_networks'].append(subnets) + + def get_persist_ref(self, nsx_vs): + """ + + :param nsx_vs: parsed nsx vs dict + :return: + """ + persist_ref = nsx_vs.get("lb_persistence_profile_path", None) + if persist_ref: + persist_ref = nsx_vs['lb_persistence_profile_path'].split('/')[-1] + return persist_ref + + def add_placement_network_to_pool_group(self, pool_group_ref, pool_segment, alb_config, cloud_name, tenant): + pool_group = [obj for obj in alb_config['PoolGroup'] + if obj['name'] == pool_group_ref] + if pool_group: + 
pool_group = pool_group[0] + for member in pool_group['members']: + pool_name = conv_utils.get_name(member['pool_ref']) + self.add_placement_network_to_pool( + alb_config['Pool'], pool_name, pool_segment, cloud_name, tenant) + + def add_placement_network_to_pool(self, avi_pool_list, pool_ref, pool_segment, cloud_name, + tenant='admin'): + """ + :param avi_pool_list: List of pools to search pool object + :param pool_ref: name of the pool + """ + for pool_obj in avi_pool_list: + if pool_ref == pool_obj["name"]: + pool_obj["placement_networks"] = list() + for sub in pool_segment: + ip_addreses = dict( + addr=sub["subnets"]["network_range"].split("/")[0], + type="V4" + ) + subnets = dict( + subnet={ + "ip_addr": ip_addreses, + "mask": sub["subnets"]["network_range"].split("/")[-1] + }, + network_ref=conv_utils.get_object_ref( + sub["seg_name"], 'network', tenant="admin", cloud_name=cloud_name) + ) + pool_obj["placement_networks"].append(subnets) + break + + def get_name(self, url): + + return url.split('/')[-1] + + def add_poolgroup_with_persistence(self, alb_config, nsx_lb_config, lb_vs, pool_group_ref, prefix, cloud_name, + tenant): + pool_group = [obj for obj in alb_config['PoolGroup'] + if obj['name'] == pool_group_ref] + if pool_group: + pool_group = pool_group[0] + for member in pool_group['members']: + pool_name = conv_utils.get_name(member['pool_ref']) + self.add_pool_with_persistence(alb_config, nsx_lb_config, lb_vs, pool_name, prefix, cloud_name,tenant) + + def add_pool_with_persistence(self, alb_config, nsx_lb_config, lb_vs, pool_name, prefix, cloud_name, tenant): + persis_id = lb_vs.get('lb_persistence_profile_path').split('/')[-1] + persis_config = list( + filter(lambda pp: pp["id"] == persis_id, nsx_lb_config["LbPersistenceProfiles"])) + if persis_config: + persis_name = persis_config[0]["display_name"] + if prefix: + persis_name = prefix + "-" + persis_name + if self.object_merge_check: + persis_name = 
self.merge_object_mapping['app_per_profile'].get(persis_name) + for pool in alb_config['Pool']: + if pool.get('name') == pool_name: + pool['application_persistence_profile_ref'] = conv_utils.get_object_ref \ + (persis_name, 'applicationpersistenceprofile', tenant=tenant) + break + + def add_pki_to_pool_group(self, alb_config, pool_group_name, pki_name, tenant): + pool_group = [obj for obj in alb_config['PoolGroup'] + if obj['name'] == pool_group_name] + if pool_group: + pool_group = pool_group[0] + for member in pool_group['members']: + pool_name = conv_utils.get_name(member['pool_ref']) + self.add_pki_to_pool(alb_config, pool_name, pki_name, tenant) + + def add_pki_to_pool(self, alb_config, pool_name, pki_name, tenant): + for pool in alb_config['Pool']: + if pool_name == pool["name"]: + pool["pki_profile_ref"] = '/api/pkiprofile/?tenant=%s&name=%s' % (tenant, pki_name) + break + + def add_port_to_pool_group(self, pg_ref, alb_config, lb_vs): + pool_group = [obj for obj in alb_config['PoolGroup'] + if obj['name'] == pg_ref] + if pool_group: + pool_group = pool_group[0] + for member in pool_group['members']: + pool_name = conv_utils.get_name(member['pool_ref']) + self.add_port_to_pool(pool_name, alb_config, lb_vs) + + def add_port_to_pool(self, pool_name, alb_config, lb_vs): + for pool in alb_config['Pool']: + if pool_name == pool["name"]: + pool['default_port'] = int(lb_vs.get('default_pool_member_ports')[0]) + break + + def add_teir_to_poolgroup(self, pg_ref, alb_config, tier1_lr): + pool_group = [obj for obj in alb_config['PoolGroup'] + if obj['name'] == pg_ref] + if pool_group: + pool_group = pool_group[0] + pool_group['tier1_lr'] = tier1_lr + for member in pool_group['members']: + pool_name = conv_utils.get_name(member['pool_ref']) + self.add_tier_to_pool(pool_name, alb_config, tier1_lr) + + def add_tier_to_pool(self, pool_name, alb_config, tier1_lr): + for pool in alb_config['Pool']: + if pool_name == pool["name"]: + pool['tier1_lr'] = tier1_lr + break + + def 
update_poolgroup_with_cloud(self, pg_ref, alb_config, cloud_name, tenant, cloud_tenant): + pool_group = [obj for obj in alb_config['PoolGroup'] + if obj['name'] == pg_ref] + if pool_group: + pool_group = pool_group[0] + pool_group['cloud_ref'] = '/api/cloud/?name=%s' % (cloud_name) + for member in pool_group['members']: + pool_name = conv_utils.get_name(member['pool_ref']) + member['pool_ref'] = conv_utils.get_object_ref(pool_name, 'pool', tenant=tenant, cloud_name=cloud_name) + self.update_pool_with_cloud(pool_name, alb_config, cloud_name, tenant, cloud_tenant) + + def update_pool_with_cloud(self, pool_name, alb_config, cloud_name, tenant, cloud_tenant): + for pool in alb_config['Pool']: + if pool_name == pool["name"]: + pool['cloud_ref'] = conv_utils.get_object_ref(cloud_name, 'cloud', cloud_tenant=cloud_tenant) + break + + def add_sorry_pool_member_to_poolgroup(self, alb_config, main_pg_ref, sry_pg_ref): + pool_group = [obj for obj in alb_config['PoolGroup'] + if obj['name'] == main_pg_ref] + pool_group = pool_group[0] + sry_pool_group = [obj for obj in alb_config['PoolGroup'] + if obj['name'] == sry_pg_ref] + sry_pool_group = sry_pool_group[0] + for sry_member in sry_pool_group['members']: + pool_group['members'].append(sry_member) + + def attach_pool_to_sry_pool_group(self, alb_config, main_pool_ref, sorry_pg_ref, tenant, cloud_name): + sry_pool_group = [obj for obj in alb_config['PoolGroup'] + if obj['name'] == sorry_pg_ref] + sry_pool_group = sry_pool_group[0] + pool_obj = [obj for obj in alb_config['Pool'] + if obj['name'] == main_pool_ref] + pool_obj = pool_obj[0] + + pool_member = dict( + ratio="1", + priority_label='3', + pool_ref=conv_utils.get_object_ref(main_pool_ref,'pool',tenant=tenant,cloud_name=cloud_name) + ) + sry_pool_group['members'].append(pool_member) + + + + def update_poolgroup_with_ssl(self, alb_config, nsx_lb_config, lb_vs, pg_name, + prefix, tenant, + converted_alb_ssl_certs,ssh_root_password): + + pool_group = [obj for obj in 
alb_config['PoolGroup'] + if obj['name'] == pg_name] + if pool_group: + pool_group = pool_group[0] + for member in pool_group['members']: + pool_name = conv_utils.get_name(member['pool_ref']) + self.add_ssl_to_pool(alb_config, nsx_lb_config, lb_vs, pool_name, + prefix, tenant, converted_alb_ssl_certs, ssh_root_password) + + def add_ssl_to_pool(self, alb_config, nsx_lb_config, lb_vs, pool_name, + prefix, tenant, converted_alb_ssl_certs,ssh_root_password): + for pool in alb_config['Pool']: + if pool.get('name') == pool_name: + server_ssl = lb_vs['server_ssl_profile_binding'] + if server_ssl.get('ssl_profile_path'): + ssl_ref_id = server_ssl['ssl_profile_path'].split('/')[-1] + ssl_config = list( + filter(lambda ssl: ssl["id"] == ssl_ref_id, nsx_lb_config["LbServerSslProfiles"])) + ssl_name = ssl_config[0]["display_name"] + if prefix: + ssl_name = prefix + '-' + ssl_name + if self.object_merge_check: + ssl_merge_name = self.merge_object_mapping['ssl_profile'].get(ssl_name) + if ssl_merge_name: + ssl_name = ssl_merge_name + pool['ssl_profile_ref'] = conv_utils.get_object_ref(ssl_name, "sslprofile", tenant=tenant) + if server_ssl.get('client_certificate_path', None): + ca_cert_obj = self.update_ca_cert_obj(pool_name, alb_config, [], tenant, prefix, + ssl_type='server_ssl', ssl_data=server_ssl, + nsxt_ip=self.nsxt_ip, ssh_root_password=ssh_root_password) + + pool[ + "ssl_key_and_certificate_ref"] = conv_utils.get_object_ref \ + (ca_cert_obj.get("name"), "sslkeyandcertificate", tenant=tenant) + + converted_alb_ssl_certs.append(ca_cert_obj) + + break + + def update_ssl_key_refernce(self, alb_config): + for pool in alb_config['Pool']: + if pool.get('ssl_key_and_certificate_ref'): + ssl_key_name = pool['ssl_key_and_certificate_ref'].split('name=')[-1] + ssl_key_ref = pool['ssl_key_and_certificate_ref'].split(ssl_key_name)[0] + ssl_key_name = self.merge_object_mapping['ssl_cert_key'].get(ssl_key_name) + pool['ssl_key_and_certificate_ref'] = ssl_key_ref + ssl_key_name + + for 
vs in alb_config['VirtualService']: + if vs.get('ssl_key_and_certificate_refs'): + vs_ssl_list = vs.get('ssl_key_and_certificate_refs') + for index, vs_ssl in enumerate(vs_ssl_list): + ssl_key_name = vs_ssl.split('name=')[-1] + ssl_key_ref = vs_ssl.split(ssl_key_name)[0] + ssl_key_name = self.merge_object_mapping['ssl_cert_key'].get(ssl_key_name) + vs_ssl_list[index] = ssl_key_ref + ssl_key_name + + def update_pki_refernce(self, alb_config): + for pool in alb_config['Pool']: + if pool.get('pki_profile_ref'): + pki_name = pool['pki_profile_ref'].split('name=')[-1] + pki_profile_ref = pool['pki_profile_ref'].split(pki_name)[0] + pki_name = self.merge_object_mapping['pki_profile'].get(pki_name) + pool['pki_profile_ref'] = pki_profile_ref + pki_name + + for app in alb_config['ApplicationProfile']: + if app.get('pki_profile_ref'): + pki_name = app['pki_profile_ref'].split('name=')[-1] + pki_profile_ref = app['pki_profile_ref'].split(pki_name)[0] + pki_name = self.merge_object_mapping['pki_profile'].get(pki_name) + app['pki_profile_ref'] = pki_profile_ref + pki_name + + for app in alb_config['NetworkProfile']: + if app.get('pki_profile_ref'): + pki_name = app['pki_profile_ref'].split('name=')[-1] + pki_profile_ref = app['pki_profile_ref'].split(pki_name)[0] + pki_name = self.merge_object_mapping['pki_profile'].get(pki_name) + app['pki_profile_ref'] = pki_profile_ref + pki_name diff --git a/python/avi/migrationtools/nsxt_discovery.py b/python/avi/migrationtools/nsxt_discovery.py new file mode 100644 index 0000000000..e4a938b652 --- /dev/null +++ b/python/avi/migrationtools/nsxt_discovery.py @@ -0,0 +1,442 @@ +import argparse +import json +import os +from datetime import datetime + +import xlsxwriter + +from avi.migrationtools.nsxt_converter import nsxt_client as nsx_client_util +import pprint + +from avi.sdk.avi_api import ApiSession + +pp = pprint.PrettyPrinter(indent=4) + + +def get_name_and_entity(url): + """ + Parses reference string to extract object type and + 
:param url: reference url to be parsed + :return: entity and object name + """ + parsed = url.split('/') + return parsed[-2], parsed[-1] + + +class NSXDiscoveryConv(): + nsx_api_client = None + + def __init__(self, nsx_un, nsx_pw, nsx_ip, nsx_port, c_pw, c_ip, c_un, c_vr): + self.nsx_api_client = nsx_client_util.create_nsx_policy_api_client( + nsx_un, nsx_pw, nsx_ip, nsx_port, auth_type=nsx_client_util.BASIC_AUTH) + self.session = ApiSession.get_session(c_ip, c_un, c_pw, tenant="admin", api_version=c_vr) + self.cloud = self.session.get("cloud/").json()["results"] + self.avi_vs_object = [] + self.avi_object_temp = {} + self.avi_pool_object = [] + self.enabled_pool_list = [] + self.lb_services = {} + + def get_cloud_type(self,avi_cloud_list, tz_id): + for cl in avi_cloud_list: + if cl.get("vtype") == "CLOUD_NSXT": + if cl.get("nsxt_configuration"): + if cl["nsxt_configuration"].get("transport_zone"): + tz = cl["nsxt_configuration"].get("transport_zone") + elif cl["nsxt_configuration"].get("management_network_config"): + tz = cl["nsxt_configuration"]["management_network_config"].get("transport_zone") + if tz == tz_id: + return cl.get("name") + + return "Cloud Not Found" + + def get_lb_services_details(self): + lb_services = self.nsx_api_client.infra.LbServices.list().to_dict().get('results', []) + for lb in lb_services: + tier = get_name_and_entity(lb["connectivity_path"])[-1] + ls_id = self.nsx_api_client.infra.tier_1s.LocaleServices.list(tier).results[0].id + interface_list = self.nsx_api_client.infra.tier_1s.locale_services.Interfaces.list(tier, ls_id).results + network = None + tz_id = None + if len(interface_list): + interface = interface_list[0].id + segment_id = get_name_and_entity(interface_list[0].segment_path)[-1] + segment = self.nsx_api_client.infra.Segments.get(segment_id) + tz_path = segment.transport_zone_path + tz_id = get_name_and_entity(tz_path)[-1] + cloud_name = self.get_cloud_type(self.cloud, tz_id) + if hasattr(segment, "vlan_ids"): + network = 
"Vlan" + else: + network = "Overlay" + else: + segment_list = self.nsx_api_client.infra.Segments.list().to_dict().get('results', []) + for seg in segment_list: + if seg.get("connectivity_path"): + gateway_name = get_name_and_entity(seg["connectivity_path"])[-1] + if gateway_name == tier: + tz_path = seg.get("transport_zone_path") + tz_id = get_name_and_entity(tz_path)[-1] + if seg.get("vlan_ids"): + network = "Vlan" + else: + network = "Overlay" + cloud_name = self.get_cloud_type(self.cloud, tz_id) + self.lb_services[lb["id"]] ={ + "Network": network, + "Cloud": cloud_name} + + def get_all_virtual_service(self): + """ + :return:list of virtual server objects + """ + virtual_services = self.nsx_api_client.infra.LbVirtualServers.list().to_dict().get('results', []) + return virtual_services + + def get_all_pool(self): + """ + returns the list of all pools + """ + pool = self.nsx_api_client.infra.LbPools.list().to_dict().get("results", []) + return pool + + def get_inventory(self): + self.get_lb_services_details() + # lb_vs_config = lb_vs_config["LbVirtualServers"] + virtual_service = self.get_all_virtual_service() + vs_stats = dict() + vs_with_rules = 0 + normal_vs = 0 + enab_vs = 0 + disab_vs = 0 + vs_stats["vs_count"] = len(virtual_service) + for vs in virtual_service: + vs_object = { + 'name': vs["display_name"], + 'id':vs["id"] + } + lb = get_name_and_entity(vs["lb_service_path"])[-1] + lb_details = self.lb_services.get(lb) + vs_object["Network_type"] = lb_details.get("Network") + vs_object["Cloud"] = lb_details.get("Cloud") + if vs["enabled"]: + vs_object["enabled"] = True + else: + vs_object["enabled"] = False + if vs.get('pool_path'): + pool = vs.get("pool_path") + pool_partition, pool_name = get_name_and_entity(pool) + if pool_name: + vs_object['pool'] = { + 'name': pool_name + } + self.enabled_pool_list.append(pool_name) + pool_obj = self.nsx_api_client.infra.LbPools.get(pool_name) + vs_object["pool"]["pool_id"] = pool_obj.id + if 
pool_obj.active_monitor_paths: + health_monitors = [ + get_name_and_entity(monitors)[1] + for monitors in pool_obj.active_monitor_paths + if monitors + ] + if health_monitors: + vs_object['pool']['health_monitors'] = \ + health_monitors + if pool_obj.members: + members = [ + { + 'name': pool_member.display_name, + 'address': pool_member.ip_address, + 'state': pool_member.admin_state + } + for pool_member in + pool_obj.members if pool_member + ] + if members: + vs_object['pool']['members'] = members + if vs_object["enabled"]: + vs_object['pool']["vs_enabled"] = vs_object["enabled"] + if vs.get("application_profile_path"): + profile_name = get_name_and_entity(vs["application_profile_path"])[1] + vs_object["profiles"] = profile_name + prof_obj_list = self.nsx_api_client.infra.LbAppProfiles.list().to_dict().get("results", []) + prof_obj = [prof for prof in prof_obj_list if prof["display_name"] == profile_name] + prof_type = prof_obj[0].get("resource_type") + if prof_type == "LBHttpProfile": + vs_type = "L7" + else: + vs_type = "L4" + vs_object["vs_type"] = vs_type + + if vs.get('rules'): + vs_object["rules"] = True + vs_with_rules += 1 + else: + vs_object["rules"] = False + normal_vs += 1 + if vs.get("enabled"): + enab_vs += 1 + else: + disab_vs += 1 + self.avi_object_temp[vs_object['name']] = vs_object + self.avi_vs_object.append(self.avi_object_temp) + vs_stats["complex_vs"] = vs_with_rules + vs_stats["normal_vs"] = normal_vs + vs_stats["enabled_vs"] = enab_vs + vs_stats["disabled_vs"] = disab_vs + print(vs_stats) + + def get_pool_details(self): + temp_pool_list = {} + pool_list = self.get_all_pool() + for pool in pool_list: + pool_obj = { + 'name': pool["display_name"], + 'id': pool["id"] + } + if pool["display_name"] in self.enabled_pool_list: + pool_obj["enabled"] = "connected" + else: + pool_obj["disabled"] = "disconnected" + temp_pool_list[pool_obj["name"]] = pool_obj + self.avi_pool_object.append(temp_pool_list) + + def write_output(self, path, nsx_ip): + # 
Print the Summary + workbook = xlsxwriter.Workbook( + path + os.sep + '{}_discovery_data.xlsx'.format(nsx_ip)) + + bold = workbook.add_format({'bold': True}) + deactivated = workbook.add_format({'font_color': 'red'}) + enabled = workbook.add_format({'font_color': 'green'}) + + large_heading = workbook.add_format({'bold': True, 'font_size': '20'}) + large_heading.set_align('center') + + worksheet_summary = workbook.add_worksheet('Summary') + worksheet_summary.merge_range(3, 4, 3, 7, 'Summary', large_heading) + worksheet_summary.set_row(3, 40) + worksheet_summary.set_column(5, 6, width=24) + + worksheet_summary.write(5, 5, "Ip Address", bold) + worksheet_summary.write(5, 6, str(nsx_ip)) + + worksheet_summary.write(6, 5, "Created on", bold) + worksheet_summary.write(6, 6, str(datetime.now()).split('.')[0]) + + total_vs = total_pools = total_enabled_vs = total_enabled_pools = total_complex_vs = 0 + total_disabled_pools = 0 + total_disabled_vs = 0 + total_vs_in_vlan = 0 + total_vs_in_overlay = 0 + total_l4_vs = 0 + total_l7_vs = 0 + + obj_data = self.avi_vs_object[0] + total_input = self.avi_vs_object + pool_obj_data = self.avi_pool_object[0] + pool_list = [] + vs_list = [] + + for vs in obj_data.keys(): + total_vs = total_vs + 1 + vsval = obj_data[vs] + if vsval.get("rules"): + total_complex_vs += 1 + if vsval.get("vs_type") == "L4": + total_l4_vs += 1 + else: + total_l7_vs += 1 + if vsval.get('pool'): + if vsval['pool'].get('members'): + pool_details = vsval['pool']['members'][0] + pool_list.append({ + 'name': vsval["pool"]['name'], + 'status': pool_details.get('state'), + 'vs_enabled': vsval["enabled"], + "id": vsval["pool"]["pool_id"] + }) + else: + pool_list.append({ + 'name': vsval["pool"]['name'], + 'status': vsval["enabled"], + 'vs_enabled': vsval["enabled"], + "id": vsval["pool"]["pool_id"] + }) + + + worksheet = workbook.add_worksheet('VS') + worksheet_pool = workbook.add_worksheet('Pools') + + # writing pools + row = 0 + col = 1 + worksheet_pool.write('A1', 
'Name', bold) + worksheet_pool.write('B1', "Enabled", bold) + worksheet_pool.write('C1', 'Status', bold) + for pool in pool_obj_data: + total_pools += 1 + pool_val = pool_obj_data[pool] + row = row + 1 + worksheet_pool.write(row, 0, pool_val['name'], bold) + if pool_val.get("enabled"): + worksheet_pool.write(row, 1, pool_val['enabled'], enabled) + elif pool_val.get("disabled"): + worksheet_pool.write(row, 1, pool_val['disabled'], deactivated) + pool_status = self.nsx_api_client.infra.realized_state.RealizedEntities. \ + list(intent_path="/infra/lb-pools/" + pool_val["id"]).to_dict()["results"][0]["runtime_status"] + if pool_status == "UP": + worksheet_pool.write(row, 2, pool_status, enabled) + else: + worksheet_pool.write(row, 2, pool_status, deactivated) + if pool_status == "UP" and pool_val.get("enabled"): + total_enabled_pools += 1 + else: + total_disabled_pools += 1 + col += 1 + + row, col = 0, 1 + + + # write vs details + worksheet.write('A1', 'Name', bold) + worksheet.write('B1', 'Enabled', bold) + worksheet.write('C1', "Type", bold) + worksheet.write('D1', "Complexity", bold) + worksheet.write('E1', 'Status', bold) + worksheet.write("F1", "Network", bold) + worksheet.write("G1", "Cloud", bold) + init = 0 + for vs in obj_data.keys(): + row += 1 + vsval = obj_data[vs] + vs_id = vsval["id"] + vs_name = vsval["name"] + worksheet.write(row, 0, vs_name, bold) + status = vsval["enabled"] + v = "N" + if status: + v = "Y" + worksheet.write(row, 1, v, enabled) + else: + worksheet.write(row, 1, v, deactivated) + worksheet.write(row, 2, vsval["vs_type"]) + complexity = "Basic" + if vsval.get("rules"): + complexity = "Advanced" + worksheet.write(row, 3, complexity) + vs_status = self.nsx_api_client.infra.realized_state.RealizedEntities. 
\ + list(intent_path="/infra/lb-virtual-servers/" + vs_id).to_dict()["results"][0]["runtime_status"] + if vs_status == "UP": + worksheet.write(row, 4, vs_status, enabled) + elif vs_status == "DISABLED": + worksheet.write(row, 4, "DEACTIVATED", deactivated) + else: + worksheet.write(row, 4, vs_status, deactivated) + if vs_status == "UP" and v == "Y": + total_enabled_vs += 1 + else: + total_disabled_vs += 1 + network = vsval.get("Network_type") + worksheet.write(row, 5, network) + if network == "Vlan": + total_vs_in_vlan += 1 + if network == "Overlay": + total_vs_in_overlay += 1 + cloud = vsval.get("Cloud") + worksheet.write(row, 6, cloud) + + # adding some more summary + worksheet_summary.write(9, 5, "Total vs", bold) + worksheet_summary.write(9, 6, str(total_vs)) + + worksheet_summary.write(10, 5, "Total vs UP", bold) + worksheet_summary.write(10, 6, str(total_enabled_vs)) + + worksheet_summary.write(11, 5, "Total pools", bold) + worksheet_summary.write(11, 6, str(total_pools)) + + worksheet_summary.write(12, 5, "Total pools UP", bold) + worksheet_summary.write(12, 6, str(total_enabled_pools)) + + worksheet_summary.write(13, 5, "Total complex vs", bold) + worksheet_summary.write(13, 6, str(total_complex_vs)) + + worksheet_summary.write(14, 5, "Total l4 vs", bold) + worksheet_summary.write(14, 6, str(total_l4_vs)) + + worksheet_summary.write(15, 5, "Total l7 vs", bold) + worksheet_summary.write(15, 6, str(total_l7_vs)) + + worksheet_summary.write(16, 5, "Total vs in VLAN", bold) + worksheet_summary.write(16, 6, str(total_vs_in_vlan)) + + worksheet_summary.write(17, 5, "Total vs in OVERLAY", bold) + worksheet_summary.write(17, 6, str(total_vs_in_overlay)) + + print("====================") + print(" Summary") + print("====================") + print("Total vs: ", total_vs) + print("Total vs UP: ", total_enabled_vs) + print("Total pools: ", total_pools) + print("Total pools UP: ", total_enabled_pools) + print("Total complex vs: ", total_complex_vs) + print("Total l4 vs: 
", total_l4_vs) + print("Total l7 vs: ", total_l7_vs) + print("Total vs in VLAN", total_vs_in_vlan) + print("Total vs in OVERLAY", total_vs_in_overlay) + + print("--------------------") + + workbook.close() + + +if __name__ == '__main__': + HELP_STR = ''' + Nsx instance Discovery + Example to get the inventory of NSX instance: + nsxt_discovery.py --nsx_ip xxx.xxx.xxx.xxx --nsx_user + admin --nsx_password xxxxx -o output/ + ''' + + parser = argparse.ArgumentParser( + formatter_class=argparse.RawTextHelpFormatter, + description=HELP_STR) + parser.add_argument('--nsx_user', help='nsx host username') + parser.add_argument('--nsx_ip', help='host ip of nsx instance') + parser.add_argument('--nsx_password', help='nsx host password') + parser.add_argument('-o', '--output_file_path', + help='Folder path for output files to be created in', + ) + parser.add_argument('-port', '--nsx_port', default=443, + help='NSX-T Port') + parser.add_argument('--password', + help='controller password') + parser.add_argument('-c', '--controller_ip', + help='controller ip ') + parser.add_argument('--user', + help='controller username ') + parser.add_argument("--controller_version" , help="version of controller", default='17.2.1') + + args = parser.parse_args() + if not args.nsx_ip: + print('Please provide nsx host') + exit(0) + if not args.nsx_user: + print('Please provide ssh username of nsx host') + exit(0) + if not args.nsx_password: + print('Please provide ssh password of nsx host') + exit(0) + + if not os.path.isdir(args.output_file_path): + print("Creating output directory ...") + os.makedirs(args.output_file_path) + + nsx_inventory = NSXDiscoveryConv(args.nsx_user, args.nsx_password, args.nsx_ip, args.nsx_port,\ + args.password, args.controller_ip, args.user, args.controller_version) + nsx_inventory.get_inventory() + nsx_inventory.get_pool_details() + nsx_inventory.write_output( + args.output_file_path, args.nsx_ip) diff --git a/python/avi/migrationtools/vs_filter.py 
b/python/avi/migrationtools/vs_filter.py index 44d4d86c8d..37d13a1321 100644 --- a/python/avi/migrationtools/vs_filter.py +++ b/python/avi/migrationtools/vs_filter.py @@ -19,7 +19,7 @@ warning_list = [] -def filter_for_vs(avi_config, vs_names): +def filter_for_vs(avi_config, vs_names, prefix=None): """ Filters vs and its references from full configuration :param avi_config: full configuration @@ -35,6 +35,9 @@ def filter_for_vs(avi_config, vs_names): virtual_services = vs_names for vs_name in virtual_services: + if prefix: + if not vs_name.startswith(prefix): + vs_name = prefix+"-"+vs_name vs = [vs for vs in avi_config['VirtualService'] if vs['name'] == vs_name] if not vs: @@ -67,7 +70,7 @@ def search_obj(entity, name, new_config, avi_config, depth): found_obj = [obj for obj in found_obj_list if obj['name'] == name] if found_obj: found_obj = found_obj[0] - print(' | '*depth + '|- %s(%s)' % (name, path_key_map[entity])) + print('| '*depth + '|- %s(%s)' % (name, path_key_map[entity])) elif entity in ['applicationprofile', 'networkprofile', 'healthmonitor', 'sslkeyandcertificate', 'sslprofile']: if str.startswith(str(name), 'System-'):