From 4213d2ec7aec2ae5279a022be39598a08af187b7 Mon Sep 17 00:00:00 2001 From: uttampawar Date: Fri, 29 Sep 2017 11:20:24 -0700 Subject: [PATCH] Revert " Adding support for Node-DC-SSR" --- Node-DC-EIS-client/README.md | 39 +-- Node-DC-EIS-client/config-ssr.json | 29 -- Node-DC-EIS-client/node_dc_eis_testurls.py | 83 +++--- .../process_time_based_output.py | 54 ++-- Node-DC-EIS-client/runspec.py | 260 ++++++------------ Node-DC-EIS-client/util.py | 11 +- 6 files changed, 162 insertions(+), 314 deletions(-) delete mode 100644 Node-DC-EIS-client/config-ssr.json diff --git a/Node-DC-EIS-client/README.md b/Node-DC-EIS-client/README.md index af3c0a2..0aad66a 100644 --- a/Node-DC-EIS-client/README.md +++ b/Node-DC-EIS-client/README.md @@ -1,4 +1,4 @@ -Copyright (c) 2016 Intel Corporation +Copyright (c) 2016 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -10,29 +10,30 @@ Copyright (c) 2016 Intel Corporation See the License for the specific language governing permissions and limitations under the License. -# This client directory contains, -- runspec.py- A toplevel runspec script; the main script to launch workload. - - 2 modes: time based run which is the default(1) and request based run (2) -- node_els-testurls.py - The benchmark driver file which sends actual requests using python's requests module. -- config.json - The input client configuration file. This file is used by runspec.py when run with -f option -- config-ssr.json - The input client configuration file for SSR workloads. This file is used by runspec.py when run with -f option -- process_time_based_output.py - Script to post process and summarize time based run data - - Creates temporary log files for every given interval which is post processed to create a summary. -- summary_file_sample.txt - Sample file of the summary file that is generated after a run. -- a results sub directory will be created after the run which contains all the result directories which are designated by the date and timestamp. - +This client directory contains, + - runspec.py- A toplevel runspec script; the main script to launch workload. + -2 modes: time based run which is the default(1) and request based run (2) + - node_els-testurls.py - The benchmark driver file which sends actual requests using python's requests module. + - config.json - The input client configuration file. This file is used by runspec.py when run with -f option + - process_time_based_output.py - Script to post process and summarize time based run data + - Creates temporary log files for every given interval which is post processed to create a summary. + - summary_file_sample.txt - Sample file of the summary file that is generated after a run. + - a results sub directory will be created after the run which contains all the result directories which are designated by the date and timestamp. + ## Client help: -- Run the main script "python runspec.py ". - - You will need to change the IP address and port of your server in config.json or config-ssr.json. - - Takes additional command line parameters. - - Default parameters in the script or can be read from a configuration file with -f/--config option (command line has the maximum priority). - - h gives the available options. - - Configurable option -g or --showgraph for graph generation(1 to generate output graphs or 0 for no graph) - - This script sends requests using python's requests module, generates log file and generates output graphs. 
+ - Run the main script “python runspec.py ”. + - You will need to change the IP address and port of your server in ‘runspec.py’ or in config.json. + - Takes additional command line parameters. + - Default parameters in the script or can be read from a configuration file with -f/--config option (command line has the maximum priority). + - h gives the available options. + - Configurable option -g or --showgraph for graph generation(1 to generate output graphs or 0 for no graph) + - the server ip address and port can be changed in config.json or directly in runspec.py + - This script sends requests using python's requests module, generates log file and generates output graphs. ## Client result files - A temporary log file (request-based-run) which contains details like request-number,write-time, read-time, response-time for every request. - A summary file (RTdata) which has client information, database information and a summary of the run (throughput, min max,average response time). - Two output graphs; one is the throughput graph and the second is the latency graph. + diff --git a/Node-DC-EIS-client/config-ssr.json b/Node-DC-EIS-client/config-ssr.json deleted file mode 100644 index c36bf93..0000000 --- a/Node-DC-EIS-client/config-ssr.json +++ /dev/null @@ -1,29 +0,0 @@ -{ -"client_params": - { - "MT_interval" : "100", - "request" : "10000", - "concurrency" : "200", - "run_mode" : "1", - "interval" : "10", - "rampup_rampdown": "25", - "tempfile": "RTdata", - "total_urls": "100", - "server_ipaddress": "localhost", - "server_port": "3000", - "root_endpoint": "/", - "no_db": true, - "get_endpoints": [ - "count/10000/15" - ], - "http_headers": [ - "User-Agent: Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36" - ], - "html": true - }, -"memory_params": - { - "memstat_interval": "1", - "memlogfile": "memlog_file" - } -} diff --git a/Node-DC-EIS-client/node_dc_eis_testurls.py b/Node-DC-EIS-client/node_dc_eis_testurls.py index f84c2cf..36c759b 100644 --- a/Node-DC-EIS-client/node_dc_eis_testurls.py +++ b/Node-DC-EIS-client/node_dc_eis_testurls.py @@ -19,7 +19,6 @@ import os import urlparse import re -import sys from functools import partial from eventlet.green import urllib2 from eventlet.green import socket @@ -64,6 +63,7 @@ #globals to implement file optimization start_time = 0 file_cnt = 0 +log="" ip_cache = {} def get_ip(hostname): @@ -83,7 +83,7 @@ def get_ip(hostname): ip_cache[hostname] = ip return ip -def get_url(url, url_type, request_num, phase, accept_header, http_headers): +def get_url(url, url_type, request_num, log, phase, accept_header): """ # Desc : Function to send get requests to the server. Type 1 is get requests # handles 3 types of GET requests based on ID, last_name and zipcode. 
@@ -108,13 +108,13 @@ def get_url(url, url_type, request_num, phase, accept_header, http_headers): req_path = '{}{}'.format(urlo.path, query) - req = '''GET {} HTTP/1.1\r -Host: {}\r -Accept: {}\r -Connection: close\r -{}\r -'''.format(req_path, urlo.netloc, - accept_header, ''.join(hh + '\r\n' for hh in http_headers)) + req = '''GET {} HTTP/1.1 +Host: {} +User-Agent: runspec/0.9 +Accept: {} +Connection: close + +'''.format(req_path, urlo.netloc, accept_header) try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) @@ -183,7 +183,7 @@ def post_function(url, post_data): print e return r -def post_url(url, url_type, request_num, phase): +def post_url(url,url_type,request_num,log,phase): """ # Desc : Function to send post requests to the server. Type 2 is post requests # Retries if the post request fails @@ -238,7 +238,7 @@ def post_url(url, url_type, request_num, phase): util.printlog(log,phase,url_type,request_num,url,start,end,response_time,total_length) return -def delete_url(url, url_type, request_num, phase): +def delete_url(url,url_type,request_num,log,phase): """ # Desc : Function to send delete requests to the server. Type 3 is delete requests # also captures the data record being deleted and saves it in a list(post/_datalist) @@ -272,7 +272,6 @@ def delete_url(url, url_type, request_num, phase): if 'employee' in response: post_datalist.insert(front_oflist,response) else: - print url print "Warning : Record not found" start = time.time() r = s.delete(url, headers=headers) @@ -336,24 +335,8 @@ def calculate_len_postdel(response): total_length = header_len + content_len return total_length -def open_log(log_dir): - try: - log = open(os.path.join(log_dir, "tempfile_" + str(file_cnt)), "w") - except IOError: - print "[%s] Could not open templog file for writing." % (util.get_current_time()) - sys.exit(1) - - return log - -def clean_up_log(queue): - ''' Used to clean up last log file ''' - global log - log.close() - queue.put(('PROCESS', log.name, file_cnt)) - log = None - def main_entry(url, request_num, url_type, log_dir, phase, interval, - run_mode, temp_log, accept_header, queue, http_headers): + run_mode, temp_log, accept_header): """ # Desc : main entry function to determine the type of url - GET,POST or DELETE # creates log file which captures per request data depending on the type of run. @@ -365,36 +348,36 @@ def main_entry(url, request_num, url_type, log_dir, phase, interval, """ global start_time global file_cnt - global init global log + global init if run_mode == 1: if not init: - start_time = time.time(); - log = open_log(log_dir) - init = True - - if time.time() - start_time > float(interval): - old_log = log - old_file_cnt = file_cnt - file_cnt += 1 + start_time=time.time(); + try: + log = open(os.path.join(log_dir,"tempfile_"+str(file_cnt)),"w") + init = True + except IOError: + print ("[%s] Could not open templog file for writing." % (util.get_current_time())) + sys.exit(1) + if(time.time()-start_time > float(interval)): + file_cnt +=1 start_time = time.time() - - log = open_log(log_dir) - - old_log.close() - queue.put(('PROCESS', old_log.name, old_file_cnt)) - + try: + log = open(os.path.join(log_dir,"tempfile_"+str(file_cnt)),"w") + except IOError: + print ("[%s] Could not open templog file for writing." % (util.get_current_time())) + sys.exit(1) else: try: - log = open(os.path.join(log_dir, temp_log), "a") - except IOError: - print "Error: %s File not found." 
% temp_log + log = open(os.path.join(log_dir,temp_log), "a") + except IOError as e: + print("Error: %s File not found." % temp_log) sys.exit(1) if url_type == 1: - get_url(url, url_type, request_num, phase, accept_header, http_headers) + get_url(url,url_type,request_num, log, phase, accept_header) if url_type == 2: - post_url(url, url_type, request_num, phase) + post_url(url,url_type,request_num,log,phase) if url_type == 3: - delete_url(url, url_type, request_num, phase) + delete_url(url,url_type,request_num,log,phase) diff --git a/Node-DC-EIS-client/process_time_based_output.py b/Node-DC-EIS-client/process_time_based_output.py index ed3a5bb..28210cd 100644 --- a/Node-DC-EIS-client/process_time_based_output.py +++ b/Node-DC-EIS-client/process_time_based_output.py @@ -32,8 +32,7 @@ percent99 = 0 percent95 = 0 -def process_tempfile(results_dir, interval, rampup_rampdown, request, - temp_log, instance_id, multiple_instance, queue): +def process_tempfile(results_dir,interval,rampup_rampdown,request,temp_log,instance_id,multiple_instance): """ # Desc : Function to process each intermediate files. # waits for interval and then calls process_data on the next templog file @@ -42,33 +41,33 @@ def process_tempfile(results_dir, interval, rampup_rampdown, request, # total time for the measurement, instance ID, flag to check multiple insatnce run # Output: None """ + number_of_files = int(math.ceil((2.0 * rampup_rampdown + request) / interval)) + file_cnt=0 try: - temp_log = open(os.path.join(results_dir, temp_log),"a") + temp_log = open(os.path.join(results_dir,temp_log),"a") except IOError: - print "[%s] Could not open templog file for writing." % util.get_current_time() - sys.exit(1) - + print ("[%s] Could not open templog file for writing." % (util.get_current_time())) temp_log.flush() - - while True: - event = queue.get() - if event[0] == 'EXIT': - break - _, tempfile, file_cnt = event - try: - temp_file = open(tempfile, "r") - except IOError: - print "[%s] Could not open %s file for reading." % (util.get_current_time(), tempfile) - sys.exit(1) - - with temp_file: - print "[%s] Processing Output File tempfile_[%d]." % (util.get_current_time(), file_cnt) - process_data(temp_file, temp_log, results_dir, file_cnt, interval) - - if file_cnt == 0 and multiple_instance: - util.create_indicator_file(os.path.dirname(os.path.dirname(results_dir)), "start_processing", instance_id, temp_log.name) - os.remove(tempfile) + time.sleep(60) + while file_cnt < number_of_files: + tempfile = os.path.join(results_dir,"tempfile_"+str(file_cnt)) + if(os.path.exists(tempfile)): + time.sleep(interval) + try: + temp_file = open(tempfile,"r") + print ("[%s] Processing Output File tempfile_[%d]." % (util.get_current_time(),file_cnt)) + process_data(temp_file,temp_log,results_dir,file_cnt,interval) + temp_file.close() + if(file_cnt == 0 and multiple_instance): + util.create_indicator_file(os.path.dirname(os.path.dirname(results_dir)),"start_processing", instance_id, temp_log.name) + os.remove(tempfile) + file_cnt +=1 + except IOError: + print ("[%s] Could not open templog file for reading." % (util.get_current_time())) + sys.exit(1) + else: + time.sleep(interval) print ("[%s] Closing main templog file." % (util.get_current_time())) temp_log.close() @@ -281,7 +280,7 @@ def post_process(temp_log,output_file,results_dir,interval,memlogfile,no_graph): print("\nThe memory usage graph is located at " +os.path.abspath(os.path.join(results_dir,'memory_usage.png'))) print ("[%s] Plotting graphs done." 
% (util.get_current_time())) -def process_time_based_output(results_dir,interval,rampup_rampdown,request,temp_log,output_file,memlogfile,instance_id,multiple_instance,no_graph, queue): +def process_time_based_output(results_dir,interval,rampup_rampdown,request,temp_log,output_file,memlogfile,instance_id,multiple_instance,no_graph): """ # Desc : Main function which handles all the Output Processing # This function is run by the Child Function @@ -291,8 +290,7 @@ def process_time_based_output(results_dir,interval,rampup_rampdown,request,temp_ # Output: None """ print ("[%s] Starting process for post processing." % (util.get_current_time())) - process_tempfile(results_dir, interval, rampup_rampdown, request, temp_log, - instance_id, multiple_instance, queue) + process_tempfile(results_dir,interval,rampup_rampdown,request,temp_log,instance_id,multiple_instance) if multiple_instance: util.create_indicator_file(os.path.dirname(os.path.dirname(results_dir)),"done_processing", instance_id, "") # #Post Processing Function diff --git a/Node-DC-EIS-client/runspec.py b/Node-DC-EIS-client/runspec.py index fd82fc2..873355a 100755 --- a/Node-DC-EIS-client/runspec.py +++ b/Node-DC-EIS-client/runspec.py @@ -16,6 +16,7 @@ import argparse import json import os +import subprocess import csv import sys import re @@ -41,7 +42,7 @@ from itertools import izip from threading import Thread,Timer from multiprocessing import Process -from multiprocessing import Queue +from multiprocessing import Pool import node_dc_eis_testurls from node_dc_eis_testurls import * from process_time_based_output import process_time_based_output @@ -59,13 +60,16 @@ concurrency = 200 rampup_rampdown = 10 total_urls = 100 +server_ipaddress = "localhost" +server_port = "9000" urllist= [] memstat_interval = 3 memlogfile = "memlog_file" no_graph = False #if set to True, output graphs will not be generated. -directory = datetime.now().strftime('%Y-%m-%d_%H-%M-%S') -output_file = str(directory + '_summary-report.txt') +i = datetime.now() +directory = i.strftime('%H-%M-%S_%m-%d-%Y') +output_file = str(directory + 'summary-report.txt') idmatches_index = 0 postid_index = 0 @@ -85,8 +89,6 @@ cpuCount = -1 run_mode = 1 -http_headers = [] - """ # Database parameters - defaults """ @@ -116,7 +118,7 @@ """ # Results directory - defaults """ -results_dir = "results_node_DC" +results_dir = "results_node_DC_EIS" """ # Server end points (urls) to test @@ -131,7 +133,17 @@ cpuinfo_endpoint = "getcpuinfo" checkdb_endpoint = "checkdb" -get_endpoints_urls = [] +""" +# Absolute server url prefix to build all the urls +""" +server_url = "http://" + server_ipaddress + ":" + server_port + server_root_endpoint +loaddb_url = server_url + loaddb_endpoint +id_url = server_url + id_endpoint +name_url = server_url + name_endpoint +zipcode_url = server_url + zipcode_endpoint +meminfo_url = server_url + meminfo_endpoint +cpuinfo_url = server_url + cpuinfo_endpoint +checkdb_url = server_url + checkdb_endpoint """ # Type of URL ratio - defaults @@ -159,7 +171,6 @@ def setup(): # Input : None # Output: None """ - global version global appName global cpuCount global directory @@ -180,14 +191,11 @@ def setup(): """ print "Exception -- Decoding of result from cpuinfo failed. 
Exiting" exit(1) - if result: if 'appName' in result: appName = result["appName"] if 'cpuCount' in result: cpuCount = result["cpuCount"] - if 'appVersion' in result: - version = result['appVersion'] print "Starting "+version + " in "+appName + " Mode" if(instance_id): directory = directory+"-"+appName+"-"+instance_id @@ -206,7 +214,7 @@ def setup(): os.makedirs(os.path.join(results_dir, directory)) return -def main(): +def arg_parse(): """ # Desc : Function to parse command line arguments # Input : None @@ -218,12 +226,6 @@ def main(): global multiple_instance global run_mode global no_graph - global no_db - global get_endpoints - global get_endpoints_urls - - get_endpoints = None - no_db = False print ("[%s] Parsing arguments." % (util.get_current_time())) parser = argparse.ArgumentParser() @@ -252,7 +254,7 @@ def main(): action="store", help='Number of rampup-rampdown requests to perform') parser.add_argument('-r','--run_mode',dest="run_mode", - action="store", choices=[1, 2], type=int, + action="store", help='1 for time based run. 2 for request based run. Default is 1') parser.add_argument('-int','--interval',dest="interval",action="store", help='Interval after which logging switches to next temp log file') @@ -280,24 +282,6 @@ def main(): parser.add_argument('-H', '--html', dest='html', default=False, action='store_true', help='Request HTML instead of JSON from server') - - parser.add_argument('-nd', '--no-db', action='store_true', - help='Skips all database loading and checking actions') - - parser.add_argument('-ge', '--get-endpoints', nargs='+', - help='Directly specific which endpoints to use during ' - 'GET operations (bypasses id, name, and zip ratios)') - - def header_check(value): - header = [ss.strip() for ss in value.split(':', 1)] - if len(header) != 2: - raise argparse.ArgumentTypeError('"%s" is not of the form ' - '"key: value"' % value) - return ': '.join(header) - - parser.add_argument('-hh', '--http-headers', nargs='+', type=header_check, - help='Extra HTTP headers to send to the server') - options = parser.parse_args() print('Input options config files: %s' % options.config) @@ -401,26 +385,6 @@ def header_check(value): if "html" in json_data["client_params"]: use_html = json_data["client_params"]["html"] - if "no_db" in json_data["client_params"]: - no_db = str(json_data["client_params"]["no_db"]).lower() in ( - 'y', 'yes', - 't', 'true', - '1', - ) - - if "get_endpoints" in json_data["client_params"]: - get_endpoints = json_data["client_params"]["get_endpoints"] - - if "http_headers" in json_data["client_params"]: - try: - headers = [header_check(hh) - for hh in json_data["client_params"]["http_headers"]] - except Exception as e: - print 'Error: http_headers: %s' % e - sys.exit(1) - - http_headers.extend(headers) - #database setup parameters if "db_params" in json_data: if "dbrecord_count" in json_data["db_params"]: @@ -513,15 +477,6 @@ def header_check(value): if options.html: use_html = options.html - if options.no_db: - no_db = options.no_db - - if options.get_endpoints: - get_endpoints = options.get_endpoints - - if options.http_headers: - http_headers.extend(options.http_headers) - server_url = "http://" + server_ipaddress + ":" + server_port + server_root_endpoint loaddb_url = server_url + loaddb_endpoint id_url = server_url + id_endpoint @@ -531,9 +486,6 @@ def header_check(value): cpuinfo_url = server_url + cpuinfo_endpoint checkdb_url = server_url + checkdb_endpoint - if get_endpoints: - get_endpoints_urls = [server_url + endpoint for endpoint in 
get_endpoints] - if int(concurrency) > int(request): print "Warning -- concurrency cannot be greater than number of requests. Setting concurrency == number of requests" concurrency = request @@ -556,26 +508,17 @@ def run_printenv(log): print >> log, "# requests :"+ str(request) +" (Default value = 10000)" print >> log, "# concurrency :"+ str(concurrency) +" (Default value = 200)" print >> log, "# URLs :" +str(total_urls) +" (Default value = 100)" - print >> log, "# Use HTML: %s (Default value = False)" % use_html - if http_headers: - print >> log, "# Extra HTTP headers:" - for hh in http_headers: - print >> log, "# ", hh - - if not get_endpoints_urls: - print >> log, "# get url ratio:%s (Default value = 80)" % get_ratio - print >> log, "# post url ratio:%s (Default value = 10)" % post_ratio - print >> log, "# delete url ratio:%s (Default value = 10)" % delete_ratio - print >> log, "# id_url:%s (Default value = 50)" % idurl_ratio - print >> log, "# name url ratio:%s (Default value = 25)" % nameurl_ratio - print >> log, "# zip url ratio:%s (Default value = 25)" % zipurl_ratio - - if not no_db: - print >> log, "====Database Parameters====" - print >> log, "# records :%s (Default value = 10000)" % dbrecord_count - print >> log, "# unique name:%s (Default value = 25)" % name_dbratio - print >> log, "# unique zips:%s (Default value = 25)" % zip_dbratio - + print >> log, "# get url ratio:" + str(get_ratio) +" (Default value = 80)" + print >> log,"# post url ratio:"+ str(post_ratio) +" (Default value = 10)" + print >> log, "# delete url ratio:"+ str(delete_ratio)+" (Default value = 10)" + print >> log, "# id_url:"+ str(idurl_ratio) +" (Default value = 50)" + print >> log, "# name url ratio:"+ str(nameurl_ratio) +" (Default value = 25)" + print >> log,"# zip url ratio:"+ str(zipurl_ratio)+" (Default value = 25)" + print >> log, "====Database Parameters====" + print >> log, "# records :"+ str(dbrecord_count) +" (Default value = 10000)" + print >> log, "# unique name:" + str(name_dbratio) +" (Default value = 25)" + print >> log, "# unique zips:" + str(zip_dbratio) +" (Default value = 25)" + return def get_data(): """ @@ -587,34 +530,10 @@ def get_data(): # Input : None # Output: None """ + global employee_idlist #Populate database - if not no_db: - run_loaddb() - - if get_endpoints_urls: - generate_urls_from_list() - else: - generate_urls_from_db() - - if multiple_instance: - util.create_indicator_file(rundir,"loaddb_done", instance_id,"") - util.check_startfile(rundir) - #Send requests - send_request() - -def generate_urls_from_list(): - urls_count = len(get_endpoints_urls) - for ii in xrange(int(total_urls)): - urls_idx = random.randint(0, urls_count - 1) - urllist.append({ - 'url': get_endpoints_urls[urls_idx], - 'method':'GET'}) - - print "[%s] Building list of Urls done." % util.get_current_time() - -def generate_urls_from_db(): - global employee_idlist + run_loaddb() print ("[%s] Build list of employee IDs." 
% (util.get_current_time())) try: @@ -672,7 +591,13 @@ def generate_urls_from_db(): zip_number = int(math.ceil((int(get_urlcount)*float(float(zipurl_ratio)/100)))) #start building the url list - builddburllist(employee_idlist, id_number, name_matches, name_number , zip_matches, zip_number, post_urlcount,delete_urlcount) + buildurllist(employee_idlist, id_number, name_matches, name_number , zip_matches, zip_number, post_urlcount,delete_urlcount) + if(multiple_instance): + util.create_indicator_file(rundir,"loaddb_done", instance_id,"") + util.check_startfile(rundir) + #Send requests + send_request(employee_idlist) + return def run_loaddb(): """ @@ -733,7 +658,7 @@ def check_db(): return checkdb_dict -def builddburllist(employee_idlist, id_number, name_matches, name_number, zip_matches, zip_number,post_urlcount, delete_urlcount): +def buildurllist(employee_idlist, id_number, name_matches, name_number , zip_matches, zip_number,post_urlcount,delete_urlcount): """ # Desc :Function build list of URLs with enough randomness for realistic # behavior @@ -924,7 +849,7 @@ def collect_meminfo(): heapTotlist.append(0) return -def send_request(): +def send_request(employee_idlist): """ # Desc : Main function initiates requests to server # Input : List of EmployeeId @@ -969,13 +894,12 @@ def send_request(): requestBasedRun(pool) mem_process.join() - if not no_db: - after_run = check_db() + after_run = check_db() print_summary() log.close() return -def execute_request(pool, queue=None): +def execute_request(pool): """ # Desc : Creates threadpool for concurrency, and sends concurrent requests # to server for the input #requests or based on time interval. @@ -1000,8 +924,8 @@ def execute_request(pool, queue=None): if(urllist[execute_request.url_index]['method']== 'GET'): url_type = 1 - tot_get = tot_get + 1 - if parsed.path == "/employees/id/": + tot_get = tot_get +1 + if not(parsed.path == "/employees/zipcode" or parsed.path == "/employees/name"): ids = getNextEmployeeId() url = url+ids if(urllist[execute_request.url_index]['method']== 'POST'): @@ -1023,9 +947,7 @@ def execute_request(pool, queue=None): interval, run_mode, temp_log, - 'text/html' if use_html else 'application/json', - queue, - http_headers + 'text/html' if use_html else 'application/json' ] if(int(concurrency) == 1): @@ -1070,14 +992,9 @@ def timebased_run(pool): url_index = 0 request_index = 0 # Initializing the Request Counter to 0 - queue = Queue() - #Spin Another Process to do processing of Data - post_processing = Process(target=process_time_based_output, - args=(log_dir, interval, rampup_rampdown, - MT_interval, temp_log, output_file, - memlogfile, instance_id, multiple_instance, - no_graph, queue)) + post_processing = Process(target=process_time_based_output,args=(log_dir,interval,rampup_rampdown,MT_interval,temp_log,output_file,memlogfile,instance_id, + multiple_instance,no_graph)) post_processing.start() print ("[%s] Starting time based run." % (util.get_current_time())) if ramp: @@ -1092,14 +1009,14 @@ def timebased_run(pool): print ("[%s] Started processing of requests with concurrency of [%d] for [%d] seconds" % (util.get_current_time(), int(concurrency), int(MT_interval))) if ramp: while(time.time()-start < int(rampup_rampdown)): - execute_request(pool, queue) + execute_request(pool) print ("[%s] Exiting RampUp time window." %(util.get_current_time())) phase = "MT" util.record_start_time() start=time.time() print ("[%s] Entering Measuring time window." 
%(util.get_current_time())) while(time.time()-start < int(MT_interval)): - execute_request(pool, queue) + execute_request(pool) print ("[%s] Exiting Measuring time window." %(util.get_current_time())) util.record_end_time() phase = "RD" @@ -1107,23 +1024,20 @@ def timebased_run(pool): start=time.time() print ("[%s] Entering RampDown time window." %(util.get_current_time())) while(time.time()-start < int(rampup_rampdown)): - execute_request(pool, queue) + execute_request(pool) print ("[%s] Exiting RampDown time window." %(util.get_current_time())) phase = "SD" print ("[%s] Entering ShutDown time window." %(util.get_current_time())) else: while(time.time()-start < int(MT_interval)): - execute_request(pool, queue) + execute_request(pool) print ("[%s] Exiting Measuring time window." %(util.get_current_time())) phase = "SD" print ("[%s] Entering ShutDown time window." %(util.get_current_time())) print("[%s] All requests done." % (util.get_current_time())) file = open(os.path.join(log_dir,memlogind),"w") - file.close() - pool.waitall() - node_dc_eis_testurls.clean_up_log(queue) + file.close() processing_complete = True - queue.put(('EXIT',)) post_processing.join() def requestBasedRun(pool): @@ -1223,17 +1137,17 @@ def post_process_request_based_data(temp_log,output_file): print "\n====Report Summary====" print "Primary Metrics:" - print 'Response time 99 percentile = %.3f sec' % percent - print 'Throughput = %.2f req/sec' % throughput + print 'Response time 99 percentile = ' + str(round(percent,3)) + " " +version+" sec" + print 'Throughput = ' + str(round(throughput,2)) + " " +version+ " req/sec" print "--------------------------------------\n" print >> processed_file, "\n====Report Summary====" print >> processed_file, "Primary Metrics:" - print >> processed_file, 'Throughput = %.2f req/sec' % throughput - print >> processed_file, '99 percentile = %.3f sec' % percent + print >> processed_file, 'Throughput = ' + str(round(throughput,2)) +" " +version+" req/sec" + print >> processed_file, '99 percentile = ' + str(round(percent,3)) +" " +version+" sec" print >> processed_file, "\nDetailed summary:" - print >> processed_file, 'Min Response time = %.3f sec' % minimum - print >> processed_file, 'Max Response time = %.3f sec' % maximum - print >> processed_file, 'Mean Response time = %.3f sec' % mean + print >> processed_file, 'Min Response time = ' + str(round(minimum,3)) +" " +version+" sec" + print >> processed_file, 'Max Response time = ' + str(round(maximum,3)) +" " +version+ " sec" + print >> processed_file, 'Mean Response time = ' + str(round(mean,3)) +" " +version+" sec" logfile.close() processed_file.flush() @@ -1277,15 +1191,9 @@ def print_summary(): print "Exception -- Decoding of result from cpuinfo failed. 
Exiting" sys.exit(1) if result: - - print >> processed_file, "\n====System under test====" - - print >> processed_file, '====Application====' - print >> processed_file, 'App Mode:', appName - print >> processed_file, 'App Version', version - #hardware details if 'hw' in result: + print >> processed_file, "\n====System under test====" print >> processed_file, "\n====SUT Hardware Details====" if 'architecture' in result['hw']: architecture = result['hw']['architecture'] @@ -1371,38 +1279,27 @@ def print_summary(): print >> processed_file, "Bad Url Error = " + str(node_dc_eis_testurls.bad_url) print >> processed_file, "Static posts = " + str(node_dc_eis_testurls.static_post) print >> processed_file, "\n====Validation Report====" - - if not no_db: - print >> processed_file, "Database Validation:" - print >> processed_file, "Actual database record count: ", dbrecord_count - print >> processed_file, "Database record count after loadDB: ", after_dbload["message"] - print >> processed_file, "Database record count after the run: ", after_run["message"] - print >> processed_file, "--------------------------------------" - - if get_endpoints_urls: - print >> processed_file, 'URL Validation:' - print >> processed_file, 'Endpoints:' - print >> processed_file, ' GET:', ', '.join(get_endpoints) - else: - print >> processed_file, "URL ratio Validation:" - print >> processed_file, "Total number of urls generated: " +str(count) - print >> processed_file, "Number of get urls generated: "+ str(int(id_count)+int(name_count)+int(zip_count)) +" ("+str(get_ratio)+"% of "+str(count)+")" - print >> processed_file, " Number of get id urls generated: " +str(id_count) +" ("+str(idurl_ratio)+"% of "+str(int(id_count)+int(name_count)+int(zip_count))+")" - print >> processed_file, " Number of get name urls generated: " +str(name_count) +" ("+str(nameurl_ratio)+"% of "+str(int(id_count)+int(name_count)+int(zip_count))+")" - print >> processed_file, " Number of get zip urls generated: " +str(zip_count) +" ("+str(zipurl_ratio)+"% of "+str(int(id_count)+int(name_count)+int(zip_count))+")" - print >> processed_file, "Number of post urls generated: " +str(post_count) +" ("+str(post_ratio)+"% of "+str(count)+")" - print >> processed_file, "Number of delete urls generated: " +str(delete_count) +" ("+str(delete_ratio)+"% of "+str(count)+")" - + print >> processed_file, "Database Validation:" + print >> processed_file, "Actual database record count: "+str(dbrecord_count) + print >> processed_file, "Database record count after loadDB: "+str(after_dbload["message"]) + print >> processed_file, "Database record count after the run: " +str(after_run["message"]) + print >> processed_file, "--------------------------------------" + print >> processed_file, "URL ratio Validation:" + print >> processed_file, "Total number of urls generated: " +str(count) + print >> processed_file, "Number of get urls generated: "+ str(int(id_count)+int(name_count)+int(zip_count)) +" ("+str(get_ratio)+"% of "+str(count)+")" + print >> processed_file, " Number of get id urls generated: " +str(id_count) +" ("+str(idurl_ratio)+"% of "+str(int(id_count)+int(name_count)+int(zip_count))+")" + print >> processed_file, " Number of get name urls generated: " +str(name_count) +" ("+str(nameurl_ratio)+"% of "+str(int(id_count)+int(name_count)+int(zip_count))+")" + print >> processed_file, " Number of get zip urls generated: " +str(zip_count) +" ("+str(zipurl_ratio)+"% of "+str(int(id_count)+int(name_count)+int(zip_count))+")" + print >> processed_file, "Number of post 
urls generated: " +str(post_count) +" ("+str(post_ratio)+"% of "+str(count)+")" + print >> processed_file, "Number of delete urls generated: " +str(delete_count) +" ("+str(delete_ratio)+"% of "+str(count)+")" print >> processed_file, "--------------------------------------" - print >> processed_file, "Requests Validation:" print >> processed_file, "Total runtime duration: " +str(int(MT_interval)) print >> processed_file, "Total number of get requests: " +str(tot_get) print >> processed_file, "Total number of post requests: " +str(tot_post) print >> processed_file, "Total number of delete requests: " +str(tot_del) - processed_file.flush() - processed_file.close() + processed_file.flush() processed_file = open(processed_filename, "r") print processed_file.read() processed_file.close() @@ -1493,5 +1390,4 @@ def plot_graph_request_based_run(output_file): """ # Desc : This the main entry call. """ -if __name__ == "__main__": - main() \ No newline at end of file +arg_parse() diff --git a/Node-DC-EIS-client/util.py b/Node-DC-EIS-client/util.py index daefee9..f5877e4 100644 --- a/Node-DC-EIS-client/util.py +++ b/Node-DC-EIS-client/util.py @@ -84,12 +84,11 @@ def create_indicator_file(rundir,file_name,instance_id,string_towrite): # string to be written in the new file created # Output: creates a new indicator file """ - print "[%s] Creating indicator file." % get_current_time() - with open(os.path.join(rundir, '%s%s.syncpt' % (file_name, instance_id)), - 'w') as ind_file: - if string_towrite: - ind_file.write(string_towrite) - + print ("[%s] Creating indicator file." % (get_current_time())) + ind_file = open(os.path.join(rundir,file_name+str(instance_id)+".syncpt"),'w') + if string_towrite: + ind_file.write(string_towrite) + def calculate_throughput(log_dir,concurrency,cpuCount): """
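
For reference, the client flow this revert restores is the one described in the README hunk above: runspec.py drives a time-based (default) or request-based run, sends GET/POST/DELETE requests with Python's requests module, and summarizes throughput and response-time statistics (min, max, mean, 99th percentile). Below is a minimal, illustrative sketch of such a time-based measurement window; it is not the actual driver code, and the server URL, endpoint, and interval are placeholders rather than values taken from this patch (the real client reads server_ipaddress, server_port, and the run parameters from config.json, e.g. `python runspec.py -f config.json`).

# Illustrative sketch only -- NOT the runspec.py / node_dc_eis_testurls.py driver.
# Placeholders: SERVER_URL and MT_INTERVAL; the real client builds id/name/zipcode
# GET URLs (plus POST/DELETE requests) from the loaded database and its URL ratios.
import time
import requests

SERVER_URL = "http://localhost:9000/"   # placeholder; taken from config.json in the real client
MT_INTERVAL = 10                        # measurement window in seconds (MT_interval)

response_times = []
start = time.time()
while time.time() - start < MT_INTERVAL:
    t0 = time.time()
    requests.get(SERVER_URL)            # real client mixes GET/POST/DELETE per configured ratios
    response_times.append(time.time() - t0)

response_times.sort()
n = len(response_times)
throughput = n / float(MT_INTERVAL)
p99 = response_times[min(n - 1, int(n * 0.99))]
print("Throughput = %.2f req/sec" % throughput)
print("Response time 99 percentile = %.3f sec" % p99)
print("Min/Max/Mean response time = %.3f/%.3f/%.3f sec"
      % (response_times[0], response_times[-1], sum(response_times) / n))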