From 8ab80268e1f02aa2e2d8268baa53d49b59949b0b Mon Sep 17 00:00:00 2001
From: Enrico Galli
Date: Tue, 26 Sep 2017 14:43:56 -0700
Subject: [PATCH] Adding support for Node-DC-SSR and fixes

* Added --no-db option to skip database initialization and checks
* Added --get-endpoints to override which URLs are used to query the server
* Added support for extracting application version number from server
* Fixed processing of tempfiles so it isn't so slow
* Fixed indentation and spacing on README
---
 Node-DC-EIS-client/README.md               |  39 ++-
 Node-DC-EIS-client/config-ssr.json         |  29 ++
 Node-DC-EIS-client/node_dc_eis_testurls.py |  81 +++---
 .../process_time_based_output.py           |  54 ++--
 Node-DC-EIS-client/runspec.py              | 260 ++++++++++++------
 Node-DC-EIS-client/util.py                 |  11 +-
 6 files changed, 313 insertions(+), 161 deletions(-)
 create mode 100644 Node-DC-EIS-client/config-ssr.json

diff --git a/Node-DC-EIS-client/README.md b/Node-DC-EIS-client/README.md
index 0aad66a..af3c0a2 100644
--- a/Node-DC-EIS-client/README.md
+++ b/Node-DC-EIS-client/README.md
@@ -1,4 +1,4 @@
-Copyright (c) 2016 Intel Corporation
+Copyright (c) 2016 Intel Corporation
 
 Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
@@ -10,30 +10,29 @@ Copyright (c) 2016 Intel Corporation
  See the License for the specific language governing permissions and
  limitations under the License.
 
-This client directory contains,
- - runspec.py- A toplevel runspec script; the main script to launch workload.
- -2 modes: time based run which is the default(1) and request based run (2)
- - node_els-testurls.py - The benchmark driver file which sends actual requests using python's requests module.
- - config.json - The input client configuration file. This file is used by runspec.py when run with -f option
- - process_time_based_output.py - Script to post process and summarize time based run data
- - Creates temporary log files for every given interval which is post processed to create a summary.
- - summary_file_sample.txt - Sample file of the summary file that is generated after a run.
- - a results sub directory will be created after the run which contains all the result directories which are designated by the date and timestamp.
-
+# This client directory contains,
+- runspec.py- A toplevel runspec script; the main script to launch workload.
+ - 2 modes: time based run which is the default(1) and request based run (2)
+- node_els-testurls.py - The benchmark driver file which sends actual requests using python's requests module.
+- config.json - The input client configuration file. This file is used by runspec.py when run with -f option
+- config-ssr.json - The input client configuration file for SSR workloads. This file is used by runspec.py when run with -f option
+- process_time_based_output.py - Script to post process and summarize time based run data
+ - Creates temporary log files for every given interval which is post processed to create a summary.
+- summary_file_sample.txt - Sample file of the summary file that is generated after a run.
+- a results sub directory will be created after the run which contains all the result directories which are designated by the date and timestamp.
+
 ## Client help:
- - Run the main script “python runspec.py ”.
- - You will need to change the IP address and port of your server in ‘runspec.py’ or in config.json.
- - Takes additional command line parameters.
- - Default parameters in the script or can be read from a configuration file with -f/--config option (command line has the maximum priority). - - h gives the available options. - - Configurable option -g or --showgraph for graph generation(1 to generate output graphs or 0 for no graph) - - the server ip address and port can be changed in config.json or directly in runspec.py - - This script sends requests using python's requests module, generates log file and generates output graphs. +- Run the main script "python runspec.py ". + - You will need to change the IP address and port of your server in config.json or config-ssr.json. + - Takes additional command line parameters. + - Default parameters in the script or can be read from a configuration file with -f/--config option (command line has the maximum priority). + - h gives the available options. + - Configurable option -g or --showgraph for graph generation(1 to generate output graphs or 0 for no graph) + - This script sends requests using python's requests module, generates log file and generates output graphs. ## Client result files - A temporary log file (request-based-run) which contains details like request-number,write-time, read-time, response-time for every request. - A summary file (RTdata) which has client information, database information and a summary of the run (throughput, min max,average response time). - Two output graphs; one is the throughput graph and the second is the latency graph. - diff --git a/Node-DC-EIS-client/config-ssr.json b/Node-DC-EIS-client/config-ssr.json new file mode 100644 index 0000000..c36bf93 --- /dev/null +++ b/Node-DC-EIS-client/config-ssr.json @@ -0,0 +1,29 @@ +{ +"client_params": + { + "MT_interval" : "100", + "request" : "10000", + "concurrency" : "200", + "run_mode" : "1", + "interval" : "10", + "rampup_rampdown": "25", + "tempfile": "RTdata", + "total_urls": "100", + "server_ipaddress": "localhost", + "server_port": "3000", + "root_endpoint": "/", + "no_db": true, + "get_endpoints": [ + "count/10000/15" + ], + "http_headers": [ + "User-Agent: Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36" + ], + "html": true + }, +"memory_params": + { + "memstat_interval": "1", + "memlogfile": "memlog_file" + } +} diff --git a/Node-DC-EIS-client/node_dc_eis_testurls.py b/Node-DC-EIS-client/node_dc_eis_testurls.py index bf6158f..1db802d 100644 --- a/Node-DC-EIS-client/node_dc_eis_testurls.py +++ b/Node-DC-EIS-client/node_dc_eis_testurls.py @@ -19,6 +19,7 @@ import os import urlparse import re +import sys from functools import partial from eventlet.green import urllib2 from eventlet.green import socket @@ -63,7 +64,6 @@ #globals to implement file optimization start_time = 0 file_cnt = 0 -log="" ip_cache = {} def get_ip(hostname): @@ -83,7 +83,7 @@ def get_ip(hostname): ip_cache[hostname] = ip return ip -def get_url(url, url_type, request_num, log, phase, accept_header): +def get_url(url, url_type, request_num, phase, accept_header, http_headers): """ # Desc : Function to send get requests to the server. Type 1 is get requests # handles 3 types of GET requests based on ID, last_name and zipcode. 
@@ -108,12 +108,12 @@ def get_url(url, url_type, request_num, log, phase, accept_header): req_path = '{}{}'.format(urlo.path, query) - req = '''GET {} HTTP/1.1 -Host: {} -User-Agent: runspec/0.9 -Accept: {} - -'''.format(req_path, urlo.netloc, accept_header) + req = '''GET {} HTTP/1.1\r +Host: {}\r +Accept: {}\r +{}\r +'''.format(req_path, urlo.netloc, + accept_header, ''.join(hh + '\r\n' for hh in http_headers)) try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) @@ -182,7 +182,7 @@ def post_function(url, post_data): print e return r -def post_url(url,url_type,request_num,log,phase): +def post_url(url, url_type, request_num, phase): """ # Desc : Function to send post requests to the server. Type 2 is post requests # Retries if the post request fails @@ -237,7 +237,7 @@ def post_url(url,url_type,request_num,log,phase): util.printlog(log,phase,url_type,request_num,url,start,end,response_time,total_length) return -def delete_url(url,url_type,request_num,log,phase): +def delete_url(url, url_type, request_num, phase): """ # Desc : Function to send delete requests to the server. Type 3 is delete requests # also captures the data record being deleted and saves it in a list(post/_datalist) @@ -271,6 +271,7 @@ def delete_url(url,url_type,request_num,log,phase): if 'employee' in response: post_datalist.insert(front_oflist,response) else: + print url print "Warning : Record not found" start = time.time() r = s.delete(url, headers=headers) @@ -334,8 +335,24 @@ def calculate_len_postdel(response): total_length = header_len + content_len return total_length +def open_log(log_dir): + try: + log = open(os.path.join(log_dir, "tempfile_" + str(file_cnt)), "w") + except IOError: + print "[%s] Could not open templog file for writing." % (util.get_current_time()) + sys.exit(1) + + return log + +def clean_up_log(queue): + ''' Used to clean up last log file ''' + global log + log.close() + queue.put(('PROCESS', log.name, file_cnt)) + log = None + def main_entry(url, request_num, url_type, log_dir, phase, interval, - run_mode, temp_log, accept_header): + run_mode, temp_log, accept_header, queue, http_headers): """ # Desc : main entry function to determine the type of url - GET,POST or DELETE # creates log file which captures per request data depending on the type of run. @@ -347,36 +364,36 @@ def main_entry(url, request_num, url_type, log_dir, phase, interval, """ global start_time global file_cnt - global log global init + global log if run_mode == 1: if not init: - start_time=time.time(); - try: - log = open(os.path.join(log_dir,"tempfile_"+str(file_cnt)),"w") - init = True - except IOError: - print ("[%s] Could not open templog file for writing." % (util.get_current_time())) - sys.exit(1) - if(time.time()-start_time > float(interval)): - file_cnt +=1 + start_time = time.time(); + log = open_log(log_dir) + init = True + + if time.time() - start_time > float(interval): + old_log = log + old_file_cnt = file_cnt + file_cnt += 1 start_time = time.time() - try: - log = open(os.path.join(log_dir,"tempfile_"+str(file_cnt)),"w") - except IOError: - print ("[%s] Could not open templog file for writing." % (util.get_current_time())) - sys.exit(1) + + log = open_log(log_dir) + + old_log.close() + queue.put(('PROCESS', old_log.name, old_file_cnt)) + else: try: - log = open(os.path.join(log_dir,temp_log), "a") - except IOError as e: - print("Error: %s File not found." % temp_log) + log = open(os.path.join(log_dir, temp_log), "a") + except IOError: + print "Error: %s File not found." 
% temp_log sys.exit(1) if url_type == 1: - get_url(url,url_type,request_num, log, phase, accept_header) + get_url(url, url_type, request_num, phase, accept_header, http_headers) if url_type == 2: - post_url(url,url_type,request_num,log,phase) + post_url(url, url_type, request_num, phase) if url_type == 3: - delete_url(url,url_type,request_num,log,phase) + delete_url(url, url_type, request_num, phase) diff --git a/Node-DC-EIS-client/process_time_based_output.py b/Node-DC-EIS-client/process_time_based_output.py index 28210cd..ed3a5bb 100644 --- a/Node-DC-EIS-client/process_time_based_output.py +++ b/Node-DC-EIS-client/process_time_based_output.py @@ -32,7 +32,8 @@ percent99 = 0 percent95 = 0 -def process_tempfile(results_dir,interval,rampup_rampdown,request,temp_log,instance_id,multiple_instance): +def process_tempfile(results_dir, interval, rampup_rampdown, request, + temp_log, instance_id, multiple_instance, queue): """ # Desc : Function to process each intermediate files. # waits for interval and then calls process_data on the next templog file @@ -41,33 +42,33 @@ def process_tempfile(results_dir,interval,rampup_rampdown,request,temp_log,insta # total time for the measurement, instance ID, flag to check multiple insatnce run # Output: None """ - number_of_files = int(math.ceil((2.0 * rampup_rampdown + request) / interval)) - file_cnt=0 try: - temp_log = open(os.path.join(results_dir,temp_log),"a") + temp_log = open(os.path.join(results_dir, temp_log),"a") except IOError: - print ("[%s] Could not open templog file for writing." % (util.get_current_time())) + print "[%s] Could not open templog file for writing." % util.get_current_time() + sys.exit(1) + temp_log.flush() - time.sleep(60) - while file_cnt < number_of_files: - tempfile = os.path.join(results_dir,"tempfile_"+str(file_cnt)) - if(os.path.exists(tempfile)): - time.sleep(interval) - try: - temp_file = open(tempfile,"r") - print ("[%s] Processing Output File tempfile_[%d]." % (util.get_current_time(),file_cnt)) - process_data(temp_file,temp_log,results_dir,file_cnt,interval) - temp_file.close() - if(file_cnt == 0 and multiple_instance): - util.create_indicator_file(os.path.dirname(os.path.dirname(results_dir)),"start_processing", instance_id, temp_log.name) - os.remove(tempfile) - file_cnt +=1 - except IOError: - print ("[%s] Could not open templog file for reading." % (util.get_current_time())) - sys.exit(1) - else: - time.sleep(interval) + + while True: + event = queue.get() + if event[0] == 'EXIT': + break + _, tempfile, file_cnt = event + try: + temp_file = open(tempfile, "r") + except IOError: + print "[%s] Could not open %s file for reading." % (util.get_current_time(), tempfile) + sys.exit(1) + + with temp_file: + print "[%s] Processing Output File tempfile_[%d]." % (util.get_current_time(), file_cnt) + process_data(temp_file, temp_log, results_dir, file_cnt, interval) + + if file_cnt == 0 and multiple_instance: + util.create_indicator_file(os.path.dirname(os.path.dirname(results_dir)), "start_processing", instance_id, temp_log.name) + os.remove(tempfile) print ("[%s] Closing main templog file." % (util.get_current_time())) temp_log.close() @@ -280,7 +281,7 @@ def post_process(temp_log,output_file,results_dir,interval,memlogfile,no_graph): print("\nThe memory usage graph is located at " +os.path.abspath(os.path.join(results_dir,'memory_usage.png'))) print ("[%s] Plotting graphs done." 
% (util.get_current_time())) -def process_time_based_output(results_dir,interval,rampup_rampdown,request,temp_log,output_file,memlogfile,instance_id,multiple_instance,no_graph): +def process_time_based_output(results_dir,interval,rampup_rampdown,request,temp_log,output_file,memlogfile,instance_id,multiple_instance,no_graph, queue): """ # Desc : Main function which handles all the Output Processing # This function is run by the Child Function @@ -290,7 +291,8 @@ def process_time_based_output(results_dir,interval,rampup_rampdown,request,temp_ # Output: None """ print ("[%s] Starting process for post processing." % (util.get_current_time())) - process_tempfile(results_dir,interval,rampup_rampdown,request,temp_log,instance_id,multiple_instance) + process_tempfile(results_dir, interval, rampup_rampdown, request, temp_log, + instance_id, multiple_instance, queue) if multiple_instance: util.create_indicator_file(os.path.dirname(os.path.dirname(results_dir)),"done_processing", instance_id, "") # #Post Processing Function diff --git a/Node-DC-EIS-client/runspec.py b/Node-DC-EIS-client/runspec.py index 873355a..fd82fc2 100755 --- a/Node-DC-EIS-client/runspec.py +++ b/Node-DC-EIS-client/runspec.py @@ -16,7 +16,6 @@ import argparse import json import os -import subprocess import csv import sys import re @@ -42,7 +41,7 @@ from itertools import izip from threading import Thread,Timer from multiprocessing import Process -from multiprocessing import Pool +from multiprocessing import Queue import node_dc_eis_testurls from node_dc_eis_testurls import * from process_time_based_output import process_time_based_output @@ -60,16 +59,13 @@ concurrency = 200 rampup_rampdown = 10 total_urls = 100 -server_ipaddress = "localhost" -server_port = "9000" urllist= [] memstat_interval = 3 memlogfile = "memlog_file" no_graph = False #if set to True, output graphs will not be generated. -i = datetime.now() -directory = i.strftime('%H-%M-%S_%m-%d-%Y') -output_file = str(directory + 'summary-report.txt') +directory = datetime.now().strftime('%Y-%m-%d_%H-%M-%S') +output_file = str(directory + '_summary-report.txt') idmatches_index = 0 postid_index = 0 @@ -89,6 +85,8 @@ cpuCount = -1 run_mode = 1 +http_headers = [] + """ # Database parameters - defaults """ @@ -118,7 +116,7 @@ """ # Results directory - defaults """ -results_dir = "results_node_DC_EIS" +results_dir = "results_node_DC" """ # Server end points (urls) to test @@ -133,17 +131,7 @@ cpuinfo_endpoint = "getcpuinfo" checkdb_endpoint = "checkdb" -""" -# Absolute server url prefix to build all the urls -""" -server_url = "http://" + server_ipaddress + ":" + server_port + server_root_endpoint -loaddb_url = server_url + loaddb_endpoint -id_url = server_url + id_endpoint -name_url = server_url + name_endpoint -zipcode_url = server_url + zipcode_endpoint -meminfo_url = server_url + meminfo_endpoint -cpuinfo_url = server_url + cpuinfo_endpoint -checkdb_url = server_url + checkdb_endpoint +get_endpoints_urls = [] """ # Type of URL ratio - defaults @@ -171,6 +159,7 @@ def setup(): # Input : None # Output: None """ + global version global appName global cpuCount global directory @@ -191,11 +180,14 @@ def setup(): """ print "Exception -- Decoding of result from cpuinfo failed. 
Exiting" exit(1) + if result: if 'appName' in result: appName = result["appName"] if 'cpuCount' in result: cpuCount = result["cpuCount"] + if 'appVersion' in result: + version = result['appVersion'] print "Starting "+version + " in "+appName + " Mode" if(instance_id): directory = directory+"-"+appName+"-"+instance_id @@ -214,7 +206,7 @@ def setup(): os.makedirs(os.path.join(results_dir, directory)) return -def arg_parse(): +def main(): """ # Desc : Function to parse command line arguments # Input : None @@ -226,6 +218,12 @@ def arg_parse(): global multiple_instance global run_mode global no_graph + global no_db + global get_endpoints + global get_endpoints_urls + + get_endpoints = None + no_db = False print ("[%s] Parsing arguments." % (util.get_current_time())) parser = argparse.ArgumentParser() @@ -254,7 +252,7 @@ def arg_parse(): action="store", help='Number of rampup-rampdown requests to perform') parser.add_argument('-r','--run_mode',dest="run_mode", - action="store", + action="store", choices=[1, 2], type=int, help='1 for time based run. 2 for request based run. Default is 1') parser.add_argument('-int','--interval',dest="interval",action="store", help='Interval after which logging switches to next temp log file') @@ -282,6 +280,24 @@ def arg_parse(): parser.add_argument('-H', '--html', dest='html', default=False, action='store_true', help='Request HTML instead of JSON from server') + + parser.add_argument('-nd', '--no-db', action='store_true', + help='Skips all database loading and checking actions') + + parser.add_argument('-ge', '--get-endpoints', nargs='+', + help='Directly specific which endpoints to use during ' + 'GET operations (bypasses id, name, and zip ratios)') + + def header_check(value): + header = [ss.strip() for ss in value.split(':', 1)] + if len(header) != 2: + raise argparse.ArgumentTypeError('"%s" is not of the form ' + '"key: value"' % value) + return ': '.join(header) + + parser.add_argument('-hh', '--http-headers', nargs='+', type=header_check, + help='Extra HTTP headers to send to the server') + options = parser.parse_args() print('Input options config files: %s' % options.config) @@ -385,6 +401,26 @@ def arg_parse(): if "html" in json_data["client_params"]: use_html = json_data["client_params"]["html"] + if "no_db" in json_data["client_params"]: + no_db = str(json_data["client_params"]["no_db"]).lower() in ( + 'y', 'yes', + 't', 'true', + '1', + ) + + if "get_endpoints" in json_data["client_params"]: + get_endpoints = json_data["client_params"]["get_endpoints"] + + if "http_headers" in json_data["client_params"]: + try: + headers = [header_check(hh) + for hh in json_data["client_params"]["http_headers"]] + except Exception as e: + print 'Error: http_headers: %s' % e + sys.exit(1) + + http_headers.extend(headers) + #database setup parameters if "db_params" in json_data: if "dbrecord_count" in json_data["db_params"]: @@ -477,6 +513,15 @@ def arg_parse(): if options.html: use_html = options.html + if options.no_db: + no_db = options.no_db + + if options.get_endpoints: + get_endpoints = options.get_endpoints + + if options.http_headers: + http_headers.extend(options.http_headers) + server_url = "http://" + server_ipaddress + ":" + server_port + server_root_endpoint loaddb_url = server_url + loaddb_endpoint id_url = server_url + id_endpoint @@ -486,6 +531,9 @@ def arg_parse(): cpuinfo_url = server_url + cpuinfo_endpoint checkdb_url = server_url + checkdb_endpoint + if get_endpoints: + get_endpoints_urls = [server_url + endpoint for endpoint in get_endpoints] + if 
int(concurrency) > int(request): print "Warning -- concurrency cannot be greater than number of requests. Setting concurrency == number of requests" concurrency = request @@ -508,17 +556,26 @@ def run_printenv(log): print >> log, "# requests :"+ str(request) +" (Default value = 10000)" print >> log, "# concurrency :"+ str(concurrency) +" (Default value = 200)" print >> log, "# URLs :" +str(total_urls) +" (Default value = 100)" - print >> log, "# get url ratio:" + str(get_ratio) +" (Default value = 80)" - print >> log,"# post url ratio:"+ str(post_ratio) +" (Default value = 10)" - print >> log, "# delete url ratio:"+ str(delete_ratio)+" (Default value = 10)" - print >> log, "# id_url:"+ str(idurl_ratio) +" (Default value = 50)" - print >> log, "# name url ratio:"+ str(nameurl_ratio) +" (Default value = 25)" - print >> log,"# zip url ratio:"+ str(zipurl_ratio)+" (Default value = 25)" - print >> log, "====Database Parameters====" - print >> log, "# records :"+ str(dbrecord_count) +" (Default value = 10000)" - print >> log, "# unique name:" + str(name_dbratio) +" (Default value = 25)" - print >> log, "# unique zips:" + str(zip_dbratio) +" (Default value = 25)" - return + print >> log, "# Use HTML: %s (Default value = False)" % use_html + if http_headers: + print >> log, "# Extra HTTP headers:" + for hh in http_headers: + print >> log, "# ", hh + + if not get_endpoints_urls: + print >> log, "# get url ratio:%s (Default value = 80)" % get_ratio + print >> log, "# post url ratio:%s (Default value = 10)" % post_ratio + print >> log, "# delete url ratio:%s (Default value = 10)" % delete_ratio + print >> log, "# id_url:%s (Default value = 50)" % idurl_ratio + print >> log, "# name url ratio:%s (Default value = 25)" % nameurl_ratio + print >> log, "# zip url ratio:%s (Default value = 25)" % zipurl_ratio + + if not no_db: + print >> log, "====Database Parameters====" + print >> log, "# records :%s (Default value = 10000)" % dbrecord_count + print >> log, "# unique name:%s (Default value = 25)" % name_dbratio + print >> log, "# unique zips:%s (Default value = 25)" % zip_dbratio + def get_data(): """ @@ -530,10 +587,34 @@ def get_data(): # Input : None # Output: None """ - global employee_idlist #Populate database - run_loaddb() + if not no_db: + run_loaddb() + + if get_endpoints_urls: + generate_urls_from_list() + else: + generate_urls_from_db() + + if multiple_instance: + util.create_indicator_file(rundir,"loaddb_done", instance_id,"") + util.check_startfile(rundir) + #Send requests + send_request() + +def generate_urls_from_list(): + urls_count = len(get_endpoints_urls) + for ii in xrange(int(total_urls)): + urls_idx = random.randint(0, urls_count - 1) + urllist.append({ + 'url': get_endpoints_urls[urls_idx], + 'method':'GET'}) + + print "[%s] Building list of Urls done." % util.get_current_time() + +def generate_urls_from_db(): + global employee_idlist print ("[%s] Build list of employee IDs." 
% (util.get_current_time())) try: @@ -591,13 +672,7 @@ def get_data(): zip_number = int(math.ceil((int(get_urlcount)*float(float(zipurl_ratio)/100)))) #start building the url list - buildurllist(employee_idlist, id_number, name_matches, name_number , zip_matches, zip_number, post_urlcount,delete_urlcount) - if(multiple_instance): - util.create_indicator_file(rundir,"loaddb_done", instance_id,"") - util.check_startfile(rundir) - #Send requests - send_request(employee_idlist) - return + builddburllist(employee_idlist, id_number, name_matches, name_number , zip_matches, zip_number, post_urlcount,delete_urlcount) def run_loaddb(): """ @@ -658,7 +733,7 @@ def check_db(): return checkdb_dict -def buildurllist(employee_idlist, id_number, name_matches, name_number , zip_matches, zip_number,post_urlcount,delete_urlcount): +def builddburllist(employee_idlist, id_number, name_matches, name_number, zip_matches, zip_number,post_urlcount, delete_urlcount): """ # Desc :Function build list of URLs with enough randomness for realistic # behavior @@ -849,7 +924,7 @@ def collect_meminfo(): heapTotlist.append(0) return -def send_request(employee_idlist): +def send_request(): """ # Desc : Main function initiates requests to server # Input : List of EmployeeId @@ -894,12 +969,13 @@ def send_request(employee_idlist): requestBasedRun(pool) mem_process.join() - after_run = check_db() + if not no_db: + after_run = check_db() print_summary() log.close() return -def execute_request(pool): +def execute_request(pool, queue=None): """ # Desc : Creates threadpool for concurrency, and sends concurrent requests # to server for the input #requests or based on time interval. @@ -924,8 +1000,8 @@ def execute_request(pool): if(urllist[execute_request.url_index]['method']== 'GET'): url_type = 1 - tot_get = tot_get +1 - if not(parsed.path == "/employees/zipcode" or parsed.path == "/employees/name"): + tot_get = tot_get + 1 + if parsed.path == "/employees/id/": ids = getNextEmployeeId() url = url+ids if(urllist[execute_request.url_index]['method']== 'POST'): @@ -947,7 +1023,9 @@ def execute_request(pool): interval, run_mode, temp_log, - 'text/html' if use_html else 'application/json' + 'text/html' if use_html else 'application/json', + queue, + http_headers ] if(int(concurrency) == 1): @@ -992,9 +1070,14 @@ def timebased_run(pool): url_index = 0 request_index = 0 # Initializing the Request Counter to 0 + queue = Queue() + #Spin Another Process to do processing of Data - post_processing = Process(target=process_time_based_output,args=(log_dir,interval,rampup_rampdown,MT_interval,temp_log,output_file,memlogfile,instance_id, - multiple_instance,no_graph)) + post_processing = Process(target=process_time_based_output, + args=(log_dir, interval, rampup_rampdown, + MT_interval, temp_log, output_file, + memlogfile, instance_id, multiple_instance, + no_graph, queue)) post_processing.start() print ("[%s] Starting time based run." % (util.get_current_time())) if ramp: @@ -1009,14 +1092,14 @@ def timebased_run(pool): print ("[%s] Started processing of requests with concurrency of [%d] for [%d] seconds" % (util.get_current_time(), int(concurrency), int(MT_interval))) if ramp: while(time.time()-start < int(rampup_rampdown)): - execute_request(pool) + execute_request(pool, queue) print ("[%s] Exiting RampUp time window." %(util.get_current_time())) phase = "MT" util.record_start_time() start=time.time() print ("[%s] Entering Measuring time window." 
%(util.get_current_time())) while(time.time()-start < int(MT_interval)): - execute_request(pool) + execute_request(pool, queue) print ("[%s] Exiting Measuring time window." %(util.get_current_time())) util.record_end_time() phase = "RD" @@ -1024,20 +1107,23 @@ def timebased_run(pool): start=time.time() print ("[%s] Entering RampDown time window." %(util.get_current_time())) while(time.time()-start < int(rampup_rampdown)): - execute_request(pool) + execute_request(pool, queue) print ("[%s] Exiting RampDown time window." %(util.get_current_time())) phase = "SD" print ("[%s] Entering ShutDown time window." %(util.get_current_time())) else: while(time.time()-start < int(MT_interval)): - execute_request(pool) + execute_request(pool, queue) print ("[%s] Exiting Measuring time window." %(util.get_current_time())) phase = "SD" print ("[%s] Entering ShutDown time window." %(util.get_current_time())) print("[%s] All requests done." % (util.get_current_time())) file = open(os.path.join(log_dir,memlogind),"w") - file.close() + file.close() + pool.waitall() + node_dc_eis_testurls.clean_up_log(queue) processing_complete = True + queue.put(('EXIT',)) post_processing.join() def requestBasedRun(pool): @@ -1137,17 +1223,17 @@ def post_process_request_based_data(temp_log,output_file): print "\n====Report Summary====" print "Primary Metrics:" - print 'Response time 99 percentile = ' + str(round(percent,3)) + " " +version+" sec" - print 'Throughput = ' + str(round(throughput,2)) + " " +version+ " req/sec" + print 'Response time 99 percentile = %.3f sec' % percent + print 'Throughput = %.2f req/sec' % throughput print "--------------------------------------\n" print >> processed_file, "\n====Report Summary====" print >> processed_file, "Primary Metrics:" - print >> processed_file, 'Throughput = ' + str(round(throughput,2)) +" " +version+" req/sec" - print >> processed_file, '99 percentile = ' + str(round(percent,3)) +" " +version+" sec" + print >> processed_file, 'Throughput = %.2f req/sec' % throughput + print >> processed_file, '99 percentile = %.3f sec' % percent print >> processed_file, "\nDetailed summary:" - print >> processed_file, 'Min Response time = ' + str(round(minimum,3)) +" " +version+" sec" - print >> processed_file, 'Max Response time = ' + str(round(maximum,3)) +" " +version+ " sec" - print >> processed_file, 'Mean Response time = ' + str(round(mean,3)) +" " +version+" sec" + print >> processed_file, 'Min Response time = %.3f sec' % minimum + print >> processed_file, 'Max Response time = %.3f sec' % maximum + print >> processed_file, 'Mean Response time = %.3f sec' % mean logfile.close() processed_file.flush() @@ -1191,9 +1277,15 @@ def print_summary(): print "Exception -- Decoding of result from cpuinfo failed. 
Exiting" sys.exit(1) if result: + + print >> processed_file, "\n====System under test====" + + print >> processed_file, '====Application====' + print >> processed_file, 'App Mode:', appName + print >> processed_file, 'App Version', version + #hardware details if 'hw' in result: - print >> processed_file, "\n====System under test====" print >> processed_file, "\n====SUT Hardware Details====" if 'architecture' in result['hw']: architecture = result['hw']['architecture'] @@ -1279,27 +1371,38 @@ def print_summary(): print >> processed_file, "Bad Url Error = " + str(node_dc_eis_testurls.bad_url) print >> processed_file, "Static posts = " + str(node_dc_eis_testurls.static_post) print >> processed_file, "\n====Validation Report====" - print >> processed_file, "Database Validation:" - print >> processed_file, "Actual database record count: "+str(dbrecord_count) - print >> processed_file, "Database record count after loadDB: "+str(after_dbload["message"]) - print >> processed_file, "Database record count after the run: " +str(after_run["message"]) - print >> processed_file, "--------------------------------------" - print >> processed_file, "URL ratio Validation:" - print >> processed_file, "Total number of urls generated: " +str(count) - print >> processed_file, "Number of get urls generated: "+ str(int(id_count)+int(name_count)+int(zip_count)) +" ("+str(get_ratio)+"% of "+str(count)+")" - print >> processed_file, " Number of get id urls generated: " +str(id_count) +" ("+str(idurl_ratio)+"% of "+str(int(id_count)+int(name_count)+int(zip_count))+")" - print >> processed_file, " Number of get name urls generated: " +str(name_count) +" ("+str(nameurl_ratio)+"% of "+str(int(id_count)+int(name_count)+int(zip_count))+")" - print >> processed_file, " Number of get zip urls generated: " +str(zip_count) +" ("+str(zipurl_ratio)+"% of "+str(int(id_count)+int(name_count)+int(zip_count))+")" - print >> processed_file, "Number of post urls generated: " +str(post_count) +" ("+str(post_ratio)+"% of "+str(count)+")" - print >> processed_file, "Number of delete urls generated: " +str(delete_count) +" ("+str(delete_ratio)+"% of "+str(count)+")" + + if not no_db: + print >> processed_file, "Database Validation:" + print >> processed_file, "Actual database record count: ", dbrecord_count + print >> processed_file, "Database record count after loadDB: ", after_dbload["message"] + print >> processed_file, "Database record count after the run: ", after_run["message"] + print >> processed_file, "--------------------------------------" + + if get_endpoints_urls: + print >> processed_file, 'URL Validation:' + print >> processed_file, 'Endpoints:' + print >> processed_file, ' GET:', ', '.join(get_endpoints) + else: + print >> processed_file, "URL ratio Validation:" + print >> processed_file, "Total number of urls generated: " +str(count) + print >> processed_file, "Number of get urls generated: "+ str(int(id_count)+int(name_count)+int(zip_count)) +" ("+str(get_ratio)+"% of "+str(count)+")" + print >> processed_file, " Number of get id urls generated: " +str(id_count) +" ("+str(idurl_ratio)+"% of "+str(int(id_count)+int(name_count)+int(zip_count))+")" + print >> processed_file, " Number of get name urls generated: " +str(name_count) +" ("+str(nameurl_ratio)+"% of "+str(int(id_count)+int(name_count)+int(zip_count))+")" + print >> processed_file, " Number of get zip urls generated: " +str(zip_count) +" ("+str(zipurl_ratio)+"% of "+str(int(id_count)+int(name_count)+int(zip_count))+")" + print >> processed_file, "Number of post 
urls generated: " +str(post_count) +" ("+str(post_ratio)+"% of "+str(count)+")" + print >> processed_file, "Number of delete urls generated: " +str(delete_count) +" ("+str(delete_ratio)+"% of "+str(count)+")" + print >> processed_file, "--------------------------------------" + print >> processed_file, "Requests Validation:" print >> processed_file, "Total runtime duration: " +str(int(MT_interval)) print >> processed_file, "Total number of get requests: " +str(tot_get) print >> processed_file, "Total number of post requests: " +str(tot_post) print >> processed_file, "Total number of delete requests: " +str(tot_del) - processed_file.flush() + processed_file.flush() + processed_file.close() processed_file = open(processed_filename, "r") print processed_file.read() processed_file.close() @@ -1390,4 +1493,5 @@ def plot_graph_request_based_run(output_file): """ # Desc : This the main entry call. """ -arg_parse() +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/Node-DC-EIS-client/util.py b/Node-DC-EIS-client/util.py index f5877e4..daefee9 100644 --- a/Node-DC-EIS-client/util.py +++ b/Node-DC-EIS-client/util.py @@ -84,11 +84,12 @@ def create_indicator_file(rundir,file_name,instance_id,string_towrite): # string to be written in the new file created # Output: creates a new indicator file """ - print ("[%s] Creating indicator file." % (get_current_time())) - ind_file = open(os.path.join(rundir,file_name+str(instance_id)+".syncpt"),'w') - if string_towrite: - ind_file.write(string_towrite) - + print "[%s] Creating indicator file." % get_current_time() + with open(os.path.join(rundir, '%s%s.syncpt' % (file_name, instance_id)), + 'w') as ind_file: + if string_towrite: + ind_file.write(string_towrite) + def calculate_throughput(log_dir,concurrency,cpuCount): """
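
Note on the tempfile-processing fix (illustrative, not part of the patch): the patch replaces the old poll-and-sleep loop in process_tempfile with a multiprocessing.Queue handoff. The request loop announces each finished tempfile as a ('PROCESS', <name>, <count>) event and signals completion with ('EXIT',), so the post-processing process reacts as soon as a file is ready instead of sleeping for a fixed interval. The sketch below reproduces only that producer/consumer pattern under simplified assumptions; the event shapes and the tempfile_<n> naming mirror the patch, while the producer/consumer helpers and the temporary-directory handling are hypothetical stand-ins.

```python
import multiprocessing
import os
import tempfile


def producer(queue, log_dir, intervals=3):
    """Stand-in for the request loop: write one tempfile per interval and announce it."""
    for file_cnt in range(intervals):
        path = os.path.join(log_dir, "tempfile_%d" % file_cnt)
        with open(path, "w") as log:
            log.write("request data for interval %d\n" % file_cnt)
        queue.put(('PROCESS', path, file_cnt))  # same event shape the patch uses
    queue.put(('EXIT',))                        # tells the consumer to stop


def consumer(queue):
    """Stand-in for process_tempfile: block on the queue instead of polling the directory."""
    while True:
        event = queue.get()
        if event[0] == 'EXIT':
            break
        _, path, file_cnt = event
        with open(path) as temp_file:
            print("processing tempfile_%d: %s" % (file_cnt, temp_file.read().strip()))
        os.remove(path)


if __name__ == "__main__":
    log_dir = tempfile.mkdtemp()
    queue = multiprocessing.Queue()
    worker = multiprocessing.Process(target=consumer, args=(queue,))
    worker.start()
    producer(queue, log_dir)
    worker.join()
    os.rmdir(log_dir)
```

Driving the consumer from the queue is also why the patch can delete the old number_of_files calculation derived from interval and rampup_rampdown: the consumer no longer needs to predict how many tempfiles will appear.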