
Merge pull request #64 from Node-DC/revert-62-master
Revert " Adding support for Node-DC-SSR"
uttampawar authored Sep 29, 2017
2 parents 42efcff + 4213d2e commit f07e14e
Showing 6 changed files with 162 additions and 314 deletions.
39 changes: 20 additions & 19 deletions Node-DC-EIS-client/README.md
@@ -1,4 +1,4 @@
Copyright (c) 2016 Intel Corporation
Copyright (c) 2016 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -10,29 +10,30 @@ Copyright (c) 2016 Intel Corporation
See the License for the specific language governing permissions and
limitations under the License.

# This client directory contains,
- runspec.py - A top-level runspec script; the main script that launches the workload.
  - 2 modes: a time-based run, which is the default (1), and a request-based run (2).
- node_dc_eis_testurls.py - The benchmark driver file, which sends the actual requests using Python's requests module.
- config.json - The input client configuration file. This file is used by runspec.py when run with the -f option.
- config-ssr.json - The input client configuration file for SSR workloads. This file is used by runspec.py when run with the -f option.
- process_time_based_output.py - Script to post-process and summarize time-based run data.
  - Creates temporary log files for every given interval, which are post-processed to create a summary.
- summary_file_sample.txt - Sample of the summary file that is generated after a run.
- A results subdirectory is created after the run; it contains all the result directories, which are designated by date and timestamp.

This client directory contains,
- runspec.py - A top-level runspec script; the main script that launches the workload.
  - 2 modes: a time-based run, which is the default (1), and a request-based run (2).
- node_dc_eis_testurls.py - The benchmark driver file, which sends the actual requests using Python's requests module.
- config.json - The input client configuration file. This file is used by runspec.py when run with the -f option.
- process_time_based_output.py - Script to post-process and summarize time-based run data.
  - Creates temporary log files for every given interval, which are post-processed to create a summary.
- summary_file_sample.txt - Sample of the summary file that is generated after a run.
- A results subdirectory is created after the run; it contains all the result directories, which are designated by date and timestamp.


## Client help:
- Run the main script: "python runspec.py <optional parameters, e.g. -t, -n, -c, -h>".
- You will need to change the IP address and port of your server in config.json or config-ssr.json.
- Takes additional command-line parameters.
  - Default parameters are set in the script, or can be read from a configuration file with the -f/--config option (the command line has the highest priority).
  - -h lists the available options.
  - The configurable option -g or --showgraph controls graph generation (1 to generate output graphs, 0 for no graph).
- This script sends requests using Python's requests module, generates a log file, and generates output graphs.
- Run the main script: "python runspec.py <optional parameters, e.g. -t, -n, -c, -h>".
- You will need to change the IP address and port of your server in 'runspec.py' or in config.json.
- Takes additional command-line parameters.
  - Default parameters are set in the script, or can be read from a configuration file with the -f/--config option (the command line has the highest priority).
  - -h lists the available options.
  - The configurable option -g or --showgraph controls graph generation (1 to generate output graphs, 0 for no graph).
  - The server IP address and port can be changed in config.json or directly in runspec.py.
- This script sends requests using Python's requests module, generates a log file, and generates output graphs.


## Client result files
- A temporary log file (request-based run) which contains details like request number, write time, read time, and response time for every request; a sketch of how such a record can be produced follows below.
- A summary file (RTdata) which has client information, database information, and a summary of the run (throughput; min, max, and average response time).
- Two output graphs: one is the throughput graph and the second is the latency graph.
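As a rough illustration of the per-request record described above, here is a minimal sketch, not the project's driver code; the endpoint, port, and the mapping of logged fields to write/read times are assumptions:

```python
# Sketch: timing one GET request with Python's requests module and
# appending a per-request record to a temporary log file.
# The URL and field order below are illustrative, not the project's format.
import time
import requests

def timed_get(url, request_num, log):
    start = time.time()   # assumed to correspond to the logged write-time
    r = requests.get(url)
    end = time.time()     # assumed to correspond to the logged read-time
    response_time = end - start
    log.write("%d %.6f %.6f %.6f %d\n" %
              (request_num, start, end, response_time, len(r.content)))
    return r

with open("tempfile_0", "w") as log:
    timed_get("http://localhost:9000/employees", 1, log)  # hypothetical endpoint
```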

29 changes: 0 additions & 29 deletions Node-DC-EIS-client/config-ssr.json

This file was deleted.

83 changes: 33 additions & 50 deletions Node-DC-EIS-client/node_dc_eis_testurls.py
@@ -19,7 +19,6 @@
import os
import urlparse
import re
import sys
from functools import partial
from eventlet.green import urllib2
from eventlet.green import socket
@@ -64,6 +63,7 @@
#globals to implement file optimization
start_time = 0
file_cnt = 0
log=""

ip_cache = {}
def get_ip(hostname):
@@ -83,7 +83,7 @@ def get_ip(hostname):
ip_cache[hostname] = ip
return ip
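The ip_cache dictionary above memoizes DNS lookups so each hostname is resolved only once per run. A self-contained sketch of the same idea, assuming socket.gethostbyname as the resolver (the project's version may differ in details):

```python
import socket

ip_cache = {}

def get_ip(hostname):
    # Serve repeat lookups from the cache; resolve only on a miss.
    if hostname in ip_cache:
        return ip_cache[hostname]
    ip = socket.gethostbyname(hostname)
    ip_cache[hostname] = ip
    return ip
```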

def get_url(url, url_type, request_num, phase, accept_header, http_headers):
def get_url(url, url_type, request_num, log, phase, accept_header):
"""
# Desc : Function to send get requests to the server. Type 1 is get requests
# handles 3 types of GET requests based on ID, last_name and zipcode.
@@ -108,13 +108,13 @@ def get_url(url, url_type, request_num, phase, accept_header, http_headers):

req_path = '{}{}'.format(urlo.path, query)

req = '''GET {} HTTP/1.1\r
Host: {}\r
Accept: {}\r
Connection: close\r
{}\r
'''.format(req_path, urlo.netloc,
accept_header, ''.join(hh + '\r\n' for hh in http_headers))
req = '''GET {} HTTP/1.1
Host: {}
User-Agent: runspec/0.9
Accept: {}
Connection: close
'''.format(req_path, urlo.netloc, accept_header)

try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
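For context: HTTP/1.1 frames a request with CRLF line endings and a blank line at the end of the headers, which is why one of the two variants above appends an explicit \r to each line of the triple-quoted string. A version-agnostic way to build the same request (a sketch under that assumption, not the project's code):

```python
# Sketch: building the raw GET request with explicit CRLF framing.
# req_path, host, and accept_header are assumed to match the code above.
def build_get_request(req_path, host, accept_header, extra_headers=()):
    lines = [
        "GET %s HTTP/1.1" % req_path,
        "Host: %s" % host,
        "Accept: %s" % accept_header,
        "Connection: close",
    ]
    lines.extend(extra_headers)              # e.g. a custom User-Agent header
    return "\r\n".join(lines) + "\r\n\r\n"   # blank line terminates the headers
```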
@@ -183,7 +183,7 @@ def post_function(url, post_data):
print e
return r

def post_url(url, url_type, request_num, phase):
def post_url(url,url_type,request_num,log,phase):
"""
# Desc : Function to send post requests to the server. Type 2 is post requests
# Retries if the post request fails
@@ -238,7 +238,7 @@ def post_url(url, url_type, request_num, phase):
util.printlog(log,phase,url_type,request_num,url,start,end,response_time,total_length)
return

def delete_url(url, url_type, request_num, phase):
def delete_url(url,url_type,request_num,log,phase):
"""
# Desc : Function to send delete requests to the server. Type 3 is delete requests
# also captures the data record being deleted and saves it in a list(post/_datalist)
@@ -272,7 +272,6 @@ def delete_url(url, url_type, request_num, phase):
if 'employee' in response:
post_datalist.insert(front_oflist,response)
else:
print url
print "Warning : Record not found"
start = time.time()
r = s.delete(url, headers=headers)
@@ -336,24 +335,8 @@ def calculate_len_postdel(response):
total_length = header_len + content_len
return total_length
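calculate_len_postdel reports the full response size as header bytes plus body bytes. One plausible way to compute the same quantity with a requests-style response object (a sketch; the project's header accounting may differ):

```python
def calculate_len(response):
    # Approximate total response size: serialized header bytes plus body bytes.
    header_len = sum(len(k) + len(v) for k, v in response.headers.items())
    content_len = len(response.content)
    return header_len + content_len
```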

def open_log(log_dir):
try:
log = open(os.path.join(log_dir, "tempfile_" + str(file_cnt)), "w")
except IOError:
print "[%s] Could not open templog file for writing." % (util.get_current_time())
sys.exit(1)

return log

def clean_up_log(queue):
''' Used to clean up last log file '''
global log
log.close()
queue.put(('PROCESS', log.name, file_cnt))
log = None

def main_entry(url, request_num, url_type, log_dir, phase, interval,
run_mode, temp_log, accept_header, queue, http_headers):
run_mode, temp_log, accept_header):
"""
# Desc : main entry function to determine the type of url - GET,POST or DELETE
# creates log file which captures per request data depending on the type of run.
Expand All @@ -365,36 +348,36 @@ def main_entry(url, request_num, url_type, log_dir, phase, interval,
"""
global start_time
global file_cnt
global init
global log
global init
if run_mode == 1:
if not init:
start_time = time.time();
log = open_log(log_dir)
init = True

if time.time() - start_time > float(interval):
old_log = log
old_file_cnt = file_cnt
file_cnt += 1
start_time=time.time();
try:
log = open(os.path.join(log_dir,"tempfile_"+str(file_cnt)),"w")
init = True
except IOError:
print ("[%s] Could not open templog file for writing." % (util.get_current_time()))
sys.exit(1)
if(time.time()-start_time > float(interval)):
file_cnt +=1
start_time = time.time()

log = open_log(log_dir)

old_log.close()
queue.put(('PROCESS', old_log.name, old_file_cnt))

try:
log = open(os.path.join(log_dir,"tempfile_"+str(file_cnt)),"w")
except IOError:
print ("[%s] Could not open templog file for writing." % (util.get_current_time()))
sys.exit(1)
else:
try:
log = open(os.path.join(log_dir, temp_log), "a")
except IOError:
print "Error: %s File not found." % temp_log
log = open(os.path.join(log_dir,temp_log), "a")
except IOError as e:
print("Error: %s File not found." % temp_log)
sys.exit(1)

if url_type == 1:
get_url(url, url_type, request_num, phase, accept_header, http_headers)
get_url(url,url_type,request_num, log, phase, accept_header)
if url_type == 2:
post_url(url, url_type, request_num, phase)
post_url(url,url_type,request_num,log,phase)
if url_type == 3:
delete_url(url, url_type, request_num, phase)
delete_url(url,url_type,request_num,log,phase)
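The restored main_entry body implements the time-based mode's log rotation: once more than `interval` seconds have elapsed, the driver bumps file_cnt and opens a fresh tempfile_<n> so the post-processor can summarize each time slice separately. A condensed sketch of that pattern (illustrative only; the globals and error handling of the real code are omitted):

```python
import os
import time

def rotate_log(log, log_dir, file_cnt, start_time, interval):
    # After `interval` seconds, close the current temp log and open the next.
    if time.time() - start_time > float(interval):
        log.close()
        file_cnt += 1
        start_time = time.time()
        log = open(os.path.join(log_dir, "tempfile_" + str(file_cnt)), "w")
    return log, file_cnt, start_time
```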

54 changes: 26 additions & 28 deletions Node-DC-EIS-client/process_time_based_output.py
@@ -32,8 +32,7 @@
percent99 = 0
percent95 = 0

def process_tempfile(results_dir, interval, rampup_rampdown, request,
temp_log, instance_id, multiple_instance, queue):
def process_tempfile(results_dir,interval,rampup_rampdown,request,temp_log,instance_id,multiple_instance):
"""
# Desc : Function to process each intermediate files.
# waits for interval and then calls process_data on the next templog file
@@ -42,33 +41,33 @@ def process_tempfile(results_dir, interval, rampup_rampdown, request,
# total time for the measurement, instance ID, flag to check multiple instance run
# Output: None
"""
number_of_files = int(math.ceil((2.0 * rampup_rampdown + request) / interval))

file_cnt=0
try:
temp_log = open(os.path.join(results_dir, temp_log),"a")
temp_log = open(os.path.join(results_dir,temp_log),"a")
except IOError:
print "[%s] Could not open templog file for writing." % util.get_current_time()
sys.exit(1)

print ("[%s] Could not open templog file for writing." % (util.get_current_time()))
temp_log.flush()

while True:
event = queue.get()
if event[0] == 'EXIT':
break
_, tempfile, file_cnt = event
try:
temp_file = open(tempfile, "r")
except IOError:
print "[%s] Could not open %s file for reading." % (util.get_current_time(), tempfile)
sys.exit(1)

with temp_file:
print "[%s] Processing Output File tempfile_[%d]." % (util.get_current_time(), file_cnt)
process_data(temp_file, temp_log, results_dir, file_cnt, interval)

if file_cnt == 0 and multiple_instance:
util.create_indicator_file(os.path.dirname(os.path.dirname(results_dir)), "start_processing", instance_id, temp_log.name)
os.remove(tempfile)
time.sleep(60)
while file_cnt < number_of_files:
tempfile = os.path.join(results_dir,"tempfile_"+str(file_cnt))
if(os.path.exists(tempfile)):
time.sleep(interval)
try:
temp_file = open(tempfile,"r")
print ("[%s] Processing Output File tempfile_[%d]." % (util.get_current_time(),file_cnt))
process_data(temp_file,temp_log,results_dir,file_cnt,interval)
temp_file.close()
if(file_cnt == 0 and multiple_instance):
util.create_indicator_file(os.path.dirname(os.path.dirname(results_dir)),"start_processing", instance_id, temp_log.name)
os.remove(tempfile)
file_cnt +=1
except IOError:
print ("[%s] Could not open templog file for reading." % (util.get_current_time()))
sys.exit(1)
else:
time.sleep(interval)

print ("[%s] Closing main templog file." % (util.get_current_time()))
temp_log.close()
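The restored loop sizes its work up front via number_of_files = ceil((2 * rampup_rampdown + request) / interval), then polls for each tempfile in turn. With illustrative numbers (not from the project), a 60 s ramp on each side of a 300 s measurement summarized every 30 s works out as follows:

```python
import math

# Illustrative values only: 60 s rampup/rampdown, 300 s of requests,
# one temp log file per 30 s interval.
rampup_rampdown, request, interval = 60, 300, 30
number_of_files = int(math.ceil((2.0 * rampup_rampdown + request) / interval))
print(number_of_files)  # -> 14 temp files to poll for and process
```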
@@ -281,7 +280,7 @@ def post_process(temp_log,output_file,results_dir,interval,memlogfile,no_graph):
print("\nThe memory usage graph is located at " +os.path.abspath(os.path.join(results_dir,'memory_usage.png')))
print ("[%s] Plotting graphs done." % (util.get_current_time()))

def process_time_based_output(results_dir,interval,rampup_rampdown,request,temp_log,output_file,memlogfile,instance_id,multiple_instance,no_graph, queue):
def process_time_based_output(results_dir,interval,rampup_rampdown,request,temp_log,output_file,memlogfile,instance_id,multiple_instance,no_graph):
"""
# Desc : Main function which handles all the Output Processing
# This function is run by the Child Function
Expand All @@ -291,8 +290,7 @@ def process_time_based_output(results_dir,interval,rampup_rampdown,request,temp_
# Output: None
"""
print ("[%s] Starting process for post processing." % (util.get_current_time()))
process_tempfile(results_dir, interval, rampup_rampdown, request, temp_log,
instance_id, multiple_instance, queue)
process_tempfile(results_dir,interval,rampup_rampdown,request,temp_log,instance_id,multiple_instance)
if multiple_instance:
util.create_indicator_file(os.path.dirname(os.path.dirname(results_dir)),"done_processing", instance_id, "")
# #Post Processing Function
