Skip to content

Commit

Permalink
Test runner supports running dynamic-shape models on EVB.
Browse files Browse the repository at this point in the history
- Support dynamic-shape model tests on EVB.
- Configure the dump file names in config.toml.
  • Loading branch information
zhangyang2057 committed Aug 30, 2023
1 parent e5f39af commit f20a300
Show file tree
Hide file tree
Showing 5 changed files with 38 additions and 21 deletions.
3 changes: 3 additions & 0 deletions tests/config.toml
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
name = 'default_config'
root = 'tests_output'
kmodel_name = 'test.kmodel'
desc_name = 'kmodel.desc'
infer_name = 'infer_report.json'
dump_hist = false
dump_infer = false

Expand Down
45 changes: 29 additions & 16 deletions tests/inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,23 +14,14 @@ def data_shape_list_string(data):
return '\n'.join(map(lambda d: ' '.join(map(lambda x: str(x), d['model_shape'])), data))


def generate_kmodel_data_info(inputs, outputs, infer_dir):
    """Write a ``kmodel.desc`` file describing model I/O into *infer_dir*.

    File format: first line is ``"<num_inputs> <num_outputs>"``, followed by
    one space-separated model shape per line — input shapes first, then
    output shapes (as produced by ``data_shape_list_string``).
    """
    header = f"{len(inputs)} {len(outputs)}"
    in_shapes = data_shape_list_string(inputs)
    out_shapes = data_shape_list_string(outputs)
    desc_path = os.path.join(infer_dir, "kmodel.desc")
    with open(desc_path, "w+") as desc:
        desc.write(f"{header}\n{in_shapes}\n{out_shapes}")


class Inference:
def run_inference(self, compiler, target, ptq_enabled, infer_dir):
in_ci = test_utils.in_ci()
kpu_targets = test_utils.kpu_targets()
nuc_ip = test_utils.nuc_ip()
nuc_port = test_utils.nuc_port()
test_executable = test_utils.test_executable(target)
running_on_evb = in_ci and target in kpu_targets and nuc_ip is not None and nuc_port is not None and test_executable is not None and len(
running_on_evb = target in kpu_targets and nuc_ip is not None and nuc_port is not None and test_executable is not None and len(
self.inputs) > 0 and len(self.outputs) > 0

if self.cfg['dump_infer']:
Expand All @@ -46,16 +37,16 @@ def run_inference(self, compiler, target, ptq_enabled, infer_dir):

compiler.compile()
kmodel = compiler.gencode_tobytes()
if self.dynamic:
generate_kmodel_data_info(self.inputs, self.outputs, infer_dir)
os.makedirs(infer_dir, exist_ok=True)
if self.dynamic:
dump_kmodel_desc(os.path.join(infer_dir, self.cfg['desc_name']))
if not in_ci:
with open(os.path.join(infer_dir, 'test.kmodel'), 'wb') as f:
with open(os.path.join(infer_dir, self.cfg['kmodel_name']), 'wb') as f:
f.write(kmodel)

compile_opt = self.cfg['compile_opt']
if running_on_evb:
outputs = self.run_evb(target, kmodel, compile_opt)
outputs = self.run_evb(target, kmodel, compile_opt, infer_dir)
else:
sim = nncase.Simulator()
sim.load_model(kmodel)
Expand Down Expand Up @@ -86,6 +77,13 @@ def set_infer_input(self, sim, compile_opt):

sim.set_input_tensor(idx, nncase.RuntimeTensor.from_numpy(data))

def dump_kmodel_desc(self, file):
    """Write the kmodel description file for a dynamic-shape model.

    File format: first line is ``"<num_inputs> <num_outputs>"``, followed by
    one space-separated model shape per line — input shapes first, then
    output shapes (as produced by ``data_shape_list_string``).

    :param file: destination path for the description file.
    """
    # BUG FIX: the original signature omitted ``self`` even though the body
    # reads ``self.inputs``/``self.outputs``, so it could not run as either a
    # method or a free function. Callers must invoke it as
    # ``self.dump_kmodel_desc(path)``.
    input_shapes = data_shape_list_string(self.inputs)
    output_shapes = data_shape_list_string(self.outputs)
    s = f"{len(self.inputs)} {len(self.outputs)}\n{input_shapes}\n{output_shapes}"
    with open(file, "w+") as f:
        f.write(s)

def dump_infer_output(self, sim, compile_opt, infer_dir):
outputs = []
for i in range(sim.outputs_size):
Expand All @@ -105,7 +103,7 @@ def dump_infer_output(self, sim, compile_opt, infer_dir):
dump_txt_file(os.path.join(infer_dir, f'nncase_result_{i}.txt'), output)
return outputs

def run_evb(self, target, kmodel, compile_opt):
def run_evb(self, target, kmodel, compile_opt, infer_dir):
ip = test_utils.nuc_ip()
port = test_utils.nuc_port()
test_executable = test_utils.test_executable(target)
Expand All @@ -127,6 +125,7 @@ def run_evb(self, target, kmodel, compile_opt):
header_dict['app'] = 1
header_dict['kmodel'] = 1
header_dict['inputs'] = len(self.inputs)
header_dict['description'] = 1 if self.dynamic else 0
header_dict['outputs'] = len(self.outputs)
client_socket.sendall(json.dumps(header_dict).encode())

Expand All @@ -142,7 +141,7 @@ def run_evb(self, target, kmodel, compile_opt):

# send kmodel
dummy = client_socket.recv(1024)
file_dict['file_name'] = 'test.kmodel'
file_dict['file_name'] = self.cfg['kmodel_name']
file_dict['file_size'] = len(kmodel)
client_socket.sendall(json.dumps(file_dict).encode())
dummy = client_socket.recv(1024)
Expand All @@ -159,6 +158,17 @@ def run_evb(self, target, kmodel, compile_opt):
dummy = client_socket.recv(1024)
client_socket.sendall(data.tobytes())

# send kmodel.desc
if self.dynamic:
dummy = client_socket.recv(1024)
desc_file = os.path.join(infer_dir, self.cfg['desc_name'])
file_dict['file_name'] = os.path.basename(desc_file)
file_dict['file_size'] = os.path.getsize(desc_file)
client_socket.sendall(json.dumps(file_dict).encode())
dummy = client_socket.recv(1024)
with open(desc_file, 'rb') as f:
client_socket.sendall(f.read())

# get infer result
outputs = []
result_dict = {}
Expand Down Expand Up @@ -187,6 +197,9 @@ def run_evb(self, target, kmodel, compile_opt):

output = np.frombuffer(buffer, dtype=self.outputs[i]['dtype'])
outputs.append(output)
if not test_utils.in_ci():
dump_bin_file(os.path.join(infer_dir, f'nncase_result_{i}.bin'), output)
dump_txt_file(os.path.join(infer_dir, f'nncase_result_{i}.txt'), output)
client_socket.sendall(f"recv nncase_result_{i}.bin succeed".encode())

client_socket.close()
Expand Down
5 changes: 3 additions & 2 deletions tests/nuc_proxy.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,8 @@ def recv_worker(conn, target):
target.logger.info("test case = {0}".format(new_case))
case_dir = os.path.join(target.nfs_dir, new_case)
os.makedirs(case_dir)
file_num = header_dict['app'] + header_dict['kmodel'] + header_dict['inputs']
file_num = header_dict['app'] + header_dict['kmodel'] + \
header_dict['inputs'] + header_dict['description']

# recv all kinds of files(app + kmodel + inputs)
cmds = f'cd {target.working_dir}/{target.name}/{new_case};./'
Expand Down Expand Up @@ -166,7 +167,7 @@ def infer_worker(target):
target.s0.run_cmd('reboot')
time.sleep(20)
else:
dict['time'] = float(ret.split('\n')[1].split()[1])
dict['time'] = float(ret.split('\n')[-2].split()[1])
conn.sendall(json.dumps(dict).encode())
dummy = conn.recv(1024)

Expand Down
2 changes: 1 addition & 1 deletion tests/test_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def __init__(self, case_name, override_cfg: str = None) -> None:
self.dynamic = False

if self.cfg['dump_infer']:
self.infer_file = test_utils.infer_file()
self.infer_file = test_utils.infer_file(self.cfg['infer_name'])
self.infer_dict = {
'case': 'unknown',
'target': 'cpu',
Expand Down
4 changes: 2 additions & 2 deletions tests/test_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,5 +65,5 @@ def test_executable(target):
return os.getenv('TEST_EXECUTABLE_{0}'.format(target.upper()))


def infer_file():
return os.getenv('INFER_FILE', 'infer_report.json')
def infer_file(file):
    """Return the infer-report file name.

    The ``INFER_FILE`` environment variable takes precedence; when it is
    unset, *file* (the name configured in config.toml) is returned.
    """
    return os.environ.get('INFER_FILE', file)

0 comments on commit f20a300

Please sign in to comment.