fix CI warnings across several files (#513)
1) Fix logging format issues
2) Fix unused import
3) Fix use of double quotes

---------

Signed-off-by: David Korczynski <[email protected]>
DavidKorczynski authored Jul 27, 2024
1 parent ed02140 commit 8593e96
Showing 3 changed files with 11 additions and 11 deletions.
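
Most of the diff below replaces eager string interpolation inside logging calls (f-strings and %-formatting) with the logging module's lazy, argument-based style, which lint checks such as pylint's logging-fstring-interpolation and logging-not-lazy warn about; the introspector.py change also swaps double quotes for single quotes. A minimal sketch of the pattern, with a hypothetical count value used purely for illustration:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    count = 42  # hypothetical value, for illustration only

    # Flagged by the lint checks: the message is built even if INFO is filtered out.
    logger.info(f'Length of potential targets: {count}')
    logger.info('Length of potential targets: %d' % count)

    # Preferred: pass arguments and let logging format them only when the record is emitted.
    logger.info('Length of potential targets: %d', count)

Besides deferring the formatting cost, the lazy form keeps the message template constant, which makes it easier for log tooling to group identical messages.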
2 changes: 1 addition & 1 deletion data_prep/introspector.py
@@ -732,7 +732,7 @@ def populate_benchmarks_using_introspector(project: str, language: str,

if len(potential_benchmarks) >= (limit * len(target_oracles)):
break
logger.info("Length of potential targets: %d" % (len(potential_benchmarks)))
logger.info('Length of potential targets: %d', len(potential_benchmarks))

return potential_benchmarks

11 changes: 6 additions & 5 deletions run_all_experiments.py
@@ -54,7 +54,8 @@
TIME_STAMP_FMT = '%Y-%m-%d %H:%M:%S'

LOG_LEVELS = {'debug', 'info'}
-LOG_FMT = '%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s'
+LOG_FMT = ('%(asctime)s.%(msecs)03d %(levelname)s '
+'%(module)s - %(funcName)s: %(message)s')


class Result:
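
The LOG_FMT rewrite above relies on Python's implicit concatenation of adjacent string literals: the format string itself is unchanged, only the source line is wrapped to satisfy the line-length check. A quick illustration (the variable names are invented for this example):

    SINGLE_LINE = '%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s'
    WRAPPED = ('%(asctime)s.%(msecs)03d %(levelname)s '
               '%(module)s - %(funcName)s: %(message)s')
    assert SINGLE_LINE == WRAPPED  # adjacent literals are joined at compile time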
@@ -109,8 +110,8 @@ def get_experiment_configs(
benchmark_yamls = []
if args.benchmark_yaml:
logger.info(
-f'A benchmark yaml file ({args.benchmark_yaml}) is provided. '
-f'Will use it and ignore the files in {args.benchmarks_directory}.')
+'A benchmark yaml file %s is provided. Will use it and ignore '
+'the files in %s.', args.benchmark_yaml, args.benchmarks_directory)
benchmark_yamls = [args.benchmark_yaml]
else:
if args.generate_benchmarks:
@@ -370,8 +371,8 @@ def main():
experiment_configs = get_experiment_configs(args)
experiment_results = []

-logger.info(f'Running %s experiment(s) in parallels of %s.',
-len(experiment_configs), str(NUM_EXP))
+logger.info('Running %d experiment(s) in parallels of %d.',
+len(experiment_configs), NUM_EXP)
if NUM_EXP == 1:
for config in experiment_configs:
result = run_experiments(*config)
9 changes: 4 additions & 5 deletions run_one_experiment.py
@@ -23,7 +23,6 @@

from data_prep import project_targets
from data_prep.project_context.context_introspector import ContextRetriever
-from experiment import benchmark as benchmarklib
from experiment import builder_runner as builder_runner_lib
from experiment import evaluator as exp_evaluator
from experiment import oss_fuzz_checkout
@@ -87,8 +86,8 @@ def generate_targets(benchmark: Benchmark,
builder: prompt_builder.PromptBuilder,
debug: bool = DEBUG) -> list[str]:
"""Generates fuzz target with LLM."""
-logger.info(f'Generating targets for {benchmark.project} '
-f'{benchmark.function_signature} using {model.name}..')
+logger.info('Generating targets for %s %s using %s..', benchmark.project,
+benchmark.function_signature, model.name)
model.query_llm(prompt, response_dir=work_dirs.raw_targets, log_output=debug)

_, target_ext = os.path.splitext(benchmark.target_path)
@@ -108,9 +107,9 @@ def generate_targets(benchmark: Benchmark,
if generated_targets:
targets_relpath = map(os.path.relpath, generated_targets)
targets_relpath_str = '\n '.join(targets_relpath)
-logger.info(f'Generated:\n {targets_relpath_str}')
+logger.info('Generated:\n %s', targets_relpath_str)
else:
-logger.info(f'Failed to generate targets: {generated_targets}')
+logger.info('Failed to generate targets: %s', generated_targets)
return generated_targets


