diff --git a/data_prep/introspector.py b/data_prep/introspector.py
index 9972f172ae..d472b1b42a 100755
--- a/data_prep/introspector.py
+++ b/data_prep/introspector.py
@@ -732,7 +732,7 @@ def populate_benchmarks_using_introspector(project: str, language: str,
     if len(potential_benchmarks) >= (limit * len(target_oracles)):
       break
 
-  logger.info("Length of potential targets: %d" % (len(potential_benchmarks)))
+  logger.info('Length of potential targets: %d', len(potential_benchmarks))
   return potential_benchmarks
 
 
diff --git a/run_all_experiments.py b/run_all_experiments.py
index bcf1a9cdc8..b24394fbc6 100755
--- a/run_all_experiments.py
+++ b/run_all_experiments.py
@@ -54,7 +54,8 @@
 TIME_STAMP_FMT = '%Y-%m-%d %H:%M:%S'
 
 LOG_LEVELS = {'debug', 'info'}
-LOG_FMT = '%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s'
+LOG_FMT = ('%(asctime)s.%(msecs)03d %(levelname)s '
+           '%(module)s - %(funcName)s: %(message)s')
 
 
 class Result:
@@ -109,8 +110,8 @@ def get_experiment_configs(
   benchmark_yamls = []
   if args.benchmark_yaml:
     logger.info(
-        f'A benchmark yaml file ({args.benchmark_yaml}) is provided. '
-        f'Will use it and ignore the files in {args.benchmarks_directory}.')
+        'A benchmark yaml file %s is provided. Will use it and ignore '
+        'the files in %s.', args.benchmark_yaml, args.benchmarks_directory)
     benchmark_yamls = [args.benchmark_yaml]
   else:
     if args.generate_benchmarks:
@@ -370,8 +371,8 @@ def main():
   experiment_configs = get_experiment_configs(args)
   experiment_results = []
 
-  logger.info(f'Running %s experiment(s) in parallels of %s.',
-              len(experiment_configs), str(NUM_EXP))
+  logger.info('Running %d experiment(s) in parallels of %d.',
+              len(experiment_configs), NUM_EXP)
   if NUM_EXP == 1:
     for config in experiment_configs:
       result = run_experiments(*config)
diff --git a/run_one_experiment.py b/run_one_experiment.py
index 5a4bd13914..24535449c3 100644
--- a/run_one_experiment.py
+++ b/run_one_experiment.py
@@ -23,7 +23,6 @@
 from data_prep import project_targets
 from data_prep.project_context.context_introspector import ContextRetriever
 
-from experiment import benchmark as benchmarklib
 from experiment import builder_runner as builder_runner_lib
 from experiment import evaluator as exp_evaluator
 from experiment import oss_fuzz_checkout
@@ -87,8 +86,8 @@ def generate_targets(benchmark: Benchmark,
                      builder: prompt_builder.PromptBuilder,
                      debug: bool = DEBUG) -> list[str]:
   """Generates fuzz target with LLM."""
-  logger.info(f'Generating targets for {benchmark.project} '
-              f'{benchmark.function_signature} using {model.name}..')
+  logger.info('Generating targets for %s %s using %s..', benchmark.project,
+              benchmark.function_signature, model.name)
   model.query_llm(prompt, response_dir=work_dirs.raw_targets, log_output=debug)
 
   _, target_ext = os.path.splitext(benchmark.target_path)
@@ -108,9 +107,9 @@ def generate_targets(benchmark: Benchmark,
   if generated_targets:
     targets_relpath = map(os.path.relpath, generated_targets)
     targets_relpath_str = '\n '.join(targets_relpath)
-    logger.info(f'Generated:\n {targets_relpath_str}')
+    logger.info('Generated:\n %s', targets_relpath_str)
   else:
-    logger.info(f'Failed to generate targets: {generated_targets}')
+    logger.info('Failed to generate targets: %s', generated_targets)
 
   return generated_targets
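
For context: every hunk above applies the same idiom, replacing eager f-string or %-operator formatting with lazy %-style arguments to logger.info, so the message is only interpolated when the record is actually emitted (this is also what pylint's logging-fstring-interpolation check, W1203, flags). Below is a minimal runnable sketch of the pattern, reusing the LOG_FMT and TIME_STAMP_FMT values shown in the run_all_experiments.py hunk; the report() helper and its sample argument are illustrative only, not part of the patch.

import logging

# Same format and timestamp strings as LOG_FMT / TIME_STAMP_FMT above.
logging.basicConfig(
    level=logging.INFO,
    format=('%(asctime)s.%(msecs)03d %(levelname)s '
            '%(module)s - %(funcName)s: %(message)s'),
    datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)


def report(potential_benchmarks: list) -> None:
  # Lazy logging: '%d' is interpolated by the logging framework only if the
  # INFO level is enabled; an f-string would be formatted on every call.
  logger.info('Length of potential targets: %d', len(potential_benchmarks))


report(['benchmark-a', 'benchmark-b'])

Besides skipping the formatting cost for disabled levels, keeping the template separate from its arguments lets log aggregators group records that share a message.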