From cd61ea09dfacfe9969062e739476f98a8e168b9b Mon Sep 17 00:00:00 2001
From: Eric Curtin
Date: Thu, 19 Dec 2024 13:58:17 +0000
Subject: [PATCH] Update llama-run to include temperature option

This commit updates the `examples/run/README.md` file to include a new
option for setting the temperature and updates the `run.cpp` file to
parse this option.

Signed-off-by: Eric Curtin
---
 examples/run/README.md |  2 ++
 examples/run/run.cpp   | 63 ++++++++++++++++++++++++++++++++----------
 2 files changed, 51 insertions(+), 14 deletions(-)

diff --git a/examples/run/README.md b/examples/run/README.md
index 874293516f4b6..a0680544120b9 100644
--- a/examples/run/README.md
+++ b/examples/run/README.md
@@ -19,6 +19,8 @@ Options:
         Context size (default: 2048)
   -n, --ngl <value>
         Number of GPU layers (default: 0)
+  --temp <value>
+        Temperature (default: 0.8)
   -v, --verbose, --log-verbose
         Set verbosity level to infinity (i.e. log all messages, useful for debugging)
   -h, --help
diff --git a/examples/run/run.cpp b/examples/run/run.cpp
index 03da54ca3b2ef..9059e94c45854 100644
--- a/examples/run/run.cpp
+++ b/examples/run/run.cpp
@@ -55,6 +55,19 @@ static int printe(const char * fmt, ...) {
 class Opt {
   public:
     int init(int argc, const char ** argv) {
+        ctx_params_          = llama_context_default_params();
+        model_params_        = llama_model_default_params();
+        context_size_default = ctx_params_.n_batch;
+        ngl_default          = model_params_.n_gpu_layers;
+        common_params_sampling sampling;
+        temperature_default = sampling.temp;
+
+        if (argc < 2) {
+            printe("Error: No arguments provided.\n");
+            help();
+            return 1;
+        }
+
         // Parse arguments
         if (parse(argc, argv)) {
             printe("Error: Failed to parse arguments.\n");
@@ -68,15 +81,24 @@ class Opt {
             return 2;
         }
 
+        ctx_params_.n_batch        = context_size_ >= 0 ? context_size_ : context_size_default;
+        model_params_.n_gpu_layers = ngl_ >= 0 ? ngl_ : ngl_default;
+        temperature_               = temperature_ >= 0 ? temperature_ : temperature_default;
+
         return 0;  // Success
     }
 
+    llama_context_params ctx_params_;
+    llama_model_params   model_params_;
     std::string model_;
-    std::string user_;
-    int         context_size_ = -1, ngl_ = -1;
+    std::string          user_;
+    int                  context_size_ = -1, ngl_ = -1;
+    float                temperature_  = -1;
     bool        verbose_ = false;
 
   private:
+    int   context_size_default = -1, ngl_default = -1;
+    float temperature_default  = -1;
     bool help_ = false;
 
     bool parse_flag(const char ** argv, int i, const char * short_opt, const char * long_opt) {
@@ -89,6 +111,17 @@ class Opt {
         }
 
         option_value = std::atoi(argv[++i]);
+
+        return 0;
+    }
+
+    int handle_option_with_value(int argc, const char ** argv, int & i, float & option_value) {
+        if (i + 1 >= argc) {
+            return 1;
+        }
+
+        option_value = std::atof(argv[++i]);
+
         return 0;
     }
 
@@ -103,6 +136,10 @@ class Opt {
             if (handle_option_with_value(argc, argv, i, ngl_) == 1) {
                 return 1;
             }
+        } else if (options_parsing && strcmp(argv[i], "--temp") == 0) {
+            if (handle_option_with_value(argc, argv, i, temperature_) == 1) {
+                return 1;
+            }
         } else if (options_parsing &&
                    (parse_flag(argv, i, "-v", "--verbose") || parse_flag(argv, i, "-v", "--log-verbose"))) {
             verbose_ = true;
@@ -142,6 +179,8 @@ class Opt {
             "        Context size (default: %d)\n"
             "  -n, --ngl <value>\n"
             "        Number of GPU layers (default: %d)\n"
+            "  --temp <value>\n"
+            "        Temperature (default: %.1f)\n"
             "  -v, --verbose, --log-verbose\n"
             "        Set verbosity level to infinity (i.e. log all messages, useful for debugging)\n"
             "  -h, --help\n"
@@ -170,7 +209,7 @@ class Opt {
             "  llama-run file://some-file3.gguf\n"
             "  llama-run --ngl 999 some-file4.gguf\n"
             "  llama-run --ngl 999 some-file5.gguf Hello World\n",
-            llama_context_default_params().n_batch, llama_model_default_params().n_gpu_layers);
+            context_size_default, ngl_default, temperature_default);
     }
 };
 
@@ -495,12 +534,12 @@ class LlamaData {
             return 1;
         }
 
-        context = initialize_context(model, opt.context_size_);
+        context = initialize_context(model, opt);
         if (!context) {
             return 1;
         }
 
-        sampler = initialize_sampler();
+        sampler = initialize_sampler(opt);
 
         return 0;
     }
@@ -619,14 +658,12 @@ class LlamaData {
     // Initializes the model and returns a unique pointer to it
     llama_model_ptr initialize_model(Opt & opt) {
         ggml_backend_load_all();
-        llama_model_params model_params = llama_model_default_params();
-        model_params.n_gpu_layers        = opt.ngl_ >= 0 ? opt.ngl_ : model_params.n_gpu_layers;
         resolve_model(opt.model_);
         printe(
             "\r%*s"
             "\rLoading model",
             get_terminal_width(), " ");
-        llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), model_params));
+        llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), opt.model_params_));
         if (!model) {
             printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
         }
@@ -636,10 +673,8 @@ class LlamaData {
     }
 
     // Initializes the context with the specified parameters
-    llama_context_ptr initialize_context(const llama_model_ptr & model, const int n_ctx) {
-        llama_context_params ctx_params = llama_context_default_params();
-        ctx_params.n_ctx = ctx_params.n_batch = n_ctx >= 0 ? n_ctx : ctx_params.n_batch;
-        llama_context_ptr context(llama_new_context_with_model(model.get(), ctx_params));
+    llama_context_ptr initialize_context(const llama_model_ptr & model, const Opt & opt) {
+        llama_context_ptr context(llama_new_context_with_model(model.get(), opt.ctx_params_));
         if (!context) {
             printe("%s: error: failed to create the llama_context\n", __func__);
         }
@@ -648,10 +683,10 @@ class LlamaData {
     }
 
     // Initializes and configures the sampler
-    llama_sampler_ptr initialize_sampler() {
+    llama_sampler_ptr initialize_sampler(const Opt & opt) {
         llama_sampler_ptr sampler(llama_sampler_chain_init(llama_sampler_chain_default_params()));
         llama_sampler_chain_add(sampler.get(), llama_sampler_init_min_p(0.05f, 1));
-        llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(0.8f));
+        llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(opt.temperature_));
         llama_sampler_chain_add(sampler.get(), llama_sampler_init_dist(LLAMA_DEFAULT_SEED));
         return sampler;