diff --git a/sample_config/config_korean.yaml b/sample_config/config_korean.yaml
index 9f5a8b4af..af7cdde93 100644
--- a/sample_config/config_korean.yaml
+++ b/sample_config/config_korean.yaml
@@ -7,19 +7,16 @@ node_lines:
             embedding_model: openai
           - module_type: bm25
             bm25_tokenizer: ko_kiwi
-        top_k: 3
+        top_k: 20
         strategy:
-          metrics:
-            - retrieval_f1
-            - retrieval_recall
-            - retrieval_precision
+          metrics: [ retrieval_recall, retrieval_precision, retrieval_map ]
       - node_type: passage_reranker
         modules:
           - module_type: koreranker
           - module_type: pass_reranker
           - module_type: cohere_reranker
         strategy:
-          metrics: [ retrieval_f1, retrieval_recall, retrieval_precision ]
+          metrics: [ retrieval_recall, retrieval_precision, retrieval_map ]
           top_k: 3
   - node_line_name: post_retrieve_node_line
     nodes:
@@ -30,21 +27,22 @@ node_lines:
         strategy:
           generator_modules:
             - batch: 2
-              llm: openai
-              module_type: llama_index_llm
+              module_type: openai_llm
           metrics:
-            - bleu
-            - meteor
-            - rouge
+            - metric_name: rouge
+            - embedding_model: openai
+              metric_name: sem_score
+            - metric_name: bert_score
+              lang: ko
       - modules:
           - batch: 2
-            llm: openai
-            model: gpt-4
-            module_type: llama_index_llm
+            llm: gpt-4o
+            module_type: openai_llm
         node_type: generator
         strategy:
           metrics:
-            - metric_name: bleu
-            - metric_name: meteor
+            - metric_name: rouge
             - embedding_model: openai
               metric_name: sem_score
+            - metric_name: bert_score
+              lang: ko
diff --git a/sample_config/simple_local.yaml b/sample_config/simple_local.yaml
index 37dcb9b4d..3c547ac77 100644
--- a/sample_config/simple_local.yaml
+++ b/sample_config/simple_local.yaml
@@ -13,13 +13,13 @@ node_lines:
     nodes:
       - node_type: prompt_maker
         strategy:
-          metrics: [bleu, meteor, rouge]
+          metrics: [ meteor, rouge, bert_score ]
         modules:
           - module_type: fstring
             prompt: "Read the passages and answer the given question. \n Question: {query} \n Passage: {retrieved_contents} \n Answer : "
       - node_type: generator
         strategy:
-          metrics: [bleu, meteor, rouge]
+          metrics: [ meteor, rouge, bert_score ]
         modules:
           - module_type: vllm
             llm: mistralai/Mistral-7B-Instruct-v0.2
diff --git a/sample_config/simple_ollama.yaml b/sample_config/simple_ollama.yaml
new file mode 100644
index 000000000..b4558a1e8
--- /dev/null
+++ b/sample_config/simple_ollama.yaml
@@ -0,0 +1,49 @@
+node_lines:
+  - node_line_name: retrieve_node_line
+    nodes:
+      - node_type: retrieval
+        strategy:
+          metrics: [ retrieval_f1, retrieval_recall, retrieval_precision ]
+        top_k: 3
+        modules:
+          - module_type: bm25
+          - module_type: vectordb
+            embedding_model: huggingface_all_mpnet_base_v2
+          - module_type: hybrid_rrf
+            target_modules: ('bm25', 'vectordb')
+            rrf_k: [ 3, 5, 10 ]
+          - module_type: hybrid_cc
+            target_modules: ('bm25', 'vectordb')
+            weights:
+              - (0.5, 0.5)
+              - (0.3, 0.7)
+              - (0.7, 0.3)
+          - module_type: hybrid_rsf
+            target_modules: ('bm25', 'vectordb')
+            weights:
+              - (0.5, 0.5)
+              - (0.3, 0.7)
+              - (0.7, 0.3)
+          - module_type: hybrid_dbsf
+            target_modules: ('bm25', 'vectordb')
+            weights:
+              - (0.5, 0.5)
+              - (0.3, 0.7)
+              - (0.7, 0.3)
+  - node_line_name: post_retrieve_node_line
+    nodes:
+      - node_type: prompt_maker
+        strategy:
+          metrics: [ meteor, rouge, bert_score ]
+        modules:
+          - module_type: fstring
+            prompt: "Read the passages and answer the given question. \n Question: {query} \n Passage: {retrieved_contents} \n Answer : "
+      - node_type: generator
+        strategy:
+          metrics: [ meteor, rouge, bert_score ]
+        modules:
+          - module_type: llama_index_llm
+            llm: ollama
+            model: llama3
+            temperature: [ 0.1, 0.5, 1.0 ]
+            batch: 1