Feature/#550 (#551)
* add simple_ollama.yaml

* fix a few YAML files to the latest format

* resolve YAML error in config_korean.yaml

---------

Co-authored-by: jeffrey <[email protected]>
vkehfdl1 and jeffrey authored Jun 30, 2024
1 parent 8850fa4 commit be0e225
Showing 3 changed files with 65 additions and 18 deletions.
30 changes: 14 additions & 16 deletions sample_config/config_korean.yaml
@@ -7,19 +7,16 @@ node_lines:
    embedding_model: openai
  - module_type: bm25
    bm25_tokenizer: ko_kiwi
- top_k: 3
+ top_k: 20
  strategy:
- metrics:
-   - retrieval_f1
-   - retrieval_recall
-   - retrieval_precision
+ metrics: [ retrieval_recall, retrieval_precision, retrieval_map ]
  - node_type: passage_reranker
  modules:
  - module_type: koreranker
  - module_type: pass_reranker
  - module_type: cohere_reranker
  strategy:
- metrics: [ retrieval_f1, retrieval_recall, retrieval_precision ]
+ metrics: [ retrieval_recall, retrieval_precision, retrieval_map ]
  top_k: 3
  - node_line_name: post_retrieve_node_line
  nodes:
@@ -30,21 +27,22 @@
  strategy:
  generator_modules:
  - batch: 2
-   llm: openai
-   module_type: llama_index_llm
+   module_type: openai_llm
  metrics:
-   - bleu
-   - meteor
-   - rouge
+   - metric_name: rouge
+   - embedding_model: openai
+     metric_name: sem_score
+   - metric_name: bert_score
+     lang: ko
  - modules:
  - batch: 2
-   llm: openai
-   model: gpt-4
-   module_type: llama_index_llm
+   llm: gpt-4o
+   module_type: openai_llm
  node_type: generator
  strategy:
  metrics:
-   - metric_name: bleu
    - metric_name: meteor
    - metric_name: rouge
+   - embedding_model: openai
+     metric_name: sem_score
+   - metric_name: bert_score
+     lang: ko
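
The config_korean.yaml changes above swap bare-string metric lists (bleu, meteor, rouge) for dict-style entries keyed by metric_name, which is what lets a metric carry extra parameters such as embedding_model for sem_score or lang for bert_score, and they route the OpenAI generator through the dedicated openai_llm module instead of llama_index_llm. As a schematic illustration of the two metric notations used in this diff (layout is illustrative, not the file's verbatim indentation):

# shorthand: plain metric names, evaluated with default settings
metrics: [ retrieval_recall, retrieval_precision, retrieval_map ]

# dict entries: used when a metric takes parameters
metrics:
  - metric_name: rouge
  - metric_name: sem_score
    embedding_model: openai
  - metric_name: bert_score
    lang: ko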
4 changes: 2 additions & 2 deletions sample_config/simple_local.yaml
@@ -13,13 +13,13 @@
  nodes:
  - node_type: prompt_maker
  strategy:
- metrics: [bleu, meteor, rouge]
+ metrics: [ meteor, rouge, bert_score ]
  modules:
  - module_type: fstring
    prompt: "Read the passages and answer the given question. \n Question: {query} \n Passage: {retrieved_contents} \n Answer : "
  - node_type: generator
  strategy:
- metrics: [bleu, meteor, rouge]
+ metrics: [ meteor, rouge, bert_score ]
  modules:
  - module_type: vllm
    llm: mistralai/Mistral-7B-Instruct-v0.2
49 changes: 49 additions & 0 deletions sample_config/simple_ollama.yaml
@@ -0,0 +1,49 @@
+ node_lines:
+ - node_line_name: retrieve_node_line
+ nodes:
+ - node_type: retrieval
+ strategy:
+ metrics: [ retrieval_f1, retrieval_recall, retrieval_precision ]
+ top_k: 3
+ modules:
+ - module_type: bm25
+ - module_type: vectordb
+   embedding_model: huggingface_all_mpnet_base_v2
+ - module_type: hybrid_rrf
+   target_modules: ('bm25', 'vectordb')
+   rrf_k: [ 3, 5, 10 ]
+ - module_type: hybrid_cc
+   target_modules: ('bm25', 'vectordb')
+   weights:
+     - (0.5, 0.5)
+     - (0.3, 0.7)
+     - (0.7, 0.3)
+ - module_type: hybrid_rsf
+   target_modules: ('bm25', 'vectordb')
+   weights:
+     - (0.5, 0.5)
+     - (0.3, 0.7)
+     - (0.7, 0.3)
+ - module_type: hybrid_dbsf
+   target_modules: ('bm25', 'vectordb')
+   weights:
+     - (0.5, 0.5)
+     - (0.3, 0.7)
+     - (0.7, 0.3)
+ - node_line_name: post_retrieve_node_line
+ nodes:
+ - node_type: prompt_maker
+ strategy:
+ metrics: [ meteor, rouge, bert_score ]
+ modules:
+ - module_type: fstring
+   prompt: "Read the passages and answer the given question. \n Question: {query} \n Passage: {retrieved_contents} \n Answer : "
+ - node_type: generator
+ strategy:
+ metrics: [ meteor, rouge, bert_score ]
+ modules:
+ - module_type: llama_index_llm
+   llm: ollama
+   model: llama3
+   temperature: [ 0.1, 0.5, 1.0 ]
+   batch: 1
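
The new simple_ollama.yaml drives generation through the llama_index_llm module with llm: ollama and model: llama3, so it expects an Ollama server to be reachable locally with that model available. A minimal sketch of running such a sample config with AutoRAG's documented Evaluator entry point, assuming QA and corpus parquet files have already been prepared; the paths below are placeholders and the exact signature should be checked against the AutoRAG docs for your version:

# Minimal sketch, not a file from this commit. Assumes an Ollama server is
# running locally and the model has been fetched, e.g. with `ollama pull llama3`.
from autorag.evaluator import Evaluator

evaluator = Evaluator(
    qa_data_path="data/qa.parquet",          # placeholder: evaluation QA dataset
    corpus_data_path="data/corpus.parquet",  # placeholder: corpus used for retrieval
)
evaluator.start_trial("sample_config/simple_ollama.yaml")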
