From ef174b935ed6863d4417095f7a0dfc4f0890ba6e Mon Sep 17 00:00:00 2001
From: Ravi Theja
Date: Tue, 23 Jul 2024 22:24:08 +0530
Subject: [PATCH] Update LlamaIndex RAG tutorial

---
 docs/guides/basic-RAG.md | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/docs/guides/basic-RAG.md b/docs/guides/basic-RAG.md
index 514f405..2349d6a 100644
--- a/docs/guides/basic-RAG.md
+++ b/docs/guides/basic-RAG.md
@@ -222,11 +222,10 @@ Visit our [community cookbook example](https://github.com/mistralai/cookbook/blo
 
 **Code:**
 ```python
-from llama_index import VectorStoreIndex, SimpleDirectoryReader
-from llama_index.llms import MistralAI
-from llama_index.embeddings import MistralAIEmbedding
-from llama_index import ServiceContext
-from llama_index.query_engine import RetrieverQueryEngine
+from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
+from llama_index.llms.mistralai import MistralAI
+from llama_index.embeddings.mistralai import MistralAIEmbedding
+from llama_index.core import Settings
 
 # Load data
 reader = SimpleDirectoryReader(input_files=["essay.txt"])
@@ -235,10 +234,10 @@ documents = reader.load_data()
 # Define LLM and embedding model
 llm = MistralAI(api_key=api_key, model="mistral-medium")
 embed_model = MistralAIEmbedding(model_name="mistral-embed", api_key=api_key)
-service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
-
+Settings.llm = llm
+Settings.embed_model = embed_model
 # Create vector store index
-index = VectorStoreIndex.from_documents(documents, service_context=service_context)
+index = VectorStoreIndex.from_documents(documents)
 
 # Create query engine
 query_engine = index.as_query_engine(similarity_top_k=2)