feat: add redisearch vectorstore #1307

Merged

Changes from 6 commits

204 changes: 204 additions & 0 deletions docs/modules/indexes/vectorstore_examples/redisearch.ipynb
@@ -0,0 +1,204 @@
{
"cells": [
{
"cell_type": "markdown",
"source": [
"# RediSearch\n",
"\n",
"This notebook shows how to use functionality related to the RediSearch database."
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
}
}
},
{
"cell_type": "code",
"execution_count": 1,
"outputs": [],
"source": [
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores.redisearch import RediSearch"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
"execution_count": 3,
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"loader = TextLoader('../../state_of_the_union.txt')\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"\n",
"embeddings = OpenAIEmbeddings()"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
"execution_count": 4,
"outputs": [],
"source": [
"rds = RediSearch.from_documents(docs, embeddings,redisearch_url=\"redis://localhost:6379\")"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
"execution_count": 5,
"outputs": [
{
"data": {
"text/plain": "'b564189668a343648996bd5a1d353d4e'"
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"rds.index_name"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
"execution_count": 6,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n",
"\n",
"We cannot let this happen. \n",
"\n",
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
]
}
],
"source": [
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"results = rds.similarity_search(query)\n",
"print(results[0].page_content)"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
"execution_count": 7,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['doc:333eadf75bd74be393acafa8bca48669']\n"
]
}
],
"source": [
"print(rds.add_texts([\"Ankush went to Princeton\"]))"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
"execution_count": 8,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Ankush went to Princeton\n"
]
}
],
"source": [
"query = \"Princeton\"\n",
"results = rds.similarity_search(query)\n",
"print(results[0].page_content)"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
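
The notebook only exercises from_documents; since the constructor added in this PR takes redisearch_url, index_name, and an embedding function directly, reconnecting to the index created above would look roughly like the sketch below (illustrative usage, not a cell in the notebook; the index name is the value printed by rds.index_name):

    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores.redisearch import RediSearch

    embeddings = OpenAIEmbeddings()

    # Reattach to an existing index by name; embedding_function expects a
    # callable that embeds a single query string.
    rds = RediSearch(
        redisearch_url="redis://localhost:6379",
        index_name="b564189668a343648996bd5a1d353d4e",  # value printed above
        embedding_function=embeddings.embed_query,
    )
    docs = rds.similarity_search("What did the president say about Ketanji Brown Jackson")
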
192 changes: 192 additions & 0 deletions langchain/vectorstores/redisearch.py
@@ -0,0 +1,192 @@
from __future__ import annotations

import json
import uuid
from typing import Any, Callable, Iterable, List, Mapping, Optional

import numpy as np
from redis.commands.search.query import Query

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore


class RediSearch(VectorStore):
    def __init__(
        self,
        redisearch_url: str,
        index_name: str,
        embedding_function: Callable,
        **kwargs: Any,
    ):
        """Initialize with necessary components."""
        try:
            import redis
        except ImportError:
            raise ValueError(
                "Could not import redis python package. "
                "Please install it with `pip install redis`."
            )
        self.embedding_function = embedding_function
        self.index_name = index_name
        try:
            redis_client = redis.from_url(redisearch_url, **kwargs)
        except ValueError as e:
            raise ValueError(f"Redis connection error: {e}")
        self.client = redis_client

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        # `prefix`: Maybe in the future we can let the user choose the index_name.
        prefix = "doc"  # prefix for the document keys

        ids = []
        # Write each text and its embedding into a Redis hash
        for i, text in enumerate(texts):
            key = f"{prefix}:{uuid.uuid4().hex}"
            metadata = metadatas[i] if metadatas else {}
            self.client.hset(
                key,
                mapping={
                    "content": text,
                    "content_vector": np.array(
                        self.embedding_function(text), dtype=np.float32
                    ).tobytes(),
                    "metadata": json.dumps(metadata),
                },
            )
            ids.append(key)
        return ids

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        # Creates embedding vector from user query
        embedding = self.embedding_function(query)

        # Prepare the query
        return_fields = ["metadata", "content", "vector_score"]
        vector_field = "content_vector"
        hybrid_fields = "*"  # "*" means no pre-filter: search the whole index

Contributor: Does the user have the option to change this here? I might be missing some logic, but our users love this.

Contributor (author): These fields are currently fixed, similar to ElasticVectorSearch.

/langchain/vectorstores/elastic_vector_search.py#L189

    request = {
        "_op_type": "index",
        "_index": index_name,
        "vector": embeddings[i],
        "text": text,
        "metadata": metadata,
    }

Contributor: Yeah, which is OK, but that's fixed for when metadata is a key. Redis allows arbitrarily named keys to be passed in this argument.

Assuming the user created and used the index with LangChain, this is totally fine; just thinking about use cases other than the typical. Again, this is one thing we can contribute after the initial version too.
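
A minimal sketch of the kind of configurability being asked about here: letting callers override the pre-filter expression and the returned fields instead of hard-coding them. The helper name and its parameters are hypothetical, not part of this PR; the query construction mirrors the code that follows.

    from typing import List, Optional

    from redis.commands.search.query import Query


    def build_knn_query(
        k: int = 4,
        vector_field: str = "content_vector",
        hybrid_fields: str = "*",  # hypothetical pre-filter, e.g. a tag or text clause
        return_fields: Optional[List[str]] = None,  # hypothetical override of returned fields
    ) -> Query:
        """Build a RediSearch KNN query with caller-overridable fields."""
        return_fields = return_fields or ["metadata", "content", "vector_score"]
        base_query = f"{hybrid_fields}=>[KNN {k} @{vector_field} $vector AS vector_score]"
        return (
            Query(base_query)
            .return_fields(*return_fields)
            .sort_by("vector_score")
            .paging(0, k)
            .dialect(2)
        )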

        base_query = (
            f"{hybrid_fields}=>[KNN {k} @{vector_field} $vector AS vector_score]"
        )
        redis_query = (
            Query(base_query)
            .return_fields(*return_fields)
            .sort_by("vector_score")
            .paging(0, k)
            .dialect(2)
        )
        params_dict: Mapping[str, str] = {
            "vector": str(np.array(embedding).astype(dtype=np.float32).tobytes())
        }

        # Perform vector search
        results = self.client.ft(self.index_name).search(redis_query, params_dict)

        documents = [
            Document(page_content=result.content, metadata=json.loads(result.metadata))
            for result in results.docs
        ]

        return documents

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        index_name: Optional[str] = None,
        **kwargs: Any,
    ) -> RediSearch:
        """Construct RediSearch wrapper from raw documents.

        This is a user-friendly interface that:
            1. Embeds documents.
            2. Creates a new index for the embeddings in the RediSearch instance.
            3. Adds the documents to the newly created RediSearch index.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import RediSearch
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                redisearch = RediSearch.from_texts(
                    texts,
                    embeddings,
                    redisearch_url="redis://username:password@localhost:6379"
                )
        """
        redisearch_url = get_from_dict_or_env(
            kwargs, "redisearch_url", "REDISEARCH_URL"
        )
        try:
            import redis
            from redis.commands.search.field import TextField, VectorField
            from redis.commands.search.indexDefinition import IndexDefinition, IndexType
        except ImportError:
            raise ValueError(
                "Could not import redis python package. "
                "Please install it with `pip install redis`."
            )

Contributor: Show a pinned version? Might be difficult unless it's a set constant. Slight maintenance burden.

Contributor (author): I referred to other code under 'vectorstores', but no specific version was given in the error prompt.
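
A small illustration of the pinned-version suggestion above; the version number is hypothetical and not specified anywhere in this PR.

    # Hypothetical variant of the install hint with a pinned minimum version.
    raise ValueError(
        "Could not import redis python package. "
        "Please install it with `pip install 'redis>=4.1.0'`."
    )
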
        try:
            # We need to first remove redisearch_url from kwargs,
            # otherwise passing it through to Redis will raise an error.
            kwargs.pop("redisearch_url")
            client = redis.from_url(url=redisearch_url, **kwargs)
        except ValueError as e:
            raise ValueError(f"Redis connection error: {e}")
        embeddings = embedding.embed_documents(texts)
        dim = len(embeddings[0])
        # Constants
        vector_number = len(embeddings)  # initial number of vectors
        # name of the search index if not given
        if not index_name:
            index_name = uuid.uuid4().hex
        prefix = "doc"  # prefix for the document keys
        distance_metric = (
            "COSINE"  # distance metric for the vectors (ex. COSINE, IP, L2)
        )

Contributor: Allow this to be changed?

Contributor (author): I think default values can be used here. The purpose of this code is to create the corresponding data structure in Redis.

Contributor: This will work for now, but for many production use cases users will need the ability to define and specify the index name, prefix, and distance metric. That can come later as optional arguments.

Contributor: @tylerhutcherson expressed the main point behind what I meant: users will want to use HNSW for larger indices and to change distance metrics based on their process and normalization. But for now, this is fine and awesome to see.
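
One possible shape of the optional arguments described above: a small helper that takes the distance metric as a parameter rather than a constant. The helper and its validation are hypothetical; the field layout matches the schema this PR builds just below.

    from typing import List

    from redis.commands.search.field import TextField, VectorField


    def build_schema(dim: int, initial_cap: int, distance_metric: str = "COSINE") -> List:
        """Build the index schema with a caller-chosen distance metric (COSINE, IP or L2)."""
        if distance_metric not in ("COSINE", "IP", "L2"):
            raise ValueError(f"Unsupported distance metric: {distance_metric}")
        return [
            TextField(name="content"),
            TextField(name="metadata"),
            VectorField(
                "content_vector",
                "FLAT",
                {
                    "TYPE": "FLOAT32",
                    "DIM": dim,
                    "DISTANCE_METRIC": distance_metric,
                    "INITIAL_CAP": initial_cap,
                },
            ),
        ]
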
        content = TextField(name="content")
        metadata = TextField(name="metadata")
        content_embedding = VectorField(
            "content_vector",
            "FLAT",
            {
                "TYPE": "FLOAT32",
                "DIM": dim,
                "DISTANCE_METRIC": distance_metric,
                "INITIAL_CAP": vector_number,
            },
        )
        fields = [content, metadata, content_embedding]

Contributor: Allow this ("FLAT") to be changed?

Contributor (author): I think default values can be used here. The purpose of this code is to create the corresponding data structure in Redis.
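
On the HNSW point raised in the discussion above, an HNSW field definition in redis-py looks roughly like the sketch below. The M and EF_CONSTRUCTION values are common illustrative defaults, not settings taken from this PR.

    from redis.commands.search.field import VectorField

    dim = 1536  # illustrative; from_texts computes this as len(embeddings[0])

    # Hypothetical HNSW variant of the vector field created above. M and
    # EF_CONSTRUCTION trade index build time and memory for recall.
    content_embedding_hnsw = VectorField(
        "content_vector",
        "HNSW",
        {
            "TYPE": "FLOAT32",
            "DIM": dim,
            "DISTANCE_METRIC": "COSINE",
            "M": 16,
            "EF_CONSTRUCTION": 200,
        },
    )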

        # Check if index exists
        try:
            client.ft(index_name).info()
            print("Index already exists")
        except:  # noqa
            # Create RediSearch Index
            client.ft(index_name).create_index(
                fields=fields,
                definition=IndexDefinition(prefix=[prefix], index_type=IndexType.HASH),
            )

        pipeline = client.pipeline()
        for i, text in enumerate(texts):
            key = f"{prefix}:{str(uuid.uuid4().hex)}"
            metadata = metadatas[i] if metadatas else {}
            pipeline.hset(
                key,
                mapping={
                    "content": text,
                    "content_vector": np.array(
                        embeddings[i], dtype=np.float32
                    ).tobytes(),
                    "metadata": json.dumps(metadata),
                },
            )
        pipeline.execute()
        return cls(redisearch_url, index_name, embedding.embed_query)