Source code for langchain.vectorstores.chroma
Source: https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/chroma.html
client: Optional[chromadb.Client] = None, # Add this line
**kwargs: Any,
) -> Chroma:
"""Create a Chroma vectorstore from a list of documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings.
client (Optional[chromadb.Client]): Chroma client to reuse. Defaults to None.
Returns:
Chroma: Chroma vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
)
[docs] def delete(self, ids: List[str]) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
self._collection.delete(ids=ids)
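Example usage (a minimal sketch of the methods above; assumes the OpenAIEmbeddings class and a writable persist directory, and all names and values are illustrative):
.. code-block:: python

    from langchain.docstore.document import Document
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Chroma

    docs = [Document(page_content="hello world", metadata={"topic": "greeting"})]
    # from_documents extracts texts/metadatas and forwards them to from_texts.
    store = Chroma.from_documents(
        documents=docs,
        embedding=OpenAIEmbeddings(),
        ids=["doc-1"],
        collection_name="demo",
        persist_directory="./chroma_db",  # omit for an ephemeral in-memory collection
    )
    # delete() removes entries by vector ID via the underlying collection.
    store.delete(ids=["doc-1"])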
Source code for langchain.vectorstores.redis
Source: https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html
"""Wrapper around Redis vector database."""
from __future__ import annotations
import json
import logging
import uuid
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Literal,
Mapping,
Optional,
Tuple,
Type,
)
import numpy as np
from pydantic import BaseModel, root_validator
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore, VectorStoreRetriever
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from redis.client import Redis as RedisType
from redis.commands.search.query import Query
# required modules
REDIS_REQUIRED_MODULES = [
{"name": "search", "ver": 20400},
{"name": "searchlight", "ver": 20400},
]
# distance metrics
REDIS_DISTANCE_METRICS = Literal["COSINE", "IP", "L2"]
def _check_redis_module_exist(client: RedisType, required_modules: List[dict]) -> None:
"""Check if the correct Redis modules are installed."""
installed_modules = client.module_list()
installed_modules = {
module[b"name"].decode("utf-8"): module for module in installed_modules
}
for module in required_modules:
if module["name"] in installed_modules and int(
installed_modules[module["name"]][b"ver"]
) >= int(module["ver"]):
return
# otherwise raise error
error_message = (
"Redis cannot be used as a vector database without RediSearch >=2.4" | https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html |
69ea6f4346f3-1 | "Redis cannot be used as a vector database without RediSearch >=2.4"
"Please head to https://redis.io/docs/stack/search/quick_start/"
"to know more about installing the RediSearch module within Redis Stack."
)
logging.error(error_message)
raise ValueError(error_message)
def _check_index_exists(client: RedisType, index_name: str) -> bool:
"""Check if Redis index exists."""
try:
client.ft(index_name).info()
except: # noqa: E722
logger.info("Index does not exist")
return False
logger.info("Index already exists")
return True
def _redis_key(prefix: str) -> str:
"""Redis key schema for a given prefix."""
return f"{prefix}:{uuid.uuid4().hex}"
def _redis_prefix(index_name: str) -> str:
"""Redis key prefix for a given index."""
return f"doc:{index_name}"
def _default_relevance_score(val: float) -> float:
return 1 - val
[docs]class Redis(VectorStore):
"""Wrapper around Redis vector database.
To use, you should have the ``redis`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Redis
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = Redis(
redis_url="redis://username:password@localhost:6379"
index_name="my-index",
embedding_function=embeddings.embed_query,
)
"""
def __init__(
self,
redis_url: str,
index_name: str,
embedding_function: Callable,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
relevance_score_fn: Optional[
Callable[[float], float]
] = _default_relevance_score,
**kwargs: Any,
):
"""Initialize with necessary components."""
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis>=4.1.0`."
)
self.embedding_function = embedding_function
self.index_name = index_name
try:
# connect to redis from url
redis_client = redis.from_url(redis_url, **kwargs)
# check if redis has redisearch module installed
_check_redis_module_exist(redis_client, REDIS_REQUIRED_MODULES)
except ValueError as e:
raise ValueError(f"Redis failed to connect: {e}")
self.client = redis_client
self.content_key = content_key
self.metadata_key = metadata_key
self.vector_key = vector_key
self.relevance_score_fn = relevance_score_fn
def _create_index(
self, dim: int = 1536, distance_metric: REDIS_DISTANCE_METRICS = "COSINE"
) -> None:
try:
from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
# Check if index exists
if not _check_index_exists(self.client, self.index_name):
# Define schema
schema = (
TextField(name=self.content_key),
TextField(name=self.metadata_key),
VectorField(
self.vector_key,
"FLAT",
{
"TYPE": "FLOAT32",
"DIM": dim,
"DISTANCE_METRIC": distance_metric,
},
),
)
prefix = _redis_prefix(self.index_name)
# Create Redis Index
self.client.ft(self.index_name).create_index(
fields=schema,
definition=IndexDefinition(prefix=[prefix], index_type=IndexType.HASH),
)
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
embeddings: Optional[List[List[float]]] = None,
batch_size: int = 1000,
**kwargs: Any,
) -> List[str]:
"""Add more texts to the vectorstore.
Args:
texts (Iterable[str]): Iterable of strings/text to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
Defaults to None.
embeddings (Optional[List[List[float]]], optional): Optional pre-generated
embeddings. Defaults to None.
keys (List[str]) or ids (List[str]): Identifiers of entries.
Defaults to None.
batch_size (int, optional): Batch size to use for writes. Defaults to 1000.
Returns:
List[str]: List of ids added to the vectorstore
"""
ids = []
prefix = _redis_prefix(self.index_name)
# Get keys or ids from kwargs
# Other vectorstores use ids
keys_or_ids = kwargs.get("keys", kwargs.get("ids"))
# Write data to redis
pipeline = self.client.pipeline(transaction=False)
for i, text in enumerate(texts):
# Use provided values by default or fallback
key = keys_or_ids[i] if keys_or_ids else _redis_key(prefix)
metadata = metadatas[i] if metadatas else {}
embedding = embeddings[i] if embeddings else self.embedding_function(text)
pipeline.hset(
key,
mapping={
self.content_key: text,
self.vector_key: np.array(embedding, dtype=np.float32).tobytes(),
self.metadata_key: json.dumps(metadata),
},
)
ids.append(key)
# Write batch
if i % batch_size == 0:
pipeline.execute()
# Cleanup final batch
pipeline.execute()
return ids
[docs] def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, _ in docs_and_scores]
[docs] def similarity_search_limit_score(
self, query: str, k: int = 4, score_threshold: float = 0.2, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text within the
score_threshold range.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
score_threshold (float): The minimum matching score required for a document
to be considered a match. Defaults to 0.2.
Because the similarity calculation algorithm is based on cosine similarity,
the smaller the angle, the higher the similarity.
Returns:
List[Document]: A list of documents that are most similar to the query text,
including the match score for each document.
Note:
If there are no documents that satisfy the score_threshold value,
an empty list is returned.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, score in docs_and_scores if score < score_threshold]
def _prepare_query(self, k: int) -> Query:
try:
from redis.commands.search.query import Query
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
# Prepare the Query
hybrid_fields = "*"
base_query = (
f"{hybrid_fields}=>[KNN {k} @{self.vector_key} $vector AS vector_score]"
)
return_fields = [self.metadata_key, self.content_key, "vector_score"]
return (
Query(base_query)
.return_fields(*return_fields)
.sort_by("vector_score")
.paging(0, k)
.dialect(2)
)
[docs] def similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
# Creates embedding vector from user query
embedding = self.embedding_function(query)
# Creates Redis query
redis_query = self._prepare_query(k)
params_dict: Mapping[str, str] = {
"vector": np.array(embedding) # type: ignore
.astype(dtype=np.float32)
.tobytes()
}
# Perform vector search
results = self.client.ft(self.index_name).search(redis_query, params_dict)
# Prepare document results
docs = [
(
Document(
page_content=result.content, metadata=json.loads(result.metadata)
),
float(result.vector_score),
)
for result in results.docs
]
return docs
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and relevance scores, normalized on a scale from 0 to 1.
0 is dissimilar, 1 is most similar.
""" | https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html |
69ea6f4346f3-7 | 0 is dissimilar, 1 is most similar.
"""
if self.relevance_score_fn is None:
raise ValueError(
"relevance_score_fn must be provided to"
" Redis constructor to normalize scores"
)
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [(doc, self.relevance_score_fn(score)) for doc, score in docs_and_scores]
[docs] @classmethod
def from_texts_return_keys(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
index_name: Optional[str] = None,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
distance_metric: REDIS_DISTANCE_METRICS = "COSINE",
**kwargs: Any,
) -> Tuple[Redis, List[str]]:
"""Create a Redis vectorstore from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in Redis.
3. Adds the documents to the newly created Redis index.
4. Returns the keys of the newly created documents.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Redis
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redisearch, keys = Redis.from_texts_return_keys(
texts,
embeddings,
redis_url="redis://username:password@localhost:6379"
)
""" | https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/redis.html |
69ea6f4346f3-8 | )
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
if "redis_url" in kwargs:
kwargs.pop("redis_url")
# Name of the search index if not given
if not index_name:
index_name = uuid.uuid4().hex
# Create instance
instance = cls(
redis_url,
index_name,
embedding.embed_query,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
**kwargs,
)
# Create embeddings over documents
embeddings = embedding.embed_documents(texts)
# Create the search index
instance._create_index(dim=len(embeddings[0]), distance_metric=distance_metric)
# Add data to Redis
keys = instance.add_texts(texts, metadatas, embeddings)
return instance, keys
[docs] @classmethod
def from_texts(
cls: Type[Redis],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
index_name: Optional[str] = None,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
**kwargs: Any,
) -> Redis:
"""Create a Redis vectorstore from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in Redis.
3. Adds the documents to the newly created Redis index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Redis
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redisearch = Redis.from_texts(
texts,
embeddings,
redis_url="redis://username:password@localhost:6379"
)
"""
instance, _ = cls.from_texts_return_keys(
texts,
embedding,
metadatas=metadatas,
index_name=index_name,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
**kwargs,
)
return instance
[docs] @staticmethod
def delete(
ids: List[str],
**kwargs: Any,
) -> bool:
"""
Delete a Redis entry.
Args:
ids: List of ids (keys) to delete.
Returns:
bool: Whether or not the deletions were successful.
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
if ids is None:
raise ValueError("'ids' (keys)() were not provided.")
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
if "redis_url" in kwargs:
kwargs.pop("redis_url")
client = redis.from_url(url=redis_url, **kwargs)
except ValueError as e:
raise ValueError(f"Redis failed to connect: {e}")
# Check if index exists
try:
client.delete(*ids)
logger.info("Entries deleted")
return True
except: # noqa: E722
# ids does not exist
return False
[docs] @staticmethod
def drop_index(
index_name: str,
delete_documents: bool,
**kwargs: Any,
) -> bool:
"""
Drop a Redis search index.
Args:
index_name (str): Name of the index to drop.
delete_documents (bool): Whether to drop the associated documents.
Returns:
bool: Whether or not the drop was successful.
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
if "redis_url" in kwargs:
kwargs.pop("redis_url")
client = redis.from_url(url=redis_url, **kwargs)
except ValueError as e:
raise ValueError(f"Your redis connected error: {e}")
# Check if index exists
try:
client.ft(index_name).dropindex(delete_documents)
logger.info("Drop index")
return True
except: # noqa: E722
# Index not exist
return False
[docs] @classmethod
def from_existing_index(
cls,
embedding: Embeddings,
index_name: str,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
**kwargs: Any,
) -> Redis:
"""Connect to an existing Redis index."""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
if "redis_url" in kwargs:
kwargs.pop("redis_url")
client = redis.from_url(url=redis_url, **kwargs)
# check if redis has redisearch module installed
_check_redis_module_exist(client, REDIS_REQUIRED_MODULES)
# ensure that the index already exists
assert _check_index_exists(
client, index_name
), f"Index {index_name} does not exist"
except Exception as e:
raise ValueError(f"Redis failed to connect: {e}")
return cls(
redis_url,
index_name,
embedding.embed_query,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
**kwargs,
)
[docs] def as_retriever(self, **kwargs: Any) -> RedisVectorStoreRetriever:
return RedisVectorStoreRetriever(vectorstore=self, **kwargs)
class RedisVectorStoreRetriever(VectorStoreRetriever, BaseModel):
vectorstore: Redis
search_type: str = "similarity"
k: int = 4
score_threshold: float = 0.4
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_search_type(cls, values: Dict) -> Dict:
"""Validate search type."""
if "search_type" in values:
search_type = values["search_type"]
if search_type not in ("similarity", "similarity_limit"):
raise ValueError(f"search_type of {search_type} not allowed.")
return values
def get_relevant_documents(self, query: str) -> List[Document]:
if self.search_type == "similarity":
docs = self.vectorstore.similarity_search(query, k=self.k)
elif self.search_type == "similarity_limit":
docs = self.vectorstore.similarity_search_limit_score(
query, k=self.k, score_threshold=self.score_threshold
)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError("RedisVectorStoreRetriever does not support async")
def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
"""Add documents to vectorstore."""
return self.vectorstore.add_documents(documents, **kwargs)
async def aadd_documents(
self, documents: List[Document], **kwargs: Any
) -> List[str]:
"""Add documents to vectorstore."""
return await self.vectorstore.aadd_documents(documents, **kwargs)
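Example usage (a minimal end-to-end sketch of the Redis vectorstore above; assumes a local Redis Stack instance with the RediSearch module and the OpenAIEmbeddings class, with URL and index name illustrative):
.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Redis

    # from_texts embeds the texts, creates the index, and writes the entries.
    rds = Redis.from_texts(
        ["Redis is an in-memory data store", "LangChain wraps vector databases"],
        OpenAIEmbeddings(),
        redis_url="redis://localhost:6379",
        index_name="demo-index",
    )
    docs = rds.similarity_search("vector databases", k=2)
    # as_retriever() wraps the store; search_type may be "similarity" or
    # "similarity_limit" (the latter applies score_threshold as shown above).
    retriever = rds.as_retriever(search_type="similarity_limit", score_threshold=0.2)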
Source code for langchain.vectorstores.docarray.hnsw
Source: https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/docarray/hnsw.html
"""Wrapper around Hnswlib store."""
from __future__ import annotations
from typing import Any, List, Literal, Optional
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.docarray.base import (
DocArrayIndex,
_check_docarray_import,
)
[docs]class DocArrayHnswSearch(DocArrayIndex):
"""Wrapper around HnswLib storage.
To use it, you should have the ``docarray`` package with version >=0.32.0 installed.
You can install it with `pip install "langchain[docarray]"`.
"""
[docs] @classmethod
def from_params(
cls,
embedding: Embeddings,
work_dir: str,
n_dim: int,
dist_metric: Literal["cosine", "ip", "l2"] = "cosine",
max_elements: int = 1024,
index: bool = True,
ef_construction: int = 200,
ef: int = 10,
M: int = 16,
allow_replace_deleted: bool = True,
num_threads: int = 1,
**kwargs: Any,
) -> DocArrayHnswSearch:
"""Initialize DocArrayHnswSearch store.
Args:
embedding (Embeddings): Embedding function.
work_dir (str): path to the location where all the data will be stored.
n_dim (int): dimension of an embedding.
dist_metric (str): Distance metric for DocArrayHnswSearch can be one of:
"cosine", "ip", and "l2". Defaults to "cosine". | https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/docarray/hnsw.html |
2df5d3448a75-1 | "cosine", "ip", and "l2". Defaults to "cosine".
max_elements (int): Maximum number of vectors that can be stored.
Defaults to 1024.
index (bool): Whether an index should be built for this field.
Defaults to True.
ef_construction (int): defines a construction time/accuracy trade-off.
Defaults to 200.
ef (int): parameter controlling query time/accuracy trade-off.
Defaults to 10.
M (int): parameter that defines the maximum number of outgoing
connections in the graph. Defaults to 16.
allow_replace_deleted (bool): Enables replacing of deleted elements
with new added ones. Defaults to True.
num_threads (int): Sets the number of cpu threads to use. Defaults to 1.
**kwargs: Other keyword arguments to be passed to the get_doc_cls method.
"""
_check_docarray_import()
from docarray.index import HnswDocumentIndex
doc_cls = cls._get_doc_cls(
dim=n_dim,
space=dist_metric,
max_elements=max_elements,
index=index,
ef_construction=ef_construction,
ef=ef,
M=M,
allow_replace_deleted=allow_replace_deleted,
num_threads=num_threads,
**kwargs,
)
doc_index = HnswDocumentIndex[doc_cls](work_dir=work_dir) # type: ignore
return cls(doc_index, embedding)
[docs] @classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
work_dir: Optional[str] = None,
n_dim: Optional[int] = None,
**kwargs: Any,
) -> DocArrayHnswSearch:
"""Create an DocArrayHnswSearch store and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[dict]]): Metadata for each text if it exists.
Defaults to None.
work_dir (str): path to the location where all the data will be stored.
n_dim (int): dimension of an embedding.
**kwargs: Other keyword arguments to be passed to the __init__ method.
Returns:
DocArrayHnswSearch Vector Store
"""
if work_dir is None:
raise ValueError("`work_dir` parameter has not been set.")
if n_dim is None:
raise ValueError("`n_dim` parameter has not been set.")
store = cls.from_params(embedding, work_dir, n_dim, **kwargs)
store.add_texts(texts=texts, metadatas=metadatas)
return store
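A short usage sketch of DocArrayHnswSearch.from_texts (assumes the docarray extra is installed and the OpenAIEmbeddings class; n_dim must match the embedding dimensionality, and 1536 here is the OpenAI default):
.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import DocArrayHnswSearch

    store = DocArrayHnswSearch.from_texts(
        texts=["hnsw builds a navigable small-world graph"],
        embedding=OpenAIEmbeddings(),
        work_dir="./hnsw_index",  # index files are persisted here
        n_dim=1536,
        dist_metric="cosine",     # forwarded to from_params via **kwargs
    )
    # similarity_search is inherited from the DocArrayIndex base class.
    docs = store.similarity_search("graph index", k=1)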
Source code for langchain.vectorstores.docarray.in_memory
Source: https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/docarray/in_memory.html
"""Wrapper around in-memory storage."""
from __future__ import annotations
from typing import Any, Dict, List, Literal, Optional
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.docarray.base import (
DocArrayIndex,
_check_docarray_import,
)
[docs]class DocArrayInMemorySearch(DocArrayIndex):
"""Wrapper around in-memory storage for exact search.
To use it, you should have the ``docarray`` package with version >=0.32.0 installed.
You can install it with `pip install "langchain[docarray]"`.
"""
[docs] @classmethod
def from_params(
cls,
embedding: Embeddings,
metric: Literal[
"cosine_sim", "euclidean_dist", "sqeuclidean_dist"
] = "cosine_sim",
**kwargs: Any,
) -> DocArrayInMemorySearch:
"""Initialize DocArrayInMemorySearch store.
Args:
embedding (Embeddings): Embedding function.
metric (str): metric for exact nearest-neighbor search.
Can be one of: "cosine_sim", "euclidean_dist" and "sqeuclidean_dist".
Defaults to "cosine_sim".
**kwargs: Other keyword arguments to be passed to the get_doc_cls method.
"""
_check_docarray_import()
from docarray.index import InMemoryExactNNIndex
doc_cls = cls._get_doc_cls(space=metric, **kwargs)
doc_index = InMemoryExactNNIndex[doc_cls]() # type: ignore
return cls(doc_index, embedding)
[docs] @classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict[Any, Any]]] = None,
**kwargs: Any,
) -> DocArrayInMemorySearch:
"""Create an DocArrayInMemorySearch store and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[Dict[Any, Any]]]): Metadata for each text
if it exists. Defaults to None.
metric (str): metric for exact nearest-neighbor search.
Can be one of: "cosine_sim", "euclidean_dist" and "sqeuclidean_dist".
Defaults to "cosine_sim".
Returns:
DocArrayInMemorySearch Vector Store
"""
store = cls.from_params(embedding, **kwargs)
store.add_texts(texts=texts, metadatas=metadatas)
return store
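A minimal usage sketch (assumes the docarray extra is installed and the OpenAIEmbeddings class; values are illustrative):
.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import DocArrayInMemorySearch

    store = DocArrayInMemorySearch.from_texts(
        texts=["exact nearest-neighbor search, no server required"],
        embedding=OpenAIEmbeddings(),
        metric="cosine_sim",  # forwarded to from_params via **kwargs
    )
    docs = store.similarity_search("nearest neighbor", k=1)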
Source code for langchain.retrievers.remote_retriever
Source: https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/remote_retriever.html
from typing import List, Optional
import aiohttp
import requests
from pydantic import BaseModel
from langchain.schema import BaseRetriever, Document
[docs]class RemoteLangChainRetriever(BaseRetriever, BaseModel):
url: str
headers: Optional[dict] = None
input_key: str = "message"
response_key: str = "response"
page_content_key: str = "page_content"
metadata_key: str = "metadata"
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
response = requests.post(
self.url, json={self.input_key: query}, headers=self.headers
)
result = response.json()
return [
Document(
page_content=r[self.page_content_key], metadata=r[self.metadata_key]
)
for r in result[self.response_key]
]
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
async with aiohttp.ClientSession() as session:
async with session.request(
"POST", self.url, headers=self.headers, json={self.input_key: query}
) as response:
result = await response.json()
return [
Document(
page_content=r[self.page_content_key], metadata=r[self.metadata_key]
)
for r in result[self.response_key]
]
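A usage sketch (the endpoint URL is hypothetical; the server is assumed to accept {"message": ...} and reply with {"response": [{"page_content": ..., "metadata": ...}, ...]}, matching the keys configured below):
.. code-block:: python

    from langchain.retrievers import RemoteLangChainRetriever

    retriever = RemoteLangChainRetriever(
        url="http://localhost:8000/retrieve",  # hypothetical endpoint
        input_key="message",
        response_key="response",
    )
    docs = retriever.get_relevant_documents("what is a retriever?")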
Source code for langchain.retrievers.elastic_search_bm25
Source: https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/elastic_search_bm25.html
"""Wrapper around Elasticsearch vector database."""
from __future__ import annotations
import uuid
from typing import Any, Iterable, List
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever
[docs]class ElasticSearchBM25Retriever(BaseRetriever):
"""Wrapper around Elasticsearch using BM25 as a retrieval method.
To connect to an Elasticsearch instance that requires login credentials,
including Elastic Cloud, use the Elasticsearch URL format
https://username:password@es_host:9243. For example, to connect to Elastic
Cloud, create the Elasticsearch URL with the required authentication details and
pass it to the ElasticVectorSearch constructor as the named parameter
elasticsearch_url.
You can obtain your Elastic Cloud URL and login credentials by logging in to the
Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
navigating to the "Deployments" page.
To obtain your Elastic Cloud password for the default "elastic" user:
1. Log in to the Elastic Cloud console at https://cloud.elastic.co
2. Go to "Security" > "Users"
3. Locate the "elastic" user and click "Edit"
4. Click "Reset password"
5. Follow the prompts to reset the password
The format for Elastic Cloud URLs is
https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
"""
def __init__(self, client: Any, index_name: str):
self.client = client
self.index_name = index_name
[docs] @classmethod
def create(
cls, elasticsearch_url: str, index_name: str, k1: float = 2.0, b: float = 0.75
) -> ElasticSearchBM25Retriever:
from elasticsearch import Elasticsearch
# Create an Elasticsearch client instance
es = Elasticsearch(elasticsearch_url)
# Define the index settings and mappings
settings = {
"analysis": {"analyzer": {"default": {"type": "standard"}}},
"similarity": {
"custom_bm25": {
"type": "BM25",
"k1": k1,
"b": b,
}
},
}
mappings = {
"properties": {
"content": {
"type": "text",
"similarity": "custom_bm25", # Use the custom BM25 similarity
}
}
}
# Create the index with the specified settings and mappings
es.indices.create(index=index_name, mappings=mappings, settings=settings)
return cls(es, index_name)
[docs] def add_texts(
self,
texts: Iterable[str],
refresh_indices: bool = True,
) -> List[str]:
"""Run more texts through the embeddings and add to the retriever.
Args:
texts: Iterable of strings to add to the retriever.
refresh_indices: bool to refresh ElasticSearch indices
Returns:
List of ids from adding the texts into the retriever.
"""
try:
from elasticsearch.helpers import bulk
except ImportError:
raise ValueError(
"Could not import elasticsearch python package. " | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/elastic_search_bm25.html |
88c9f77dcf63-2 | raise ValueError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
requests = []
ids = []
for i, text in enumerate(texts):
_id = str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
"content": text,
"_id": _id,
}
ids.append(_id)
requests.append(request)
bulk(self.client, requests)
if refresh_indices:
self.client.indices.refresh(index=self.index_name)
return ids
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
query_dict = {"query": {"match": {"content": query}}}
res = self.client.search(index=self.index_name, body=query_dict)
docs = []
for r in res["hits"]["hits"]:
docs.append(Document(page_content=r["_source"]["content"]))
return docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
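A minimal usage sketch (assumes a local Elasticsearch instance; URL and index name are illustrative):
.. code-block:: python

    from langchain.retrievers import ElasticSearchBM25Retriever

    # create() builds a fresh index with the custom BM25 similarity defined above.
    retriever = ElasticSearchBM25Retriever.create(
        elasticsearch_url="http://localhost:9200",
        index_name="langchain-bm25-demo",
        k1=2.0,
        b=0.75,
    )
    retriever.add_texts(["BM25 is a ranking function", "Elasticsearch speaks JSON"])
    docs = retriever.get_relevant_documents("ranking function")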
Source code for langchain.retrievers.contextual_compression
Source: https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/contextual_compression.html
"""Retriever that wraps a base retriever and filters the results."""
from typing import List
from pydantic import BaseModel, Extra
from langchain.retrievers.document_compressors.base import (
BaseDocumentCompressor,
)
from langchain.schema import BaseRetriever, Document
[docs]class ContextualCompressionRetriever(BaseRetriever, BaseModel):
"""Retriever that wraps a base retriever and compresses the results."""
base_compressor: BaseDocumentCompressor
"""Compressor for compressing retrieved documents."""
base_retriever: BaseRetriever
"""Base Retriever to use for getting relevant documents."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
Sequence of relevant documents
"""
docs = self.base_retriever.get_relevant_documents(query)
if docs:
compressed_docs = self.base_compressor.compress_documents(docs, query)
return list(compressed_docs)
else:
return []
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
docs = await self.base_retriever.aget_relevant_documents(query)
if docs:
compressed_docs = await self.base_compressor.acompress_documents(
docs, query
)
return list(compressed_docs)
else:
return []
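A usage sketch (assumes the LLMChainExtractor compressor and ChatOpenAI from elsewhere in LangChain; base_retriever is assumed to already exist, e.g. some vectorstore's .as_retriever()):
.. code-block:: python

    from langchain.chat_models import ChatOpenAI
    from langchain.retrievers import ContextualCompressionRetriever
    from langchain.retrievers.document_compressors import LLMChainExtractor

    # An LLM-backed compressor that extracts the query-relevant parts of each doc.
    compressor = LLMChainExtractor.from_llm(ChatOpenAI(temperature=0))
    compression_retriever = ContextualCompressionRetriever(
        base_compressor=compressor,
        base_retriever=base_retriever,  # hypothetical existing retriever
    )
    docs = compression_retriever.get_relevant_documents("query of interest")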
Source code for langchain.retrievers.weaviate_hybrid_search
Source: https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/weaviate_hybrid_search.html
"""Wrapper around weaviate vector database."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from uuid import uuid4
from pydantic import Extra
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever
[docs]class WeaviateHybridSearchRetriever(BaseRetriever):
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
alpha: float = 0.5,
k: int = 4,
attributes: Optional[List[str]] = None,
create_schema_if_missing: bool = True,
):
try:
import weaviate
except ImportError:
raise ImportError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self.k = k
self.alpha = alpha
self._index_name = index_name
self._text_key = text_key
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
if create_schema_if_missing:
self._create_schema_if_missing()
def _create_schema_if_missing(self) -> None:
class_obj = {
"class": self._index_name,
"properties": [{"name": self._text_key, "dataType": ["text"]}], | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/weaviate_hybrid_search.html |
1d83de5f7b01-1 | "properties": [{"name": self._text_key, "dataType": ["text"]}],
"vectorizer": "text2vec-openai",
}
if not self._client.schema.exists(self._index_name):
self._client.schema.create_class(class_obj)
[docs] class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
# added text_key
[docs] def add_documents(self, docs: List[Document], **kwargs: Any) -> List[str]:
"""Upload documents to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(docs):
metadata = doc.metadata or {}
data_properties = {self._text_key: doc.page_content, **metadata}
# If the UUID of one of the objects already exists
# then the existing object will be replaced by the new object.
if "uuids" in kwargs:
_id = kwargs["uuids"][i]
else:
_id = get_valid_uuid(uuid4())
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
[docs] def get_relevant_documents(
self, query: str, where_filter: Optional[Dict[str, object]] = None
) -> List[Document]:
"""Look up similar documents in Weaviate."""
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if where_filter:
query_obj = query_obj.with_where(where_filter)
result = query_obj.with_hybrid(query, alpha=self.alpha).with_limit(self.k).do()
if "errors" in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
[docs] async def aget_relevant_documents(
self, query: str, where_filter: Optional[Dict[str, object]] = None
) -> List[Document]:
raise NotImplementedError
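A minimal usage sketch (assumes a local Weaviate instance reachable over HTTP; class and field names are illustrative):
.. code-block:: python

    import weaviate
    from langchain.docstore.document import Document
    from langchain.retrievers import WeaviateHybridSearchRetriever

    client = weaviate.Client(url="http://localhost:8080")  # local Weaviate assumed
    retriever = WeaviateHybridSearchRetriever(
        client=client,
        index_name="LangChainDemo",
        text_key="text",
        alpha=0.5,  # 0 = pure keyword (BM25) search, 1 = pure vector search
    )
    retriever.add_documents([Document(page_content="hybrid search blends BM25 and vectors")])
    docs = retriever.get_relevant_documents("hybrid search")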
Source code for langchain.retrievers.vespa_retriever
Source: https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/vespa_retriever.html
"""Wrapper for retrieving documents from Vespa."""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Sequence, Union
from langchain.schema import BaseRetriever, Document
if TYPE_CHECKING:
from vespa.application import Vespa
[docs]class VespaRetriever(BaseRetriever):
"""Retriever that uses the Vespa."""
def __init__(
self,
app: Vespa,
body: Dict,
content_field: str,
metadata_fields: Optional[Sequence[str]] = None,
):
self._application = app
self._query_body = body
self._content_field = content_field
self._metadata_fields = metadata_fields or ()
def _query(self, body: Dict) -> List[Document]:
response = self._application.query(body)
if not str(response.status_code).startswith("2"):
raise RuntimeError(
"Could not retrieve data from Vespa. Error code: {}".format(
response.status_code
)
)
root = response.json["root"]
if "errors" in root:
raise RuntimeError(json.dumps(root["errors"]))
docs = []
for child in response.hits:
page_content = child["fields"].pop(self._content_field, "")
if self._metadata_fields == "*":
metadata = child["fields"]
else:
metadata = {mf: child["fields"].get(mf) for mf in self._metadata_fields}
metadata["id"] = child["id"]
docs.append(Document(page_content=page_content, metadata=metadata))
return docs
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
body = self._query_body.copy()
body["query"] = query
return self._query(body)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
[docs] def get_relevant_documents_with_filter(
self, query: str, *, _filter: Optional[str] = None
) -> List[Document]:
body = self._query_body.copy()
_filter = f" and {_filter}" if _filter else ""
body["yql"] = body["yql"] + _filter
body["query"] = query
return self._query(body)
[docs] @classmethod
def from_params(
cls,
url: str,
content_field: str,
*,
k: Optional[int] = None,
metadata_fields: Union[Sequence[str], Literal["*"]] = (),
sources: Union[Sequence[str], Literal["*"], None] = None,
_filter: Optional[str] = None,
yql: Optional[str] = None,
**kwargs: Any,
) -> VespaRetriever:
"""Instantiate retriever from params.
Args:
url (str): Vespa app URL.
content_field (str): Field in results to return as Document page_content.
k (Optional[int]): Number of Documents to return. Defaults to None.
metadata_fields (Sequence[str] or "*"): Fields in results to include in
document metadata. Defaults to empty tuple ().
sources (Sequence[str] or "*" or None): Sources to retrieve
from. Defaults to None.
_filter (Optional[str]): Document filter condition expressed in YQL.
Defaults to None.
yql (Optional[str]): Full YQL query to be used. Should not be specified
if _filter or sources are specified. Defaults to None.
kwargs (Any): Keyword arguments added to query body.
"""
try:
from vespa.application import Vespa
except ImportError:
raise ImportError(
"pyvespa is not installed, please install with `pip install pyvespa`"
)
app = Vespa(url)
body = kwargs.copy()
if yql and (sources or _filter):
raise ValueError(
"yql should only be specified if both sources and _filter are not "
"specified."
)
else:
if metadata_fields == "*":
_fields = "*"
body["summary"] = "short"
else:
_fields = ", ".join([content_field] + list(metadata_fields or []))
_sources = ", ".join(sources) if isinstance(sources, Sequence) else "*"
_filter = f" and {_filter}" if _filter else ""
yql = f"select {_fields} from sources {_sources} where userQuery(){_filter}"
body["yql"] = yql
if k:
body["hits"] = k
return cls(app, body, content_field, metadata_fields=metadata_fields)
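A usage sketch of from_params (assumes a running Vespa application; field and source names are illustrative and depend on your Vespa schema):
.. code-block:: python

    from langchain.retrievers import VespaRetriever

    # from_params assembles the YQL query body shown above.
    retriever = VespaRetriever.from_params(
        url="http://localhost:8080",
        content_field="body",
        metadata_fields=["title"],
        sources=["demo"],
        k=4,
    )
    docs = retriever.get_relevant_documents("what is vespa?")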
Source code for langchain.retrievers.llama_index
Source: https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/llama_index.html
from typing import Any, Dict, List, cast
from pydantic import BaseModel, Field
from langchain.schema import BaseRetriever, Document
[docs]class LlamaIndexRetriever(BaseRetriever, BaseModel):
"""Question-answering with sources over an LlamaIndex data structure."""
index: Any
query_kwargs: Dict = Field(default_factory=dict)
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""Get documents relevant for a query."""
try:
from llama_index.indices.base import BaseGPTIndex
from llama_index.response.schema import Response
except ImportError:
raise ImportError(
"You need to install `pip install llama-index` to use this retriever."
)
index = cast(BaseGPTIndex, self.index)
response = index.query(query, response_mode="no_text", **self.query_kwargs)
response = cast(Response, response)
# parse source nodes
docs = []
for source_node in response.source_nodes:
metadata = source_node.extra_info or {}
docs.append(
Document(page_content=source_node.source_text, metadata=metadata)
)
return docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError("LlamaIndexRetriever does not support async")
[docs]class LlamaIndexGraphRetriever(BaseRetriever, BaseModel):
"""Question-answering with sources over an LlamaIndex graph data structure."""
graph: Any
query_configs: List[Dict] = Field(default_factory=list)
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""Get documents relevant for a query."""
try:
from llama_index.composability.graph import (
QUERY_CONFIG_TYPE,
ComposableGraph,
)
from llama_index.response.schema import Response
except ImportError:
raise ImportError(
"You need to install `pip install llama-index` to use this retriever."
)
graph = cast(ComposableGraph, self.graph)
# for now, inject response_mode="no_text" into query configs
for query_config in self.query_configs:
query_config["response_mode"] = "no_text"
query_configs = cast(List[QUERY_CONFIG_TYPE], self.query_configs)
response = graph.query(query, query_configs=query_configs)
response = cast(Response, response)
# parse source nodes
docs = []
for source_node in response.source_nodes:
metadata = source_node.extra_info or {}
docs.append(
Document(page_content=source_node.source_text, metadata=metadata)
)
return docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError("LlamaIndexGraphRetriever does not support async") | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/llama_index.html |
Source code for langchain.retrievers.azure_cognitive_search
Source: https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/azure_cognitive_search.html
"""Retriever wrapper for Azure Cognitive Search."""
from __future__ import annotations
import json
from typing import Dict, List, Optional
import aiohttp
import requests
from pydantic import BaseModel, Extra, root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env
[docs]class AzureCognitiveSearchRetriever(BaseRetriever, BaseModel):
"""Wrapper around Azure Cognitive Search."""
service_name: str = ""
"""Name of Azure Cognitive Search service"""
index_name: str = ""
"""Name of Index inside Azure Cognitive Search service"""
api_key: str = ""
"""API Key. Both Admin and Query keys work, but for reading data it's
recommended to use a Query key."""
api_version: str = "2020-06-30"
"""API version"""
aiosession: Optional[aiohttp.ClientSession] = None
"""ClientSession, in case we want to reuse connection for better performance."""
content_key: str = "content"
"""Key in a retrieved result to set as the Document page_content."""
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that service name, index name and api key exists in environment."""
values["service_name"] = get_from_dict_or_env(
values, "service_name", "AZURE_COGNITIVE_SEARCH_SERVICE_NAME"
)
values["index_name"] = get_from_dict_or_env(
values, "index_name", "AZURE_COGNITIVE_SEARCH_INDEX_NAME"
)
values["api_key"] = get_from_dict_or_env(
values, "api_key", "AZURE_COGNITIVE_SEARCH_API_KEY"
)
return values
def _build_search_url(self, query: str) -> str:
base_url = f"https://{self.service_name}.search.windows.net/"
endpoint_path = f"indexes/{self.index_name}/docs?api-version={self.api_version}"
return base_url + endpoint_path + f"&search={query}"
@property
def _headers(self) -> Dict[str, str]:
return {
"Content-Type": "application/json",
"api-key": self.api_key,
}
def _search(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
response = requests.get(search_url, headers=self._headers)
if response.status_code != 200:
raise Exception(f"Error in search request: {response}")
return json.loads(response.text)["value"]
async def _asearch(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(search_url, headers=self._headers) as response:
response_json = await response.json()
else:
async with self.aiosession.get(
search_url, headers=self._headers
) as response:
response_json = await response.json()
return response_json["value"]
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
search_results = self._search(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
search_results = await self._asearch(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
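A minimal usage sketch (service name, index name, and key are placeholders; they may also be supplied via the AZURE_COGNITIVE_SEARCH_* environment variables validated above):
.. code-block:: python

    from langchain.retrievers import AzureCognitiveSearchRetriever

    retriever = AzureCognitiveSearchRetriever(
        service_name="my-search-service",
        index_name="my-index",
        api_key="<query-key>",  # a Query key is recommended for read-only access
    )
    docs = retriever.get_relevant_documents("machine learning")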
Source code for langchain.retrievers.pupmed
Source: https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/pupmed.html
from typing import List
from langchain.schema import BaseRetriever, Document
from langchain.utilities.pupmed import PubMedAPIWrapper
[docs]class PubMedRetriever(BaseRetriever, PubMedAPIWrapper):
"""
It is effectively a wrapper around PubMedAPIWrapper: it exposes document
loading (load_docs()) as get_relevant_documents() and accepts all
PubMedAPIWrapper arguments unchanged.
"""
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
return self.load_docs(query=query)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
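A usage sketch (the retriever accepts PubMedAPIWrapper arguments unchanged; top_k_results is assumed to be one such field):
.. code-block:: python

    from langchain.retrievers import PubMedRetriever

    retriever = PubMedRetriever(top_k_results=3)  # assumed PubMedAPIWrapper field
    docs = retriever.get_relevant_documents("CRISPR gene editing")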
Source code for langchain.retrievers.time_weighted_retriever
Source: https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/time_weighted_retriever.html
"""Retriever that combines embedding similarity with recency in retrieving values."""
import datetime
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
from pydantic import BaseModel, Field
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.base import VectorStore
def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float:
"""Get the hours passed between two datetime objects."""
return (time - ref_time).total_seconds() / 3600
[docs]class TimeWeightedVectorStoreRetriever(BaseRetriever, BaseModel):
"""Retriever combining embedding similarity with recency."""
vectorstore: VectorStore
"""The vectorstore to store documents and determine salience."""
search_kwargs: dict = Field(default_factory=lambda: dict(k=100))
"""Keyword arguments to pass to the vectorstore similarity search."""
# TODO: abstract as a queue
memory_stream: List[Document] = Field(default_factory=list)
"""The memory_stream of documents to search through."""
decay_rate: float = Field(default=0.01)
"""The exponential decay factor used as (1.0-decay_rate)**(hrs_passed)."""
k: int = 4
"""The maximum number of documents to retrieve in a given call."""
other_score_keys: List[str] = []
"""Other keys in the metadata to factor into the score, e.g. 'importance'."""
default_salience: Optional[float] = None
"""The salience to assign memories not retrieved from the vector store.
None assigns no salience to documents not fetched from the vector store.
"""
class Config: | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/time_weighted_retriever.html |
ef81497e12f7-1 | """
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def _get_combined_score(
self,
document: Document,
vector_relevance: Optional[float],
current_time: datetime.datetime,
) -> float:
"""Return the combined score for a document."""
hours_passed = _get_hours_passed(
current_time,
document.metadata["last_accessed_at"],
)
score = (1.0 - self.decay_rate) ** hours_passed
for key in self.other_score_keys:
if key in document.metadata:
score += document.metadata[key]
if vector_relevance is not None:
score += vector_relevance
return score
[docs] def get_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]:
"""Return documents that are salient to the query."""
docs_and_scores: List[Tuple[Document, float]]
docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores(
query, **self.search_kwargs
)
results = {}
for fetched_doc, relevance in docs_and_scores:
if "buffer_idx" in fetched_doc.metadata:
buffer_idx = fetched_doc.metadata["buffer_idx"]
doc = self.memory_stream[buffer_idx]
results[buffer_idx] = (doc, relevance)
return results
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""Return documents that are relevant to the query."""
current_time = datetime.datetime.now()
docs_and_scores = {
doc.metadata["buffer_idx"]: (doc, self.default_salience)
for doc in self.memory_stream[-self.k :]
}
# If a doc is considered salient, update the salience score
docs_and_scores.update(self.get_salient_docs(query))
rescored_docs = [
(doc, self._get_combined_score(doc, relevance, current_time))
for doc, relevance in docs_and_scores.values()
]
rescored_docs.sort(key=lambda x: x[1], reverse=True)
result = []
# Ensure frequently accessed memories aren't forgotten
for doc, _ in rescored_docs[: self.k]:
# TODO: Update vector store doc once `update` method is exposed.
buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]]
buffered_doc.metadata["last_accessed_at"] = current_time
result.append(buffered_doc)
return result
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
"""Return documents that are relevant to the query."""
raise NotImplementedError
[docs] def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/time_weighted_retriever.html |
ef81497e12f7-3 | doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return self.vectorstore.add_documents(dup_docs, **kwargs)
[docs] async def aadd_documents(
self, documents: List[Document], **kwargs: Any
) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return await self.vectorstore.aadd_documents(dup_docs, **kwargs)
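A minimal usage sketch backed by an empty FAISS store (assumes the faiss-cpu package and OpenAIEmbeddings; 1536 matches the OpenAI embedding dimensionality):
.. code-block:: python

    import faiss  # pip install faiss-cpu
    from langchain.docstore import InMemoryDocstore
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.retrievers import TimeWeightedVectorStoreRetriever
    from langchain.schema import Document
    from langchain.vectorstores import FAISS

    embeddings = OpenAIEmbeddings()
    # An empty FAISS vectorstore to hold the memory stream's embeddings.
    vectorstore = FAISS(
        embeddings.embed_query, faiss.IndexFlatL2(1536), InMemoryDocstore({}), {}
    )
    retriever = TimeWeightedVectorStoreRetriever(
        vectorstore=vectorstore, decay_rate=0.01, k=4
    )
    retriever.add_documents([Document(page_content="hello world")])
    # Scores blend vector relevance with (1 - decay_rate) ** hours_passed recency.
    docs = retriever.get_relevant_documents("hello")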
Source code for langchain.retrievers.metal
Source: https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/metal.html
from typing import Any, List, Optional
from langchain.schema import BaseRetriever, Document
[docs]class MetalRetriever(BaseRetriever):
"""Retriever that uses the Metal API."""
def __init__(self, client: Any, params: Optional[dict] = None):
from metal_sdk.metal import Metal
if not isinstance(client, Metal):
raise ValueError(
"Got unexpected client, should be of type metal_sdk.metal.Metal. "
f"Instead, got {type(client)}"
)
self.client: Metal = client
self.params = params or {}
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
results = self.client.search({"text": query}, **self.params)
final_results = []
for r in results["data"]:
metadata = {k: v for k, v in r.items() if k != "text"}
final_results.append(Document(page_content=r["text"], metadata=metadata))
return final_results
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
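A usage sketch (assumes the metal_sdk package; the API key, client ID, and index ID are placeholders for a Metal project):
.. code-block:: python

    from metal_sdk.metal import Metal
    from langchain.retrievers import MetalRetriever

    metal = Metal("API_KEY", "CLIENT_ID", "INDEX_ID")  # placeholder credentials
    retriever = MetalRetriever(client=metal, params={"limit": 2})
    docs = retriever.get_relevant_documents("vector search")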
Source code for langchain.retrievers.docarray
Source: https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/docarray.html
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import numpy as np
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.utils import maximal_marginal_relevance
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
mmr = "mmr"
[docs]class DocArrayRetriever(BaseRetriever, BaseModel):
"""
Retriever class for DocArray Document Indices.
Currently supports 5 backends:
InMemoryExactNNIndex, HnswDocumentIndex, QdrantDocumentIndex,
ElasticDocIndex, and WeaviateDocumentIndex.
Attributes:
index: One of the above-mentioned index instances
embeddings: Embedding model to represent text as vectors
search_field: Field to consider for searching in the documents.
Should be an embedding/vector/tensor.
content_field: Field that represents the main content in your document schema.
Will be used as a `page_content`. Everything else will go into `metadata`.
search_type: Type of search to perform (similarity / mmr)
filters: Filters applied for document retrieval.
top_k: Number of documents to return
"""
index: Any
embeddings: Embeddings
search_field: str
content_field: str
search_type: SearchType = SearchType.similarity
top_k: int = 1
filters: Optional[Any] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
query_emb = np.array(self.embeddings.embed_query(query))
if self.search_type == SearchType.similarity:
results = self._similarity_search(query_emb)
elif self.search_type == SearchType.mmr:
results = self._mmr_search(query_emb)
else:
raise ValueError(
f"Search type {self.search_type} does not exist. "
f"Choose either 'similarity' or 'mmr'."
)
return results
def _search(
self, query_emb: np.ndarray, top_k: int
) -> List[Union[Dict[str, Any], Any]]:
"""
Perform a search using the query embedding and return top_k documents.
Args:
query_emb: Query represented as an embedding
top_k: Number of documents to return
Returns:
A list of top_k documents matching the query
"""
from docarray.index import ElasticDocIndex, WeaviateDocumentIndex
filter_args = {}
search_field = self.search_field
if isinstance(self.index, WeaviateDocumentIndex):
filter_args["where_filter"] = self.filters
search_field = ""
elif isinstance(self.index, ElasticDocIndex):
filter_args["query"] = self.filters
else:
filter_args["filter_query"] = self.filters
if self.filters:
query = (
self.index.build_query() # get empty query object
.find(
query=query_emb, search_field=search_field
) # add vector similarity search
.filter(**filter_args) # add filter search
.build(limit=top_k) # build the query
)
# execute the combined query and return the results
docs = self.index.execute_query(query)
if hasattr(docs, "documents"):
docs = docs.documents
docs = docs[:top_k]
else:
docs = self.index.find(
query=query_emb, search_field=search_field, limit=top_k
).documents
return docs
def _similarity_search(self, query_emb: np.ndarray) -> List[Document]:
"""
Perform a similarity search.
Args:
query_emb: Query represented as an embedding
Returns:
A list of documents most similar to the query
"""
docs = self._search(query_emb=query_emb, top_k=self.top_k)
results = [self._docarray_to_langchain_doc(doc) for doc in docs]
return results
def _mmr_search(self, query_emb: np.ndarray) -> List[Document]:
"""
Perform a maximal marginal relevance (mmr) search.
Args:
query_emb: Query represented as an embedding
Returns:
A list of diverse documents related to the query
"""
docs = self._search(query_emb=query_emb, top_k=20)
mmr_selected = maximal_marginal_relevance(
query_emb,
[
doc[self.search_field]
if isinstance(doc, dict)
else getattr(doc, self.search_field)
for doc in docs
],
k=self.top_k,
)
results = [self._docarray_to_langchain_doc(docs[idx]) for idx in mmr_selected]
return results
def _docarray_to_langchain_doc(self, doc: Union[Dict[str, Any], Any]) -> Document:
"""
Convert a DocArray document (which also might be a dict)
to a langchain document format.
DocArray document can contain arbitrary fields, so the mapping is done
in the following way:
page_content <-> content_field
metadata <-> all other fields excluding
tensors and embeddings (so float, int, string)
Args:
doc: DocArray document
Returns:
Document in langchain format
Raises:
ValueError: If the document doesn't contain the content field
"""
fields = doc.keys() if isinstance(doc, dict) else doc.__fields__
if self.content_field not in fields:
raise ValueError(
f"Document does not contain the content field - {self.content_field}."
)
lc_doc = Document(
page_content=doc[self.content_field]
if isinstance(doc, dict)
else getattr(doc, self.content_field)
)
for name in fields:
value = doc[name] if isinstance(doc, dict) else getattr(doc, name)
if (
isinstance(value, (str, int, float, bool))
and name != self.content_field
):
lc_doc.metadata[name] = value
return lc_doc
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
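A minimal, self-contained sketch using the InMemoryExactNNIndex backend. The MyDoc schema and the FakeEmbeddings stand-in are illustrative assumptions (FakeEmbeddings returns random vectors, so the ranking is arbitrary); any docarray index and real Embeddings implementation should work the same way.

from docarray import BaseDoc, DocList
from docarray.index import InMemoryExactNNIndex
from docarray.typing import NdArray
from langchain.embeddings import FakeEmbeddings
from langchain.retrievers import DocArrayRetriever

embeddings = FakeEmbeddings(size=32)  # stand-in embedding model

class MyDoc(BaseDoc):
    title: str
    title_embedding: NdArray[32]

# Index a couple of documents.
index = InMemoryExactNNIndex[MyDoc]()
index.index(
    DocList[MyDoc](
        MyDoc(title=t, title_embedding=embeddings.embed_query(t))
        for t in ["hello world", "goodbye world"]
    )
)

retriever = DocArrayRetriever(
    index=index,
    embeddings=embeddings,
    search_field="title_embedding",
    content_field="title",
)
docs = retriever.get_relevant_documents("hello")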
Source code for langchain.retrievers.wikipedia
from typing import List
from langchain.schema import BaseRetriever, Document
from langchain.utilities.wikipedia import WikipediaAPIWrapper
[docs]class WikipediaRetriever(BaseRetriever, WikipediaAPIWrapper):
"""
It is effectively a wrapper for WikipediaAPIWrapper.
It wraps load() to get_relevant_documents().
It uses all WikipediaAPIWrapper arguments without any change.
"""
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
return self.load(query=query)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
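A short usage sketch: it needs the wikipedia package (`pip install wikipedia`) and network access, and the keyword argument shown is one of the underlying WikipediaAPIWrapper fields.

from langchain.retrievers import WikipediaRetriever

retriever = WikipediaRetriever(top_k_results=2)  # WikipediaAPIWrapper field
docs = retriever.get_relevant_documents("Alan Turing")
print(docs[0].page_content[:200])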
Source code for langchain.retrievers.knn
"""KNN Retriever.
Largely based on
https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
from __future__ import annotations
import concurrent.futures
from typing import Any, List, Optional
import numpy as np
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
"""
Create an index of embeddings for a list of contexts.
Args:
contexts: List of contexts to embed.
embeddings: Embeddings model to use.
Returns:
Index of embeddings.
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
return np.array(list(executor.map(embeddings.embed_query, contexts)))
[docs]class KNNRetriever(BaseRetriever, BaseModel):
"""KNN Retriever."""
embeddings: Embeddings
index: Any
texts: List[str]
k: int = 4
relevancy_threshold: Optional[float] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] @classmethod
def from_texts(
cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
) -> KNNRetriever:
index = create_index(texts, embeddings)
return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
query_embeds = np.array(self.embeddings.embed_query(query))
# L2-normalize the index rows and the query embedding
index_embeds = self.index / np.sqrt((self.index**2).sum(1, keepdims=True))
query_embeds = query_embeds / np.sqrt((query_embeds**2).sum())
similarities = index_embeds.dot(query_embeds)
sorted_ix = np.argsort(-similarities)
denominator = np.max(similarities) - np.min(similarities) + 1e-6
normalized_similarities = (similarities - np.min(similarities)) / denominator
top_k_results = [
Document(page_content=self.texts[row])
for row in sorted_ix[0 : self.k]
if (
self.relevancy_threshold is None
or normalized_similarities[row] >= self.relevancy_threshold
)
]
return top_k_results
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
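A self-contained sketch. FakeEmbeddings keeps the example runnable without an API key (so the ranking is arbitrary); swap in a real Embeddings implementation for meaningful results.

from langchain.embeddings import FakeEmbeddings
from langchain.retrievers import KNNRetriever

retriever = KNNRetriever.from_texts(
    ["foo", "bar", "world"], FakeEmbeddings(size=100), k=2
)
docs = retriever.get_relevant_documents("foo")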
Source code for langchain.retrievers.milvus
"""Milvus Retriever"""
import warnings
from typing import Any, Dict, List, Optional
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.milvus import Milvus
# TODO: Update to MilvusClient + Hybrid Search when available
[docs]class MilvusRetriever(BaseRetriever):
"""Retriever that uses the Milvus API."""
def __init__(
self,
embedding_function: Embeddings,
collection_name: str = "LangChainCollection",
connection_args: Optional[Dict[str, Any]] = None,
consistency_level: str = "Session",
search_params: Optional[dict] = None,
):
self.store = Milvus(
embedding_function,
collection_name,
connection_args,
consistency_level,
)
self.retriever = self.store.as_retriever(search_kwargs={"param": search_params})
[docs] def add_texts(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> None:
"""Add text to the Milvus store
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.store.add_texts(texts, metadatas)
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
return self.retriever.get_relevant_documents(query)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
def MilvusRetreiver(*args: Any, **kwargs: Any) -> MilvusRetriever:
"""Deprecated MilvusRetreiver. Please use MilvusRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
MilvusRetriever
"""
warnings.warn(
"MilvusRetreiver will be deprecated in the future. "
"Please use MilvusRetriever ('i' before 'e') instead.",
DeprecationWarning,
)
return MilvusRetriever(*args, **kwargs)
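A usage sketch that assumes a Milvus server reachable at localhost:19530; the connection arguments and the FakeEmbeddings stand-in are placeholders, not requirements of the class.

from langchain.embeddings import FakeEmbeddings
from langchain.retrievers import MilvusRetriever

retriever = MilvusRetriever(
    embedding_function=FakeEmbeddings(size=128),  # stand-in embedding model
    collection_name="LangChainCollection",
    connection_args={"host": "localhost", "port": "19530"},  # assumed local server
)
retriever.add_texts(["foo", "bar"])
docs = retriever.get_relevant_documents("foo")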
Source code for langchain.retrievers.chatgpt_plugin_retriever
from __future__ import annotations
from typing import List, Optional
import aiohttp
import requests
from pydantic import BaseModel
from langchain.schema import BaseRetriever, Document
[docs]class ChatGPTPluginRetriever(BaseRetriever, BaseModel):
url: str
bearer_token: str
top_k: int = 3
filter: Optional[dict] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
url, json, headers = self._create_request(query)
response = requests.post(url, json=json, headers=headers)
results = response.json()["results"][0]["results"]
docs = []
for d in results:
content = d.pop("text")
metadata = d.pop("metadata", d)
if metadata.get("source_id"):
metadata["source"] = metadata.pop("source_id")
docs.append(Document(page_content=content, metadata=metadata))
return docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
url, json, headers = self._create_request(query)
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.post(url, headers=headers, json=json) as response:
res = await response.json()
else:
async with self.aiosession.post(
url, headers=headers, json=json
) as response:
res = await response.json()
results = res["results"][0]["results"]
docs = []
for d in results:
content = d.pop("text")
metadata = d.pop("metadata", d)
if metadata.get("source_id"):
metadata["source"] = metadata.pop("source_id")
docs.append(Document(page_content=content, metadata=metadata))
return docs
def _create_request(self, query: str) -> tuple[str, dict, dict]:
url = f"{self.url}/query"
json = {
"queries": [
{
"query": query,
"filter": self.filter,
"top_k": self.top_k,
}
]
}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.bearer_token}",
}
return url, json, headers
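A usage sketch assuming a chatgpt-retrieval-plugin server running locally with documents already upserted; the URL and bearer token are placeholders.

from langchain.retrievers import ChatGPTPluginRetriever

retriever = ChatGPTPluginRetriever(
    url="http://localhost:8000",  # assumed local plugin server
    bearer_token="<BEARER_TOKEN>",  # placeholder
    top_k=3,
)
docs = retriever.get_relevant_documents("alice's phone number")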
Source code for langchain.retrievers.pinecone_hybrid_search
"""Taken from: https://docs.pinecone.io/docs/hybrid-search"""
import hashlib
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
def hash_text(text: str) -> str:
"""Hash a text using SHA256.
Args:
text: Text to hash.
Returns:
Hashed text.
"""
return str(hashlib.sha256(text.encode("utf-8")).hexdigest())
def create_index(
contexts: List[str],
index: Any,
embeddings: Embeddings,
sparse_encoder: Any,
ids: Optional[List[str]] = None,
metadatas: Optional[List[dict]] = None,
) -> None:
"""
Create a Pinecone index from a list of contexts.
Modifies the index argument in-place.
Args:
contexts: List of contexts to embed.
index: Pinecone index to use.
embeddings: Embeddings model to use.
sparse_encoder: Sparse encoder to use.
ids: List of ids to use for the documents.
metadatas: List of metadata to use for the documents.
"""
batch_size = 32
_iterator = range(0, len(contexts), batch_size)
try:
from tqdm.auto import tqdm
_iterator = tqdm(_iterator)
except ImportError:
pass
if ids is None:
# create unique ids using hash of the text
ids = [hash_text(context) for context in contexts]
for i in _iterator:
# find end of batch
i_end = min(i + batch_size, len(contexts))
# extract batch
context_batch = contexts[i:i_end]
batch_ids = ids[i:i_end]
metadata_batch = (
metadatas[i:i_end] if metadatas else [{} for _ in context_batch]
)
# add context passages as metadata
meta = [
{"context": context, **metadata}
for context, metadata in zip(context_batch, metadata_batch)
]
# create dense vectors
dense_embeds = embeddings.embed_documents(context_batch)
# create sparse vectors
sparse_embeds = sparse_encoder.encode_documents(context_batch)
for s in sparse_embeds:
s["values"] = [float(s1) for s1 in s["values"]]
vectors = []
# loop through the data and create dictionaries for upserts
for doc_id, sparse, dense, metadata in zip(
batch_ids, sparse_embeds, dense_embeds, meta
):
vectors.append(
{
"id": doc_id,
"sparse_values": sparse,
"values": dense,
"metadata": metadata,
}
)
# upload the documents to the new hybrid index
index.upsert(vectors)
[docs]class PineconeHybridSearchRetriever(BaseRetriever, BaseModel):
embeddings: Embeddings
sparse_encoder: Any
index: Any
top_k: int = 4
alpha: float = 0.5
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
[docs] def add_texts(
self,
texts: List[str],
ids: Optional[List[str]] = None,
metadatas: Optional[List[dict]] = None,
) -> None:
create_index(
texts,
self.index,
self.embeddings,
self.sparse_encoder,
ids=ids,
metadatas=metadatas,
)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
from pinecone_text.hybrid import hybrid_convex_scale # noqa:F401
from pinecone_text.sparse.base_sparse_encoder import (
BaseSparseEncoder, # noqa:F401
)
except ImportError:
raise ValueError(
"Could not import pinecone_text python package. "
"Please install it with `pip install pinecone_text`."
)
return values
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
from pinecone_text.hybrid import hybrid_convex_scale
sparse_vec = self.sparse_encoder.encode_queries(query)
# convert the question into a dense vector
dense_vec = self.embeddings.embed_query(query)
# scale alpha with hybrid_scale
dense_vec, sparse_vec = hybrid_convex_scale(dense_vec, sparse_vec, self.alpha)
sparse_vec["values"] = [float(s1) for s1 in sparse_vec["values"]]
# query pinecone with the query parameters
result = self.index.query(
vector=dense_vec,
sparse_vector=sparse_vec,
top_k=self.top_k,
include_metadata=True,
)
final_result = []
for res in result["matches"]:
context = res["metadata"].pop("context")
final_result.append(
Document(page_content=context, metadata=res["metadata"])
)
# return search results as json
return final_result
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
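A usage sketch assuming a Pinecone account and an existing index created with the dotproduct metric; the API key, environment, and index name are placeholders, and OpenAIEmbeddings requires OPENAI_API_KEY to be set.

import pinecone
from pinecone_text.sparse import BM25Encoder
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import PineconeHybridSearchRetriever

pinecone.init(api_key="<PINECONE_API_KEY>", environment="<ENVIRONMENT>")  # placeholders
index = pinecone.Index("<HYBRID_INDEX>")  # assumes an index created with metric="dotproduct"

retriever = PineconeHybridSearchRetriever(
    embeddings=OpenAIEmbeddings(),
    sparse_encoder=BM25Encoder().default(),  # encoder with pretrained MS MARCO parameters
    index=index,
)
retriever.add_texts(["foo", "bar", "world"])
docs = retriever.get_relevant_documents("foo")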
Source code for langchain.retrievers.databerry
from typing import List, Optional
import aiohttp
import requests
from langchain.schema import BaseRetriever, Document
[docs]class DataberryRetriever(BaseRetriever):
"""Retriever that uses the Databerry API."""
datastore_url: str
top_k: Optional[int]
api_key: Optional[str]
def __init__(
self,
datastore_url: str,
top_k: Optional[int] = None,
api_key: Optional[str] = None,
):
self.datastore_url = datastore_url
self.api_key = api_key
self.top_k = top_k
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
response = requests.post(
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
)
data = response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
async with aiohttp.ClientSession() as session:
async with session.request(
"POST",
self.datastore_url,
json={
"query": query, | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/databerry.html |
ac0a4067d953-1 | self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
) as response:
data = await response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
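A usage sketch; the datastore URL and API key are placeholders for your own Databerry datastore (the key is only needed for private datastores).

from langchain.retrievers import DataberryRetriever

retriever = DataberryRetriever(
    datastore_url="https://<DATASTORE_ID>.databerry.ai/query",  # placeholder
    api_key="<DATABERRY_API_KEY>",  # only required for private datastores
    top_k=3,
)
docs = retriever.get_relevant_documents("What is in this datastore?")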
Source code for langchain.retrievers.zep
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, List, Optional
from langchain.schema import BaseRetriever, Document
if TYPE_CHECKING:
from zep_python import MemorySearchResult
[docs]class ZepRetriever(BaseRetriever):
"""A Retriever implementation for the Zep long-term memory store. Search your
user's long-term chat history with Zep.
Note: You will need to provide the user's `session_id` to use this retriever.
More on Zep:
Zep provides long-term conversation storage for LLM apps. The server stores,
summarizes, embeds, indexes, and enriches conversational AI chat
histories, and exposes them via simple, low-latency APIs.
For server installation instructions, see:
https://getzep.github.io/deployment/quickstart/
"""
def __init__(
self,
session_id: str,
url: str,
top_k: Optional[int] = None,
):
try:
from zep_python import ZepClient
except ImportError:
raise ValueError(
"Could not import zep-python package. "
"Please install it with `pip install zep-python`."
)
self.zep_client = ZepClient(base_url=url)
self.session_id = session_id
self.top_k = top_k
def _search_result_to_doc(
self, results: List[MemorySearchResult]
) -> List[Document]:
return [
Document(
page_content=r.message.pop("content"),
metadata={"score": r.dist, **r.message},
)
for r in results
if r.message
]
[docs] def get_relevant_documents(
self, query: str, metadata: Optional[Dict] = None
) -> List[Document]:
from zep_python import MemorySearchPayload
payload: MemorySearchPayload = MemorySearchPayload(
text=query, metadata=metadata
)
results: List[MemorySearchResult] = self.zep_client.search_memory(
self.session_id, payload, limit=self.top_k
)
return self._search_result_to_doc(results)
[docs] async def aget_relevant_documents(
self, query: str, metadata: Optional[Dict] = None
) -> List[Document]:
from zep_python import MemorySearchPayload
payload: MemorySearchPayload = MemorySearchPayload(
text=query, metadata=metadata
)
results: List[MemorySearchResult] = await self.zep_client.asearch_memory(
self.session_id, payload, limit=self.top_k
)
return self._search_result_to_doc(results)
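A usage sketch assuming a Zep server at localhost:8000 and an existing chat session; the session ID is a placeholder.

from langchain.retrievers import ZepRetriever

retriever = ZepRetriever(
    session_id="<SESSION_ID>",  # placeholder for an existing Zep session
    url="http://localhost:8000",  # assumed local Zep server
    top_k=5,
)
docs = retriever.get_relevant_documents("Where did we plan to travel?")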
Source code for langchain.retrievers.svm
"""SMV Retriever.
Largely based on
https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
from __future__ import annotations
import concurrent.futures
from typing import Any, List, Optional
import numpy as np
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
"""
Create an index of embeddings for a list of contexts.
Args:
contexts: List of contexts to embed.
embeddings: Embeddings model to use.
Returns:
Index of embeddings.
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
return np.array(list(executor.map(embeddings.embed_query, contexts)))
[docs]class SVMRetriever(BaseRetriever, BaseModel):
"""SVM Retriever."""
embeddings: Embeddings
index: Any
texts: List[str]
k: int = 4
relevancy_threshold: Optional[float] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] @classmethod
def from_texts(
cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
) -> SVMRetriever:
index = create_index(texts, embeddings)
return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
from sklearn import svm
query_embeds = np.array(self.embeddings.embed_query(query))
x = np.concatenate([query_embeds[None, ...], self.index])
y = np.zeros(x.shape[0])
y[0] = 1
clf = svm.LinearSVC(
class_weight="balanced", verbose=False, max_iter=10000, tol=1e-6, C=0.1
)
clf.fit(x, y)
similarities = clf.decision_function(x)
sorted_ix = np.argsort(-similarities)
# svm.LinearSVC in scikit-learn is non-deterministic.
# if a text is the same as a query, there is no guarantee
# the query will be in the first index.
# this performs a simple swap; it works because anything
# left of the 0 should be equivalent.
zero_index = np.where(sorted_ix == 0)[0][0]
if zero_index != 0:
sorted_ix[0], sorted_ix[zero_index] = sorted_ix[zero_index], sorted_ix[0]
denominator = np.max(similarities) - np.min(similarities) + 1e-6
normalized_similarities = (similarities - np.min(similarities)) / denominator
top_k_results = []
for row in sorted_ix[1 : self.k + 1]:
if (
self.relevancy_threshold is None
or normalized_similarities[row] >= self.relevancy_threshold
):
top_k_results.append(Document(page_content=self.texts[row - 1]))
return top_k_results
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
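A self-contained sketch (requires scikit-learn). FakeEmbeddings keeps it runnable without an API key, so the ranking is arbitrary; use a real Embeddings implementation for meaningful results.

from langchain.embeddings import FakeEmbeddings
from langchain.retrievers import SVMRetriever

retriever = SVMRetriever.from_texts(
    ["foo", "bar", "world"], FakeEmbeddings(size=100), k=2
)
docs = retriever.get_relevant_documents("foo")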
Source code for langchain.retrievers.multi_query
import logging
from typing import List
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.llms.base import BaseLLM
from langchain.output_parsers.pydantic import PydanticOutputParser
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseRetriever, Document
logger = logging.getLogger(__name__)
class LineList(BaseModel):
lines: List[str] = Field(description="Lines of text")
class LineListOutputParser(PydanticOutputParser):
def __init__(self) -> None:
super().__init__(pydantic_object=LineList)
def parse(self, text: str) -> LineList:
lines = text.strip().split("\n")
return LineList(lines=lines)
# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate(
input_variables=["question"],
template="""You are an AI language model assistant. Your task is
to generate 3 different versions of the given user
question to retrieve relevant documents from a vector database.
By generating multiple perspectives on the user question,
your goal is to help the user overcome some of the limitations
of distance-based similarity search. Provide these alternative
questions separated by newlines. Original question: {question}""",
)
[docs]class MultiQueryRetriever(BaseRetriever):
"""Given a user query, use an LLM to write a set of queries.
Retrieve docs for each query. Take the unique union of all retrieved docs."""
def __init__(
self,
retriever: BaseRetriever,
llm_chain: LLMChain,
verbose: bool = True,
parser_key: str = "lines",
) -> None:
"""Initialize MultiQueryRetriever.
Args:
retriever: retriever to query documents from
llm_chain: llm_chain for query generation
verbose: show the queries that we generated to the user
parser_key: attribute name for the parsed output
Returns:
MultiQueryRetriever
"""
self.retriever = retriever
self.llm_chain = llm_chain
self.verbose = verbose
self.parser_key = parser_key
[docs] @classmethod
def from_llm(
cls,
retriever: BaseRetriever,
llm: BaseLLM,
prompt: PromptTemplate = DEFAULT_QUERY_PROMPT,
parser_key: str = "lines",
) -> "MultiQueryRetriever":
"""Initialize from llm using default template.
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
Returns:
MultiQueryRetriever
"""
output_parser = LineListOutputParser()
llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser=output_parser)
return cls(
retriever=retriever,
llm_chain=llm_chain,
parser_key=parser_key,
)
[docs] def get_relevant_documents(self, question: str) -> List[Document]:
"""Get relevated documents given a user query.
Args:
question: user query
Returns:
Unique union of relevant documents from all generated queries
""" | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/multi_query.html |
abdd6368a0c1-2 | Returns:
Unique union of relevant documents from all generated queries
"""
queries = self.generate_queries(question)
documents = self.retrieve_documents(queries)
unique_documents = self.unique_union(documents)
return unique_documents
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
[docs] def generate_queries(self, question: str) -> List[str]:
"""Generate queries based upon user input.
Args:
question: user query
Returns:
List of LLM generated queries that are similar to the user input
"""
response = self.llm_chain({"question": question})
lines = getattr(response["text"], self.parser_key, [])
if self.verbose:
logger.info(f"Generated queries: {lines}")
return lines
[docs] def retrieve_documents(self, queries: List[str]) -> List[Document]:
"""Run all LLM generated queries.
Args:
queries: query list
Returns:
List of retrieved Documents
"""
documents = []
for query in queries:
docs = self.retriever.get_relevant_documents(query)
documents.extend(docs)
return documents
[docs] def unique_union(self, documents: List[Document]) -> List[Document]:
"""Get uniqe Documents.
Args:
documents: List of retrived Documents
Returns:
List of unique retrived Documents
"""
# Create a dictionary with page_content as keys to remove duplicates
# TODO: Add Document ID property (e.g., UUID)
unique_documents_dict = {
(doc.page_content, tuple(sorted(doc.metadata.items()))): doc
for doc in documents
}
unique_documents = list(unique_documents_dict.values())
return unique_documents
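A usage sketch: the base retriever here is a small KNNRetriever over FakeEmbeddings purely for illustration, and OpenAI requires OPENAI_API_KEY; any retriever/LLM pair works the same way.

from langchain.embeddings import FakeEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers import KNNRetriever
from langchain.retrievers.multi_query import MultiQueryRetriever

base_retriever = KNNRetriever.from_texts(
    ["task decomposition", "agent memory"], FakeEmbeddings(size=100)
)
retriever = MultiQueryRetriever.from_llm(
    retriever=base_retriever, llm=OpenAI(temperature=0)
)
docs = retriever.get_relevant_documents("What are approaches to task decomposition?")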
Source code for langchain.retrievers.merger_retriever
from typing import List
from langchain.schema import BaseRetriever, Document
[docs]class MergerRetriever(BaseRetriever):
"""
This class merges the results of multiple retrievers.
Args:
retrievers: A list of retrievers to merge.
"""
def __init__(
self,
retrievers: List[BaseRetriever],
):
"""
Initialize the MergerRetriever class.
Args:
retrievers: A list of retrievers to merge.
"""
self.retrievers = retrievers
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
merged_documents = self.merge_documents(query)
return merged_documents
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
"""
Asynchronously get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
merged_documents = await self.amerge_documents(query)
return merged_documents
[docs] def merge_documents(self, query: str) -> List[Document]:
"""
Merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
""" | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/merger_retriever.html |
152f3fb8cfec-1 | Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = [
retriever.get_relevant_documents(query) for retriever in self.retrievers
]
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(len(docs) for docs in retriever_docs)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
[docs] async def amerge_documents(self, query: str) -> List[Document]:
"""
Asynchronously merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = [
await retriever.aget_relevant_documents(query)
for retriever in self.retrievers
]
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(len(docs) for docs in retriever_docs)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
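A self-contained sketch showing the round-robin interleaving: the first document of each retriever, then the second, and so on. FakeEmbeddings is a stand-in, so the per-retriever ranking is arbitrary.

from langchain.embeddings import FakeEmbeddings
from langchain.retrievers import KNNRetriever, MergerRetriever

emb = FakeEmbeddings(size=64)  # stand-in embedding model
retriever_a = KNNRetriever.from_texts(["alpha", "beta"], emb, k=1)
retriever_b = KNNRetriever.from_texts(["gamma", "delta"], emb, k=1)

merger = MergerRetriever([retriever_a, retriever_b])
docs = merger.get_relevant_documents("alpha")  # one doc from each retriever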
Source code for langchain.retrievers.arxiv
from typing import List
from langchain.schema import BaseRetriever, Document
from langchain.utilities.arxiv import ArxivAPIWrapper
[docs]class ArxivRetriever(BaseRetriever, ArxivAPIWrapper):
"""
It is effectively a wrapper for ArxivAPIWrapper.
It wraps load() to get_relevant_documents().
It uses all ArxivAPIWrapper arguments without any change.
"""
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
return self.load(query=query)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
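A short usage sketch; it needs the arxiv package (`pip install arxiv`) and network access, and load_max_docs is one of the underlying ArxivAPIWrapper fields.

from langchain.retrievers import ArxivRetriever

retriever = ArxivRetriever(load_max_docs=2)  # ArxivAPIWrapper field
docs = retriever.get_relevant_documents("quantum error correction")
print(docs[0].metadata)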
Source code for langchain.retrievers.kendra
import re
from typing import Any, Dict, List, Literal, Optional
from pydantic import BaseModel, Extra
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever
def clean_excerpt(excerpt: str) -> str:
if not excerpt:
return excerpt
res = re.sub("\s+", " ", excerpt).replace("...", "")
return res
def combined_text(title: str, excerpt: str) -> str:
if not title or not excerpt:
return ""
return f"Document Title: {title} \nDocument Excerpt: \n{excerpt}\n"
class Highlight(BaseModel, extra=Extra.allow):
BeginOffset: int
EndOffset: int
TopAnswer: Optional[bool]
Type: Optional[str]
class TextWithHighLights(BaseModel, extra=Extra.allow):
Text: str
Highlights: Optional[Any]
class AdditionalResultAttributeValue(BaseModel, extra=Extra.allow):
TextWithHighlightsValue: TextWithHighLights
class AdditionalResultAttribute(BaseModel, extra=Extra.allow):
Key: str
ValueType: Literal["TEXT_WITH_HIGHLIGHTS_VALUE"]
Value: AdditionalResultAttributeValue
def get_value_text(self) -> str:
return self.Value.TextWithHighlightsValue.Text
class QueryResultItem(BaseModel, extra=Extra.allow):
DocumentId: str
DocumentTitle: TextWithHighLights
DocumentURI: Optional[str]
FeedbackToken: Optional[str]
Format: Optional[str]
Id: Optional[str]
Type: Optional[str]
AdditionalAttributes: Optional[List[AdditionalResultAttribute]] = []
DocumentExcerpt: Optional[TextWithHighLights]
def get_attribute_value(self) -> str:
if not self.AdditionalAttributes:
return ""
if not self.AdditionalAttributes[0]:
return ""
else:
return self.AdditionalAttributes[0].get_value_text()
def get_excerpt(self) -> str:
if (
self.AdditionalAttributes
and self.AdditionalAttributes[0].Key == "AnswerText"
):
excerpt = self.get_attribute_value()
elif self.DocumentExcerpt:
excerpt = self.DocumentExcerpt.Text
else:
excerpt = ""
return clean_excerpt(excerpt)
def to_doc(self) -> Document:
title = self.DocumentTitle.Text
source = self.DocumentURI
excerpt = self.get_excerpt()
type = self.Type
page_content = combined_text(title, excerpt)
metadata = {"source": source, "title": title, "excerpt": excerpt, "type": type}
return Document(page_content=page_content, metadata=metadata)
class QueryResult(BaseModel, extra=Extra.allow):
ResultItems: List[QueryResultItem]
def get_top_k_docs(self, top_n: int) -> List[Document]:
items_len = len(self.ResultItems)
count = items_len if items_len < top_n else top_n
docs = [self.ResultItems[i].to_doc() for i in range(0, count)]
return docs
class DocumentAttributeValue(BaseModel, extra=Extra.allow):
DateValue: Optional[str]
LongValue: Optional[int]
StringListValue: Optional[List[str]]
StringValue: Optional[str]
class DocumentAttribute(BaseModel, extra=Extra.allow):
Key: str
Value: DocumentAttributeValue
class RetrieveResultItem(BaseModel, extra=Extra.allow):
Content: Optional[str]
DocumentAttributes: Optional[List[DocumentAttribute]] = []
DocumentId: Optional[str]
DocumentTitle: Optional[str]
DocumentURI: Optional[str]
Id: Optional[str]
def get_excerpt(self) -> str:
if not self.Content:
return ""
return clean_excerpt(self.Content)
def to_doc(self) -> Document:
title = self.DocumentTitle if self.DocumentTitle else ""
source = self.DocumentURI
excerpt = self.get_excerpt()
page_content = combined_text(title, excerpt)
metadata = {"source": source, "title": title, "excerpt": excerpt}
return Document(page_content=page_content, metadata=metadata)
class RetrieveResult(BaseModel, extra=Extra.allow):
QueryId: str
ResultItems: List[RetrieveResultItem]
def get_top_k_docs(self, top_n: int) -> List[Document]:
items_len = len(self.ResultItems)
count = items_len if items_len < top_n else top_n
docs = [self.ResultItems[i].to_doc() for i in range(0, count)]
return docs
[docs]class AmazonKendraRetriever(BaseRetriever):
"""Retriever class to query documents from Amazon Kendra Index.
Args:
index_id: Kendra index id
region_name: The aws region e.g., `us-west-2`.
Falls back to the AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config.
credentials_profile_name: The name of the profile in the ~/.aws/credentials
or ~/.aws/config files, which has either access keys or role information
specified. If not specified, the default credential profile or, if on an
EC2 instance, credentials from IMDS will be used.
top_k: Number of results to return
attribute_filter: Additional filtering of results based on metadata
See: https://docs.aws.amazon.com/kendra/latest/APIReference
client: boto3 client for Kendra
Example:
.. code-block:: python
retriever = AmazonKendraRetriever(
index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03"
)
"""
def __init__(
self,
index_id: str,
region_name: Optional[str] = None,
credentials_profile_name: Optional[str] = None,
top_k: int = 3,
attribute_filter: Optional[Dict] = None,
client: Optional[Any] = None,
):
self.index_id = index_id
self.top_k = top_k
self.attribute_filter = attribute_filter
if client is not None:
self.client = client
return
try:
import boto3
if credentials_profile_name is not None:
session = boto3.Session(profile_name=credentials_profile_name)
else:
# use default credentials
session = boto3.Session()
client_params = {}
if region_name is not None:
client_params["region_name"] = region_name
self.client = session.client("kendra", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`." | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/kendra.html |
2c8a1e4bc351-4 | "Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
def _kendra_query(
self,
query: str,
top_k: int,
attribute_filter: Optional[Dict] = None,
) -> List[Document]:
if attribute_filter is not None:
response = self.client.retrieve(
IndexId=self.index_id,
QueryText=query.strip(),
PageSize=top_k,
AttributeFilter=attribute_filter,
)
else:
response = self.client.retrieve(
IndexId=self.index_id, QueryText=query.strip(), PageSize=top_k
)
r_result = RetrieveResult.parse_obj(response)
result_len = len(r_result.ResultItems)
if result_len == 0:
# retrieve API returned 0 results, call query API
if attribute_filter is not None:
response = self.client.query(
IndexId=self.index_id,
QueryText=query.strip(),
PageSize=top_k,
AttributeFilter=attribute_filter,
)
else:
response = self.client.query(
IndexId=self.index_id, QueryText=query.strip(), PageSize=top_k
)
q_result = QueryResult.parse_obj(response)
docs = q_result.get_top_k_docs(top_k)
else:
docs = r_result.get_top_k_docs(top_k)
return docs
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
"""Run search on Kendra index and get top k documents
Example:
.. code-block:: python
docs = retriever.get_relevant_documents('This is my query')
"""
docs = self._kendra_query(query, self.top_k, self.attribute_filter)
return docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError("Async version is not implemented for Kendra yet.") | https://api.python.langchain.com/en/latest/_modules/langchain/retrievers/kendra.html |
Source code for langchain.retrievers.tfidf
"""TF-IDF Retriever.
Largely based on
https://github.com/asvskartheek/Text-Retrieval/blob/master/TF-IDF%20Search%20Engine%20(SKLEARN).ipynb"""
from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional
from pydantic import BaseModel
from langchain.schema import BaseRetriever, Document
[docs]class TFIDFRetriever(BaseRetriever, BaseModel):
vectorizer: Any
docs: List[Document]
tfidf_array: Any
k: int = 4
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] @classmethod
def from_texts(
cls,
texts: Iterable[str],
metadatas: Optional[Iterable[dict]] = None,
tfidf_params: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> TFIDFRetriever:
try:
from sklearn.feature_extraction.text import TfidfVectorizer
except ImportError:
raise ImportError(
"Could not import scikit-learn, please install with `pip install "
"scikit-learn`."
)
tfidf_params = tfidf_params or {}
vectorizer = TfidfVectorizer(**tfidf_params)
tfidf_array = vectorizer.fit_transform(texts)
metadatas = metadatas or ({} for _ in texts)
docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)]
return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array, **kwargs)
[docs] @classmethod
def from_documents(
cls,
documents: Iterable[Document],
*,
tfidf_params: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> TFIDFRetriever:
texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
return cls.from_texts(
texts=texts, tfidf_params=tfidf_params, metadatas=metadatas, **kwargs
)
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
from sklearn.metrics.pairwise import cosine_similarity
query_vec = self.vectorizer.transform(
[query]
)  # shape: (1, n_features)
results = cosine_similarity(self.tfidf_array, query_vec).reshape(
(-1,)
)  # shape: (n_docs,) -- cosine similarity with each doc
return_docs = [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
return return_docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
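A self-contained sketch (requires scikit-learn); the tfidf_params dict is forwarded straight to TfidfVectorizer.

from langchain.retrievers import TFIDFRetriever

retriever = TFIDFRetriever.from_texts(
    ["foo", "bar", "foo bar world"],
    tfidf_params={"ngram_range": (1, 2)},  # forwarded to TfidfVectorizer
)
docs = retriever.get_relevant_documents("foo")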
Source code for langchain.retrievers.zilliz
"""Zilliz Retriever"""
import warnings
from typing import Any, Dict, List, Optional
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.zilliz import Zilliz
# TODO: Update to ZillizClient + Hybrid Search when available
[docs]class ZillizRetriever(BaseRetriever):
"""Retriever that uses the Zilliz API."""
def __init__(
self,
embedding_function: Embeddings,
collection_name: str = "LangChainCollection",
connection_args: Optional[Dict[str, Any]] = None,
consistency_level: str = "Session",
search_params: Optional[dict] = None,
):
self.store = Zilliz(
embedding_function,
collection_name,
connection_args,
consistency_level,
)
self.retriever = self.store.as_retriever(search_kwargs={"param": search_params})
[docs] def add_texts(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> None:
"""Add text to the Zilliz store
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.store.add_texts(texts, metadatas)
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
return self.retriever.get_relevant_documents(query)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
def ZillizRetreiver(*args: Any, **kwargs: Any) -> ZillizRetriever:
"""
Deprecated ZillizRetreiver. Please use ZillizRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
ZillizRetriever
"""
warnings.warn(
"ZillizRetreiver will be deprecated in the future. "
"Please use ZillizRetriever ('i' before 'e') instead.",
DeprecationWarning,
)
return ZillizRetriever(*args, **kwargs)
Source code for langchain.retrievers.document_compressors.cohere_rerank
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Sequence
from pydantic import Extra, root_validator
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.schema import Document
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from cohere import Client
else:
# We do this to avoid pydantic annotation issues when actually instantiating
# while keeping this import optional
try:
from cohere import Client
except ImportError:
pass
[docs]class CohereRerank(BaseDocumentCompressor):
client: Client
top_n: int = 3
model: str = "rerank-english-v2.0"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
if len(documents) == 0: # to avoid empty api call
return []
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
results = self.client.rerank(
model=self.model, query=query, documents=_docs, top_n=self.top_n
)
final_results = []
for r in results:
doc = doc_list[r.index]
doc.metadata["relevance_score"] = r.relevance_score
final_results.append(doc)
return final_results
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
raise NotImplementedError
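A usage sketch wiring the reranker into a ContextualCompressionRetriever; it assumes COHERE_API_KEY is set in the environment, and the KNN base retriever over FakeEmbeddings is purely illustrative.

from langchain.embeddings import FakeEmbeddings
from langchain.retrievers import ContextualCompressionRetriever, KNNRetriever
from langchain.retrievers.document_compressors import CohereRerank

base_retriever = KNNRetriever.from_texts(
    ["foo", "bar", "world"], FakeEmbeddings(size=64), k=3
)
compressor = CohereRerank(top_n=2)  # builds a client from COHERE_API_KEY
retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=base_retriever
)
docs = retriever.get_relevant_documents("foo")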
Source code for langchain.retrievers.document_compressors.chain_filter
"""Filter that uses an LLM to drop documents that aren't relevant to the query."""
from typing import Any, Callable, Dict, Optional, Sequence
from langchain import BasePromptTemplate, LLMChain, PromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.output_parsers.boolean import BooleanOutputParser
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_filter_prompt import (
prompt_template,
)
from langchain.schema import Document
def _get_default_chain_prompt() -> PromptTemplate:
return PromptTemplate(
template=prompt_template,
input_variables=["question", "context"],
output_parser=BooleanOutputParser(),
)
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
[docs]class LLMChainFilter(BaseDocumentCompressor):
"""Filter that drops documents that aren't relevant to the query."""
llm_chain: LLMChain
"""LLM wrapper to use for filtering documents.
The chain prompt is expected to have a BooleanOutputParser."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
filtered_docs = []
for doc in documents:
_input = self.get_input(query, doc)
include_doc = self.llm_chain.predict_and_parse(**_input)
if include_doc:
filtered_docs.append(doc)
return filtered_docs
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Filter down documents."""
raise NotImplementedError
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any
) -> "LLMChainFilter":
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
llm_chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=llm_chain, **kwargs)
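A usage sketch; OpenAI requires OPENAI_API_KEY, and the two documents are illustrative -- only the query-relevant one should pass the filter.

from langchain.llms import OpenAI
from langchain.retrievers.document_compressors import LLMChainFilter
from langchain.schema import Document

doc_filter = LLMChainFilter.from_llm(OpenAI(temperature=0))  # needs OPENAI_API_KEY
docs = [
    Document(page_content="LangChain helps build LLM applications."),
    Document(page_content="Bananas are yellow."),
]
kept = doc_filter.compress_documents(docs, "What is LangChain for?")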
Source code for langchain.retrievers.document_compressors.base
"""Interface for retrieved document compressors."""
from abc import ABC, abstractmethod
from typing import List, Sequence, Union
from pydantic import BaseModel
from langchain.schema import BaseDocumentTransformer, Document
class BaseDocumentCompressor(BaseModel, ABC):
"""Base abstraction interface for document compression."""
@abstractmethod
def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
@abstractmethod
async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
[docs]class DocumentCompressorPipeline(BaseDocumentCompressor):
"""Document compressor that uses a pipeline of transformers."""
transformers: List[Union[BaseDocumentTransformer, BaseDocumentCompressor]]
"""List of document filters that are chained together and run in sequence."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Transform a list of documents."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
documents = _transformer.compress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = _transformer.transform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
documents = await _transformer.acompress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = await _transformer.atransform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
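A sketch chaining a text splitter, a redundancy filter, and a relevance filter; FakeEmbeddings keeps it runnable offline, so the similarity scores are arbitrary.

from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.embeddings import FakeEmbeddings
from langchain.retrievers.document_compressors import (
    DocumentCompressorPipeline,
    EmbeddingsFilter,
)
from langchain.schema import Document
from langchain.text_splitter import CharacterTextSplitter

emb = FakeEmbeddings(size=64)
pipeline = DocumentCompressorPipeline(
    transformers=[
        CharacterTextSplitter(chunk_size=100, chunk_overlap=0, separator=". "),
        EmbeddingsRedundantFilter(embeddings=emb),  # drops near-duplicate chunks
        EmbeddingsFilter(embeddings=emb, k=2),  # keeps top-2 chunks by query similarity
    ]
)
docs = [Document(page_content="foo bar. " * 30)]
compressed = pipeline.compress_documents(docs, "foo")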
Source code for langchain.retrievers.document_compressors.chain_extract
"""DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
from __future__ import annotations
import asyncio
from typing import Any, Callable, Dict, Optional, Sequence
from langchain import LLMChain, PromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.retrievers.document_compressors.chain_extract_prompt import (
prompt_template,
)
from langchain.schema import BaseOutputParser, Document
def default_get_input(query: str, doc: Document) -> Dict[str, Any]:
"""Return the compression chain input."""
return {"question": query, "context": doc.page_content}
class NoOutputParser(BaseOutputParser[str]):
"""Parse outputs that could return a null string of some sort."""
no_output_str: str = "NO_OUTPUT"
def parse(self, text: str) -> str:
cleaned_text = text.strip()
if cleaned_text == self.no_output_str:
return ""
return cleaned_text
def _get_default_chain_prompt() -> PromptTemplate:
output_parser = NoOutputParser()
template = prompt_template.format(no_output_str=output_parser.no_output_str)
return PromptTemplate(
template=template,
input_variables=["question", "context"],
output_parser=output_parser,
)
[docs]class LLMChainExtractor(BaseDocumentCompressor):
llm_chain: LLMChain
"""LLM wrapper to use for compressing documents."""
get_input: Callable[[str, Document], dict] = default_get_input
"""Callable for constructing the chain input from the query and a Document."""
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output = self.llm_chain.predict_and_parse(**_input)
if len(output) == 0:
continue
compressed_docs.append(Document(page_content=output, metadata=doc.metadata))
return compressed_docs
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress page content of raw documents asynchronously."""
outputs = await asyncio.gather(
*[
self.llm_chain.apredict_and_parse(**self.get_input(query, doc))
for doc in documents
]
)
compressed_docs = []
for i, doc in enumerate(documents):
if len(outputs[i]) == 0:
continue
compressed_docs.append(
Document(page_content=outputs[i], metadata=doc.metadata)
)
return compressed_docs
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[PromptTemplate] = None,
get_input: Optional[Callable[[str, Document], str]] = None,
llm_chain_kwargs: Optional[dict] = None,
) -> LLMChainExtractor:
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
llm_chain = LLMChain(llm=llm, prompt=_prompt, **(llm_chain_kwargs or {}))
return cls(llm_chain=llm_chain, get_input=_get_input)
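A usage sketch; OpenAI requires OPENAI_API_KEY, and the document is illustrative -- only the query-relevant sentence should survive compression.

from langchain.llms import OpenAI
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.schema import Document

compressor = LLMChainExtractor.from_llm(OpenAI(temperature=0))  # needs OPENAI_API_KEY
docs = [Document(page_content="LangChain is a framework for LLM apps. Bananas are yellow.")]
compressed = compressor.compress_documents(docs, "What is LangChain?")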
Source code for langchain.retrievers.document_compressors.embeddings_filter
"""Document compressor that uses embeddings to drop documents unrelated to the query."""
from typing import Callable, Dict, Optional, Sequence
import numpy as np
from pydantic import root_validator
from langchain.document_transformers import (
_get_embeddings_from_stateful_docs,
get_stateful_documents,
)
from langchain.embeddings.base import Embeddings
from langchain.math_utils import cosine_similarity
from langchain.retrievers.document_compressors.base import (
BaseDocumentCompressor,
)
from langchain.schema import Document
[docs]class EmbeddingsFilter(BaseDocumentCompressor):
embeddings: Embeddings
"""Embeddings to use for embedding document contents and queries."""
similarity_fn: Callable = cosine_similarity
"""Similarity function for comparing documents. Function expected to take as input
two matrices (List[List[float]]) and return a matrix of scores where higher values
indicate greater similarity."""
k: Optional[int] = 20
"""The number of relevant documents to return. Can be set to None, in which case
`similarity_threshold` must be specified. Defaults to 20."""
similarity_threshold: Optional[float]
"""Threshold for determining when two documents are similar enough
to be considered redundant. Defaults to None, must be specified if `k` is set
to None."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_params(cls, values: Dict) -> Dict:
"""Validate similarity parameters."""
if values["k"] is None and values["similarity_threshold"] is None:
raise ValueError("Must specify one of `k` or `similarity_threshold`.")
return values
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Filter documents based on similarity of their embeddings to the query."""
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
embedded_query = self.embeddings.embed_query(query)
similarity = self.similarity_fn([embedded_query], embedded_documents)[0]
included_idxs = np.arange(len(embedded_documents))
if self.k is not None:
included_idxs = np.argsort(similarity)[::-1][: self.k]
if self.similarity_threshold is not None:
similar_enough = np.where(
similarity[included_idxs] > self.similarity_threshold
)
included_idxs = included_idxs[similar_enough]
return [stateful_documents[i] for i in included_idxs]
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Filter down documents."""
raise NotImplementedError
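A self-contained sketch keeping the top-k documents by embedding similarity; FakeEmbeddings makes it runnable offline, so which documents survive is arbitrary.

from langchain.embeddings import FakeEmbeddings
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain.schema import Document

doc_filter = EmbeddingsFilter(embeddings=FakeEmbeddings(size=64), k=2)
docs = [Document(page_content=t) for t in ["foo", "bar", "baz"]]
kept = doc_filter.compress_documents(docs, "foo")  # the 2 most query-similar docs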
Source code for langchain.retrievers.self_query.base
"""Retriever that generates and executes structured queries over its own data source."""
from typing import Any, Dict, List, Optional, Type, cast
from pydantic import BaseModel, Field, root_validator
from langchain import LLMChain
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import Callbacks
from langchain.chains.query_constructor.base import load_query_constructor_chain
from langchain.chains.query_constructor.ir import StructuredQuery, Visitor
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.retrievers.self_query.chroma import ChromaTranslator
from langchain.retrievers.self_query.myscale import MyScaleTranslator
from langchain.retrievers.self_query.pinecone import PineconeTranslator
from langchain.retrievers.self_query.qdrant import QdrantTranslator
from langchain.retrievers.self_query.weaviate import WeaviateTranslator
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores import (
Chroma,
MyScale,
Pinecone,
Qdrant,
VectorStore,
Weaviate,
)
def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
"""Get the translator class corresponding to the vector store class."""
vectorstore_cls = vectorstore.__class__
BUILTIN_TRANSLATORS: Dict[Type[VectorStore], Type[Visitor]] = {
Pinecone: PineconeTranslator,
Chroma: ChromaTranslator,
Weaviate: WeaviateTranslator,
Qdrant: QdrantTranslator,
MyScale: MyScaleTranslator,
}
if vectorstore_cls not in BUILTIN_TRANSLATORS:
raise ValueError(
f"Self query retriever with Vector Store type {vectorstore_cls}"
f" not supported."
)
if isinstance(vectorstore, Qdrant):
return QdrantTranslator(metadata_key=vectorstore.metadata_payload_key)
elif isinstance(vectorstore, MyScale):
return MyScaleTranslator(metadata_key=vectorstore.metadata_column)
return BUILTIN_TRANSLATORS[vectorstore_cls]()
[docs]class SelfQueryRetriever(BaseRetriever, BaseModel):
"""Retriever that wraps around a vector store and uses an LLM to generate
the vector store queries."""
vectorstore: VectorStore
"""The underlying vector store from which documents will be retrieved."""
llm_chain: LLMChain
"""The LLMChain for generating the vector store queries."""
search_type: str = "similarity"
"""The search type to perform on the vector store."""
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass in to the vector store search."""
structured_query_translator: Visitor
"""Translator for turning internal query language into vectorstore search params."""
verbose: bool = False
use_original_query: bool = False
"""Use original query instead of the revised new query from the LLM."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_translator(cls, values: Dict) -> Dict:
"""Validate translator."""
if "structured_query_translator" not in values:
values["structured_query_translator"] = _get_builtin_translator(
values["vectorstore"]
)
return values
[docs] def get_relevant_documents(
self, query: str, callbacks: Callbacks = None
) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
inputs = self.llm_chain.prep_inputs({"query": query})
structured_query = cast(
StructuredQuery,
self.llm_chain.predict_and_parse(callbacks=callbacks, **inputs),
)
if self.verbose:
print(structured_query)
new_query, new_kwargs = self.structured_query_translator.visit_structured_query(
structured_query
)
if structured_query.limit is not None:
new_kwargs["k"] = structured_query.limit
if self.use_original_query:
new_query = query
search_kwargs = {**self.search_kwargs, **new_kwargs}
docs = self.vectorstore.search(new_query, self.search_type, **search_kwargs)
return docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
vectorstore: VectorStore,
document_contents: str,
metadata_field_info: List[AttributeInfo],
structured_query_translator: Optional[Visitor] = None,
chain_kwargs: Optional[Dict] = None,
enable_limit: bool = False,
use_original_query: bool = False,
**kwargs: Any,
) -> "SelfQueryRetriever":
if structured_query_translator is None:
structured_query_translator = _get_builtin_translator(vectorstore)
chain_kwargs = chain_kwargs or {}
if "allowed_comparators" not in chain_kwargs:
chain_kwargs[
"allowed_comparators"
] = structured_query_translator.allowed_comparators
if "allowed_operators" not in chain_kwargs:
chain_kwargs[
"allowed_operators"
] = structured_query_translator.allowed_operators
llm_chain = load_query_constructor_chain(
llm,
document_contents,
metadata_field_info,
enable_limit=enable_limit,
**chain_kwargs,
)
return cls(
llm_chain=llm_chain,
vectorstore=vectorstore,
use_original_query=use_original_query,
structured_query_translator=structured_query_translator,
**kwargs,
)
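A usage sketch for SelfQueryRetriever (hedged: `vectorstore` stands for a populated, supported store such as Chroma; an OpenAI API key is assumed; the metadata fields are illustrative):

from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.llms import OpenAI

metadata_field_info = [
    AttributeInfo(name="year", description="Year the movie was released", type="integer"),
    AttributeInfo(name="genre", description="Genre of the movie", type="string"),
]
retriever = SelfQueryRetriever.from_llm(
    llm=OpenAI(temperature=0),
    vectorstore=vectorstore,  # assumed: a populated Chroma/Pinecone/... instance
    document_contents="Brief plot summary of a movie",
    metadata_field_info=metadata_field_info,
    enable_limit=True,
)
docs = retriever.get_relevant_documents("two sci-fi movies released after 1990")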
Source code for langchain.chains.loading
"""Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.graph_qa.cypher import GraphCypherQAChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.retrieval_qa.base import RetrievalQA, VectorDBQA
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import (
_load_output_parser,
load_prompt,
load_prompt_from_config,
)
from langchain.utilities.loading import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
"""Load LLM chain from config dict."""
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
_load_output_parser(config)
return LLMChain(llm=llm, prompt=prompt, **config)
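A sketch of a config dict this loader accepts (hedged: the nested keys come from the llm and prompt serializers; the values are illustrative, and constructing the OpenAI LLM assumes an API key is configured):

config = {
    "llm": {"_type": "openai", "temperature": 0.0},
    "prompt": {
        "_type": "prompt",
        "input_variables": ["question"],
        "template": "Q: {question}\nA:",
    },
}
chain = _load_llm_chain(dict(config))  # pass a copy; the loader consumes keys via pop()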
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
"""Load hypothetical document embedder chain from config dict."""
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "embeddings" in kwargs:
embeddings = kwargs.pop("embeddings")
else:
raise ValueError("`embeddings` must be present.")
return HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=embeddings, **config
)
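Note that `embeddings` arrives through **kwargs rather than through the config dict; a sketch (hedged: the path is hypothetical and assumes a previously saved llm_chain plus a configured OpenAI key):

from langchain.embeddings import OpenAIEmbeddings

config = {"llm_chain_path": "hyde_llm_chain.json"}  # hypothetical saved chain file
embedder = _load_hyde_chain(config, embeddings=OpenAIEmbeddings())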
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
else:
raise ValueError(
"One of `document_prompt` or `document_prompt_path` must be present."
)
return StuffDocumentsChain(
llm_chain=llm_chain, document_prompt=document_prompt, **config
)
def _load_map_reduce_documents_chain(
config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "combine_document_chain" in config:
combine_document_chain_config = config.pop("combine_document_chain")
combine_document_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_document_chain_path" in config:
combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
else:
raise ValueError(
"One of `combine_document_chain` or "
"`combine_document_chain_path` must be present."
)
if "collapse_document_chain" in config:
collapse_document_chain_config = config.pop("collapse_document_chain")
if collapse_document_chain_config is None:
collapse_document_chain = None
else:
collapse_document_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_document_chain_path" in config:
collapse_document_chain = load_chain(config.pop("collapse_document_chain_path"))
else:
    # Neither key was provided; default to None so the name is defined
    # before it is passed to MapReduceDocumentsChain below.
    collapse_document_chain = None
return MapReduceDocumentsChain(
llm_chain=llm_chain,
combine_document_chain=combine_document_chain,
collapse_document_chain=collapse_document_chain,
**config,
)
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
llm_chain = None
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)