Source code for langchain.vectorstores.annoy

(listing begins mid-module, inside similarity_search_with_score_by_vector)

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            search_k: inspect up to search_k nodes which defaults
                to n_trees * n if not provided

        Returns:
            List of Documents most similar to the query and score for each
        """
        idxs, dists = self.index.get_nns_by_vector(
            embedding, k, search_k=search_k, include_distances=True
        )
        return self.process_index_results(idxs, dists)

    def similarity_search_with_score_by_index(
        self, docstore_index: int, k: int = 4, search_k: int = -1
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            docstore_index: Index of document in docstore
            k: Number of Documents to return. Defaults to 4.
            search_k: inspect up to search_k nodes which defaults
                to n_trees * n if not provided

        Returns:
            List of Documents most similar to the query and score for each
        """
        idxs, dists = self.index.get_nns_by_item(
            docstore_index, k, search_k=search_k, include_distances=True
        )
        return self.process_index_results(idxs, dists)

    def similarity_search_with_score(
        self, query: str, k: int = 4, search_k: int = -1
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            search_k: inspect up to search_k nodes which defaults
                to n_trees * n if not provided

        Returns:
            List of Documents most similar to the query and score for each
        """
        embedding = self.embedding_function(query)
        docs = self.similarity_search_with_score_by_vector(embedding, k, search_k)
        return docs

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, search_k: int = -1, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            search_k: inspect up to search_k nodes which defaults
                to n_trees * n if not provided

        Returns:
            List of Documents most similar to the embedding.
        """
        docs_and_scores = self.similarity_search_with_score_by_vector(
            embedding, k, search_k
        )
        return [doc for doc, _ in docs_and_scores]

    def similarity_search_by_index(
        self, docstore_index: int, k: int = 4, search_k: int = -1, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to docstore_index.

        Args:
            docstore_index: Index of document in docstore
            k: Number of Documents to return. Defaults to 4.
            search_k: inspect up to search_k nodes which defaults
                to n_trees * n if not provided

        Returns:
            List of Documents most similar to the embedding.
        """
        docs_and_scores = self.similarity_search_with_score_by_index(
            docstore_index, k, search_k
        )
        return [doc for doc, _ in docs_and_scores]

    def similarity_search(
        self, query: str, k: int = 4, search_k: int = -1, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            search_k: inspect up to search_k nodes which defaults
                to n_trees * n if not provided

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_scores = self.similarity_search_with_score(query, k, search_k)
        return [doc for doc, _ in docs_and_scores]

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            k: Number of Documents to return. Defaults to 4.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        idxs = self.index.get_nns_by_vector(
            embedding, fetch_k, search_k=-1, include_distances=False
        )
        embeddings = [self.index.get_item_vector(i) for i in idxs]
        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32),
            embeddings,
            k=k,
            lambda_mult=lambda_mult,
        )
        # ignore the -1's if not enough docs are returned/indexed
        selected_indices = [idxs[i] for i in mmr_selected if i != -1]
        docs = []
        for i in selected_indices:
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            docs.append(doc)
        return docs

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        embedding = self.embedding_function(query)
        docs = self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult=lambda_mult
        )
        return docs

    @classmethod
    def __from(
        cls,
        texts: List[str],
        embeddings: List[List[float]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        metric: str = DEFAULT_METRIC,
        trees: int = 100,
        n_jobs: int = -1,
        **kwargs: Any,
    ) -> Annoy:
        if metric not in INDEX_METRICS:
            raise ValueError(
                f"Unsupported distance metric: {metric}. "
                f"Expected one of {list(INDEX_METRICS)}"
            )
        annoy = dependable_annoy_import()
        if not embeddings:
            raise ValueError("embeddings must be provided to build AnnoyIndex")
        f = len(embeddings[0])
        index = annoy.AnnoyIndex(f, metric=metric)
        for i, emb in enumerate(embeddings):
            index.add_item(i, emb)
        index.build(trees, n_jobs=n_jobs)

        documents = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            documents.append(Document(page_content=text, metadata=metadata))
        index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
        docstore = InMemoryDocstore(
            {index_to_id[i]: doc for i, doc in enumerate(documents)}
        )
        return cls(embedding.embed_query, index, metric, docstore, index_to_id)

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        metric: str = DEFAULT_METRIC,
        trees: int = 100,
        n_jobs: int = -1,
        **kwargs: Any,
    ) -> Annoy:
        """Construct Annoy wrapper from raw documents.

        Args:
            texts: List of documents to index.
            embedding: Embedding function to use.
            metadatas: List of metadata dictionaries to associate with documents.
            metric: Metric to use for indexing. Defaults to "angular".
            trees: Number of trees to use for indexing. Defaults to 100.
            n_jobs: Number of jobs to use for indexing. Defaults to -1.

        This is a user friendly interface that:
            1. Embeds documents.
            2. Creates an in memory docstore
            3. Initializes the Annoy database

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import Annoy
                from langchain.embeddings import OpenAIEmbeddings
                embeddings = OpenAIEmbeddings()
                index = Annoy.from_texts(texts, embeddings)
        """
        embeddings = embedding.embed_documents(texts)
        return cls.__from(
            texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs
        )

    @classmethod
    def from_embeddings(
        cls,
        text_embeddings: List[Tuple[str, List[float]]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        metric: str = DEFAULT_METRIC,
        trees: int = 100,
        n_jobs: int = -1,
        **kwargs: Any,
    ) -> Annoy:
        """Construct Annoy wrapper from embeddings.

        Args:
            text_embeddings: List of tuples of (text, embedding)
            embedding: Embedding function to use.
            metadatas: List of metadata dictionaries to associate with documents.
            metric: Metric to use for indexing. Defaults to "angular".
            trees: Number of trees to use for indexing. Defaults to 100.
            n_jobs: Number of jobs to use for indexing. Defaults to -1.

        This is a user friendly interface that:
            1. Creates an in memory docstore with provided embeddings
            2. Initializes the Annoy database

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import Annoy
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                text_embeddings = embeddings.embed_documents(texts)
                text_embedding_pairs = list(zip(texts, text_embeddings))
                db = Annoy.from_embeddings(text_embedding_pairs, embeddings)
        """
        texts = [t[0] for t in text_embeddings]
        embeddings = [t[1] for t in text_embeddings]
        return cls.__from(
            texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs
        )

    def save_local(self, folder_path: str, prefault: bool = False) -> None:
        """Save Annoy index, docstore, and index_to_docstore_id to disk.

        Args:
            folder_path: folder path to save index, docstore,
                and index_to_docstore_id to.
            prefault: Whether to pre-load the index into memory.
        """
        path = Path(folder_path)
        os.makedirs(path, exist_ok=True)
        # save index, index config, docstore and index_to_docstore_id
        config_object = ConfigParser()
        config_object["ANNOY"] = {
            "f": self.index.f,
            "metric": self.metric,
        }
        self.index.save(str(path / "index.annoy"), prefault=prefault)
        with open(path / "index.pkl", "wb") as file:
            pickle.dump((self.docstore, self.index_to_docstore_id, config_object), file)

    @classmethod
    def load_local(
        cls,
        folder_path: str,
        embeddings: Embeddings,
    ) -> Annoy:
        """Load Annoy index, docstore, and index_to_docstore_id from disk.

        Args:
            folder_path: folder path to load index, docstore,
                and index_to_docstore_id from.
            embeddings: Embeddings to use when generating queries.
        """
        path = Path(folder_path)
        # load index separately since it is not picklable
        annoy = dependable_annoy_import()
        # load docstore and index_to_docstore_id
        with open(path / "index.pkl", "rb") as file:
            docstore, index_to_docstore_id, config_object = pickle.load(file)
        f = int(config_object["ANNOY"]["f"])
        metric = config_object["ANNOY"]["metric"]
        index = annoy.AnnoyIndex(f, metric=metric)
        index.load(str(path / "index.annoy"))
        return cls(
            embeddings.embed_query, index, metric, docstore, index_to_docstore_id
        )
https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html
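The methods above compose into a short end-to-end flow. A minimal sketch, assuming the `annoy` package is installed and an OpenAI API key is available for OpenAIEmbeddings (the texts and folder name are illustrative):

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Annoy

    texts = ["harrison worked at kensho", "bears like honey", "the sky is blue"]
    embeddings = OpenAIEmbeddings()

    # Build the index; metric defaults to "angular" with 100 trees.
    db = Annoy.from_texts(texts, embeddings)

    # Scored search and MMR search over the same index.
    docs_and_scores = db.similarity_search_with_score("where did harrison work?", k=2)
    diverse_docs = db.max_marginal_relevance_search("animals", k=2, fetch_k=3)

    # Round-trip to disk: save_local writes index.annoy plus a pickled
    # docstore, and load_local restores both.
    db.save_local("annoy_index")
    db2 = Annoy.load_local("annoy_index", embeddings)

Note that the returned scores are raw Annoy distances, so lower means closer under the default angular metric.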
Source code for langchain.vectorstores.mongodb_atlas

from __future__ import annotations

import logging
from typing import (
    TYPE_CHECKING,
    Any,
    Dict,
    Generator,
    Iterable,
    List,
    Optional,
    Tuple,
    TypeVar,
    Union,
)

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore

if TYPE_CHECKING:
    from pymongo.collection import Collection

MongoDBDocumentType = TypeVar("MongoDBDocumentType", bound=Dict[str, Any])

logger = logging.getLogger(__name__)

DEFAULT_INSERT_BATCH_SIZE = 100


class MongoDBAtlasVectorSearch(VectorStore):
    """Wrapper around MongoDB Atlas Vector Search.

    To use, you should have both:
    - the ``pymongo`` python package installed
    - a connection string associated with a MongoDB Atlas Cluster having
      deployed an Atlas Search index

    Example:
        .. code-block:: python

            from langchain.vectorstores import MongoDBAtlasVectorSearch
            from langchain.embeddings.openai import OpenAIEmbeddings
            from pymongo import MongoClient

            mongo_client = MongoClient("<YOUR-CONNECTION-STRING>")
            collection = mongo_client["<db_name>"]["<collection_name>"]
            embeddings = OpenAIEmbeddings()
            vectorstore = MongoDBAtlasVectorSearch(collection, embeddings)
    """

    def __init__(
        self,
        collection: Collection[MongoDBDocumentType],
        embedding: Embeddings,
        *,
        index_name: str = "default",
        text_key: str = "text",
        embedding_key: str = "embedding",
    ):
        """
        Args:
            collection: MongoDB collection to add the texts to.
""" Args: collection: MongoDB collection to add the texts to. embedding: Text embedding model to use. text_key: MongoDB field that will contain the text for each document. embedding_key: MongoDB field that will contain the embedding for each document. """ self._collection = collection self._embedding = embedding self._index_name = index_name self._text_key = text_key self._embedding_key = embedding_key [docs] @classmethod def from_connection_string( cls, connection_string: str, namespace: str, embedding: Embeddings, **kwargs: Any, ) -> MongoDBAtlasVectorSearch: try: from pymongo import MongoClient except ImportError: raise ImportError( "Could not import pymongo, please install it with " "`pip install pymongo`." ) client: MongoClient = MongoClient(connection_string) db_name, collection_name = namespace.split(".") collection = client[db_name][collection_name] return cls(collection, embedding, **kwargs) [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[str, Any]]] = None, **kwargs: Any, ) -> List: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. Returns: List of ids from adding the texts into the vectorstore. """ batch_size = kwargs.get("batch_size", DEFAULT_INSERT_BATCH_SIZE)
""" batch_size = kwargs.get("batch_size", DEFAULT_INSERT_BATCH_SIZE) _metadatas: Union[List, Generator] = metadatas or ({} for _ in texts) texts_batch = [] metadatas_batch = [] result_ids = [] for i, (text, metadata) in enumerate(zip(texts, _metadatas)): texts_batch.append(text) metadatas_batch.append(metadata) if (i + 1) % batch_size == 0: result_ids.extend(self._insert_texts(texts_batch, metadatas_batch)) texts_batch = [] metadatas_batch = [] if texts_batch: result_ids.extend(self._insert_texts(texts_batch, metadatas_batch)) return result_ids def _insert_texts(self, texts: List[str], metadatas: List[Dict[str, Any]]) -> List: if not texts: return [] # Embed and create the documents embeddings = self._embedding.embed_documents(texts) to_insert = [ {self._text_key: t, self._embedding_key: embedding, **m} for t, m, embedding in zip(texts, metadatas, embeddings) ] # insert the documents in MongoDB Atlas insert_result = self._collection.insert_many(to_insert) return insert_result.inserted_ids [docs] def similarity_search_with_score( self, query: str, *, k: int = 4, pre_filter: Optional[dict] = None, post_filter_pipeline: Optional[List[Dict]] = None, ) -> List[Tuple[Document, float]]: """Return MongoDB documents most similar to query, along with scores.
"""Return MongoDB documents most similar to query, along with scores. Use the knnBeta Operator available in MongoDB Atlas Search This feature is in early access and available only for evaluation purposes, to validate functionality, and to gather feedback from a small closed group of early access users. It is not recommended for production deployments as we may introduce breaking changes. For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta Args: query: Text to look up documents similar to. k: Optional Number of Documents to return. Defaults to 4. pre_filter: Optional Dictionary of argument(s) to prefilter on document fields. post_filter_pipeline: Optional Pipeline of MongoDB aggregation stages following the knnBeta search. Returns: List of Documents most similar to the query and score for each """ knn_beta = { "vector": self._embedding.embed_query(query), "path": self._embedding_key, "k": k, } if pre_filter: knn_beta["filter"] = pre_filter pipeline = [ { "$search": { "index": self._index_name, "knnBeta": knn_beta, } }, {"$project": {"score": {"$meta": "searchScore"}, self._embedding_key: 0}}, ] if post_filter_pipeline is not None: pipeline.extend(post_filter_pipeline) cursor = self._collection.aggregate(pipeline) docs = [] for res in cursor: text = res.pop(self._text_key) score = res.pop("score") docs.append((Document(page_content=text, metadata=res), score)) return docs
    def similarity_search(
        self,
        query: str,
        k: int = 4,
        pre_filter: Optional[dict] = None,
        post_filter_pipeline: Optional[List[Dict]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return MongoDB documents most similar to query.

        Uses the knnBeta Operator available in MongoDB Atlas Search.
        This feature is in early access and available only for evaluation
        purposes, to validate functionality, and to gather feedback from a
        small closed group of early access users. It is not recommended for
        production deployments as we may introduce breaking changes.
        For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta

        Args:
            query: Text to look up documents similar to.
            k: Optional Number of Documents to return. Defaults to 4.
            pre_filter: Optional Dictionary of argument(s) to prefilter on
                document fields.
            post_filter_pipeline: Optional Pipeline of MongoDB aggregation
                stages following the knnBeta search.

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_scores = self.similarity_search_with_score(
            query,
            k=k,
            pre_filter=pre_filter,
            post_filter_pipeline=post_filter_pipeline,
        )
        return [doc for doc, _ in docs_and_scores]

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        collection: Optional[Collection[MongoDBDocumentType]] = None,
        **kwargs: Any,
    ) -> MongoDBAtlasVectorSearch:
        """Construct MongoDBAtlasVectorSearch wrapper from raw documents.

        This is a user-friendly interface that:
            1. Embeds documents.
            2. Adds the documents to a provided MongoDB Atlas Vector Search index
               (Lucene)

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from pymongo import MongoClient

                from langchain.vectorstores import MongoDBAtlasVectorSearch
                from langchain.embeddings import OpenAIEmbeddings

                mongo_client = MongoClient("<YOUR-CONNECTION-STRING>")
                collection = mongo_client["<db_name>"]["<collection_name>"]
                embeddings = OpenAIEmbeddings()
                vectorstore = MongoDBAtlasVectorSearch.from_texts(
                    texts,
                    embeddings,
                    metadatas=metadatas,
                    collection=collection,
                )
        """
        if collection is None:
            raise ValueError("Must provide 'collection' named parameter.")
        vecstore = cls(collection, embedding, **kwargs)
        vecstore.add_texts(texts, metadatas=metadatas)
        return vecstore
https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/mongodb_atlas.html
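Putting the pieces together: a hedged sketch, assuming pymongo is installed, the connection string and namespace placeholders below are replaced with real values, and a knnBeta-capable Atlas Search index named "default" exists on the collection's "embedding" field:

    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import MongoDBAtlasVectorSearch

    vectorstore = MongoDBAtlasVectorSearch.from_connection_string(
        "<YOUR-CONNECTION-STRING>",  # placeholder URI
        "my_db.my_collection",       # "<db_name>.<collection_name>" namespace
        OpenAIEmbeddings(),
    )
    vectorstore.add_texts(["foo", "bar", "baz"])

    # post_filter_pipeline appends ordinary aggregation stages after the
    # $search stage; pre_filter (not shown) is pushed into knnBeta itself.
    docs = vectorstore.similarity_search(
        "foo",
        k=2,
        post_filter_pipeline=[{"$limit": 1}],
    )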
Source code for langchain.vectorstores.tair

"""Wrapper around Tair Vector."""
from __future__ import annotations

import json
import logging
import uuid
from typing import Any, Iterable, List, Optional, Type

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore

logger = logging.getLogger(__name__)


def _uuid_key() -> str:
    return uuid.uuid4().hex


class Tair(VectorStore):
    """Wrapper around Tair Vector store."""

    def __init__(
        self,
        embedding_function: Embeddings,
        url: str,
        index_name: str,
        content_key: str = "content",
        metadata_key: str = "metadata",
        search_params: Optional[dict] = None,
        **kwargs: Any,
    ):
        self.embedding_function = embedding_function
        self.index_name = index_name
        try:
            from tair import Tair as TairClient
        except ImportError:
            raise ImportError(
                "Could not import tair python package. "
                "Please install it with `pip install tair`."
            )
        try:
            # connect to tair from url
            client = TairClient.from_url(url, **kwargs)
        except ValueError as e:
            raise ValueError(f"Tair failed to connect: {e}")
        self.client = client
        self.content_key = content_key
        self.metadata_key = metadata_key
        self.search_params = search_params

    def create_index_if_not_exist(
        self,
        dim: int,
        distance_type: str,
        index_type: str,
        data_type: str,
        **kwargs: Any,
    ) -> bool:
        index = self.client.tvs_get_index(self.index_name)
        if index is not None:
            logger.info("Index already exists")
            return False
        self.client.tvs_create_index(
            self.index_name,
            dim,
            distance_type,
            index_type,
            data_type,
            **kwargs,
        )
        return True

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Add texts data to an existing index."""
        ids = []
        keys = kwargs.get("keys", None)
        # Write data to tair
        pipeline = self.client.pipeline(transaction=False)
        embeddings = self.embedding_function.embed_documents(list(texts))
        for i, text in enumerate(texts):
            # Use provided key otherwise use default key
            key = keys[i] if keys else _uuid_key()
            metadata = metadatas[i] if metadatas else {}
            pipeline.tvs_hset(
                self.index_name,
                key,
                embeddings[i],
                False,
                **{
                    self.content_key: text,
                    self.metadata_key: json.dumps(metadata),
                },
            )
            ids.append(key)
        pipeline.execute()
        return ids

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """
        Returns the most similar indexed documents to the query text.

        Args:
""" Returns the most similar indexed documents to the query text. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. Returns: List[Document]: A list of documents that are most similar to the query text. """ # Creates embedding vector from user query embedding = self.embedding_function.embed_query(query) keys_and_scores = self.client.tvs_knnsearch( self.index_name, k, embedding, False, None, **kwargs ) pipeline = self.client.pipeline(transaction=False) for key, _ in keys_and_scores: pipeline.tvs_hmget( self.index_name, key, self.metadata_key, self.content_key ) docs = pipeline.execute() return [ Document( page_content=d[1], metadata=json.loads(d[0]), ) for d in docs ] [docs] @classmethod def from_texts( cls: Type[Tair], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, index_name: str = "langchain", content_key: str = "content", metadata_key: str = "metadata", **kwargs: Any, ) -> Tair: try: from tair import tairvector except ImportError: raise ValueError( "Could not import tair python package. " "Please install it with `pip install tair`." ) url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL")
if "tair_url" in kwargs: kwargs.pop("tair_url") distance_type = tairvector.DistanceMetric.InnerProduct if "distance_type" in kwargs: distance_type = kwargs.pop("distance_typ") index_type = tairvector.IndexType.HNSW if "index_type" in kwargs: index_type = kwargs.pop("index_type") data_type = tairvector.DataType.Float32 if "data_type" in kwargs: data_type = kwargs.pop("data_type") index_params = {} if "index_params" in kwargs: index_params = kwargs.pop("index_params") search_params = {} if "search_params" in kwargs: search_params = kwargs.pop("search_params") keys = None if "keys" in kwargs: keys = kwargs.pop("keys") try: tair_vector_store = cls( embedding, url, index_name, content_key=content_key, metadata_key=metadata_key, search_params=search_params, **kwargs, ) except ValueError as e: raise ValueError(f"tair failed to connect: {e}") # Create embeddings for documents embeddings = embedding.embed_documents(texts) tair_vector_store.create_index_if_not_exist( len(embeddings[0]), distance_type, index_type, data_type, **index_params, ) tair_vector_store.add_texts(texts, metadatas, keys=keys) return tair_vector_store [docs] @classmethod def from_documents( cls, documents: List[Document], embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        index_name: str = "langchain",
        content_key: str = "content",
        metadata_key: str = "metadata",
        **kwargs: Any,
    ) -> Tair:
        texts = [d.page_content for d in documents]
        metadatas = [d.metadata for d in documents]
        return cls.from_texts(
            texts, embedding, metadatas, index_name, content_key, metadata_key, **kwargs
        )

    @staticmethod
    def drop_index(
        index_name: str = "langchain",
        **kwargs: Any,
    ) -> bool:
        """
        Drop an existing index.

        Args:
            index_name (str): Name of the index to drop.

        Returns:
            bool: True if the index is dropped successfully.
        """
        try:
            from tair import Tair as TairClient
        except ImportError:
            raise ImportError(
                "Could not import tair python package. "
                "Please install it with `pip install tair`."
            )
        url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL")
        try:
            if "tair_url" in kwargs:
                kwargs.pop("tair_url")
            client = TairClient.from_url(url=url, **kwargs)
        except ValueError as e:
            raise ValueError(f"Tair connection error: {e}")
        # delete index
        ret = client.tvs_del_index(index_name)
        if ret == 0:
            # index not exist
            logger.info("Index does not exist")
            return False
        return True

    @classmethod
    def from_existing_index(
        cls,
        embedding: Embeddings,
        index_name: str = "langchain",
        content_key: str = "content",
        metadata_key: str = "metadata",
        **kwargs: Any,
    ) -> Tair:
        """Connect to an existing Tair index."""
        url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL")
        search_params = {}
        if "search_params" in kwargs:
            search_params = kwargs.pop("search_params")
        return cls(
            embedding,
            url,
            index_name,
            content_key=content_key,
            metadata_key=metadata_key,
            search_params=search_params,
            **kwargs,
        )
https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/tair.html
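A usage sketch under the assumption that the `tair` package is installed and a Tair instance is reachable at the placeholder URL (or via the TAIR_URL environment variable); index parameters default to HNSW over inner product, as in from_texts above:

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Tair

    tair_url = "redis://user:password@host:6379"  # placeholder

    # Builds the index on first use, then writes the embedded texts.
    db = Tair.from_texts(
        ["foo", "bar"], OpenAIEmbeddings(), index_name="langchain-demo", tair_url=tair_url
    )
    docs = db.similarity_search("foo", k=1)

    # Reconnect later without re-indexing, and drop the index when done.
    db2 = Tair.from_existing_index(
        OpenAIEmbeddings(), index_name="langchain-demo", tair_url=tair_url
    )
    Tair.drop_index(index_name="langchain-demo", tair_url=tair_url)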
Source code for langchain.vectorstores.lancedb

"""Wrapper around LanceDB vector database."""
from __future__ import annotations

import uuid
from typing import Any, Iterable, List, Optional

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore


class LanceDB(VectorStore):
    """Wrapper around LanceDB vector database.

    To use, you should have the ``lancedb`` python package installed.

    Example:
        .. code-block:: python

            db = lancedb.connect('./lancedb')
            table = db.open_table('my_table')
            vectorstore = LanceDB(table, embedding_function)
            vectorstore.add_texts(['text1', 'text2'])
            result = vectorstore.similarity_search('text1')
    """

    def __init__(
        self,
        connection: Any,
        embedding: Embeddings,
        vector_key: Optional[str] = "vector",
        id_key: Optional[str] = "id",
        text_key: Optional[str] = "text",
    ):
        """Initialize with Lance DB connection."""
        try:
            import lancedb
        except ImportError:
            raise ImportError(
                "Could not import lancedb python package. "
                "Please install it with `pip install lancedb`."
            )
        if not isinstance(connection, lancedb.db.LanceTable):
            raise ValueError(
                "connection should be an instance of lancedb.db.LanceTable, "
                f"got {type(connection)}"
            )
        self._connection = connection
        self._embedding = embedding
        self._vector_key = vector_key
        self._id_key = id_key
        self._text_key = text_key
    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Turn texts into embeddings and add them to the database.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids: Optional list of ids to associate with the texts.

        Returns:
            List of ids of the added texts.
        """
        # Embed texts and create documents
        docs = []
        ids = ids or [str(uuid.uuid4()) for _ in texts]
        embeddings = self._embedding.embed_documents(list(texts))
        for idx, text in enumerate(texts):
            embedding = embeddings[idx]
            metadata = metadatas[idx] if metadatas else {}
            docs.append(
                {
                    self._vector_key: embedding,
                    self._id_key: ids[idx],
                    self._text_key: text,
                    **metadata,
                }
            )
        self._connection.add(docs)
        return ids

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return documents most similar to the query.

        Args:
            query: String to query the vectorstore with.
            k: Number of documents to return.

        Returns:
            List of documents most similar to the query.
        """
        embedding = self._embedding.embed_query(query)
""" embedding = self._embedding.embed_query(query) docs = self._connection.search(embedding).limit(k).to_df() return [ Document( page_content=row[self._text_key], metadata=row[docs.columns != self._text_key], ) for _, row in docs.iterrows() ] [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, connection: Any = None, vector_key: Optional[str] = "vector", id_key: Optional[str] = "id", text_key: Optional[str] = "text", **kwargs: Any, ) -> LanceDB: instance = LanceDB( connection, embedding, vector_key, id_key, text_key, ) instance.add_texts(texts, metadatas=metadatas, **kwargs) return instance
https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/lancedb.html
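Because the constructor insists on a `lancedb.db.LanceTable`, a table has to exist before the wrapper is built. A sketch, assuming `lancedb` is installed and that seeding `create_table` with one row fixes the vector/id/text schema (column names below match the wrapper's defaults):

    import lancedb

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import LanceDB

    embeddings = OpenAIEmbeddings()
    db = lancedb.connect("./lancedb")
    table = db.create_table(
        "my_table",
        data=[{"vector": embeddings.embed_query("hello"), "id": "0", "text": "hello"}],
        mode="overwrite",
    )
    vectorstore = LanceDB(table, embeddings)
    vectorstore.add_texts(["text1", "text2"])
    result = vectorstore.similarity_search("text1")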
Source code for langchain.vectorstores.hologres

"""VectorStore wrapper around a Hologres database."""
from __future__ import annotations

import json
import logging
import uuid
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore

ADA_TOKEN_COUNT = 1536
_LANGCHAIN_DEFAULT_TABLE_NAME = "langchain_pg_embedding"


class HologresWrapper:
    def __init__(self, connection_string: str, ndims: int, table_name: str) -> None:
        import psycopg2

        self.table_name = table_name
        self.conn = psycopg2.connect(connection_string)
        self.cursor = self.conn.cursor()
        self.conn.autocommit = False
        self.ndims = ndims

    def create_vector_extension(self) -> None:
        self.cursor.execute("create extension if not exists proxima")
        self.conn.commit()

    def create_table(self, drop_if_exist: bool = True) -> None:
        if drop_if_exist:
            self.cursor.execute(f"drop table if exists {self.table_name}")
            self.conn.commit()

        self.cursor.execute(
            f"""create table if not exists {self.table_name} (
id text,
embedding float4[] check(array_ndims(embedding) = 1 and \
array_length(embedding, 1) = {self.ndims}),
metadata json,
document text);"""
        )
        self.cursor.execute(
            f"call set_table_property('{self.table_name}'"
            + """, 'proxima_vectors',
'{"embedding":{"algorithm":"Graph",
"distance_method":"SquaredEuclidean",
'{"embedding":{"algorithm":"Graph", "distance_method":"SquaredEuclidean", "build_params":{"min_flush_proxima_row_count" : 1, "min_compaction_proxima_row_count" : 1, "max_total_size_to_merge_mb" : 2000}}}');""" ) self.conn.commit() def get_by_id(self, id: str) -> List[Tuple]: statement = ( f"select id, embedding, metadata, " f"document from {self.table_name} where id = %s;" ) self.cursor.execute( statement, (id), ) self.conn.commit() return self.cursor.fetchall() def insert( self, embedding: List[float], metadata: dict, document: str, id: Optional[str] = None, ) -> None: self.cursor.execute( f'insert into "{self.table_name}" ' f"values (%s, array{json.dumps(embedding)}::float4[], %s, %s)", (id if id is not None else "null", json.dumps(metadata), document), ) self.conn.commit() def query_nearest_neighbours( self, embedding: List[float], k: int, filter: Optional[Dict[str, str]] = None ) -> List[Tuple[str, str, float]]: params = [] filter_clause = "" if filter is not None: conjuncts = [] for key, val in filter.items(): conjuncts.append("metadata->>%s=%s") params.append(key) params.append(val)
            filter_clause = "where " + " and ".join(conjuncts)

        sql = (
            f"select document, metadata::text, "
            f"pm_approx_squared_euclidean_distance(array{json.dumps(embedding)}"
            f"::float4[], embedding) as distance from"
            f" {self.table_name} {filter_clause} order by distance asc limit {k};"
        )
        self.cursor.execute(sql, tuple(params))
        self.conn.commit()
        return self.cursor.fetchall()


class Hologres(VectorStore):
    """VectorStore implementation using Hologres.

    - `connection_string` is a hologres connection string.
    - `embedding_function` any embedding function implementing
      `langchain.embeddings.base.Embeddings` interface.
    - `ndims` is the number of dimensions of the embedding output.
    - `table_name` is the name of the table to store embeddings and data.
      (default: langchain_pg_embedding)
      - NOTE: The table will be created when initializing the store (if not exists).
        So, make sure the user has the right permissions to create tables.
    - `pre_delete_table` if True, will delete the table if it exists.
      (default: False)
      - Useful for testing.
    """

    def __init__(
        self,
        connection_string: str,
        embedding_function: Embeddings,
        ndims: int = ADA_TOKEN_COUNT,
        table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
        pre_delete_table: bool = False,
        logger: Optional[logging.Logger] = None,
    ) -> None:
        self.connection_string = connection_string
        self.ndims = ndims
        self.table_name = table_name
        self.embedding_function = embedding_function
        self.pre_delete_table = pre_delete_table
        self.logger = logger or logging.getLogger(__name__)
        self.__post_init__()

    def __post_init__(
        self,
    ) -> None:
        """Initialize the store."""
        self.storage = HologresWrapper(
            self.connection_string, self.ndims, self.table_name
        )
        self.create_vector_extension()
        self.create_table()

    def create_vector_extension(self) -> None:
        try:
            self.storage.create_vector_extension()
        except Exception as e:
            self.logger.exception(e)
            raise e

    def create_table(self) -> None:
        self.storage.create_table(self.pre_delete_table)

    @classmethod
    def __from(
        cls,
        texts: List[str],
        embeddings: List[List[float]],
        embedding_function: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        ndims: int = ADA_TOKEN_COUNT,
        table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
        pre_delete_table: bool = False,
        **kwargs: Any,
    ) -> Hologres:
        if ids is None:
            ids = [str(uuid.uuid1()) for _ in texts]
        if not metadatas:
            metadatas = [{} for _ in texts]
        connection_string = cls.get_connection_string(kwargs)

        store = cls(
            connection_string=connection_string,
            embedding_function=embedding_function,
            ndims=ndims,
            table_name=table_name,
            pre_delete_table=pre_delete_table,
        )
        store.add_embeddings(
            texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
        )
        return store

    def add_embeddings(
        self,
        texts: Iterable[str],
        embeddings: List[List[float]],
        metadatas: List[dict],
        ids: List[str],
        **kwargs: Any,
    ) -> None:
        """Add embeddings to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            embeddings: List of list of embedding vectors.
            metadatas: List of metadatas associated with the texts.
            ids: List of ids for the texts.
            kwargs: vectorstore specific parameters
        """
        try:
            for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
                self.storage.insert(embedding, metadata, text, id)
        except Exception as e:
            self.logger.exception(e)
        self.storage.conn.commit()

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            kwargs: vectorstore specific parameters

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        if ids is None:
            ids = [str(uuid.uuid1()) for _ in texts]
        embeddings = self.embedding_function.embed_documents(list(texts))
        if not metadatas:
            metadatas = [{} for _ in texts]
        self.add_embeddings(texts, embeddings, metadatas, ids, **kwargs)
        return ids

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        filter: Optional[dict] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Run similarity search with Hologres with distance.

        Args:
            query (str): Query text to search for.
            k (int): Number of results to return. Defaults to 4.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List of Documents most similar to the query.
        """
        embedding = self.embedding_function.embed_query(text=query)
        return self.similarity_search_by_vector(
            embedding=embedding,
            k=k,
            filter=filter,
        )

    def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[dict] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List of Documents most similar to the query vector.
        """
        docs_and_scores = self.similarity_search_with_score_by_vector(
            embedding=embedding, k=k, filter=filter
        )
        return [doc for doc, _ in docs_and_scores]

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 4,
        filter: Optional[dict] = None,
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List of Documents most similar to the query and score for each
        """
        embedding = self.embedding_function.embed_query(query)
        docs = self.similarity_search_with_score_by_vector(
            embedding=embedding, k=k, filter=filter
        )
        return docs

    def similarity_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[dict] = None,
    ) -> List[Tuple[Document, float]]:
        results: List[Tuple[str, str, float]] = self.storage.query_nearest_neighbours(
            embedding, k, filter
        )
        docs = [
            (
                Document(
                    page_content=result[0],
                    metadata=json.loads(result[1]),
                ),
                result[2],
            )
            for result in results
        ]
        return docs

    @classmethod
    def from_texts(
        cls: Type[Hologres],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ndims: int = ADA_TOKEN_COUNT,
        table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
        ids: Optional[List[str]] = None,
        pre_delete_table: bool = False,
        **kwargs: Any,
    ) -> Hologres:
        """Return VectorStore initialized from texts and embeddings.

        A Postgres connection string is required: either pass it as a
        parameter or set the HOLOGRES_CONNECTION_STRING environment variable.
        """
        embeddings = embedding.embed_documents(list(texts))
        return cls.__from(
            texts,
            embeddings,
            embedding,
            metadatas=metadatas,
            ids=ids,
            ndims=ndims,
            table_name=table_name,
            pre_delete_table=pre_delete_table,
            **kwargs,
        )

    @classmethod
    def from_embeddings(
        cls,
        text_embeddings: List[Tuple[str, List[float]]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ndims: int = ADA_TOKEN_COUNT,
        table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
        ids: Optional[List[str]] = None,
        pre_delete_table: bool = False,
        **kwargs: Any,
    ) -> Hologres:
        """Construct Hologres wrapper from raw documents and pre-generated
        embeddings.
        Return VectorStore initialized from documents and embeddings.
        A Postgres connection string is required: either pass it as a
        parameter or set the HOLOGRES_CONNECTION_STRING environment variable.

        Example:
            .. code-block:: python

                from langchain import Hologres
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                text_embeddings = embeddings.embed_documents(texts)
                text_embedding_pairs = list(zip(texts, text_embeddings))
                vectorstore = Hologres.from_embeddings(text_embedding_pairs, embeddings)
        """
        texts = [t[0] for t in text_embeddings]
        embeddings = [t[1] for t in text_embeddings]

        return cls.__from(
            texts,
            embeddings,
            embedding,
            metadatas=metadatas,
            ids=ids,
            ndims=ndims,
            table_name=table_name,
            pre_delete_table=pre_delete_table,
            **kwargs,
        )

    @classmethod
    def from_existing_index(
        cls: Type[Hologres],
        embedding: Embeddings,
        ndims: int = ADA_TOKEN_COUNT,
        table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
        pre_delete_table: bool = False,
        **kwargs: Any,
    ) -> Hologres:
        """Get an instance of an existing Hologres store. This method will
        return the instance of the store without inserting any new
        embeddings.
        """
        connection_string = cls.get_connection_string(kwargs)

        store = cls(
            connection_string=connection_string,
            ndims=ndims,
            table_name=table_name,
            embedding_function=embedding,
            pre_delete_table=pre_delete_table,
        )
        return store

    @classmethod
    def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
        connection_string: str = get_from_dict_or_env(
            data=kwargs,
            key="connection_string",
            env_key="HOLOGRES_CONNECTION_STRING",
        )
        if not connection_string:
            raise ValueError(
                "Postgres connection string is required. "
                "Either pass it as a parameter "
                "or set the HOLOGRES_CONNECTION_STRING environment variable."
            )
        return connection_string

    @classmethod
    def from_documents(
        cls: Type[Hologres],
        documents: List[Document],
        embedding: Embeddings,
        ndims: int = ADA_TOKEN_COUNT,
        table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
        ids: Optional[List[str]] = None,
        pre_delete_collection: bool = False,
        **kwargs: Any,
    ) -> Hologres:
        """Return VectorStore initialized from documents and embeddings.

        A Postgres connection string is required: either pass it as a
        parameter or set the HOLOGRES_CONNECTION_STRING environment variable.
        """
        texts = [d.page_content for d in documents]
        metadatas = [d.metadata for d in documents]
        connection_string = cls.get_connection_string(kwargs)

        kwargs["connection_string"] = connection_string

        return cls.from_texts(
            texts=texts,
            pre_delete_collection=pre_delete_collection,
            embedding=embedding,
            metadatas=metadatas,
            ids=ids,
            ndims=ndims,
            table_name=table_name,
            **kwargs,
        )

    @classmethod
    def connection_string_from_db_params(
        cls,
        host: str,
        port: int,
        database: str,
        user: str,
        password: str,
    ) -> str:
        """Return connection string from database parameters."""
        return (
            f"dbname={database} user={user} password={password} host={host} port={port}"
        )
https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/hologres.html
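A sketch of the intended flow, assuming `psycopg2` is installed and the placeholder connection parameters point at a Hologres instance where the user may create tables and the proxima extension:

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Hologres

    connection_string = Hologres.connection_string_from_db_params(
        host="<host>", port=80, database="<db>", user="<user>", password="<password>"
    )

    db = Hologres.from_texts(
        ["foo", "bar"],
        OpenAIEmbeddings(),
        metadatas=[{"topic": "a"}, {"topic": "b"}],
        connection_string=connection_string,  # or set HOLOGRES_CONNECTION_STRING
    )

    # The filter dict becomes the SQL WHERE clause built in
    # query_nearest_neighbours; distances are squared Euclidean, ascending.
    docs = db.similarity_search("foo", k=1, filter={"topic": "a"})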
Source code for langchain.vectorstores.azuresearch

"""Wrapper around Azure Cognitive Search."""
from __future__ import annotations

import base64
import json
import logging
import uuid
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Tuple,
    Type,
)

import numpy as np
from pydantic import BaseModel, root_validator

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever
from langchain.utils import get_from_env
from langchain.vectorstores.base import VectorStore

logger = logging.getLogger()

if TYPE_CHECKING:
    from azure.search.documents import SearchClient

# Allow overriding field names for Azure Search
FIELDS_ID = get_from_env(
    key="AZURESEARCH_FIELDS_ID", env_key="AZURESEARCH_FIELDS_ID", default="id"
)
FIELDS_CONTENT = get_from_env(
    key="AZURESEARCH_FIELDS_CONTENT",
    env_key="AZURESEARCH_FIELDS_CONTENT",
    default="content",
)
FIELDS_CONTENT_VECTOR = get_from_env(
    key="AZURESEARCH_FIELDS_CONTENT_VECTOR",
    env_key="AZURESEARCH_FIELDS_CONTENT_VECTOR",
    default="content_vector",
)
FIELDS_METADATA = get_from_env(
    key="AZURESEARCH_FIELDS_TAG", env_key="AZURESEARCH_FIELDS_TAG", default="metadata"
)

MAX_UPLOAD_BATCH_SIZE = 1000


def _get_search_client(
    endpoint: str,
    key: str,
    index_name: str,
    embedding_function: Callable,
    semantic_configuration_name: Optional[str] = None,
) -> SearchClient:
    from azure.core.credentials import AzureKeyCredential
    from azure.core.exceptions import ResourceNotFoundError
    from azure.identity import DefaultAzureCredential
    from azure.search.documents import SearchClient
    from azure.search.documents.indexes import SearchIndexClient
    from azure.search.documents.indexes.models import (
        PrioritizedFields,
        SearchableField,
        SearchField,
        SearchFieldDataType,
        SearchIndex,
        SemanticConfiguration,
        SemanticField,
        SemanticSettings,
        SimpleField,
        VectorSearch,
        VectorSearchAlgorithmConfiguration,
    )

    if key is None:
        credential = DefaultAzureCredential()
    else:
        credential = AzureKeyCredential(key)
    index_client: SearchIndexClient = SearchIndexClient(
        endpoint=endpoint, credential=credential
    )
    try:
        index_client.get_index(name=index_name)
    except ResourceNotFoundError:
        # Fields configuration
        fields = [
            SimpleField(
                name=FIELDS_ID,
                type=SearchFieldDataType.String,
                key=True,
                filterable=True,
            ),
            SearchableField(
                name=FIELDS_CONTENT,
                type=SearchFieldDataType.String,
                searchable=True,
                retrievable=True,
            ),
            SearchField(
                name=FIELDS_CONTENT_VECTOR,
                type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
                searchable=True,
                dimensions=len(embedding_function("Text")),
                vector_search_configuration="default",
            ),
            SearchableField(
                name=FIELDS_METADATA,
                type=SearchFieldDataType.String,
                searchable=True,
                retrievable=True,
            ),
        ]
        # Vector search configuration
        vector_search = VectorSearch(
            algorithm_configurations=[
                VectorSearchAlgorithmConfiguration(
                    name="default",
                    kind="hnsw",
                    hnsw_parameters={
                        "m": 4,
                        "efConstruction": 400,
                        "efSearch": 500,
                        "metric": "cosine",
                    },
                )
            ]
        )
        # Create the semantic settings with the configuration
        semantic_settings = (
            None
            if semantic_configuration_name is None
            else SemanticSettings(
                configurations=[
                    SemanticConfiguration(
                        name=semantic_configuration_name,
                        prioritized_fields=PrioritizedFields(
                            prioritized_content_fields=[
                                SemanticField(field_name=FIELDS_CONTENT)
                            ],
                        ),
                    )
                ]
            )
        )
        # Create the search index with the semantic settings and vector search
        index = SearchIndex(
            name=index_name,
            fields=fields,
            vector_search=vector_search,
            semantic_settings=semantic_settings,
        )
        index_client.create_index(index)
    # Create the search client
    return SearchClient(endpoint=endpoint, index_name=index_name, credential=credential)


class AzureSearch(VectorStore):
    def __init__(
        self,
        azure_search_endpoint: str,
        azure_search_key: str,
        index_name: str,
        embedding_function: Callable,
        search_type: str = "hybrid",
        semantic_configuration_name: Optional[str] = None,
        semantic_query_language: str = "en-us",
        **kwargs: Any,
    ):
        """Initialize with necessary components."""
        # Initialize base class
        self.embedding_function = embedding_function
        self.client = _get_search_client(
            azure_search_endpoint,
            azure_search_key,
            index_name,
            embedding_function,
            semantic_configuration_name,
        )
        self.search_type = search_type
        self.semantic_configuration_name = semantic_configuration_name
        self.semantic_query_language = semantic_query_language

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Add texts data to an existing index."""
        keys = kwargs.get("keys")
        ids = []
        # Write data to index
        data = []
        for i, text in enumerate(texts):
            # Use provided key otherwise use default key
            key = keys[i] if keys else str(uuid.uuid4())
            # Encoding key for Azure Search valid characters
            key = base64.urlsafe_b64encode(bytes(key, "utf-8")).decode("ascii")
            metadata = metadatas[i] if metadatas else {}
            # Add data to index
            data.append(
                {
                    "@search.action": "upload",
                    FIELDS_ID: key,
                    FIELDS_CONTENT: text,
                    FIELDS_CONTENT_VECTOR: np.array(
                        self.embedding_function(text), dtype=np.float32
                    ).tolist(),
                    FIELDS_METADATA: json.dumps(metadata),
                }
            )
            ids.append(key)
            # Upload data in batches
            if len(data) == MAX_UPLOAD_BATCH_SIZE:
                response = self.client.upload_documents(documents=data)
                # Check if all documents were successfully uploaded
                if not all([r.succeeded for r in response]):
                    raise Exception(response)
                # Reset data
                data = []
        # Considering case where data is an exact multiple of batch-size entries
        if len(data) == 0:
            return ids

        # Upload data to index
        response = self.client.upload_documents(documents=data)
        # Check if all documents were successfully uploaded
        if all([r.succeeded for r in response]):
            return ids
        else:
            raise Exception(response)

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        search_type = kwargs.get("search_type", self.search_type)
        if search_type == "similarity":
            docs = self.vector_search(query, k=k, **kwargs)
        elif search_type == "hybrid":
            docs = self.hybrid_search(query, k=k, **kwargs)
        elif search_type == "semantic_hybrid":
            docs = self.semantic_hybrid_search(query, k=k, **kwargs)
        else:
            raise ValueError(f"search_type of {search_type} not allowed.")
        return docs

    def vector_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
        """
        Returns the most similar indexed documents to the query text.

        Args:
            query (str): The query text for which to find similar documents.
            k (int): The number of documents to return. Default is 4.

        Returns:
            List[Document]: A list of documents that are most similar to the
                query text.
        """
        docs_and_scores = self.vector_search_with_score(
            query, k=k, filters=kwargs.get("filters", None)
        )
        return [doc for doc, _ in docs_and_scores]

    def vector_search_with_score(
        self, query: str, k: int = 4, filters: Optional[str] = None
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        from azure.search.documents.models import Vector

        results = self.client.search(
            search_text="",
            vector=Vector(
                value=np.array(
                    self.embedding_function(query), dtype=np.float32
                ).tolist(),
                k=k,
                fields=FIELDS_CONTENT_VECTOR,
            ),
            select=[f"{FIELDS_ID},{FIELDS_CONTENT},{FIELDS_METADATA}"],
            filter=filters,
        )
        # Convert results to Document objects
        docs = [
            (
                Document(
                    page_content=result[FIELDS_CONTENT],
                    metadata=json.loads(result[FIELDS_METADATA]),
                ),
                float(result["@search.score"]),
            )
            for result in results
        ]
        return docs

    def hybrid_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
        """
        Returns the most similar indexed documents to the query text.

        Args:
            query (str): The query text for which to find similar documents.
            k (int): The number of documents to return. Default is 4.

        Returns:
            List[Document]: A list of documents that are most similar to the
                query text.
        """
        docs_and_scores = self.hybrid_search_with_score(
            query, k=k, filters=kwargs.get("filters", None)
        )
        return [doc for doc, _ in docs_and_scores]

    def hybrid_search_with_score(
        self, query: str, k: int = 4, filters: Optional[str] = None
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query with a hybrid query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        from azure.search.documents.models import Vector

        results = self.client.search(
            search_text=query,
            vector=Vector(
                value=np.array(
                    self.embedding_function(query), dtype=np.float32
                ).tolist(),
                k=k,
                fields=FIELDS_CONTENT_VECTOR,
            ),
            select=[f"{FIELDS_ID},{FIELDS_CONTENT},{FIELDS_METADATA}"],
            filter=filters,
            top=k,
        )
        # Convert results to Document objects
        docs = [
            (
                Document(
                    page_content=result[FIELDS_CONTENT],
                    metadata=json.loads(result[FIELDS_METADATA]),
                ),
                float(result["@search.score"]),
            )
            for result in results
        ]
        return docs

    def semantic_hybrid_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """
        Returns the most similar indexed documents to the query text.

        Args:
            query (str): The query text for which to find similar documents.
            k (int): The number of documents to return. Default is 4.

        Returns:
            List[Document]: A list of documents that are most similar to the
                query text.
        """
        docs_and_scores = self.semantic_hybrid_search_with_score(
            query, k=k, filters=kwargs.get("filters", None)
        )
        return [doc for doc, _ in docs_and_scores]

    def semantic_hybrid_search_with_score(
        self, query: str, k: int = 4, filters: Optional[str] = None
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query with a hybrid query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        from azure.search.documents.models import Vector

        results = self.client.search(
            search_text=query,
            vector=Vector(
                value=np.array(
                    self.embedding_function(query), dtype=np.float32
                ).tolist(),
                k=50,  # Hardcoded value to maximize L2 retrieval
                fields=FIELDS_CONTENT_VECTOR,
            ),
            select=[f"{FIELDS_ID},{FIELDS_CONTENT},{FIELDS_METADATA}"],
            filter=filters,
            query_type="semantic",
            query_language=self.semantic_query_language,
            semantic_configuration_name=self.semantic_configuration_name,
            query_caption="extractive",
            query_answer="extractive",
            top=k,
        )
query_answer="extractive", top=k, ) # Get Semantic Answers semantic_answers = results.get_answers() semantic_answers_dict = {} for semantic_answer in semantic_answers: semantic_answers_dict[semantic_answer.key] = { "text": semantic_answer.text, "highlights": semantic_answer.highlights, } # Convert results to Document objects docs = [ ( Document( page_content=result["content"], metadata={ **json.loads(result["metadata"]), **{ "captions": { "text": result.get("@search.captions", [{}])[0].text, "highlights": result.get("@search.captions", [{}])[ 0 ].highlights, } if result.get("@search.captions") else {}, "answers": semantic_answers_dict.get( json.loads(result["metadata"]).get("key"), "" ), }, }, ), float(result["@search.score"]), ) for result in results ] return docs [docs] @classmethod def from_texts( cls: Type[AzureSearch], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, azure_search_endpoint: str = "", azure_search_key: str = "", index_name: str = "langchain-index", **kwargs: Any, ) -> AzureSearch: # Creating a new Azure Search instance azure_search = cls( azure_search_endpoint, azure_search_key, index_name, embedding.embed_query, )
azure_search_key, index_name, embedding.embed_query, ) azure_search.add_texts(texts, metadatas, **kwargs) return azure_search class AzureSearchVectorStoreRetriever(BaseRetriever, BaseModel): vectorstore: AzureSearch search_type: str = "hybrid" k: int = 4 class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @root_validator() def validate_search_type(cls, values: Dict) -> Dict: """Validate search type.""" if "search_type" in values: search_type = values["search_type"] if search_type not in ("similarity", "hybrid", "semantic_hybrid"): raise ValueError(f"search_type of {search_type} not allowed.") return values def get_relevant_documents(self, query: str) -> List[Document]: if self.search_type == "similarity": docs = self.vectorstore.vector_search(query, k=self.k) elif self.search_type == "hybrid": docs = self.vectorstore.hybrid_search(query, k=self.k) elif self.search_type == "semantic_hybrid": docs = self.vectorstore.semantic_hybrid_search(query, k=self.k) else: raise ValueError(f"search_type of {self.search_type} not allowed.") return docs async def aget_relevant_documents(self, query: str) -> List[Document]: raise NotImplementedError( "AzureSearchVectorStoreRetriever does not support async" )
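A minimal usage sketch for the wrapper above, wiring `from_texts` to the retriever class defined in this module. The endpoint, key, and sample texts are placeholders, and `OpenAIEmbeddings` is just one possible embedding model:

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores.azuresearch import (
        AzureSearch,
        AzureSearchVectorStoreRetriever,
    )

    vector_store = AzureSearch.from_texts(
        ["first document", "second document"],
        embedding=OpenAIEmbeddings(),
        azure_search_endpoint="https://<service>.search.windows.net",  # placeholder
        azure_search_key="<admin-key>",  # placeholder
        index_name="langchain-index",
    )
    # "similarity", "hybrid", and "semantic_hybrid" are the validated search types.
    retriever = AzureSearchVectorStoreRetriever(
        vectorstore=vector_store, search_type="hybrid", k=3
    )
    docs = retriever.get_relevant_documents("query text")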
Source code for langchain.vectorstores.base """Interface for vector stores.""" from __future__ import annotations import asyncio import warnings from abc import ABC, abstractmethod from functools import partial from typing import ( Any, ClassVar, Collection, Dict, Iterable, List, Optional, Tuple, Type, TypeVar, ) from pydantic import BaseModel, Field, root_validator from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.schema import BaseRetriever VST = TypeVar("VST", bound="VectorStore") [docs]class VectorStore(ABC): """Interface for vector stores.""" [docs] @abstractmethod def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ [docs] def delete(self, ids: List[str]) -> Optional[bool]: """Delete by vector ID. Args: ids: List of ids to delete. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ raise NotImplementedError( "delete_by_id method must be implemented by subclass." ) [docs] async def aadd_texts(
self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore.""" raise NotImplementedError [docs] def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]: """Run more documents through the embeddings and add to the vectorstore. Args: documents (List[Document]): Documents to add to the vectorstore. Returns: List[str]: List of IDs of the added texts. """ # TODO: Handle the case where the user doesn't provide ids on the Collection texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return self.add_texts(texts, metadatas, **kwargs) [docs] async def aadd_documents( self, documents: List[Document], **kwargs: Any ) -> List[str]: """Run more documents through the embeddings and add to the vectorstore. Args: documents (List[Document]): Documents to add to the vectorstore. Returns: List[str]: List of IDs of the added texts. """ texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return await self.aadd_texts(texts, metadatas, **kwargs) [docs] def search(self, query: str, search_type: str, **kwargs: Any) -> List[Document]: """Return docs most similar to query using specified search type."""
if search_type == "similarity": return self.similarity_search(query, **kwargs) elif search_type == "mmr": return self.max_marginal_relevance_search(query, **kwargs) else: raise ValueError( f"search_type of {search_type} not allowed. Expected " "search_type to be 'similarity' or 'mmr'." ) [docs] async def asearch( self, query: str, search_type: str, **kwargs: Any ) -> List[Document]: """Return docs most similar to query using specified search type.""" if search_type == "similarity": return await self.asimilarity_search(query, **kwargs) elif search_type == "mmr": return await self.amax_marginal_relevance_search(query, **kwargs) else: raise ValueError( f"search_type of {search_type} not allowed. Expected " "search_type to be 'similarity' or 'mmr'." ) [docs] @abstractmethod def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query.""" [docs] def similarity_search_with_relevance_scores( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and relevance scores in the range [0, 1]. 0 is dissimilar, 1 is most similar. Args: query: input text k: Number of Documents to return. Defaults to 4.
**kwargs: kwargs to be passed to similarity search. Should include: score_threshold: Optional, a floating point value between 0 and 1 to filter the resulting set of retrieved docs Returns: List of Tuples of (doc, similarity_score) """ docs_and_similarities = self._similarity_search_with_relevance_scores( query, k=k, **kwargs ) if any( similarity < 0.0 or similarity > 1.0 for _, similarity in docs_and_similarities ): warnings.warn( "Relevance scores must be between" f" 0 and 1, got {docs_and_similarities}" ) score_threshold = kwargs.get("score_threshold") if score_threshold is not None: docs_and_similarities = [ (doc, similarity) for doc, similarity in docs_and_similarities if similarity >= score_threshold ] if len(docs_and_similarities) == 0: warnings.warn( "No relevant docs were retrieved using the relevance score" f" threshold {score_threshold}" ) return docs_and_similarities def _similarity_search_with_relevance_scores( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and relevance scores, normalized on a scale from 0 to 1. 0 is dissimilar, 1 is most similar. """
raise NotImplementedError [docs] async def asimilarity_search_with_relevance_scores( self, query: str, k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: """Return docs most similar to query.""" # This is a temporary workaround to make the similarity search # asynchronous. The proper solution is to make the similarity search # asynchronous in the vector store implementations. func = partial(self.similarity_search_with_relevance_scores, query, k, **kwargs) return await asyncio.get_event_loop().run_in_executor(None, func) [docs] async def asimilarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query.""" # This is a temporary workaround to make the similarity search # asynchronous. The proper solution is to make the similarity search # asynchronous in the vector store implementations. func = partial(self.similarity_search, query, k, **kwargs) return await asyncio.get_event_loop().run_in_executor(None, func) [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector. """ raise NotImplementedError [docs] async def asimilarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any
self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to embedding vector.""" # This is a temporary workaround to make the similarity search # asynchronous. The proper solution is to make the similarity search # asynchronous in the vector store implementations. func = partial(self.similarity_search_by_vector, embedding, k, **kwargs) return await asyncio.get_event_loop().run_in_executor(None, func) [docs] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ raise NotImplementedError [docs] async def amax_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any,
lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance.""" # This is a temporary workaround to make the similarity search # asynchronous. The proper solution is to make the similarity search # asynchronous in the vector store implementations. func = partial( self.max_marginal_relevance_search, query, k, fetch_k, lambda_mult, **kwargs ) return await asyncio.get_event_loop().run_in_executor(None, func) [docs] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ raise NotImplementedError [docs] async def amax_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20,
k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance.""" raise NotImplementedError [docs] @classmethod def from_documents( cls: Type[VST], documents: List[Document], embedding: Embeddings, **kwargs: Any, ) -> VST: """Return VectorStore initialized from documents and embeddings.""" texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] return cls.from_texts(texts, embedding, metadatas=metadatas, **kwargs) [docs] @classmethod async def afrom_documents( cls: Type[VST], documents: List[Document], embedding: Embeddings, **kwargs: Any, ) -> VST: """Return VectorStore initialized from documents and embeddings.""" texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] return await cls.afrom_texts(texts, embedding, metadatas=metadatas, **kwargs) [docs] @classmethod @abstractmethod def from_texts( cls: Type[VST], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> VST: """Return VectorStore initialized from texts and embeddings.""" [docs] @classmethod async def afrom_texts( cls: Type[VST], texts: List[str],
cls: Type[VST], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> VST: """Return VectorStore initialized from texts and embeddings.""" raise NotImplementedError [docs] def as_retriever(self, **kwargs: Any) -> VectorStoreRetriever: return VectorStoreRetriever(vectorstore=self, **kwargs) class VectorStoreRetriever(BaseRetriever, BaseModel): vectorstore: VectorStore search_type: str = "similarity" search_kwargs: dict = Field(default_factory=dict) allowed_search_types: ClassVar[Collection[str]] = ( "similarity", "similarity_score_threshold", "mmr", ) class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @root_validator() def validate_search_type(cls, values: Dict) -> Dict: """Validate search type.""" search_type = values["search_type"] if search_type not in cls.allowed_search_types: raise ValueError( f"search_type of {search_type} not allowed. Valid values are: " f"{cls.allowed_search_types}" ) if search_type == "similarity_score_threshold": score_threshold = values["search_kwargs"].get("score_threshold") if (score_threshold is None) or (not isinstance(score_threshold, float)): raise ValueError( "`score_threshold` is not specified with a float value(0~1) " "in `search_kwargs`." ) return values def get_relevant_documents(self, query: str) -> List[Document]:
def get_relevant_documents(self, query: str) -> List[Document]: if self.search_type == "similarity": docs = self.vectorstore.similarity_search(query, **self.search_kwargs) elif self.search_type == "similarity_score_threshold": docs_and_similarities = ( self.vectorstore.similarity_search_with_relevance_scores( query, **self.search_kwargs ) ) docs = [doc for doc, _ in docs_and_similarities] elif self.search_type == "mmr": docs = self.vectorstore.max_marginal_relevance_search( query, **self.search_kwargs ) else: raise ValueError(f"search_type of {self.search_type} not allowed.") return docs async def aget_relevant_documents(self, query: str) -> List[Document]: if self.search_type == "similarity": docs = await self.vectorstore.asimilarity_search( query, **self.search_kwargs ) elif self.search_type == "similarity_score_threshold": docs_and_similarities = ( await self.vectorstore.asimilarity_search_with_relevance_scores( query, **self.search_kwargs ) ) docs = [doc for doc, _ in docs_and_similarities] elif self.search_type == "mmr": docs = await self.vectorstore.amax_marginal_relevance_search( query, **self.search_kwargs ) else: raise ValueError(f"search_type of {self.search_type} not allowed.") return docs def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]: """Add documents to vectorstore."""
"""Add documents to vectorstore.""" return self.vectorstore.add_documents(documents, **kwargs) async def aadd_documents( self, documents: List[Document], **kwargs: Any ) -> List[str]: """Add documents to vectorstore.""" return await self.vectorstore.aadd_documents(documents, **kwargs)
Source code for langchain.vectorstores.singlestoredb """Wrapper around SingleStore DB.""" from __future__ import annotations import enum import json from typing import ( Any, ClassVar, Collection, Iterable, List, Optional, Tuple, Type, ) from sqlalchemy.pool import QueuePool from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore, VectorStoreRetriever class DistanceStrategy(str, enum.Enum): """Enumerator of the Distance strategies for SingleStoreDB.""" EUCLIDEAN_DISTANCE = "EUCLIDEAN_DISTANCE" DOT_PRODUCT = "DOT_PRODUCT" DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.DOT_PRODUCT ORDERING_DIRECTIVE: dict = { DistanceStrategy.EUCLIDEAN_DISTANCE: "", DistanceStrategy.DOT_PRODUCT: "DESC", } [docs]class SingleStoreDB(VectorStore): """ This class serves as a Pythonic interface to the SingleStore DB database. The prerequisite for using this class is the installation of the ``singlestoredb`` Python package. The SingleStoreDB vectorstore can be created by providing an embedding function and the relevant parameters for the database connection, connection pool, and optionally, the names of the table and the fields to use. """ def _get_connection(self: SingleStoreDB) -> Any: try: import singlestoredb as s2 except ImportError: raise ImportError( "Could not import singlestoredb python package. " "Please install it with `pip install singlestoredb`." ) return s2.connect(**self.connection_kwargs) def __init__( self,
def __init__( self, embedding: Embeddings, *, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, table_name: str = "embeddings", content_field: str = "content", metadata_field: str = "metadata", vector_field: str = "vector", pool_size: int = 5, max_overflow: int = 10, timeout: float = 30, **kwargs: Any, ): """Initialize with necessary components. Args: embedding (Embeddings): A text embedding model. distance_strategy (DistanceStrategy, optional): Determines the strategy employed for calculating the distance between vectors in the embedding space. Defaults to DOT_PRODUCT. Available options are: - DOT_PRODUCT: Computes the scalar product of two vectors. This is the default behavior - EUCLIDEAN_DISTANCE: Computes the Euclidean distance between two vectors. This metric considers the geometric distance in the vector space, and might be more suitable for embeddings that rely on spatial relationships. table_name (str, optional): Specifies the name of the table in use. Defaults to "embeddings". content_field (str, optional): Specifies the field to store the content. Defaults to "content". metadata_field (str, optional): Specifies the field to store metadata. Defaults to "metadata". vector_field (str, optional): Specifies the field to store the vector. Defaults to "vector". Following arguments pertain to the connection pool: pool_size (int, optional): Determines the number of active connections in the pool. Defaults to 5. max_overflow (int, optional): Determines the maximum number of connections
max_overflow (int, optional): Determines the maximum number of connections allowed beyond the pool_size. Defaults to 10. timeout (float, optional): Specifies the maximum wait time in seconds for establishing a connection. Defaults to 30. Following arguments pertain to the database connection: host (str, optional): Specifies the hostname, IP address, or URL for the database connection. The default scheme is "mysql". user (str, optional): Database username. password (str, optional): Database password. port (int, optional): Database port. Defaults to 3306 for non-HTTP connections, 80 for HTTP connections, and 443 for HTTPS connections. database (str, optional): Database name. Additional optional arguments provide further customization over the database connection: pure_python (bool, optional): Toggles the connector mode. If True, operates in pure Python mode. local_infile (bool, optional): Allows local file uploads. charset (str, optional): Specifies the character set for string values. ssl_key (str, optional): Specifies the path of the file containing the SSL key. ssl_cert (str, optional): Specifies the path of the file containing the SSL certificate. ssl_ca (str, optional): Specifies the path of the file containing the SSL certificate authority. ssl_cipher (str, optional): Sets the SSL cipher list. ssl_disabled (bool, optional): Disables SSL usage. ssl_verify_cert (bool, optional): Verifies the server's certificate. Automatically enabled if ``ssl_ca`` is specified. ssl_verify_identity (bool, optional): Verifies the server's identity. conv (dict[int, Callable], optional): A dictionary of data conversion
conv (dict[int, Callable], optional): A dictionary of data conversion functions. credential_type (str, optional): Specifies the type of authentication to use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO. autocommit (bool, optional): Enables autocommits. results_type (str, optional): Determines the structure of the query results: tuples, namedtuples, dicts. results_format (str, optional): Deprecated. This option has been renamed to results_type. Examples: Basic Usage: .. code-block:: python from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import SingleStoreDB vectorstore = SingleStoreDB( OpenAIEmbeddings(), host="https://user:password@127.0.0.1:3306/database" ) Advanced Usage: .. code-block:: python from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import SingleStoreDB vectorstore = SingleStoreDB( OpenAIEmbeddings(), distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, host="127.0.0.1", port=3306, user="user", password="password", database="db", table_name="my_custom_table", pool_size=10, timeout=60, ) Using environment variables: .. code-block:: python from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import SingleStoreDB os.environ['SINGLESTOREDB_URL'] = 'me:p455w0rd@s2-host.com/my_db' vectorstore = SingleStoreDB(OpenAIEmbeddings())
vectorstore = SingleStoreDB(OpenAIEmbeddings()) """ self.embedding = embedding self.distance_strategy = distance_strategy self.table_name = table_name self.content_field = content_field self.metadata_field = metadata_field self.vector_field = vector_field """Pass the rest of the kwargs to the connection.""" self.connection_kwargs = kwargs """Add program name and version to connection attributes.""" if "conn_attrs" not in self.connection_kwargs: self.connection_kwargs["conn_attrs"] = dict() if "program_name" not in self.connection_kwargs["conn_attrs"]: self.connection_kwargs["conn_attrs"][ "program_name" ] = "langchain python sdk" self.connection_kwargs["conn_attrs"][ "program_version" ] = "0.0.205" # the version of SingleStoreDB VectorStore implementation """Create connection pool.""" self.connection_pool = QueuePool( self._get_connection, max_overflow=max_overflow, pool_size=pool_size, timeout=timeout, ) self._create_table() def _create_table(self: SingleStoreDB) -> None: """Create table if it doesn't exist.""" conn = self.connection_pool.connect() try: cur = conn.cursor() try: cur.execute( """CREATE TABLE IF NOT EXISTS {} ({} TEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, {} BLOB, {} JSON);""".format( self.table_name, self.content_field, self.vector_field, self.metadata_field, ), ) finally: cur.close() finally: conn.close()
finally: cur.close() finally: conn.close() [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, embeddings: Optional[List[List[float]]] = None, **kwargs: Any, ) -> List[str]: """Add more texts to the vectorstore. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. Defaults to None. embeddings (Optional[List[List[float]]], optional): Optional pre-generated embeddings. Defaults to None. Returns: List[str]: empty list """ conn = self.connection_pool.connect() try: cur = conn.cursor() try: # Write data to singlestore db for i, text in enumerate(texts): # Use provided values by default or fallback metadata = metadatas[i] if metadatas else {} embedding = ( embeddings[i] if embeddings else self.embedding.embed_documents([text])[0] ) cur.execute( "INSERT INTO {} VALUES (%s, JSON_ARRAY_PACK(%s), %s)".format( self.table_name ), ( text, "[{}]".format(",".join(map(str, embedding))), json.dumps(metadata), ), ) finally: cur.close() finally: conn.close() return [] [docs] def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any
) -> List[Document]: """Returns the most similar indexed documents to the query text. Uses cosine similarity. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. filter (dict): A dictionary of metadata fields and values to filter by. Returns: List[Document]: A list of documents that are most similar to the query text. Examples: .. code-block:: python from langchain.vectorstores import SingleStoreDB from langchain.embeddings import OpenAIEmbeddings s2 = SingleStoreDB.from_documents( docs, OpenAIEmbeddings(), host="username:password@localhost:3306/database" ) s2.similarity_search("query text", 1, {"metadata_field": "metadata_value"}) """ docs_and_scores = self.similarity_search_with_score( query=query, k=k, filter=filter ) return [doc for doc, _ in docs_and_scores] [docs] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Uses cosine similarity. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: A dictionary of metadata fields and values to filter by. Defaults to None. Returns: List of Documents most similar to the query and score for each """ # Creates embedding vector from user query embedding = self.embedding.embed_query(query)
# Creates embedding vector from user query embedding = self.embedding.embed_query(query) conn = self.connection_pool.connect() result = [] where_clause: str = "" where_clause_values: List[Any] = [] if filter: where_clause = "WHERE " arguments = [] def build_where_clause( where_clause_values: List[Any], sub_filter: dict, prefix_args: List[str] = [], ) -> None: for key in sub_filter.keys(): if isinstance(sub_filter[key], dict): build_where_clause( where_clause_values, sub_filter[key], prefix_args + [key] ) else: arguments.append( "JSON_EXTRACT_JSON({}, {}) = %s".format( self.metadata_field, ", ".join(["%s"] * (len(prefix_args) + 1)), ) ) where_clause_values += prefix_args + [key] where_clause_values.append(json.dumps(sub_filter[key])) build_where_clause(where_clause_values, filter) where_clause += " AND ".join(arguments) try: cur = conn.cursor() try: cur.execute( """SELECT {}, {}, {}({}, JSON_ARRAY_PACK(%s)) as __score FROM {} {} ORDER BY __score {} LIMIT %s""".format( self.content_field, self.metadata_field, self.distance_strategy, self.vector_field, self.table_name, where_clause, ORDERING_DIRECTIVE[self.distance_strategy], ), ("[{}]".format(",".join(map(str, embedding))),) + tuple(where_clause_values) + (k,), ) for row in cur.fetchall():
+ (k,), ) for row in cur.fetchall(): doc = Document(page_content=row[0], metadata=row[1]) result.append((doc, float(row[2]))) finally: cur.close() finally: conn.close() return result [docs] @classmethod def from_texts( cls: Type[SingleStoreDB], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, table_name: str = "embeddings", content_field: str = "content", metadata_field: str = "metadata", vector_field: str = "vector", pool_size: int = 5, max_overflow: int = 10, timeout: float = 30, **kwargs: Any, ) -> SingleStoreDB: """Create a SingleStoreDB vectorstore from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new table for the embeddings in SingleStoreDB. 3. Adds the documents to the newly created table. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores import SingleStoreDB from langchain.embeddings import OpenAIEmbeddings s2 = SingleStoreDB.from_texts( texts, OpenAIEmbeddings(), host="username:password@localhost:3306/database" ) """ instance = cls( embedding, distance_strategy=distance_strategy, table_name=table_name,
embedding, distance_strategy=distance_strategy, table_name=table_name, content_field=content_field, metadata_field=metadata_field, vector_field=vector_field, pool_size=pool_size, max_overflow=max_overflow, timeout=timeout, **kwargs, ) instance.add_texts(texts, metadatas, embedding.embed_documents(texts), **kwargs) return instance [docs] def as_retriever(self, **kwargs: Any) -> SingleStoreDBRetriever: return SingleStoreDBRetriever(vectorstore=self, **kwargs) class SingleStoreDBRetriever(VectorStoreRetriever): """Retriever for SingleStoreDB vector stores.""" vectorstore: SingleStoreDB k: int = 4 allowed_search_types: ClassVar[Collection[str]] = ("similarity",) def get_relevant_documents(self, query: str) -> List[Document]: if self.search_type == "similarity": docs = self.vectorstore.similarity_search(query, k=self.k) else: raise ValueError(f"search_type of {self.search_type} not allowed.") return docs async def aget_relevant_documents(self, query: str) -> List[Document]: raise NotImplementedError( "SingleStoreDBVectorStoreRetriever does not support async" )
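The filter handling in `similarity_search_with_score` above is easiest to see in isolation. Below is a small standalone replica of its nested `build_where_clause` helper, plus the vector serialization used by `add_texts`. `expand_filter` is an illustrative name; the real helper closes over `self.metadata_field` instead of taking it as a parameter:

.. code-block:: python

    import json
    from typing import Any, List, Tuple

    def expand_filter(
        metadata_field: str, sub_filter: dict, prefix: Tuple[str, ...] = ()
    ) -> Tuple[List[str], List[Any]]:
        """Standalone mirror of build_where_clause above."""
        clauses: List[str] = []
        values: List[Any] = []
        for key, val in sub_filter.items():
            if isinstance(val, dict):
                # Nested dicts extend the JSON path.
                sub_c, sub_v = expand_filter(metadata_field, val, prefix + (key,))
                clauses += sub_c
                values += sub_v
            else:
                placeholders = ", ".join(["%s"] * (len(prefix) + 1))
                clauses.append(
                    "JSON_EXTRACT_JSON({}, {}) = %s".format(metadata_field, placeholders)
                )
                values += list(prefix) + [key, json.dumps(val)]
        return clauses, values

    clauses, values = expand_filter(
        "metadata", {"category": "budget", "doc": {"year": 2021}}
    )
    print(" AND ".join(clauses))
    # JSON_EXTRACT_JSON(metadata, %s) = %s AND JSON_EXTRACT_JSON(metadata, %s, %s) = %s
    print(values)  # ['category', '"budget"', 'doc', 'year', '2021']

    # add_texts serializes each embedding for JSON_ARRAY_PACK like this:
    print("[{}]".format(",".join(map(str, [0.1, 0.25, -0.5]))))  # [0.1,0.25,-0.5]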
Source code for langchain.vectorstores.vectara """Wrapper around Vectara vector database.""" from __future__ import annotations import json import logging import os from hashlib import md5 from typing import Any, Iterable, List, Optional, Tuple, Type import requests from pydantic import Field from langchain.embeddings.base import Embeddings from langchain.schema import Document from langchain.vectorstores.base import VectorStore, VectorStoreRetriever [docs]class Vectara(VectorStore): """Implementation of Vector Store using Vectara (https://vectara.com). Example: .. code-block:: python from langchain.vectorstores import Vectara vectorstore = Vectara( vectara_customer_id=vectara_customer_id, vectara_corpus_id=vectara_corpus_id, vectara_api_key=vectara_api_key ) """ def __init__( self, vectara_customer_id: Optional[str] = None, vectara_corpus_id: Optional[str] = None, vectara_api_key: Optional[str] = None, ): """Initialize with Vectara API.""" self._vectara_customer_id = vectara_customer_id or os.environ.get( "VECTARA_CUSTOMER_ID" ) self._vectara_corpus_id = vectara_corpus_id or os.environ.get( "VECTARA_CORPUS_ID" ) self._vectara_api_key = vectara_api_key or os.environ.get("VECTARA_API_KEY") if ( self._vectara_customer_id is None or self._vectara_corpus_id is None or self._vectara_api_key is None ): logging.warning(
"Can't find Vectara credentials, customer_id or corpus_id in " "environment." ) else: logging.debug(f"Using corpus id {self._vectara_corpus_id}") self._session = requests.Session() # to reuse connections adapter = requests.adapters.HTTPAdapter(max_retries=3) self._session.mount("http://", adapter) def _get_post_headers(self) -> dict: """Returns headers that should be attached to each post request.""" return { "x-api-key": self._vectara_api_key, "customer-id": self._vectara_customer_id, "Content-Type": "application/json", } def _delete_doc(self, doc_id: str) -> bool: """ Delete a document from the Vectara corpus. Args: doc_id (str): ID of the document to delete. Returns: bool: True if deletion was successful, False otherwise. """ body = { "customer_id": self._vectara_customer_id, "corpus_id": self._vectara_corpus_id, "document_id": doc_id, } response = self._session.post( "https://api.vectara.io/v1/delete-doc", data=json.dumps(body), verify=True, headers=self._get_post_headers(), ) if response.status_code != 200: logging.error( f"Delete request failed for doc_id = {doc_id} with status code "
f"{response.status_code}, reason {response.reason}, text " f"{response.text}" ) return False return True def _index_doc(self, doc: dict) -> bool: request: dict[str, Any] = {} request["customer_id"] = self._vectara_customer_id request["corpus_id"] = self._vectara_corpus_id request["document"] = doc response = self._session.post( headers=self._get_post_headers(), url="https://api.vectara.io/v1/core/index", data=json.dumps(request), timeout=30, verify=True, ) status_code = response.status_code result = response.json() status_str = result["status"]["code"] if "status" in result else None if status_code == 409 or (status_str and status_str == "ALREADY_EXISTS"): return False else: return True [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. Returns: List of ids from adding the texts into the vectorstore. """ doc_hash = md5() for t in texts: doc_hash.update(t.encode()) doc_id = doc_hash.hexdigest() if metadatas is None: metadatas = [{} for _ in texts] doc = {
"document_id": doc_id, "metadataJson": json.dumps({"source": "langchain"}), "parts": [ {"text": text, "metadataJson": json.dumps(md)} for text, md in zip(texts, metadatas) ], } succeeded = self._index_doc(doc) if not succeeded: self._delete_doc(doc_id) self._index_doc(doc) return [doc_id] [docs] def similarity_search_with_score( self, query: str, k: int = 5, lambda_val: float = 0.025, filter: Optional[str] = None, n_sentence_context: int = 0, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return Vectara documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5. lambda_val: lexical match parameter for hybrid search. filter: Dictionary of argument(s) to filter on metadata. For example, a filter can be "doc.rating > 3.0 and part.lang = 'deu'"; see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. n_sentence_context: number of sentences before/after the matching segment to add Returns: List of Documents most similar to the query and score for each. """ data = json.dumps(
{ "query": [ { "query": query, "start": 0, "num_results": k, "context_config": { "sentences_before": n_sentence_context, "sentences_after": n_sentence_context, }, "corpus_key": [ { "customer_id": self._vectara_customer_id, "corpus_id": self._vectara_corpus_id, "metadataFilter": filter, "lexical_interpolation_config": {"lambda": lambda_val}, } ], } ] } ) response = self._session.post( headers=self._get_post_headers(), url="https://api.vectara.io/v1/query", data=data, timeout=10, ) if response.status_code != 200: logging.error( "Query failed %s", f"(code {response.status_code}, reason {response.reason}, details " f"{response.text})", ) return [] result = response.json() responses = result["responseSet"][0]["response"] vectara_default_metadata = ["lang", "len", "offset"] docs = [ ( Document( page_content=x["text"], metadata={ m["name"]: m["value"] for m in x["metadata"] if m["name"] not in vectara_default_metadata }, ), x["score"], ) for x in responses ] return docs [docs] def similarity_search( self, query: str, k: int = 5,
lambda_val: float = 0.025, filter: Optional[str] = None, n_sentence_context: int = 0, **kwargs: Any, ) -> List[Document]: """Return Vectara documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5. filter: Dictionary of argument(s) to filter on metadata. For example, a filter can be "doc.rating > 3.0 and part.lang = 'deu'"; see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. n_sentence_context: number of sentences before/after the matching segment to add Returns: List of Documents most similar to the query """ docs_and_scores = self.similarity_search_with_score( query, k=k, lambda_val=lambda_val, filter=filter, n_sentence_context=n_sentence_context, **kwargs, ) return [doc for doc, _ in docs_and_scores] [docs] @classmethod def from_texts( cls: Type[Vectara], texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> Vectara: """Construct Vectara wrapper from raw documents. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import Vectara
vectara = Vectara.from_texts( texts, vectara_customer_id=customer_id, vectara_corpus_id=corpus_id, vectara_api_key=api_key, ) """ # Note: Vectara generates its own embeddings, so we ignore the provided # embeddings (required by interface) vectara = cls(**kwargs) vectara.add_texts(texts, metadatas) return vectara [docs] def as_retriever(self, **kwargs: Any) -> VectaraRetriever: return VectaraRetriever(vectorstore=self, **kwargs) class VectaraRetriever(VectorStoreRetriever): vectorstore: Vectara search_kwargs: dict = Field( default_factory=lambda: { "lambda_val": 0.025, "k": 5, "filter": "", "n_sentence_context": "0", } ) """Search params. k: Number of Documents to return. Defaults to 5. lambda_val: lexical match parameter for hybrid search. filter: Dictionary of argument(s) to filter on metadata. For example, a filter can be "doc.rating > 3.0 and part.lang = 'deu'"; see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. n_sentence_context: number of sentences before/after the matching segment to add """ def add_texts( self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> None: """Add text to the Vectara vectorstore. Args: texts (List[str]): The text metadatas (List[dict]): Metadata dicts, must line up with existing store """ self.vectorstore.add_texts(texts, metadatas)
Source code for langchain.vectorstores.elastic_vector_search """Wrapper around Elasticsearch vector database.""" from __future__ import annotations import uuid from abc import ABC from typing import ( TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union, ) from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import get_from_env from langchain.vectorstores.base import VectorStore if TYPE_CHECKING: from elasticsearch import Elasticsearch def _default_text_mapping(dim: int) -> Dict: return { "properties": { "text": {"type": "text"}, "vector": {"type": "dense_vector", "dims": dim}, } } def _default_script_query(query_vector: List[float], filter: Optional[dict]) -> Dict: if filter: ((key, value),) = filter.items() filter = {"match": {f"metadata.{key}.keyword": f"{value}"}} else: filter = {"match_all": {}} return { "script_score": { "query": filter, "script": { "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0", "params": {"query_vector": query_vector}, }, } } # ElasticVectorSearch is a concrete implementation of the abstract base class # VectorStore, which defines a common interface for all vector database # implementations. By inheriting from the ABC class, ElasticVectorSearch can be # defined as an abstract base class itself, allowing the creation of subclasses with
# defined as an abstract base class itself, allowing the creation of subclasses with # their own specific implementations. If you plan to subclass ElasticVectorSearch, # you can inherit from it and define your own implementation of the necessary methods # and attributes. [docs]class ElasticVectorSearch(VectorStore, ABC): """Wrapper around Elasticsearch as a vector database. To connect to an Elasticsearch instance that does not require login credentials, pass the Elasticsearch URL and index name along with the embedding object to the constructor. Example: .. code-block:: python from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch( elasticsearch_url="http://localhost:9200", index_name="test_index", embedding=embedding ) To connect to an Elasticsearch instance that requires login credentials, including Elastic Cloud, use the Elasticsearch URL format https://username:password@es_host:9243. For example, to connect to Elastic Cloud, create the Elasticsearch URL with the required authentication details and pass it to the ElasticVectorSearch constructor as the named parameter elasticsearch_url. You can obtain your Elastic Cloud URL and login credentials by logging in to the Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and navigating to the "Deployments" page. To obtain your Elastic Cloud password for the default "elastic" user: 1. Log in to the Elastic Cloud console at https://cloud.elastic.co 2. Go to "Security" > "Users" 3. Locate the "elastic" user and click "Edit" 4. Click "Reset password"
4. Click "Reset password" 5. Follow the prompts to reset the password The format for Elastic Cloud URLs is https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243. Example: .. code-block:: python from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_host = "cluster_id.region_id.gcp.cloud.es.io" elasticsearch_url = f"https://username:password@{elastic_host}:9243" elastic_vector_search = ElasticVectorSearch( elasticsearch_url=elasticsearch_url, index_name="test_index", embedding=embedding ) Args: elasticsearch_url (str): The URL for the Elasticsearch instance. index_name (str): The name of the Elasticsearch index for the embeddings. embedding (Embeddings): An object that provides the ability to embed text. It should be an instance of a class that subclasses the Embeddings abstract base class, such as OpenAIEmbeddings() Raises: ValueError: If the elasticsearch python package is not installed. """ def __init__( self, elasticsearch_url: str, index_name: str, embedding: Embeddings, *, ssl_verify: Optional[Dict[str, Any]] = None, ): """Initialize with necessary components.""" try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) self.embedding = embedding self.index_name = index_name _ssl_verify = ssl_verify or {}
self.index_name = index_name _ssl_verify = ssl_verify or {} try: self.client = elasticsearch.Elasticsearch(elasticsearch_url, **_ssl_verify) except ValueError as e: raise ValueError( f"Your elasticsearch client string is mis-formatted. Got error: {e} " ) [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, refresh_indices: bool = True, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. refresh_indices: bool to refresh ElasticSearch indices Returns: List of ids from adding the texts into the vectorstore. """ try: from elasticsearch.exceptions import NotFoundError from elasticsearch.helpers import bulk except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) requests = [] ids = ids or [str(uuid.uuid4()) for _ in texts] embeddings = self.embedding.embed_documents(list(texts)) dim = len(embeddings[0]) mapping = _default_text_mapping(dim) # check to see if the index already exists try: self.client.indices.get(index=self.index_name) except NotFoundError: # TODO would be nice to create index before embedding, # just to save expensive steps for last
# just to save expensive steps for last self.create_index(self.client, self.index_name, mapping) for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} request = { "_op_type": "index", "_index": self.index_name, "vector": embeddings[i], "text": text, "metadata": metadata, "_id": ids[i], } requests.append(request) bulk(self.client, requests) if refresh_indices: self.client.indices.refresh(index=self.index_name) return ids [docs] def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ docs_and_scores = self.similarity_search_with_score(query, k, filter=filter) documents = [d[0] for d in docs_and_scores] return documents [docs] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """
Returns: List of Documents most similar to the query. """ embedding = self.embedding.embed_query(query) script_query = _default_script_query(embedding, filter) response = self.client_search( self.client, self.index_name, script_query, size=k ) hits = [hit for hit in response["hits"]["hits"]] docs_and_scores = [ ( Document( page_content=hit["_source"]["text"], metadata=hit["_source"]["metadata"], ), hit["_score"], ) for hit in hits ] return docs_and_scores [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, elasticsearch_url: Optional[str] = None, index_name: Optional[str] = None, refresh_indices: bool = True, **kwargs: Any, ) -> ElasticVectorSearch: """Construct ElasticVectorSearch wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new index for the embeddings in the Elasticsearch instance. 3. Adds the documents to the newly created Elasticsearch index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch.from_texts( texts, embeddings, elasticsearch_url="http://localhost:9200" ) """
elasticsearch_url="http://localhost:9200" ) """ elasticsearch_url = elasticsearch_url or get_from_env( "elasticsearch_url", "ELASTICSEARCH_URL" ) index_name = index_name or uuid.uuid4().hex vectorsearch = cls(elasticsearch_url, index_name, embedding, **kwargs) vectorsearch.add_texts( texts, metadatas=metadatas, refresh_indices=refresh_indices ) return vectorsearch [docs] def create_index(self, client: Any, index_name: str, mapping: Dict) -> None: version_num = client.info()["version"]["number"][0] version_num = int(version_num) if version_num >= 8: client.indices.create(index=index_name, mappings=mapping) else: client.indices.create(index=index_name, body={"mappings": mapping}) [docs] def client_search( self, client: Any, index_name: str, script_query: Dict, size: int ) -> Any: version_num = client.info()["version"]["number"][0] version_num = int(version_num) if version_num >= 8: response = client.search(index=index_name, query=script_query, size=size) else: response = client.search( index=index_name, body={"query": script_query, "size": size} ) return response [docs] def delete(self, ids: List[str]) -> None: """Delete by vector IDs. Args: ids: List of ids to delete. """ # TODO: Check if this can be done in bulk for id in ids:
# TODO: Check if this can be done in bulk for id in ids: self.client.delete(index=self.index_name, id=id) class ElasticKnnSearch(ElasticVectorSearch): """ A class for performing k-Nearest Neighbors (k-NN) search on an Elasticsearch index. The class is designed for a text search scenario where documents are text strings and their embeddings are vector representations of those strings. """ def __init__( self, index_name: str, embedding: Embeddings, es_connection: Optional["Elasticsearch"] = None, es_cloud_id: Optional[str] = None, es_user: Optional[str] = None, es_password: Optional[str] = None, vector_query_field: Optional[str] = "vector", query_field: Optional[str] = "text", ): """ Initializes an instance of the ElasticKnnSearch class and sets up the Elasticsearch client. Args: index_name: The name of the Elasticsearch index. embedding: An instance of the Embeddings class, used to generate vector representations of text strings. es_connection: An existing Elasticsearch connection. es_cloud_id: The Cloud ID of the Elasticsearch instance. Required if creating a new connection. es_user: The username for the Elasticsearch instance. Required if creating a new connection. es_password: The password for the Elasticsearch instance. Required if creating a new connection. """ try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) self.embedding = embedding
) self.embedding = embedding self.index_name = index_name self.query_field = query_field self.vector_query_field = vector_query_field # If a pre-existing Elasticsearch connection is provided, use it. if es_connection is not None: self.client = es_connection else: # If credentials for a new Elasticsearch connection are provided, # create a new connection. if es_cloud_id and es_user and es_password: self.client = elasticsearch.Elasticsearch( cloud_id=es_cloud_id, basic_auth=(es_user, es_password) ) else: raise ValueError( """Either provide a pre-existing Elasticsearch connection, \ or valid credentials for creating a new connection.""" ) @staticmethod def _default_knn_mapping(dims: int) -> Dict: """Generates a default index mapping for kNN search.""" return { "properties": { "text": {"type": "text"}, "vector": { "type": "dense_vector", "dims": dims, "index": True, "similarity": "dot_product", }, } } def _default_knn_query( self, query_vector: Optional[List[float]] = None, query: Optional[str] = None, model_id: Optional[str] = None, k: Optional[int] = 10, num_candidates: Optional[int] = 10, ) -> Dict: knn: Dict = { "field": self.vector_query_field, "k": k, "num_candidates": num_candidates, }
"k": k, "num_candidates": num_candidates, } # Case 1: `query_vector` is provided, but not `model_id` -> use query_vector if query_vector and not model_id: knn["query_vector"] = query_vector # Case 2: `query` and `model_id` are provided, -> use query_vector_builder elif query and model_id: knn["query_vector_builder"] = { "text_embedding": { "model_id": model_id, # use 'model_id' argument "model_text": query, # use 'query' argument } } else: raise ValueError( "Either `query_vector` or `model_id` must be provided, but not both." ) return knn def knn_search( self, query: Optional[str] = None, k: Optional[int] = 10, query_vector: Optional[List[float]] = None, model_id: Optional[str] = None, size: Optional[int] = 10, source: Optional[bool] = True, fields: Optional[ Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None] ] = None, ) -> Dict: """ Performs a k-nearest neighbor (k-NN) search on the Elasticsearch index. The search can be conducted using either a raw query vector or a model ID. The method first generates the body of the search query, which can be interpreted by Elasticsearch. It then performs the k-NN search on the Elasticsearch index and returns the results. Args:
search on the Elasticsearch index and returns the results. Args: query: The query or queries to be used for the search. Required if `query_vector` is not provided. k: The number of nearest neighbors to return. Defaults to 10. query_vector: The query vector to be used for the search. Required if `query` is not provided. model_id: The ID of the model to use for generating the query vector, if `query` is provided. size: The number of search hits to return. Defaults to 10. source: Whether to include the source of each hit in the results. fields: The fields to include in the source of each hit. If None, all fields are included. vector_query_field: Field name to use in knn search if not default 'vector' Returns: The search results. Raises: ValueError: If neither `query_vector` nor `model_id` is provided, or if both are provided. """ knn_query_body = self._default_knn_query( query_vector=query_vector, query=query, model_id=model_id, k=k ) # Perform the kNN search on the Elasticsearch index and return the results. res = self.client.search( index=self.index_name, knn=knn_query_body, size=size, source=source, fields=fields, ) return dict(res) def knn_hybrid_search( self, query: Optional[str] = None, k: Optional[int] = 10, query_vector: Optional[List[float]] = None, model_id: Optional[str] = None,
        size: Optional[int] = 10,
        source: Optional[bool] = True,
        knn_boost: Optional[float] = 0.9,
        query_boost: Optional[float] = 0.1,
        fields: Optional[
            Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None]
        ] = None,
    ) -> Dict[Any, Any]:
        """Performs a hybrid k-nearest neighbor (k-NN) and text-based search on the
        Elasticsearch index.

        The search can be conducted using either a raw query vector or a model ID.
        The method first generates the body of the k-NN search query and the
        text-based query, which can be interpreted by Elasticsearch. It then
        performs the hybrid search on the Elasticsearch index and returns the
        results.

        Args:
            query: The query text to be used for the search. Required if
                `query_vector` is not provided.
            k: The number of nearest neighbors to return. Defaults to 10.
            query_vector: The query vector to be used for the search. Required if
                `query` is not provided.
            model_id: The ID of the model to use for generating the query vector, if
                `query` is provided.
            size: The number of search hits to return. Defaults to 10.
            source: Whether to include the source of each hit in the results.
            knn_boost: The boost factor for the k-NN part of the search.
            query_boost: The boost factor for the text-based part of the search.
            fields: The fields to include in the source of each hit. If None, all
                fields are included. Defaults to None.
        Note:
            The instance's `vector_query_field` (default 'vector') and
            `query_field` (default 'text') determine which fields the kNN and
            text-based searches run against.

        Returns:
            The search results.

        Raises:
            ValueError: If the supplied arguments do not form one of the two valid
                combinations (`query_vector` alone, or `query` together with
                `model_id`).
        """
        knn_query_body = self._default_knn_query(
            query_vector=query_vector, query=query, model_id=model_id, k=k
        )

        # Modify the knn_query_body to add a "boost" parameter
        knn_query_body["boost"] = knn_boost

        # Generate the body of the standard Elasticsearch query
        match_query_body = {
            "match": {self.query_field: {"query": query, "boost": query_boost}}
        }

        # Perform the hybrid search on the Elasticsearch index and return the results.
        res = self.client.search(
            index=self.index_name,
            query=match_query_body,
            knn=knn_query_body,
            fields=fields,
            size=size,
            source=source,
        )

        return dict(res)
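A minimal usage sketch for the two search methods above (illustrative only, not part of the module source: the index name, credentials, and model ID are placeholders, and it assumes an Elasticsearch 8.x deployment whose index already holds dense vectors in the configured vector field):

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.elastic_vector_search import ElasticKnnSearch

embeddings = OpenAIEmbeddings()
knn_store = ElasticKnnSearch(
    index_name="my_knn_index",  # placeholder index name
    embedding=embeddings,
    es_cloud_id="my-cloud-id",  # placeholder credentials
    es_user="elastic",
    es_password="changeme",
)

# Pure kNN search: embed the query locally and pass the raw vector.
query_vector = embeddings.embed_query("how do I reset my password?")
knn_results = knn_store.knn_search(query_vector=query_vector, k=5)

# Hybrid search: BM25 on the text field plus kNN via a model deployed in
# Elasticsearch ("my-model-id" is a placeholder), weighted 70/30.
hybrid_results = knn_store.knn_hybrid_search(
    query="how do I reset my password?",
    model_id="my-model-id",
    k=5,
    knn_boost=0.7,
    query_boost=0.3,
)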
Source code for langchain.vectorstores.zilliz
from __future__ import annotations

import logging
from typing import Any, List, Optional

from langchain.embeddings.base import Embeddings
from langchain.vectorstores.milvus import Milvus

logger = logging.getLogger(__name__)


[docs]class Zilliz(Milvus):
    def _create_index(self) -> None:
        """Create an index on the collection."""
        from pymilvus import Collection, MilvusException

        if isinstance(self.col, Collection) and self._get_index() is None:
            try:
                # If no index params, use a default AutoIndex based one
                if self.index_params is None:
                    self.index_params = {
                        "metric_type": "L2",
                        "index_type": "AUTOINDEX",
                        "params": {},
                    }
                try:
                    self.col.create_index(
                        self._vector_field,
                        index_params=self.index_params,
                        using=self.alias,
                    )
                # If the default did not work, this is most likely a
                # self-hosted Milvus instance.
                except MilvusException:
                    # Fall back to an HNSW based index
                    self.index_params = {
                        "metric_type": "L2",
                        "index_type": "HNSW",
                        "params": {"M": 8, "efConstruction": 64},
                    }
                    self.col.create_index(
                        self._vector_field,
                        index_params=self.index_params,
                        using=self.alias,
                    )
                logger.debug(
                    "Successfully created an index on collection: %s",
                    self.collection_name,
                )
            except MilvusException as e:
                logger.error(
"Failed to create an index on collection: %s", self.collection_name ) raise e [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = "LangChainCollection", connection_args: dict[str, Any] = {}, consistency_level: str = "Session", index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: bool = False, **kwargs: Any, ) -> Zilliz: """Create a Zilliz collection, indexes it with HNSW, and insert data. Args: texts (List[str]): Text data. embedding (Embeddings): Embedding function. metadatas (Optional[List[dict]]): Metadata for each text if it exists. Defaults to None. collection_name (str, optional): Collection name to use. Defaults to "LangChainCollection". connection_args (dict[str, Any], optional): Connection args to use. Defaults to DEFAULT_MILVUS_CONNECTION. consistency_level (str, optional): Which consistency level to use. Defaults to "Session". index_params (Optional[dict], optional): Which index_params to use. Defaults to None. search_params (Optional[dict], optional): Which search params to use. Defaults to None. drop_old (Optional[bool], optional): Whether to drop the collection with that name if it exists. Defaults to False. Returns: Zilliz: Zilliz Vector Store """ vector_db = cls(
""" vector_db = cls( embedding_function=embedding, collection_name=collection_name, connection_args=connection_args, consistency_level=consistency_level, index_params=index_params, search_params=search_params, drop_old=drop_old, **kwargs, ) vector_db.add_texts(texts=texts, metadatas=metadatas) return vector_db
Source code for langchain.vectorstores.chroma
"""Wrapper around ChromaDB embeddings platform."""
from __future__ import annotations

import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type

import numpy as np

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import xor_args
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance

if TYPE_CHECKING:
    import chromadb
    import chromadb.config
    from chromadb.api.types import ID, OneOrMany, Where, WhereDocument

logger = logging.getLogger()
DEFAULT_K = 4  # Number of Documents to return.


def _results_to_docs(results: Any) -> List[Document]:
    return [doc for doc, _ in _results_to_docs_and_scores(results)]


def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]:
    return [
        # TODO: Chroma can do batch querying,
        # we shouldn't hard code to the 1st result
        (Document(page_content=result[0], metadata=result[1] or {}), result[2])
        for result in zip(
            results["documents"][0],
            results["metadatas"][0],
            results["distances"][0],
        )
    ]


[docs]class Chroma(VectorStore):
    """Wrapper around ChromaDB embeddings platform.

    To use, you should have the ``chromadb`` python package installed.

    Example:
        .. code-block:: python

            from langchain.vectorstores import Chroma
            from langchain.embeddings.openai import OpenAIEmbeddings

            embeddings = OpenAIEmbeddings()
            vectorstore = Chroma("langchain_store", embeddings)
    """

    _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"

    def __init__(
        self,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        embedding_function: Optional[Embeddings] = None,
        persist_directory: Optional[str] = None,
        client_settings: Optional[chromadb.config.Settings] = None,
        collection_metadata: Optional[Dict] = None,
        client: Optional[chromadb.Client] = None,
    ) -> None:
        """Initialize with Chroma client."""
        try:
            import chromadb
            import chromadb.config
        except ImportError:
            raise ValueError(
                "Could not import chromadb python package. "
                "Please install it with `pip install chromadb`."
            )

        if client is not None:
            self._client = client
        else:
            if client_settings:
                self._client_settings = client_settings
            else:
                self._client_settings = chromadb.config.Settings()
                if persist_directory is not None:
                    self._client_settings = chromadb.config.Settings(
                        chroma_db_impl="duckdb+parquet",
                        persist_directory=persist_directory,
                    )
            self._client = chromadb.Client(self._client_settings)

        self._embedding_function = embedding_function
        self._persist_directory = persist_directory
        self._collection = self._client.get_or_create_collection(
            name=collection_name,
            embedding_function=self._embedding_function.embed_documents
            if self._embedding_function is not None
            else None,
            metadata=collection_metadata,
        )
@xor_args(("query_texts", "query_embeddings")) def __query_collection( self, query_texts: Optional[List[str]] = None, query_embeddings: Optional[List[List[float]]] = None, n_results: int = 4, where: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Query the chroma collection.""" try: import chromadb # noqa: F401 except ImportError: raise ValueError( "Could not import chromadb python package. " "Please install it with `pip install chromadb`." ) return self._collection.query( query_texts=query_texts, query_embeddings=query_embeddings, n_results=n_results, where=where, **kwargs, ) [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. Returns: List[str]: List of IDs of the added texts. """ # TODO: Handle the case where the user doesn't provide ids on the Collection if ids is None: ids = [str(uuid.uuid1()) for _ in texts] embeddings = None
        if self._embedding_function is not None:
            embeddings = self._embedding_function.embed_documents(list(texts))
        self._collection.upsert(
            metadatas=metadatas, embeddings=embeddings, documents=texts, ids=ids
        )
        return ids

    [docs] def similarity_search(
        self,
        query: str,
        k: int = DEFAULT_K,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Run similarity search with Chroma.

        Args:
            query (str): Query text to search for.
            k (int): Number of results to return. Defaults to 4.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List[Document]: List of documents most similar to the query text.
        """
        docs_and_scores = self.similarity_search_with_score(query, k, filter=filter)
        return [doc for doc, _ in docs_and_scores]

    [docs] def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = DEFAULT_K,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding (List[float]): Embedding to look up documents similar to.
            k (int): Number of Documents to return. Defaults to 4.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
        Returns:
            List of Documents most similar to the query vector.
        """
        results = self.__query_collection(
            query_embeddings=embedding, n_results=k, where=filter
        )
        return _results_to_docs(results)

    [docs] def similarity_search_with_score(
        self,
        query: str,
        k: int = DEFAULT_K,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Run similarity search with Chroma with distance.

        Args:
            query (str): Query text to search for.
            k (int): Number of results to return. Defaults to 4.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List[Tuple[Document, float]]: List of documents most similar to
            the query text and cosine distance in float for each.
            Lower score represents more similarity.
        """
        if self._embedding_function is None:
            results = self.__query_collection(
                query_texts=[query], n_results=k, where=filter
            )
        else:
            query_embedding = self._embedding_function.embed_query(query)
            results = self.__query_collection(
                query_embeddings=[query_embedding], n_results=k, where=filter
            )
        return _results_to_docs_and_scores(results)

    def _similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        return self.similarity_search_with_score(query, k, **kwargs)
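    # Usage sketch (illustrative only, not part of this module): given a store
    # built elsewhere, e.g. ``db = Chroma.from_texts(texts, OpenAIEmbeddings())``,
    # a scored query looks like:
    #
    #     docs_and_scores = db.similarity_search_with_score("what is chroma?", k=2)
    #     for doc, score in docs_and_scores:
    #         print(score, doc.page_content)
    #
    # The score is a distance, so lower values indicate closer matches.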
    [docs] def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = DEFAULT_K,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        results = self.__query_collection(
            query_embeddings=embedding,
            n_results=fetch_k,
            where=filter,
            include=["metadatas", "documents", "distances", "embeddings"],
        )
        mmr_selected = maximal_marginal_relevance(
            np.array(embedding, dtype=np.float32),
            results["embeddings"][0],
            k=k,
            lambda_mult=lambda_mult,
        )
        candidates = _results_to_docs(results)
        selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected]
        return selected_results

    [docs] def max_marginal_relevance_search(
        self,
        query: str,
        k: int = DEFAULT_K,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        if self._embedding_function is None:
            raise ValueError(
                "For MMR search, you must specify an embedding function on "
                "creation."
            )

        embedding = self._embedding_function.embed_query(query)
        docs = self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult=lambda_mult, filter=filter
        )
        return docs
    [docs] def delete_collection(self) -> None:
        """Delete the collection."""
        self._client.delete_collection(self._collection.name)

    [docs] def get(
        self,
        ids: Optional[OneOrMany[ID]] = None,
        where: Optional[Where] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
        where_document: Optional[WhereDocument] = None,
        include: Optional[List[str]] = None,
    ) -> Dict[str, Any]:
        """Gets the collection.

        Args:
            ids: The ids of the embeddings to get. Optional.
            where: A Where type dict used to filter results by.
                E.g. `{"color" : "red", "price": 4.20}`. Optional.
            limit: The number of documents to return. Optional.
            offset: The offset to start returning results from.
                Useful for paging results with limit. Optional.
            where_document: A WhereDocument type dict used to filter by the documents.
                E.g. `{$contains: {"text": "hello"}}`. Optional.
            include: A list of what to include in the results.
                Can contain `"embeddings"`, `"metadatas"`, `"documents"`.
                Ids are always included.
                Defaults to `["metadatas", "documents"]`. Optional.
        """
        kwargs = {
            "ids": ids,
            "where": where,
            "limit": limit,
            "offset": offset,
            "where_document": where_document,
        }
        if include is not None:
            kwargs["include"] = include
        return self._collection.get(**kwargs)
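    # Usage sketch (illustrative only, not part of this module): ``get`` reads
    # records directly, without running a similarity search:
    #
    #     red_docs = db.get(where={"color": "red"}, limit=10)
    #
    # A store created with a ``persist_directory`` can also be flushed to disk
    # explicitly via ``db.persist()`` (defined below).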
kwargs["include"] = include return self._collection.get(**kwargs) [docs] def persist(self) -> None: """Persist the collection. This can be used to explicitly persist the data to disk. It will also be called automatically when the object is destroyed. """ if self._persist_directory is None: raise ValueError( "You must specify a persist_directory on" "creation to persist the collection." ) self._client.persist() [docs] def update_document(self, document_id: str, document: Document) -> None: """Update a document in the collection. Args: document_id (str): ID of the document to update. document (Document): Document to update. """ text = document.page_content metadata = document.metadata if self._embedding_function is None: raise ValueError( "For update, you must specify an embedding function on creation." ) embeddings = self._embedding_function.embed_documents([text]) self._collection.update( ids=[document_id], embeddings=embeddings, documents=[text], metadatas=[metadata], ) [docs] @classmethod def from_texts( cls: Type[Chroma], texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, client: Optional[chromadb.Client] = None, **kwargs: Any,
        client: Optional[chromadb.Client] = None,
        **kwargs: Any,
    ) -> Chroma:
        """Create a Chroma vectorstore from raw documents.

        If a persist_directory is specified, the collection will be persisted
        there. Otherwise, the data will be ephemeral in-memory.

        Args:
            texts (List[str]): List of texts to add to the collection.
            collection_name (str): Name of the collection to create.
            persist_directory (Optional[str]): Directory to persist the collection.
            embedding (Optional[Embeddings]): Embedding function. Defaults to None.
            metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
            ids (Optional[List[str]]): List of document IDs. Defaults to None.
            client_settings (Optional[chromadb.config.Settings]): Chroma client
                settings.

        Returns:
            Chroma: Chroma vectorstore.
        """
        chroma_collection = cls(
            collection_name=collection_name,
            embedding_function=embedding,
            persist_directory=persist_directory,
            client_settings=client_settings,
            client=client,
        )
        chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids)
        return chroma_collection

    [docs] @classmethod
    def from_documents(
        cls: Type[Chroma],
        documents: List[Document],
        embedding: Optional[Embeddings] = None,
        ids: Optional[List[str]] = None,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        persist_directory: Optional[str] = None,
        client_settings: Optional[chromadb.config.Settings] = None,
        client: Optional[chromadb.Client] = None,