Dataset schema: id (string, 14–16 chars), text (string, 31–2.41k chars), source (string, 53–121 chars).
12f9bd8b091f-0
Source code for langchain.document_loaders.airbyte_json

"""Loader that loads local airbyte json files."""
import json
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import stringify_dict


class AirbyteJSONLoader(BaseLoader):
    """Loader that loads local airbyte json files."""

    def __init__(self, file_path: str):
        """Initialize with file path. This should start with '/tmp/airbyte_local/'."""
        self.file_path = file_path

    def load(self) -> List[Document]:
        """Load file."""
        text = ""
        with open(self.file_path, "r") as f:
            for line in f:
                data = json.loads(line)["_airbyte_data"]
                text += stringify_dict(data)
        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/airbyte_json.html
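A minimal usage sketch of the loader above (the file path is hypothetical; Airbyte's local JSON destination writes line-delimited records under /tmp/airbyte_local/):

from langchain.document_loaders.airbyte_json import AirbyteJSONLoader

# Each line of the file is a JSON record carrying an "_airbyte_data" payload.
loader = AirbyteJSONLoader("/tmp/airbyte_local/json_data/records.jsonl")
docs = loader.load()  # a single Document concatenating all stringified records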
cb52392989a3-0
Source code for langchain.document_loaders.image

"""Loader that loads image files."""
from typing import List

from langchain.document_loaders.unstructured import UnstructuredFileLoader


class UnstructuredImageLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load image files, such as PNGs and JPGs."""

    def _get_elements(self) -> List:
        from unstructured.partition.image import partition_image

        return partition_image(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/image.html
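A minimal usage sketch, assuming the `unstructured` package (with its image-parsing extras) is installed; the file name is hypothetical:

from langchain.document_loaders.image import UnstructuredImageLoader

loader = UnstructuredImageLoader("example.png")
docs = loader.load()  # partitioning/OCR is delegated to unstructured.partition.image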
486368f79bf6-0
Source code for langchain.document_loaders.readthedocs

"""Loader that loads ReadTheDocs documentation directory dump."""
from pathlib import Path
from typing import Any, List, Optional, Tuple, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class ReadTheDocsLoader(BaseLoader):
    """Loader that loads ReadTheDocs documentation directory dump."""

    def __init__(
        self,
        path: Union[str, Path],
        encoding: Optional[str] = None,
        errors: Optional[str] = None,
        custom_html_tag: Optional[Tuple[str, dict]] = None,
        **kwargs: Optional[Any]
    ):
        """
        Initialize ReadTheDocsLoader

        The loader loops over all files under `path` and extracts the actual
        content of the files by retrieving main html tags. Default main
        html tags include `<main id="main-content">`, `<div role="main">`,
        and `<article role="main">`. You can also define your own html tags
        by passing custom_html_tag, e.g. `("div", {"class": "main"})`. The
        loader iterates html tags with the order of custom html tags (if
        exists) and default html tags. If any of the tags is not empty, the
        loop will break and retrieve the content out of that tag.

        Args:
            path: The location of pulled readthedocs folder.
            encoding: The encoding with which to open the documents.
            errors: Specifies how encoding and decoding errors are to be
                handled—this cannot be used in binary mode.
            custom_html_tag: Optional custom html tag to retrieve the content
                from files.
        """
        try:
            from bs4 import BeautifulSoup
        except ImportError:
            raise ImportError(
                "Could not import beautifulsoup4 python package. "
                "Please install it with `pip install beautifulsoup4`. "
            )

        try:
            _ = BeautifulSoup(
                "<html><body>Parser builder library test.</body></html>", **kwargs
            )
        except Exception as e:
            raise ValueError("Parsing kwargs do not appear valid") from e

        self.file_path = Path(path)
        self.encoding = encoding
        self.errors = errors
        self.custom_html_tag = custom_html_tag
        self.bs_kwargs = kwargs

    def load(self) -> List[Document]:
        """Load documents."""
        docs = []
        for p in self.file_path.rglob("*"):
            if p.is_dir():
                continue
            with open(p, encoding=self.encoding, errors=self.errors) as f:
                text = self._clean_data(f.read())
            metadata = {"source": str(p)}
            docs.append(Document(page_content=text, metadata=metadata))
        return docs

    def _clean_data(self, data: str) -> str:
        from bs4 import BeautifulSoup

        soup = BeautifulSoup(data, **self.bs_kwargs)

        # default tags
        html_tags = [
            ("div", {"role": "main"}),
            ("main", {"id": "main-content"}),
        ]

        if self.custom_html_tag is not None:
            html_tags.append(self.custom_html_tag)

        text = None

        # reversed order. check the custom one first
        for tag, attrs in html_tags[::-1]:
            text = soup.find(tag, attrs)
            # if found, break
            if text is not None:
                break

        if text is not None:
            text = text.get_text()
        else:
            text = ""

        # trim empty lines
        return "\n".join([t for t in text.split("\n") if t])
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/readthedocs.html
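A minimal usage sketch (directory name and custom tag are hypothetical; the extra kwargs are forwarded to BeautifulSoup, so `features` selects the parser):

from langchain.document_loaders.readthedocs import ReadTheDocsLoader

loader = ReadTheDocsLoader(
    "rtdocs",                                    # folder holding a pulled ReadTheDocs site
    features="html.parser",                      # passed through to BeautifulSoup
    custom_html_tag=("div", {"class": "main"}),  # checked before the default tags
)
docs = loader.load()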
5200ed9fd3e9-0
Source code for langchain.document_loaders.weather

"""Simple reader that reads weather data from OpenWeatherMap API"""
from __future__ import annotations

from datetime import datetime
from typing import Iterator, List, Optional, Sequence

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper


class WeatherDataLoader(BaseLoader):
    """Weather Reader.

    Reads the forecast & current weather of any location using OpenWeatherMap's
    free API. Check out 'https://openweathermap.org/appid' for more on how to
    generate a free OpenWeatherMap API key.
    """

    def __init__(
        self,
        client: OpenWeatherMapAPIWrapper,
        places: Sequence[str],
    ) -> None:
        """Initialize with parameters."""
        super().__init__()
        self.client = client
        self.places = places

    @classmethod
    def from_params(
        cls, places: Sequence[str], *, openweathermap_api_key: Optional[str] = None
    ) -> WeatherDataLoader:
        client = OpenWeatherMapAPIWrapper(openweathermap_api_key=openweathermap_api_key)
        return cls(client, places)

    def lazy_load(
        self,
    ) -> Iterator[Document]:
        """Lazily load weather data for the given locations."""
        for place in self.places:
            metadata = {"queried_at": datetime.now()}
            content = self.client.run(place)
            yield Document(page_content=content, metadata=metadata)

    def load(
        self,
    ) -> List[Document]:
        """Load weather data for the given locations."""
        return list(self.lazy_load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/weather.html
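A minimal usage sketch (API key and place names are placeholders; `from_params` builds the OpenWeatherMapAPIWrapper client for you, and the wrapper is assumed to also pick up the key from its environment variable if none is passed):

from langchain.document_loaders.weather import WeatherDataLoader

loader = WeatherDataLoader.from_params(
    places=["London", "Pune"],              # any locations OpenWeatherMap recognizes
    openweathermap_api_key="your-api-key",  # placeholder
)
for doc in loader.lazy_load():  # one Document per place, stamped with queried_at
    print(doc.metadata["queried_at"], doc.page_content[:80])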
a9ddb222db34-0
Source code for langchain.document_loaders.html

"""Loader that uses unstructured to load HTML files."""
from typing import List

from langchain.document_loaders.unstructured import UnstructuredFileLoader


class UnstructuredHTMLLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load HTML files."""

    def _get_elements(self) -> List:
        from unstructured.partition.html import partition_html

        return partition_html(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/html.html
6fd690e8b81b-0
Source code for langchain.document_loaders.srt

"""Loader for .srt (subtitle) files."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class SRTLoader(BaseLoader):
    """Loader for .srt (subtitle) files."""

    def __init__(self, file_path: str):
        """Initialize with file path."""
        try:
            import pysrt  # noqa:F401
        except ImportError:
            raise ImportError(
                "package `pysrt` not found, please install it with `pip install pysrt`"
            )
        self.file_path = file_path

    def load(self) -> List[Document]:
        """Load the subtitle file using pysrt."""
        import pysrt

        parsed_info = pysrt.open(self.file_path)
        text = " ".join([t.text for t in parsed_info])
        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/srt.html
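A minimal usage sketch (file name hypothetical):

from langchain.document_loaders.srt import SRTLoader

loader = SRTLoader("example.srt")
docs = loader.load()  # one Document whose page_content joins every subtitle cue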
65b765c0755c-0
Source code for langchain.document_loaders.onedrive_file

from __future__ import annotations

import tempfile
from typing import TYPE_CHECKING, List

from pydantic import BaseModel, Field

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader

if TYPE_CHECKING:
    from O365.drive import File

CHUNK_SIZE = 1024 * 1024 * 5


class OneDriveFileLoader(BaseLoader, BaseModel):
    file: File = Field(...)

    class Config:
        arbitrary_types_allowed = True

    def load(self) -> List[Document]:
        """Load Documents"""
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.file.name}"
            self.file.download(to_path=temp_dir, chunk_size=CHUNK_SIZE)
            loader = UnstructuredFileLoader(file_path)
            return loader.load()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/onedrive_file.html
5d522e33f4ed-0
Source code for langchain.document_loaders.s3_directory

"""Loading logic for loading documents from an s3 directory."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.s3_file import S3FileLoader


class S3DirectoryLoader(BaseLoader):
    """Loading logic for loading documents from s3."""

    def __init__(self, bucket: str, prefix: str = ""):
        """Initialize with bucket and key prefix."""
        self.bucket = bucket
        self.prefix = prefix

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            import boto3
        except ImportError:
            raise ImportError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        s3 = boto3.resource("s3")
        bucket = s3.Bucket(self.bucket)
        docs = []
        for obj in bucket.objects.filter(Prefix=self.prefix):
            loader = S3FileLoader(self.bucket, obj.key)
            docs.extend(loader.load())
        return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/s3_directory.html
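A minimal usage sketch (bucket and prefix are placeholders; boto3 must be able to resolve AWS credentials, e.g. from environment variables or ~/.aws/credentials):

from langchain.document_loaders.s3_directory import S3DirectoryLoader

loader = S3DirectoryLoader("my-bucket", prefix="reports/2023/")
docs = loader.load()  # each matching object is fetched and parsed via S3FileLoader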
eaa2451f71cb-0
Source code for langchain.document_loaders.mhtml

"""Loader to load MHTML files, enriching metadata with page title."""
import email
import logging
from typing import Dict, List, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class MHTMLLoader(BaseLoader):
    """Loader that uses beautiful soup to parse HTML files."""

    def __init__(
        self,
        file_path: str,
        open_encoding: Union[str, None] = None,
        bs_kwargs: Union[dict, None] = None,
        get_text_separator: str = "",
    ) -> None:
        """Initialise with path, and optionally, file encoding to use, and any
        kwargs to pass to the BeautifulSoup object."""
        try:
            import bs4  # noqa:F401
        except ImportError:
            raise ValueError(
                "beautifulsoup4 package not found, please install it with "
                "`pip install beautifulsoup4`"
            )

        self.file_path = file_path
        self.open_encoding = open_encoding
        if bs_kwargs is None:
            bs_kwargs = {"features": "lxml"}
        self.bs_kwargs = bs_kwargs
        self.get_text_separator = get_text_separator

    def load(self) -> List[Document]:
        """Load MHTML document into document objects."""
        from bs4 import BeautifulSoup

        with open(self.file_path, "r", encoding=self.open_encoding) as f:
            message = email.message_from_string(f.read())
            parts = message.get_payload()

            if type(parts) is not list:
                parts = [message]

            for part in parts:
                if part.get_content_type() == "text/html":
                    html = part.get_payload(decode=True).decode()

                    soup = BeautifulSoup(html, **self.bs_kwargs)
                    text = soup.get_text(self.get_text_separator)

                    if soup.title:
                        title = str(soup.title.string)
                    else:
                        title = ""

                    metadata: Dict[str, Union[str, None]] = {
                        "source": self.file_path,
                        "title": title,
                    }
                    return [Document(page_content=text, metadata=metadata)]
        return []
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/mhtml.html
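A minimal usage sketch (file name hypothetical; the default bs_kwargs select the lxml parser, so lxml must be installed alongside beautifulsoup4):

from langchain.document_loaders.mhtml import MHTMLLoader

loader = MHTMLLoader("saved_page.mhtml")
docs = loader.load()  # only the first text/html part is used; title lands in metadata["title"]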
09ac36a936cd-0
Source code for langchain.document_loaders.gcs_file

"""Loading logic for loading documents from a GCS file."""
import os
import tempfile
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader


class GCSFileLoader(BaseLoader):
    """Loading logic for loading documents from GCS."""

    def __init__(self, project_name: str, bucket: str, blob: str):
        """Initialize with project name, bucket and blob name."""
        self.bucket = bucket
        self.blob = blob
        self.project_name = project_name

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            from google.cloud import storage
        except ImportError:
            raise ValueError(
                "Could not import google-cloud-storage python package. "
                "Please install it with `pip install google-cloud-storage`."
            )

        # Initialise a client
        storage_client = storage.Client(self.project_name)
        # Create a bucket object for our bucket
        bucket = storage_client.get_bucket(self.bucket)
        # Create a blob object from the filepath
        blob = bucket.blob(self.blob)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.blob}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            # Download the file to a destination
            blob.download_to_filename(file_path)
            loader = UnstructuredFileLoader(file_path)
            return loader.load()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/gcs_file.html
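A minimal usage sketch (project, bucket, and blob names are placeholders; google-cloud-storage is assumed to resolve credentials via Application Default Credentials):

from langchain.document_loaders.gcs_file import GCSFileLoader

loader = GCSFileLoader(project_name="my-project", bucket="my-bucket", blob="docs/report.txt")
docs = loader.load()  # blob is downloaded to a temp dir, then parsed by UnstructuredFileLoader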
dd79b729aef4-0
Source code for langchain.document_loaders.toml

import json
from pathlib import Path
from typing import Iterator, List, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class TomlLoader(BaseLoader):
    """
    A TOML document loader that inherits from the BaseLoader class.

    This class can be initialized with either a single source file or a source
    directory containing TOML files.
    """

    def __init__(self, source: Union[str, Path]):
        """Initialize the TomlLoader with a source file or directory."""
        self.source = Path(source)

    def load(self) -> List[Document]:
        """Load and return all documents."""
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        """Lazily load the TOML documents from the source file or directory."""
        import tomli

        if self.source.is_file() and self.source.suffix == ".toml":
            files = [self.source]
        elif self.source.is_dir():
            files = list(self.source.glob("**/*.toml"))
        else:
            raise ValueError("Invalid source path or file type")

        for file_path in files:
            with file_path.open("r", encoding="utf-8") as file:
                content = file.read()
                try:
                    data = tomli.loads(content)
                    doc = Document(
                        page_content=json.dumps(data),
                        metadata={"source": str(file_path)},
                    )
                    yield doc
                except tomli.TOMLDecodeError as e:
                    print(f"Error parsing TOML file {file_path}: {e}")
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/toml.html
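A minimal usage sketch (paths hypothetical; requires the tomli package, and each TOML file becomes one Document whose page_content is the parsed table re-serialized as JSON):

from langchain.document_loaders.toml import TomlLoader

loader = TomlLoader("config/")  # a single .toml file works too
for doc in loader.lazy_load():
    print(doc.metadata["source"], doc.page_content)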
4e1c5c8aab88-0
Source code for langchain.document_loaders.psychic

"""Loader that loads documents from Psychic.dev."""
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class PsychicLoader(BaseLoader):
    """Loader that loads documents from Psychic.dev."""

    def __init__(
        self, api_key: str, account_id: str, connector_id: Optional[str] = None
    ):
        """Initialize with API key, connector id, and account id."""
        try:
            from psychicapi import ConnectorId, Psychic  # noqa: F401
        except ImportError:
            raise ImportError(
                "`psychicapi` package not found, please run `pip install psychicapi`"
            )
        self.psychic = Psychic(secret_key=api_key)
        self.connector_id = ConnectorId(connector_id)
        self.account_id = account_id

    def load(self) -> List[Document]:
        """Load documents."""
        psychic_docs = self.psychic.get_documents(
            connector_id=self.connector_id, account_id=self.account_id
        )
        return [
            Document(
                page_content=doc["content"],
                metadata={"title": doc["title"], "source": doc["uri"]},
            )
            for doc in psychic_docs.documents
        ]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/psychic.html
bd7f58d718e6-0
Source code for langchain.document_loaders.iugu

"""Loader that fetches data from IUGU"""
import json
import urllib.request
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env, stringify_dict

IUGU_ENDPOINTS = {
    "invoices": "https://api.iugu.com/v1/invoices",
    "customers": "https://api.iugu.com/v1/customers",
    "charges": "https://api.iugu.com/v1/charges",
    "subscriptions": "https://api.iugu.com/v1/subscriptions",
    "plans": "https://api.iugu.com/v1/plans",
}


class IuguLoader(BaseLoader):
    """Loader that fetches data from IUGU."""

    def __init__(self, resource: str, api_token: Optional[str] = None) -> None:
        self.resource = resource
        api_token = api_token or get_from_env("api_token", "IUGU_API_TOKEN")
        self.headers = {"Authorization": f"Bearer {api_token}"}

    def _make_request(self, url: str) -> List[Document]:
        request = urllib.request.Request(url, headers=self.headers)

        with urllib.request.urlopen(request) as response:
            json_data = json.loads(response.read().decode())
            text = stringify_dict(json_data)
            metadata = {"source": url}
            return [Document(page_content=text, metadata=metadata)]

    def _get_resource(self) -> List[Document]:
        endpoint = IUGU_ENDPOINTS.get(self.resource)
        if endpoint is None:
            return []
        return self._make_request(endpoint)

    def load(self) -> List[Document]:
        return self._get_resource()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/iugu.html
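A minimal usage sketch (token is a placeholder; `resource` must be one of the IUGU_ENDPOINTS keys, otherwise load() returns an empty list):

from langchain.document_loaders.iugu import IuguLoader

loader = IuguLoader("invoices", api_token="your-iugu-token")  # or set IUGU_API_TOKEN
docs = loader.load()  # one Document containing the stringified JSON response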
ead23bef666e-0
Source code for langchain.document_loaders.blob_loaders.youtube_audio

from typing import Iterable, List

from langchain.document_loaders.blob_loaders import FileSystemBlobLoader
from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader


class YoutubeAudioLoader(BlobLoader):
    """Load YouTube urls as audio file(s)."""

    def __init__(self, urls: List[str], save_dir: str):
        if not isinstance(urls, list):
            raise TypeError("urls must be a list")
        self.urls = urls
        self.save_dir = save_dir

    def yield_blobs(self) -> Iterable[Blob]:
        """Yield audio blobs for each url."""
        try:
            import yt_dlp
        except ImportError:
            raise ValueError(
                "yt_dlp package not found, please install it with "
                "`pip install yt_dlp`"
            )

        # Use yt_dlp to download audio given a YouTube url
        ydl_opts = {
            "format": "m4a/bestaudio/best",
            "noplaylist": True,
            "outtmpl": self.save_dir + "/%(title)s.%(ext)s",
            "postprocessors": [
                {
                    "key": "FFmpegExtractAudio",
                    "preferredcodec": "m4a",
                }
            ],
        }

        for url in self.urls:
            # Download file
            with yt_dlp.YoutubeDL(ydl_opts) as ydl:
                ydl.download(url)

        # Yield the written blobs
        loader = FileSystemBlobLoader(self.save_dir, glob="*.m4a")
        for blob in loader.yield_blobs():
            yield blob
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/youtube_audio.html
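A minimal usage sketch (URLs and directory are placeholders, with the video id deliberately elided; yt_dlp plus ffmpeg are required for the m4a post-processing step):

from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader

loader = YoutubeAudioLoader(
    urls=["https://www.youtube.com/watch?v=..."],  # elided: any video URLs
    save_dir="downloads",
)
for blob in loader.yield_blobs():  # one Blob per downloaded .m4a file
    print(blob.path)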
5e9a2c7c2e0e-0
Source code for langchain.document_loaders.blob_loaders.file_system

"""Use to load blobs from the local file system."""
from pathlib import Path
from typing import Callable, Iterable, Iterator, Optional, Sequence, TypeVar, Union

from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader

T = TypeVar("T")


def _make_iterator(
    length_func: Callable[[], int], show_progress: bool = False
) -> Callable[[Iterable[T]], Iterator[T]]:
    """Create a function that optionally wraps an iterable in tqdm."""
    if show_progress:
        try:
            from tqdm.auto import tqdm
        except ImportError:
            raise ImportError(
                "You must install tqdm to use show_progress=True. "
                "You can install tqdm with `pip install tqdm`."
            )

        # Make sure to provide `total` here so that tqdm can show
        # a progress bar that takes into account the total number of files.
        def _with_tqdm(iterable: Iterable[T]) -> Iterator[T]:
            """Wrap an iterable in a tqdm progress bar."""
            return tqdm(iterable, total=length_func())

        iterator = _with_tqdm
    else:
        iterator = iter  # type: ignore

    return iterator


# PUBLIC API


class FileSystemBlobLoader(BlobLoader):
    """Blob loader for the local file system.

    Example:

    .. code-block:: python

        from langchain.document_loaders.blob_loaders import FileSystemBlobLoader
        loader = FileSystemBlobLoader("/path/to/directory")
        for blob in loader.yield_blobs():
            print(blob)
    """

    def __init__(
        self,
        path: Union[str, Path],
        *,
        glob: str = "**/[!.]*",
        suffixes: Optional[Sequence[str]] = None,
        show_progress: bool = False,
    ) -> None:
        """Initialize with path to directory and how to glob over it.

        Args:
            path: Path to directory to load from
            glob: Glob pattern relative to the specified path
                by default set to pick up all non-hidden files
            suffixes: Provide to keep only files with these suffixes
                Useful when wanting to keep files with different suffixes
                Suffixes must include the dot, e.g. ".txt"
            show_progress: If true, will show a progress bar as the files are loaded.
                This forces an iteration through all matching files
                to count them prior to loading them.

        Examples:

        .. code-block:: python

            # Recursively load all text files in a directory.
            loader = FileSystemBlobLoader("/path/to/directory", glob="**/*.txt")

            # Recursively load all non-hidden files in a directory.
            loader = FileSystemBlobLoader("/path/to/directory", glob="**/[!.]*")

            # Load all files in a directory without recursion.
            loader = FileSystemBlobLoader("/path/to/directory", glob="*")
        """
        if isinstance(path, Path):
            _path = path
        elif isinstance(path, str):
            _path = Path(path)
        else:
            raise TypeError(f"Expected str or Path, got {type(path)}")

        self.path = _path
        self.glob = glob
        self.suffixes = set(suffixes or [])
        self.show_progress = show_progress

    def yield_blobs(
        self,
    ) -> Iterable[Blob]:
        """Yield blobs that match the requested pattern."""
        iterator = _make_iterator(
            length_func=self.count_matching_files, show_progress=self.show_progress
        )

        for path in iterator(self._yield_paths()):
            yield Blob.from_path(path)

    def _yield_paths(self) -> Iterable[Path]:
        """Yield paths that match the requested pattern."""
        paths = self.path.glob(self.glob)
        for path in paths:
            if path.is_file():
                if self.suffixes and path.suffix not in self.suffixes:
                    continue
                yield path

    def count_matching_files(self) -> int:
        """Count files that match the pattern without loading them."""
        # Carry out a full iteration to count the files without
        # materializing anything expensive in memory.
        num = 0
        for _ in self._yield_paths():
            num += 1
        return num
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/file_system.html
8ed853cfb069-0
Source code for langchain.document_loaders.blob_loaders.schema

"""Schema for Blobs and Blob Loaders.

The goal is to facilitate decoupling of content loading from content parsing code.

In addition, content loading code should provide a lazy loading interface by default.
"""
from __future__ import annotations

import contextlib
import mimetypes
from abc import ABC, abstractmethod
from io import BufferedReader, BytesIO
from pathlib import PurePath
from typing import Any, Generator, Iterable, Mapping, Optional, Union

from pydantic import BaseModel, root_validator

PathLike = Union[str, PurePath]


class Blob(BaseModel):
    """A blob is used to represent raw data by either reference or value.

    Provides an interface to materialize the blob in different representations, and
    help to decouple the development of data loaders from the downstream parsing of
    the raw data.

    Inspired by: https://developer.mozilla.org/en-US/docs/Web/API/Blob
    """

    data: Union[bytes, str, None]  # Raw data
    mimetype: Optional[str] = None  # Not to be confused with a file extension
    encoding: str = "utf-8"  # Use utf-8 as default encoding, if decoding to string
    # Location where the original content was found
    # Represent location on the local file system
    # Useful for situations where downstream code assumes it must work with file paths
    # rather than in-memory content.
    path: Optional[PathLike] = None

    class Config:
        arbitrary_types_allowed = True
        frozen = True

    @property
    def source(self) -> Optional[str]:
        """The source location of the blob as string if known otherwise none."""
        return str(self.path) if self.path else None

    @root_validator(pre=True)
    def check_blob_is_valid(cls, values: Mapping[str, Any]) -> Mapping[str, Any]:
        """Verify that either data or path is provided."""
        if "data" not in values and "path" not in values:
            raise ValueError("Either data or path must be provided")
        return values

    def as_string(self) -> str:
        """Read data as a string."""
        if self.data is None and self.path:
            with open(str(self.path), "r", encoding=self.encoding) as f:
                return f.read()
        elif isinstance(self.data, bytes):
            return self.data.decode(self.encoding)
        elif isinstance(self.data, str):
            return self.data
        else:
            raise ValueError(f"Unable to get string for blob {self}")

    def as_bytes(self) -> bytes:
        """Read data as bytes."""
        if isinstance(self.data, bytes):
            return self.data
        elif isinstance(self.data, str):
            return self.data.encode(self.encoding)
        elif self.data is None and self.path:
            with open(str(self.path), "rb") as f:
                return f.read()
        else:
            raise ValueError(f"Unable to get bytes for blob {self}")

    @contextlib.contextmanager
    def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]:
        """Read data as a byte stream."""
        if isinstance(self.data, bytes):
            yield BytesIO(self.data)
        elif self.data is None and self.path:
            with open(str(self.path), "rb") as f:
                yield f
        else:
            raise NotImplementedError(f"Unable to convert blob {self}")

    @classmethod
    def from_path(
        cls,
        path: PathLike,
        *,
        encoding: str = "utf-8",
        mime_type: Optional[str] = None,
        guess_type: bool = True,
    ) -> Blob:
        """Load the blob from a path like object.

        Args:
            path: path like object to file to be read
            encoding: Encoding to use if decoding the bytes into a string
            mime_type: if provided, will be set as the mime-type of the data
            guess_type: If True, the mimetype will be guessed from the file extension,
                if a mime-type was not provided

        Returns:
            Blob instance
        """
        if mime_type is None and guess_type:
            _mimetype = mimetypes.guess_type(path)[0] if guess_type else None
        else:
            _mimetype = mime_type
        # We do not load the data immediately, instead we treat the blob as a
        # reference to the underlying data.
        return cls(data=None, mimetype=_mimetype, encoding=encoding, path=path)

    @classmethod
    def from_data(
        cls,
        data: Union[str, bytes],
        *,
        encoding: str = "utf-8",
        mime_type: Optional[str] = None,
        path: Optional[str] = None,
    ) -> Blob:
        """Initialize the blob from in-memory data.

        Args:
            data: the in-memory data associated with the blob
            encoding: Encoding to use if decoding the bytes into a string
            mime_type: if provided, will be set as the mime-type of the data
            path: if provided, will be set as the source from which the data came

        Returns:
            Blob instance
        """
        return cls(data=data, mimetype=mime_type, encoding=encoding, path=path)

    def __repr__(self) -> str:
        """Define the blob representation."""
        str_repr = f"Blob {id(self)}"
        if self.source:
            str_repr += f" {self.source}"
        return str_repr


class BlobLoader(ABC):
    """Abstract interface for blob loaders implementation.

    Implementer should be able to load raw content from a storage system according
    to some criteria and return the raw content lazily as a stream of blobs.
    """

    @abstractmethod
    def yield_blobs(
        self,
    ) -> Iterable[Blob]:
        """A lazy loader for raw data represented by LangChain's Blob object.

        Returns:
            A generator over blobs
        """
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/schema.html
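A minimal sketch of the two Blob constructors defined above (file name hypothetical); `from_path` only records a reference, so the file is read lazily on first access:

from langchain.document_loaders.blob_loaders.schema import Blob

by_value = Blob.from_data("hello world", mime_type="text/plain")
print(by_value.as_bytes())       # b'hello world'

by_reference = Blob.from_path("notes.txt")  # nothing is read yet
print(by_reference.mimetype)     # guessed from the extension: 'text/plain'
print(by_reference.as_string())  # file contents are read here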
df7893028286-0
Source code for langchain.embeddings.llamacpp

"""Wrapper around llama.cpp embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, Field, root_validator

from langchain.embeddings.base import Embeddings


class LlamaCppEmbeddings(BaseModel, Embeddings):
    """Wrapper around llama.cpp embedding models.

    To use, you should have the llama-cpp-python library installed, and provide the
    path to the Llama model as a named parameter to the constructor.
    Check out: https://github.com/abetlen/llama-cpp-python

    Example:
        .. code-block:: python

            from langchain.embeddings import LlamaCppEmbeddings
            llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
    """

    client: Any  #: :meta private:
    model_path: str

    n_ctx: int = Field(512, alias="n_ctx")
    """Token context window."""

    n_parts: int = Field(-1, alias="n_parts")
    """Number of parts to split the model into.
    If -1, the number of parts is automatically determined."""

    seed: int = Field(-1, alias="seed")
    """Seed. If -1, a random seed is used."""

    f16_kv: bool = Field(False, alias="f16_kv")
    """Use half-precision for key/value cache."""

    logits_all: bool = Field(False, alias="logits_all")
    """Return logits for all tokens, not just the last token."""

    vocab_only: bool = Field(False, alias="vocab_only")
    """Only load the vocabulary, no weights."""

    use_mlock: bool = Field(False, alias="use_mlock")
    """Force system to keep model in RAM."""

    n_threads: Optional[int] = Field(None, alias="n_threads")
    """Number of threads to use. If None, the number
    of threads is automatically determined."""

    n_batch: Optional[int] = Field(8, alias="n_batch")
    """Number of tokens to process in parallel.
    Should be a number between 1 and n_ctx."""

    n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
    """Number of layers to be loaded into gpu memory. Default None."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that llama-cpp-python library is installed."""
        model_path = values["model_path"]
        model_param_names = [
            "n_ctx",
            "n_parts",
            "seed",
            "f16_kv",
            "logits_all",
            "vocab_only",
            "use_mlock",
            "n_threads",
            "n_batch",
        ]
        model_params = {k: values[k] for k in model_param_names}
        # For backwards compatibility, only include if non-null.
        if values["n_gpu_layers"] is not None:
            model_params["n_gpu_layers"] = values["n_gpu_layers"]

        try:
            from llama_cpp import Llama

            values["client"] = Llama(model_path, embedding=True, **model_params)
        except ImportError:
            raise ModuleNotFoundError(
                "Could not import llama-cpp-python library. "
                "Please install the llama-cpp-python library to "
                "use this embedding model: pip install llama-cpp-python"
            )
        except Exception as e:
            raise ValueError(
                f"Could not load Llama model from path: {model_path}. "
                f"Received error {e}"
            )

        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using the Llama model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = [self.client.embed(text) for text in texts]
        return [list(map(float, e)) for e in embeddings]

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using the Llama model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = self.client.embed(text)
        return list(map(float, embedding))
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/llamacpp.html
9b32e2c98154-0
Source code for langchain.embeddings.aleph_alpha

from typing import Any, Dict, List, Optional

from pydantic import BaseModel, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env


class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
    """
    Wrapper for Aleph Alpha's Asymmetric Embeddings
    AA provides you with an endpoint to embed a document and a query.
    The models were optimized to make the embeddings of documents and
    the query for a document as similar as possible.
    To learn more, check out: https://docs.aleph-alpha.com/docs/tasks/semantic_embed/

    Example:
        .. code-block:: python

            from langchain.embeddings.aleph_alpha import (
                AlephAlphaAsymmetricSemanticEmbedding,
            )

            embeddings = AlephAlphaAsymmetricSemanticEmbedding()

            document = "This is the content of the document"
            query = "What is the content of the document?"

            doc_result = embeddings.embed_documents([document])
            query_result = embeddings.embed_query(query)
    """

    client: Any  #: :meta private:
    model: Optional[str] = "luminous-base"
    """Model name to use."""
    hosting: Optional[str] = "https://api.aleph-alpha.com"
    """Optional parameter that specifies which datacenters may process the request."""
    normalize: Optional[bool] = True
    """Should returned embeddings be normalized"""
    compress_to_size: Optional[int] = 128
    """Should the returned embeddings come back as an original 5120-dim vector,
    or should it be compressed to 128-dim."""
    contextual_control_threshold: Optional[int] = None
    """Attention control parameters only apply to those tokens that have
    explicitly been set in the request."""
    control_log_additive: Optional[bool] = True
    """Apply controls on prompt items by adding the log(control_factor)
    to attention scores."""
    aleph_alpha_api_key: Optional[str] = None
    """API key for Aleph Alpha API."""

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        aleph_alpha_api_key = get_from_dict_or_env(
            values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY"
        )
        try:
            from aleph_alpha_client import Client
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        values["client"] = Client(token=aleph_alpha_api_key)
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Aleph Alpha's asymmetric Document endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        document_embeddings = []

        for text in texts:
            document_params = {
                "prompt": Prompt.from_text(text),
                "representation": SemanticRepresentation.Document,
                "compress_to_size": self.compress_to_size,
                "normalize": self.normalize,
                "contextual_control_threshold": self.contextual_control_threshold,
                "control_log_additive": self.control_log_additive,
            }

            document_request = SemanticEmbeddingRequest(**document_params)
            document_response = self.client.semantic_embed(
                request=document_request, model=self.model
            )

            document_embeddings.append(document_response.embedding)

        return document_embeddings

    def embed_query(self, text: str) -> List[float]:
        """Call out to Aleph Alpha's asymmetric, query embedding endpoint

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        symmetric_params = {
            "prompt": Prompt.from_text(text),
            "representation": SemanticRepresentation.Query,
            "compress_to_size": self.compress_to_size,
            "normalize": self.normalize,
            "contextual_control_threshold": self.contextual_control_threshold,
            "control_log_additive": self.control_log_additive,
        }

        symmetric_request = SemanticEmbeddingRequest(**symmetric_params)
        symmetric_response = self.client.semantic_embed(
            request=symmetric_request, model=self.model
        )

        return symmetric_response.embedding


class AlephAlphaSymmetricSemanticEmbedding(AlephAlphaAsymmetricSemanticEmbedding):
    """The symmetric version of the Aleph Alpha's semantic embeddings.

    The main difference is that here, both the documents and
    queries are embedded with a SemanticRepresentation.Symmetric

    Example:
        .. code-block:: python

            from langchain.embeddings.aleph_alpha import (
                AlephAlphaSymmetricSemanticEmbedding,
            )

            embeddings = AlephAlphaSymmetricSemanticEmbedding()

            text = "This is a test text"

            doc_result = embeddings.embed_documents([text])
            query_result = embeddings.embed_query(text)
    """

    def _embed(self, text: str) -> List[float]:
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        query_params = {
            "prompt": Prompt.from_text(text),
            "representation": SemanticRepresentation.Symmetric,
            "compress_to_size": self.compress_to_size,
            "normalize": self.normalize,
            "contextual_control_threshold": self.contextual_control_threshold,
            "control_log_additive": self.control_log_additive,
        }

        query_request = SemanticEmbeddingRequest(**query_params)
        query_response = self.client.semantic_embed(
            request=query_request, model=self.model
        )

        return query_response.embedding

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Aleph Alpha's Document endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        document_embeddings = []

        for text in texts:
            document_embeddings.append(self._embed(text))
        return document_embeddings

    def embed_query(self, text: str) -> List[float]:
        """Call out to Aleph Alpha's symmetric embedding endpoint

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embed(text)
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/aleph_alpha.html
1563c4147e59-0
Source code for langchain.embeddings.elasticsearch

from __future__ import annotations

from typing import TYPE_CHECKING, List, Optional

from langchain.utils import get_from_env

if TYPE_CHECKING:
    from elasticsearch import Elasticsearch
    from elasticsearch.client import MlClient

from langchain.embeddings.base import Embeddings


class ElasticsearchEmbeddings(Embeddings):
    """
    Wrapper around Elasticsearch embedding models.

    This class provides an interface to generate embeddings using a model deployed
    in an Elasticsearch cluster. It requires an Elasticsearch connection object
    and the model_id of the model deployed in the cluster.

    In Elasticsearch you need to have an embedding model loaded and deployed.
    - https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html
    - https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html
    """  # noqa: E501

    def __init__(
        self,
        client: MlClient,
        model_id: str,
        *,
        input_field: str = "text_field",
    ):
        """
        Initialize the ElasticsearchEmbeddings instance.

        Args:
            client (MlClient): An Elasticsearch ML client object.
            model_id (str): The model_id of the model deployed in the Elasticsearch
                cluster.
            input_field (str): The name of the key for the input text field in the
                document. Defaults to 'text_field'.
        """
        self.client = client
        self.model_id = model_id
        self.input_field = input_field

    @classmethod
    def from_credentials(
        cls,
        model_id: str,
        *,
        es_cloud_id: Optional[str] = None,
        es_user: Optional[str] = None,
        es_password: Optional[str] = None,
        input_field: str = "text_field",
    ) -> ElasticsearchEmbeddings:
        """Instantiate embeddings from Elasticsearch credentials.

        Args:
            model_id (str): The model_id of the model deployed in the Elasticsearch
                cluster.
            input_field (str): The name of the key for the input text field in the
                document. Defaults to 'text_field'.
            es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to.
            es_user: (str, optional): Elasticsearch username.
            es_password: (str, optional): Elasticsearch password.

        Example:
            .. code-block:: python

                from langchain.embeddings import ElasticsearchEmbeddings

                # Define the model ID and input field name (if different from default)
                model_id = "your_model_id"
                # Optional, only if different from 'text_field'
                input_field = "your_input_field"

                # Credentials can be passed in two ways. Either set the env vars
                # ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically
                # pulled in, or pass them in directly as kwargs.
                embeddings = ElasticsearchEmbeddings.from_credentials(
                    model_id,
                    input_field=input_field,
                    # es_cloud_id="foo",
                    # es_user="bar",
                    # es_password="baz",
                )

                documents = [
                    "This is an example document.",
                    "Another example document to generate embeddings for.",
                ]
                embeddings.embed_documents(documents)
        """
        try:
            from elasticsearch import Elasticsearch
            from elasticsearch.client import MlClient
        except ImportError:
            raise ImportError(
                "elasticsearch package not found, please install with 'pip install "
                "elasticsearch'"
            )

        es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID")
        es_user = es_user or get_from_env("es_user", "ES_USER")
        es_password = es_password or get_from_env("es_password", "ES_PASSWORD")

        # Connect to Elasticsearch
        es_connection = Elasticsearch(
            cloud_id=es_cloud_id, basic_auth=(es_user, es_password)
        )
        client = MlClient(es_connection)
        return cls(client, model_id, input_field=input_field)

    @classmethod
    def from_es_connection(
        cls,
        model_id: str,
        es_connection: Elasticsearch,
        input_field: str = "text_field",
    ) -> ElasticsearchEmbeddings:
        """
        Instantiate embeddings from an existing Elasticsearch connection.

        This method provides a way to create an instance of the
        ElasticsearchEmbeddings class using an existing Elasticsearch connection.
        The connection object is used to create an MlClient, which is then used to
        initialize the ElasticsearchEmbeddings instance.

        Args:
            model_id (str): The model_id of the model deployed in the Elasticsearch
                cluster.
            es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch
                connection object.
            input_field (str, optional): The name of the key for the input text
                field in the document. Defaults to 'text_field'.

        Returns:
            ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings
                class.

        Example:
            .. code-block:: python

                from elasticsearch import Elasticsearch
                from langchain.embeddings import ElasticsearchEmbeddings

                # Define the model ID and input field name (if different from default)
                model_id = "your_model_id"
                # Optional, only if different from 'text_field'
                input_field = "your_input_field"

                # Create Elasticsearch connection
                es_connection = Elasticsearch(
                    hosts=["localhost:9200"], http_auth=("user", "password")
                )

                # Instantiate ElasticsearchEmbeddings using the existing connection
                embeddings = ElasticsearchEmbeddings.from_es_connection(
                    model_id,
                    es_connection,
                    input_field=input_field,
                )

                documents = [
                    "This is an example document.",
                    "Another example document to generate embeddings for.",
                ]
                embeddings.embed_documents(documents)
        """
        # Importing MlClient from elasticsearch.client within the method to
        # avoid unnecessary import if the method is not used
        from elasticsearch.client import MlClient

        # Create an MlClient from the given Elasticsearch connection
        client = MlClient(es_connection)

        # Return a new instance of the ElasticsearchEmbeddings class with
        # the MlClient, model_id, and input_field
        return cls(client, model_id, input_field=input_field)

    def _embedding_func(self, texts: List[str]) -> List[List[float]]:
        """
        Generate embeddings for the given texts using the Elasticsearch model.

        Args:
            texts (List[str]): A list of text strings to generate embeddings for.

        Returns:
            List[List[float]]: A list of embeddings, one for each text in the
                input list.
        """
        response = self.client.infer_trained_model(
            model_id=self.model_id, docs=[{self.input_field: text} for text in texts]
        )

        embeddings = [doc["predicted_value"] for doc in response["inference_results"]]
        return embeddings

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        Generate embeddings for a list of documents.

        Args:
            texts (List[str]): A list of document text strings to generate
                embeddings for.

        Returns:
            List[List[float]]: A list of embeddings, one for each document in the
                input list.
        """
        return self._embedding_func(texts)

    def embed_query(self, text: str) -> List[float]:
        """
        Generate an embedding for a single query text.

        Args:
            text (str): The query text to generate an embedding for.

        Returns:
            List[float]: The embedding for the input query text.
        """
        return self._embedding_func([text])[0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/elasticsearch.html
4670330e2570-0
Source code for langchain.embeddings.cohere

"""Wrapper around Cohere embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env


class CohereEmbeddings(BaseModel, Embeddings):
    """Wrapper around Cohere embedding models.

    To use, you should have the ``cohere`` python package installed, and the
    environment variable ``COHERE_API_KEY`` set with your API key or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import CohereEmbeddings
            cohere = CohereEmbeddings(
                model="embed-english-light-v2.0", cohere_api_key="my-api-key"
            )
    """

    client: Any  #: :meta private:
    model: str = "embed-english-v2.0"
    """Model name to use."""

    truncate: Optional[str] = None
    """Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""

    cohere_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        cohere_api_key = get_from_dict_or_env(
            values, "cohere_api_key", "COHERE_API_KEY"
        )
        try:
            import cohere

            values["client"] = cohere.Client(cohere_api_key)
        except ImportError:
            raise ValueError(
                "Could not import cohere python package. "
                "Please install it with `pip install cohere`."
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Cohere's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = self.client.embed(
            model=self.model, texts=texts, truncate=self.truncate
        ).embeddings
        return [list(map(float, e)) for e in embeddings]

    def embed_query(self, text: str) -> List[float]:
        """Call out to Cohere's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = self.client.embed(
            model=self.model, texts=[text], truncate=self.truncate
        ).embeddings[0]
        return list(map(float, embedding))
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/cohere.html
2841e7409197-0
Source code for langchain.embeddings.dashscope

"""Wrapper around DashScope embedding models."""
from __future__ import annotations

import logging
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Optional,
)

from pydantic import BaseModel, Extra, root_validator
from requests.exceptions import HTTPError
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


def _create_retry_decorator(embeddings: DashScopeEmbeddings) -> Callable[[Any], Any]:
    multiplier = 1
    min_seconds = 1
    max_seconds = 4
    # Wait 2^x * 1 second between each retry starting with
    # 1 second, then up to 4 seconds, then 4 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier, min=min_seconds, max=max_seconds),
        retry=(retry_if_exception_type(HTTPError)),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def embed_with_retry(embeddings: DashScopeEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    retry_decorator = _create_retry_decorator(embeddings)

    @retry_decorator
    def _embed_with_retry(**kwargs: Any) -> Any:
        resp = embeddings.client.call(**kwargs)
        if resp.status_code == 200:
            return resp.output["embeddings"]
        elif resp.status_code in [400, 401]:
            raise ValueError(
                f"status_code: {resp.status_code} \n "
                f"code: {resp.code} \n message: {resp.message}"
            )
        else:
            raise HTTPError(
                f"HTTP error occurred: status_code: {resp.status_code} \n "
                f"code: {resp.code} \n message: {resp.message}"
            )

    return _embed_with_retry(**kwargs)


class DashScopeEmbeddings(BaseModel, Embeddings):
    """Wrapper around DashScope embedding models.

    To use, you should have the ``dashscope`` python package installed, and the
    environment variable ``DASHSCOPE_API_KEY`` set with your API key or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import DashScopeEmbeddings
            embeddings = DashScopeEmbeddings(dashscope_api_key="my-api-key")

    Example:
        .. code-block:: python

            import os
            os.environ["DASHSCOPE_API_KEY"] = "your DashScope API KEY"

            from langchain.embeddings.dashscope import DashScopeEmbeddings
            embeddings = DashScopeEmbeddings(
                model="text-embedding-v1",
            )
            text = "This is a test query."
            query_result = embeddings.embed_query(text)
    """

    client: Any  #: :meta private:
    model: str = "text-embedding-v1"
    dashscope_api_key: Optional[str] = None
    max_retries: int = 5
    """Maximum number of retries to make when generating."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["dashscope_api_key"] = get_from_dict_or_env(
            values, "dashscope_api_key", "DASHSCOPE_API_KEY"
        )
        try:
            import dashscope

            dashscope.api_key = values["dashscope_api_key"]
            values["client"] = dashscope.TextEmbedding
        except ImportError:
            raise ImportError(
                "Could not import dashscope python package. "
                "Please install it with `pip install dashscope`."
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to DashScope's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = embed_with_retry(
            self, input=texts, text_type="document", model=self.model
        )
        embedding_list = [item["embedding"] for item in embeddings]
        return embedding_list

    def embed_query(self, text: str) -> List[float]:
        """Call out to DashScope's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embedding = embed_with_retry(
            self, input=text, text_type="query", model=self.model
        )[0]["embedding"]
        return embedding
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/dashscope.html
48ed439d5783-0
Source code for langchain.embeddings.modelscope_hub

"""Wrapper around ModelScope Hub embedding models."""
from typing import Any, List

from pydantic import BaseModel, Extra

from langchain.embeddings.base import Embeddings


class ModelScopeEmbeddings(BaseModel, Embeddings):
    """Wrapper around modelscope_hub embedding models.

    To use, you should have the ``modelscope`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import ModelScopeEmbeddings
            model_id = "damo/nlp_corom_sentence-embedding_english-base"
            embed = ModelScopeEmbeddings(model_id=model_id)
    """

    embed: Any
    model_id: str = "damo/nlp_corom_sentence-embedding_english-base"
    """Model name to use."""

    def __init__(self, **kwargs: Any):
        """Initialize the modelscope embedding pipeline."""
        super().__init__(**kwargs)
        try:
            from modelscope.pipelines import pipeline
            from modelscope.utils.constant import Tasks

            self.embed = pipeline(Tasks.sentence_embedding, model=self.model_id)
        except ImportError as e:
            raise ImportError(
                "Could not import modelscope python package. "
                "Please install it with `pip install modelscope`."
            ) from e

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a modelscope embedding model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        inputs = {"source_sentence": texts}
        embeddings = self.embed(input=inputs)["text_embedding"]
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a modelscope embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        inputs = {"source_sentence": [text]}
        embedding = self.embed(input=inputs)["text_embedding"][0]
        return embedding.tolist()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/modelscope_hub.html
ef46c4b0034c-0
Source code for langchain.embeddings.minimax """Wrapper around MiniMax APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional import requests from pydantic import BaseModel, Extra, root_validator from tenacity import ( before_sleep_log, retry, stop_after_attempt, wait_exponential, ) from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator() -> Callable[[Any], Any]: """Returns a tenacity retry decorator.""" multiplier = 1 min_seconds = 1 max_seconds = 4 max_retries = 6 return retry( reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds), before_sleep=before_sleep_log(logger, logging.WARNING), ) def embed_with_retry(embeddings: MiniMaxEmbeddings, *args: Any, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator def _embed_with_retry(*args: Any, **kwargs: Any) -> Any: return embeddings.embed(*args, **kwargs) return _embed_with_retry(*args, **kwargs) [docs]class MiniMaxEmbeddings(BaseModel, Embeddings): """Wrapper around MiniMax's embedding inference service. To use, you should have the environment variable ``MINIMAX_GROUP_ID`` and ``MINIMAX_API_KEY`` set with your API token, or pass it as a named parameter to the constructor.
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/minimax.html
ef46c4b0034c-1
the constructor. Example: .. code-block:: python from langchain.embeddings import MiniMaxEmbeddings embeddings = MiniMaxEmbeddings() query_text = "This is a test query." query_result = embeddings.embed_query(query_text) document_text = "This is a test document." document_result = embeddings.embed_documents([document_text]) """ endpoint_url: str = "https://api.minimax.chat/v1/embeddings" """Endpoint URL to use.""" model: str = "embo-01" """Embeddings model name to use.""" embed_type_db: str = "db" """For embed_documents""" embed_type_query: str = "query" """For embed_query""" minimax_group_id: Optional[str] = None """Group ID for MiniMax API.""" minimax_api_key: Optional[str] = None """API Key for MiniMax API.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that group id and api key exists in environment.""" minimax_group_id = get_from_dict_or_env( values, "minimax_group_id", "MINIMAX_GROUP_ID" ) minimax_api_key = get_from_dict_or_env( values, "minimax_api_key", "MINIMAX_API_KEY" ) values["minimax_group_id"] = minimax_group_id values["minimax_api_key"] = minimax_api_key return values def embed( self, texts: List[str], embed_type: str,
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/minimax.html
ef46c4b0034c-2
self, texts: List[str], embed_type: str, ) -> List[List[float]]: payload = { "model": self.model, "type": embed_type, "texts": texts, } # HTTP headers for authorization headers = { "Authorization": f"Bearer {self.minimax_api_key}", "Content-Type": "application/json", } params = { "GroupId": self.minimax_group_id, } # send request response = requests.post( self.endpoint_url, params=params, headers=headers, json=payload ) parsed_response = response.json() # check for errors if parsed_response["base_resp"]["status_code"] != 0: raise ValueError( f"MiniMax API returned an error: {parsed_response['base_resp']}" ) embeddings = parsed_response["vectors"] return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a MiniMax embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = embed_with_retry(self, texts=texts, embed_type=self.embed_type_db) return embeddings [docs] def embed_query(self, text: str) -> List[float]: """Embed a query using a MiniMax embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ embeddings = embed_with_retry( self, texts=[text], embed_type=self.embed_type_query ) return embeddings[0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/minimax.html
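A minimal usage sketch, assuming valid MiniMax credentials (the values below are placeholders):

.. code-block:: python

    import os

    from langchain.embeddings import MiniMaxEmbeddings

    # Credentials may come from the environment instead of the constructor.
    os.environ["MINIMAX_GROUP_ID"] = "your-group-id"  # placeholder
    os.environ["MINIMAX_API_KEY"] = "your-api-key"    # placeholder

    embeddings = MiniMaxEmbeddings()

    # Documents are embedded with type "db" and queries with type "query",
    # so the same text can yield different vectors from the two calls.
    doc_vectors = embeddings.embed_documents(["doc one", "doc two"])
    query_vector = embeddings.embed_query("doc one")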
30f90f7e0093-0
Source code for langchain.embeddings.sagemaker_endpoint """Wrapper around Sagemaker InvokeEndpoint API.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.llms.sagemaker_endpoint import ContentHandlerBase class EmbeddingsContentHandler(ContentHandlerBase[List[str], List[List[float]]]): """Content handler for LLM class.""" [docs]class SagemakerEndpointEmbeddings(BaseModel, Embeddings): """Wrapper around custom Sagemaker Inference Endpoints. To use, you must supply the endpoint name from your deployed Sagemaker model & the region where it is deployed. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Sagemaker endpoint. See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html """ """ Example: .. code-block:: python from langchain.embeddings import SagemakerEndpointEmbeddings endpoint_name = ( "my-endpoint-name" ) region_name = ( "us-west-2" ) credentials_profile_name = ( "default" ) se = SagemakerEndpointEmbeddings( endpoint_name=endpoint_name, region_name=region_name, credentials_profile_name=credentials_profile_name ) """
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
30f90f7e0093-1
credentials_profile_name=credentials_profile_name ) """ client: Any #: :meta private: endpoint_name: str = "" """The name of the endpoint from the deployed Sagemaker model. Must be unique within an AWS Region.""" region_name: str = "" """The aws region where the Sagemaker model is deployed, e.g., `us-west-2`.""" credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ content_handler: EmbeddingsContentHandler """The content handler class that provides the input and output transform functions to handle formats between the LLM and the endpoint. """ """ Example: .. code-block:: python from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler class ContentHandler(EmbeddingsContentHandler): content_type = "application/json" accepts = "application/json" def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes: input_str = json.dumps({"prompts": prompts, **model_kwargs}) return input_str.encode('utf-8') def transform_output(self, output: bytes) -> List[List[float]]: response_json = json.loads(output.read().decode("utf-8")) return response_json["vectors"] """ # noqa: E501 model_kwargs: Optional[Dict] = None
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
30f90f7e0093-2
""" # noqa: E501 model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" endpoint_kwargs: Optional[Dict] = None """Optional attributes passed to the invoke_endpoint function. See `boto3`_. docs for more info. .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html> """ class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" try: import boto3 try: if values["credentials_profile_name"] is not None: session = boto3.Session( profile_name=values["credentials_profile_name"] ) else: # use default credentials session = boto3.Session() values["client"] = session.client( "sagemaker-runtime", region_name=values["region_name"] ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e except ImportError: raise ValueError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) return values def _embedding_func(self, texts: List[str]) -> List[List[float]]: """Call out to SageMaker Inference embedding endpoint.""" # replace newlines, which can negatively affect performance.
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
30f90f7e0093-3
# replace newlines, which can negatively affect performance. texts = list(map(lambda x: x.replace("\n", " "), texts)) _model_kwargs = self.model_kwargs or {} _endpoint_kwargs = self.endpoint_kwargs or {} body = self.content_handler.transform_input(texts, _model_kwargs) content_type = self.content_handler.content_type accepts = self.content_handler.accepts # send request try: response = self.client.invoke_endpoint( EndpointName=self.endpoint_name, Body=body, ContentType=content_type, Accept=accepts, **_endpoint_kwargs, ) except Exception as e: raise ValueError(f"Error raised by inference endpoint: {e}") return self.content_handler.transform_output(response["Body"]) [docs] def embed_documents( self, texts: List[str], chunk_size: int = 64 ) -> List[List[float]]: """Compute doc embeddings using a SageMaker Inference Endpoint. Args: texts: The list of texts to embed. chunk_size: The chunk size defines how many input texts will be grouped together as request. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ results = [] _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size for i in range(0, len(texts), _chunk_size): response = self._embedding_func(texts[i : i + _chunk_size]) results.extend(response) return results [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a SageMaker inference endpoint.
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
30f90f7e0093-4
"""Compute query embeddings using a SageMaker inference endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embedding_func([text])[0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/sagemaker_endpoint.html
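A minimal end-to-end sketch tying the content handler to the embeddings class. The endpoint name and the request/response JSON shapes ("prompts" in, "vectors" out) are assumptions; adapt both transforms to whatever your deployed model actually expects:

.. code-block:: python

    import json
    from typing import Dict, List

    from langchain.embeddings import SagemakerEndpointEmbeddings
    from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler

    class ContentHandler(EmbeddingsContentHandler):
        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompts: List[str], model_kwargs: Dict) -> bytes:
            # Hypothetical payload shape; your model may differ.
            return json.dumps({"prompts": prompts, **model_kwargs}).encode("utf-8")

        def transform_output(self, output: bytes) -> List[List[float]]:
            # Hypothetical response shape; your model may differ.
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json["vectors"]

    se = SagemakerEndpointEmbeddings(
        endpoint_name="my-endpoint-name",  # hypothetical endpoint
        region_name="us-west-2",
        content_handler=ContentHandler(),
    )
    # Texts are sent in batches of at most chunk_size per invocation.
    vectors = se.embed_documents(["first doc", "second doc"], chunk_size=32)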
6d6bcc548a9a-0
Source code for langchain.embeddings.mosaicml """Wrapper around MosaicML APIs.""" from typing import Any, Dict, List, Mapping, Optional, Tuple import requests from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env [docs]class MosaicMLInstructorEmbeddings(BaseModel, Embeddings): """Wrapper around MosaicML's embedding inference service. To use, you should have the environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import MosaicMLInstructorEmbeddings endpoint_url = ( "https://models.hosted-on.mosaicml.hosting/instructor-large/v1/predict" ) mosaic_emb = MosaicMLInstructorEmbeddings( endpoint_url=endpoint_url, mosaicml_api_token="my-api-key" ) """ endpoint_url: str = ( "https://models.hosted-on.mosaicml.hosting/instructor-xl/v1/predict" ) """Endpoint URL to use.""" embed_instruction: str = "Represent the document for retrieval: " """Instruction used to embed documents.""" query_instruction: str = ( "Represent the question for retrieving supporting documents: " ) """Instruction used to embed the query.""" retry_sleep: float = 1.0 """How long to sleep (in seconds) if a rate limit is encountered.""" mosaicml_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/mosaicml.html
6d6bcc548a9a-1
"""Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" mosaicml_api_token = get_from_dict_or_env( values, "mosaicml_api_token", "MOSAICML_API_TOKEN" ) values["mosaicml_api_token"] = mosaicml_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {"endpoint_url": self.endpoint_url} def _embed( self, input: List[Tuple[str, str]], is_retry: bool = False ) -> List[List[float]]: payload = {"input_strings": input} # HTTP headers for authorization headers = { "Authorization": f"{self.mosaicml_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post(self.endpoint_url, headers=headers, json=payload) except requests.exceptions.RequestException as e: raise ValueError(f"Error raised by inference endpoint: {e}") try: parsed_response = response.json() if "error" in parsed_response: # if we get rate limited, try sleeping for 1 second if ( not is_retry and "rate limit exceeded" in parsed_response["error"].lower() ): import time time.sleep(self.retry_sleep) return self._embed(input, is_retry=True) raise ValueError( f"Error raised by inference API: {parsed_response['error']}" )
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/mosaicml.html
6d6bcc548a9a-2
f"Error raised by inference API: {parsed_response['error']}" ) # The inference API has changed a couple of times, so we add some handling # to be robust to multiple response formats. if isinstance(parsed_response, dict): if "data" in parsed_response: output_item = parsed_response["data"] elif "output" in parsed_response: output_item = parsed_response["output"] else: raise ValueError( f"No key data or output in response: {parsed_response}" ) if isinstance(output_item, list) and isinstance(output_item[0], list): embeddings = output_item else: embeddings = [output_item] elif isinstance(parsed_response, list): first_item = parsed_response[0] if isinstance(first_item, list): embeddings = parsed_response elif isinstance(first_item, dict): if "output" in first_item: embeddings = [item["output"] for item in parsed_response] else: raise ValueError( f"No key data or output in response: {parsed_response}" ) else: raise ValueError(f"Unexpected response format: {parsed_response}") else: raise ValueError(f"Unexpected response type: {parsed_response}") except requests.exceptions.JSONDecodeError as e: raise ValueError( f"Error raised by inference API: {e}.\nResponse: {response.text}" ) return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a MosaicML deployed instructor embedding model. Args: texts: The list of texts to embed. Returns:
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/mosaicml.html
6d6bcc548a9a-3
Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ instruction_pairs = [(self.embed_instruction, text) for text in texts] embeddings = self._embed(instruction_pairs) return embeddings [docs] def embed_query(self, text: str) -> List[float]: """Embed a query using a MosaicML deployed instructor embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = (self.query_instruction, text) embedding = self._embed([instruction_pair])[0] return embedding
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/mosaicml.html
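A minimal usage sketch, assuming a valid MosaicML API token:

.. code-block:: python

    from langchain.embeddings import MosaicMLInstructorEmbeddings

    emb = MosaicMLInstructorEmbeddings(mosaicml_api_token="my-api-key")

    # Documents and queries are paired with different instructions before
    # being sent, so the service embeds them asymmetrically.
    doc_vectors = emb.embed_documents(["LangChain wraps embedding services."])
    query_vector = emb.embed_query("What does LangChain wrap?")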
54985e2a7fc2-0
Source code for langchain.embeddings.self_hosted_hugging_face """Wrapper around HuggingFace embedding models for self-hosted remote hardware.""" import importlib import logging from typing import Any, Callable, List, Optional from langchain.embeddings.self_hosted import SelfHostedEmbeddings DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2" DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large" DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: " DEFAULT_QUERY_INSTRUCTION = ( "Represent the question for retrieving supporting documents: " ) logger = logging.getLogger(__name__) def _embed_documents(client: Any, *args: Any, **kwargs: Any) -> List[List[float]]: """Inference function to send to the remote hardware. Accepts a sentence_transformer model_id and returns a list of embeddings for each document in the batch. """ return client.encode(*args, **kwargs) def load_embedding_model(model_id: str, instruct: bool = False, device: int = 0) -> Any: """Load the embedding model.""" if not instruct: import sentence_transformers client = sentence_transformers.SentenceTransformer(model_id) else: from InstructorEmbedding import INSTRUCTOR client = INSTRUCTOR(model_id) if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) if device < 0 and cuda_device_count > 0: logger.warning(
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html
54985e2a7fc2-1
if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available " "GPUs for execution. deviceId is -1 for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) client = client.to(device) return client [docs]class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings): """Runs sentence_transformers embedding models on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Example: .. code-block:: python from langchain.embeddings import SelfHostedHuggingFaceEmbeddings import runhouse as rh model_name = "sentence-transformers/all-mpnet-base-v2" gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") hf = SelfHostedHuggingFaceEmbeddings(model_name=model_name, hardware=gpu) """ client: Any #: :meta private: model_id: str = DEFAULT_MODEL_NAME """Model name to use.""" model_reqs: List[str] = ["./", "sentence_transformers", "torch"] """Requirements to install on hardware to inference the model.""" hardware: Any """Remote hardware to send the inference function to.""" model_load_fn: Callable = load_embedding_model
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html
54985e2a7fc2-2
model_load_fn: Callable = load_embedding_model """Function to load the model remotely on the server.""" load_fn_kwargs: Optional[dict] = None """Key word arguments to pass to the model load function.""" inference_fn: Callable = _embed_documents """Inference function to extract the embeddings.""" def __init__(self, **kwargs: Any): """Initialize the remote inference function.""" load_fn_kwargs = kwargs.pop("load_fn_kwargs", {}) load_fn_kwargs["model_id"] = load_fn_kwargs.get("model_id", DEFAULT_MODEL_NAME) load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", False) load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0) super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs) [docs]class SelfHostedHuggingFaceInstructEmbeddings(SelfHostedHuggingFaceEmbeddings): """Runs InstructorEmbedding embedding models on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Example: .. code-block:: python from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings import runhouse as rh model_name = "hkunlp/instructor-large" gpu = rh.cluster(name='rh-a10x', instance_type='A100:1') hf = SelfHostedHuggingFaceInstructEmbeddings( model_name=model_name, hardware=gpu) """
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html
54985e2a7fc2-3
model_name=model_name, hardware=gpu) """ model_id: str = DEFAULT_INSTRUCT_MODEL """Model name to use.""" embed_instruction: str = DEFAULT_EMBED_INSTRUCTION """Instruction to use for embedding documents.""" query_instruction: str = DEFAULT_QUERY_INSTRUCTION """Instruction to use for embedding query.""" model_reqs: List[str] = ["./", "InstructorEmbedding", "torch"] """Requirements to install on hardware to inference the model.""" def __init__(self, **kwargs: Any): """Initialize the remote inference function.""" load_fn_kwargs = kwargs.pop("load_fn_kwargs", {}) load_fn_kwargs["model_id"] = load_fn_kwargs.get( "model_id", DEFAULT_INSTRUCT_MODEL ) load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", True) load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0) super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs) [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace instruct model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ instruction_pairs = [] for text in texts: instruction_pairs.append([self.embed_instruction, text]) embeddings = self.client(self.pipeline_ref, instruction_pairs) return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace instruct model. Args: text: The text to embed. Returns: Embeddings for the text.
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html
54985e2a7fc2-4
Returns: Embeddings for the text. """ instruction_pair = [self.query_instruction, text] embedding = self.client(self.pipeline_ref, [instruction_pair])[0] return embedding.tolist()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted_hugging_face.html
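A minimal usage sketch, assuming ``runhouse`` is installed and the cluster below (name and instance type are placeholders taken from the docstring) is reachable:

.. code-block:: python

    import runhouse as rh

    from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings

    # Hypothetical cluster; any runhouse-reachable GPU host works the same way.
    gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")

    # Defaults to hkunlp/instructor-large with instruct=True on device 0.
    hf = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)

    doc_vectors = hf.embed_documents(["self-hosted instructor embeddings"])
    query_vector = hf.embed_query("what is being embedded here?")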
112bb16d008b-0
Source code for langchain.embeddings.huggingface_hub """Wrapper around HuggingFace Hub embedding models.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env DEFAULT_REPO_ID = "sentence-transformers/all-mpnet-base-v2" VALID_TASKS = ("feature-extraction",) [docs]class HuggingFaceHubEmbeddings(BaseModel, Embeddings): """Wrapper around HuggingFaceHub embedding models. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import HuggingFaceHubEmbeddings repo_id = "sentence-transformers/all-mpnet-base-v2" hf = HuggingFaceHubEmbeddings( repo_id=repo_id, task="feature-extraction", huggingfacehub_api_token="my-api-key", ) """ client: Any #: :meta private: repo_id: str = DEFAULT_REPO_ID """Model name to use.""" task: Optional[str] = "feature-extraction" """Task to call the model with.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict:
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface_hub.html
112bb16d008b-1
@root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.inference_api import InferenceApi repo_id = values["repo_id"] if not repo_id.startswith("sentence-transformers"): raise ValueError( "Currently only 'sentence-transformers' embedding models " f"are supported. Got invalid 'repo_id' {repo_id}." ) client = InferenceApi( repo_id=repo_id, token=huggingfacehub_api_token, task=values.get("task"), ) if client.task not in VALID_TASKS: raise ValueError( f"Got invalid task {client.task}, " f"currently only {VALID_TASKS} are supported" ) values["client"] = client except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) return values [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to HuggingFaceHub's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ # replace newlines, which can negatively affect performance. texts = [text.replace("\n", " ") for text in texts]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface_hub.html
112bb16d008b-2
texts = [text.replace("\n", " ") for text in texts] _model_kwargs = self.model_kwargs or {} responses = self.client(inputs=texts, params=_model_kwargs) return responses [docs] def embed_query(self, text: str) -> List[float]: """Call out to HuggingFaceHub's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embeddings for the text. """ response = self.embed_documents([text])[0] return response
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface_hub.html
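A minimal usage sketch, assuming a valid Hugging Face Hub API token:

.. code-block:: python

    from langchain.embeddings import HuggingFaceHubEmbeddings

    # The repo_id must start with "sentence-transformers" or validation fails.
    hf = HuggingFaceHubEmbeddings(
        repo_id="sentence-transformers/all-mpnet-base-v2",
        huggingfacehub_api_token="my-api-key",
    )

    doc_vectors = hf.embed_documents(["first text", "second text"])
    query_vector = hf.embed_query("first text")  # delegates to embed_documents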
e31f29488f15-0
Source code for langchain.embeddings.deepinfra from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env DEFAULT_MODEL_ID = "sentence-transformers/clip-ViT-B-32" [docs]class DeepInfraEmbeddings(BaseModel, Embeddings): """Wrapper around Deep Infra's embedding inference service. To use, you should have the environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. There are multiple embeddings models available, see https://deepinfra.com/models?type=embeddings. Example: .. code-block:: python from langchain.embeddings import DeepInfraEmbeddings deepinfra_emb = DeepInfraEmbeddings( model_id="sentence-transformers/clip-ViT-B-32", deepinfra_api_token="my-api-key" ) r1 = deepinfra_emb.embed_documents( [ "Alpha is the first letter of Greek alphabet", "Beta is the second letter of Greek alphabet", ] ) r2 = deepinfra_emb.embed_query( "What is the second letter of Greek alphabet" ) """ model_id: str = DEFAULT_MODEL_ID """Embeddings model to use.""" normalize: bool = False """whether to normalize the computed embeddings""" embed_instruction: str = "passage: " """Instruction used to embed documents.""" query_instruction: str = "query: " """Instruction used to embed the query.""" model_kwargs: Optional[dict] = None
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/deepinfra.html
e31f29488f15-1
model_kwargs: Optional[dict] = None """Other model keyword args""" deepinfra_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" deepinfra_api_token = get_from_dict_or_env( values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN" ) values["deepinfra_api_token"] = deepinfra_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {"model_id": self.model_id} def _embed(self, input: List[str]) -> List[List[float]]: _model_kwargs = self.model_kwargs or {} # HTTP headers for authorization headers = { "Authorization": f"bearer {self.deepinfra_api_token}", "Content-Type": "application/json", } # send request try: res = requests.post( f"https://api.deepinfra.com/v1/inference/{self.model_id}", headers=headers, json={"inputs": input, "normalize": self.normalize, **_model_kwargs}, ) except requests.exceptions.RequestException as e: raise ValueError(f"Error raised by inference endpoint: {e}") if res.status_code != 200: raise ValueError( "Error raised by inference API HTTP code: %s, %s" % (res.status_code, res.text) ) try: t = res.json() embeddings = t["embeddings"]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/deepinfra.html
e31f29488f15-2
try: t = res.json() embeddings = t["embeddings"] except requests.exceptions.JSONDecodeError as e: raise ValueError( f"Error raised by inference API: {e}.\nResponse: {res.text}" ) return embeddings [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Embed documents using a Deep Infra deployed embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ instruction_pairs = [f"{self.embed_instruction}{text}" for text in texts] embeddings = self._embed(instruction_pairs) return embeddings [docs] def embed_query(self, text: str) -> List[float]: """Embed a query using a Deep Infra deployed embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = f"{self.query_instruction}{text}" embedding = self._embed([instruction_pair])[0] return embedding
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/deepinfra.html
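A minimal usage sketch, assuming a valid Deep Infra API token:

.. code-block:: python

    from langchain.embeddings import DeepInfraEmbeddings

    emb = DeepInfraEmbeddings(
        deepinfra_api_token="my-api-key",
        normalize=True,  # ask the service to normalize the returned vectors
    )

    # Documents are prefixed with embed_instruction ("passage: ") and
    # queries with query_instruction ("query: ") before the request.
    doc_vectors = emb.embed_documents(["some passage text"])
    query_vector = emb.embed_query("a question about the passage")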
c2be5813d9b8-0
Source code for langchain.embeddings.embaas """Wrapper around embaas embeddings API.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from typing_extensions import NotRequired, TypedDict from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env # Currently supported maximum batch size for embedding requests MAX_BATCH_SIZE = 256 EMBAAS_API_URL = "https://api.embaas.io/v1/embeddings/" class EmbaasEmbeddingsPayload(TypedDict): """Payload for the embaas embeddings API.""" model: str texts: List[str] instruction: NotRequired[str] [docs]class EmbaasEmbeddings(BaseModel, Embeddings): """Wrapper around embaas's embedding service. To use, you should have the environment variable ``EMBAAS_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python # Initialise with default model and instruction from langchain.embeddings import EmbaasEmbeddings emb = EmbaasEmbeddings() # Initialise with custom model and instruction from langchain.embeddings import EmbaasEmbeddings emb_model = "instructor-large" emb_inst = "Represent the Wikipedia document for retrieval" emb = EmbaasEmbeddings( model=emb_model, instruction=emb_inst ) """ model: str = "e5-large-v2" """The model used for embeddings.""" instruction: Optional[str] = None """Instruction used for domain-specific embeddings.""" api_url: str = EMBAAS_API_URL
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/embaas.html
c2be5813d9b8-1
api_url: str = EMBAAS_API_URL """The URL for the embaas embeddings API.""" embaas_api_key: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" embaas_api_key = get_from_dict_or_env( values, "embaas_api_key", "EMBAAS_API_KEY" ) values["embaas_api_key"] = embaas_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying params.""" return {"model": self.model, "instruction": self.instruction} def _generate_payload(self, texts: List[str]) -> EmbaasEmbeddingsPayload: """Generates payload for the API request.""" payload = EmbaasEmbeddingsPayload(texts=texts, model=self.model) if self.instruction: payload["instruction"] = self.instruction return payload def _handle_request(self, payload: EmbaasEmbeddingsPayload) -> List[List[float]]: """Sends a request to the Embaas API and handles the response.""" headers = { "Authorization": f"Bearer {self.embaas_api_key}", "Content-Type": "application/json", } response = requests.post(self.api_url, headers=headers, json=payload) response.raise_for_status() parsed_response = response.json() embeddings = [item["embedding"] for item in parsed_response["data"]] return embeddings
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/embaas.html
c2be5813d9b8-2
return embeddings def _generate_embeddings(self, texts: List[str]) -> List[List[float]]: """Generate embeddings using the Embaas API.""" payload = self._generate_payload(texts) try: return self._handle_request(payload) except requests.exceptions.RequestException as e: if e.response is None or not e.response.text: raise ValueError(f"Error raised by embaas embeddings API: {e}") parsed_response = e.response.json() if "message" in parsed_response: raise ValueError( "Validation Error raised by embaas embeddings API:" f"{parsed_response['message']}" ) raise [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Get embeddings for a list of texts. Args: texts: The list of texts to get embeddings for. Returns: List of embeddings, one for each text. """ batches = [ texts[i : i + MAX_BATCH_SIZE] for i in range(0, len(texts), MAX_BATCH_SIZE) ] embeddings = [self._generate_embeddings(batch) for batch in batches] # flatten the list of lists into a single list return [embedding for batch in embeddings for embedding in batch] [docs] def embed_query(self, text: str) -> List[float]: """Get embeddings for a single text. Args: text: The text to get embeddings for. Returns: List of embeddings. """ return self.embed_documents([text])[0]
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/embaas.html
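A minimal usage sketch showing the batching behavior, assuming a valid embaas API key:

.. code-block:: python

    from langchain.embeddings import EmbaasEmbeddings

    emb = EmbaasEmbeddings(embaas_api_key="my-api-key")

    # Inputs are split into batches of at most MAX_BATCH_SIZE (256) texts;
    # each batch is one request and the results are flattened back in order.
    texts = [f"document {i}" for i in range(300)]
    vectors = emb.embed_documents(texts)  # two requests: 256 texts, then 44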
05f193a1278d-0
Source code for langchain.embeddings.fake from typing import List import numpy as np from pydantic import BaseModel from langchain.embeddings.base import Embeddings [docs]class FakeEmbeddings(Embeddings, BaseModel): size: int def _get_embedding(self) -> List[float]: return list(np.random.normal(size=self.size)) [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: return [self._get_embedding() for _ in texts] [docs] def embed_query(self, text: str) -> List[float]: return self._get_embedding()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/fake.html
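A short sketch of the intended use in tests, where deterministic shapes matter more than values and no network calls should happen:

.. code-block:: python

    from langchain.embeddings import FakeEmbeddings

    fake = FakeEmbeddings(size=8)

    # Vectors are random but always have the configured dimensionality.
    assert len(fake.embed_query("anything")) == 8
    assert len(fake.embed_documents(["a", "b"])) == 2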
004f8f89038f-0
Source code for langchain.embeddings.huggingface """Wrapper around HuggingFace embedding models.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, Field from langchain.embeddings.base import Embeddings DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2" DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large" DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: " DEFAULT_QUERY_INSTRUCTION = ( "Represent the question for retrieving supporting documents: " ) [docs]class HuggingFaceEmbeddings(BaseModel, Embeddings): """Wrapper around sentence_transformers embedding models. To use, you should have the ``sentence_transformers`` python package installed. Example: .. code-block:: python from langchain.embeddings import HuggingFaceEmbeddings model_name = "sentence-transformers/all-mpnet-base-v2" model_kwargs = {'device': 'cpu'} encode_kwargs = {'normalize_embeddings': False} hf = HuggingFaceEmbeddings( model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs ) """ client: Any #: :meta private: model_name: str = DEFAULT_MODEL_NAME """Model name to use.""" cache_folder: Optional[str] = None """Path to store models. Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass to the model.""" encode_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass when calling the `encode` method of the model."""
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
004f8f89038f-1
"""Key word arguments to pass when calling the `encode` method of the model.""" def __init__(self, **kwargs: Any): """Initialize the sentence_transformer.""" super().__init__(**kwargs) try: import sentence_transformers except ImportError as exc: raise ImportError( "Could not import sentence_transformers python package. " "Please install it with `pip install sentence_transformers`." ) from exc self.client = sentence_transformers.SentenceTransformer( self.model_name, cache_folder=self.cache_folder, **self.model_kwargs ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.client.encode(texts, **self.encode_kwargs) return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace transformer model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embedding = self.client.encode(text, **self.encode_kwargs) return embedding.tolist() [docs]class HuggingFaceInstructEmbeddings(BaseModel, Embeddings): """Wrapper around sentence_transformers embedding models. To use, you should have the ``sentence_transformers``
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
004f8f89038f-2
To use, you should have the ``sentence_transformers`` and ``InstructorEmbedding`` python packages installed. Example: .. code-block:: python from langchain.embeddings import HuggingFaceInstructEmbeddings model_name = "hkunlp/instructor-large" model_kwargs = {'device': 'cpu'} encode_kwargs = {'normalize_embeddings': True} hf = HuggingFaceInstructEmbeddings( model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs ) """ client: Any #: :meta private: model_name: str = DEFAULT_INSTRUCT_MODEL """Model name to use.""" cache_folder: Optional[str] = None """Path to store models. Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass to the model.""" encode_kwargs: Dict[str, Any] = Field(default_factory=dict) """Key word arguments to pass when calling the `encode` method of the model.""" embed_instruction: str = DEFAULT_EMBED_INSTRUCTION """Instruction to use for embedding documents.""" query_instruction: str = DEFAULT_QUERY_INSTRUCTION """Instruction to use for embedding query.""" def __init__(self, **kwargs: Any): """Initialize the sentence_transformer.""" super().__init__(**kwargs) try: from InstructorEmbedding import INSTRUCTOR self.client = INSTRUCTOR( self.model_name, cache_folder=self.cache_folder, **self.model_kwargs ) except ImportError as e: raise ValueError("Dependencies for InstructorEmbedding not found.") from e class Config:
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
004f8f89038f-3
raise ValueError("Dependencies for InstructorEmbedding not found.") from e class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace instruct model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ instruction_pairs = [[self.embed_instruction, text] for text in texts] embeddings = self.client.encode(instruction_pairs, **self.encode_kwargs) return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace instruct model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = [self.query_instruction, text] embedding = self.client.encode([instruction_pair], **self.encode_kwargs)[0] return embedding.tolist()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/huggingface.html
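A minimal usage sketch for the instruct variant, assuming ``sentence_transformers`` and ``InstructorEmbedding`` are installed:

.. code-block:: python

    from langchain.embeddings import HuggingFaceInstructEmbeddings

    hf = HuggingFaceInstructEmbeddings(
        model_kwargs={"device": "cpu"},
        encode_kwargs={"normalize_embeddings": True},
    )

    # Each document is paired with embed_instruction and the query with
    # query_instruction, matching the INSTRUCTOR [instruction, text] format.
    doc_vectors = hf.embed_documents(["LangChain supports instruct models."])
    query_vector = hf.embed_query("Which models does LangChain support?")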
97ffc8aaf9fa-0
Source code for langchain.embeddings.tensorflow_hub """Wrapper around TensorflowHub embedding models.""" from typing import Any, List from pydantic import BaseModel, Extra from langchain.embeddings.base import Embeddings DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3" [docs]class TensorflowHubEmbeddings(BaseModel, Embeddings): """Wrapper around tensorflow_hub embedding models. To use, you should have the ``tensorflow_text`` python package installed. Example: .. code-block:: python from langchain.embeddings import TensorflowHubEmbeddings url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3" tf = TensorflowHubEmbeddings(model_url=url) """ embed: Any #: :meta private: model_url: str = DEFAULT_MODEL_URL """Model URL to use.""" def __init__(self, **kwargs: Any): """Initialize the tensorflow_hub and tensorflow_text.""" super().__init__(**kwargs) try: import tensorflow_hub except ImportError: raise ImportError( "Could not import tensorflow-hub python package. " "Please install it with `pip install tensorflow-hub`." ) try: import tensorflow_text # noqa except ImportError: raise ImportError( "Could not import tensorflow_text python package. " "Please install it with `pip install tensorflow_text`." ) self.embed = tensorflow_hub.load(self.model_url) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/tensorflow_hub.html
97ffc8aaf9fa-1
"""Compute doc embeddings using a TensorflowHub embedding model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.embed(texts).numpy() return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a TensorflowHub embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embedding = self.embed([text]).numpy()[0] return embedding.tolist()
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/tensorflow_hub.html
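A minimal usage sketch, assuming ``tensorflow-hub`` and ``tensorflow_text`` are installed (the default model is downloaded on first use):

.. code-block:: python

    from langchain.embeddings import TensorflowHubEmbeddings

    tf_emb = TensorflowHubEmbeddings()  # multilingual universal-sentence-encoder

    doc_vectors = tf_emb.embed_documents(["hello", "bonjour", "hola"])
    query_vector = tf_emb.embed_query("greeting")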
2554362e1fe6-0
Source code for langchain.embeddings.bedrock import json import os from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator from langchain.embeddings.base import Embeddings [docs]class BedrockEmbeddings(BaseModel, Embeddings): """Embeddings provider to invoke Bedrock embedding models. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Bedrock service. """ """ Example: .. code-block:: python from langchain.embeddings import BedrockEmbeddings region_name = "us-east-1" credentials_profile_name = "default" model_id = "amazon.titan-e1t-medium" be = BedrockEmbeddings( credentials_profile_name=credentials_profile_name, region_name=region_name, model_id=model_id ) """ client: Any #: :meta private: region_name: Optional[str] = None """The aws region e.g., `us-west-2`. Falls back to the AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config in case it is not provided here. """ credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance,
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/bedrock.html
2554362e1fe6-1
If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ model_id: str = "amazon.titan-e1t-medium" """Id of the model to call, e.g., amazon.titan-e1t-medium, this is equivalent to the modelId property in the list-foundation-models API""" model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials and the boto3 python package exist in the environment.""" if values["client"] is not None: return values try: import boto3 if values["credentials_profile_name"] is not None: session = boto3.Session(profile_name=values["credentials_profile_name"]) else: # use default credentials session = boto3.Session() client_params = {} if values["region_name"]: client_params["region_name"] = values["region_name"] values["client"] = session.client("bedrock", **client_params) except ImportError: raise ModuleNotFoundError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e return values
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/bedrock.html
2554362e1fe6-2
"profile name are valid." ) from e return values def _embedding_func(self, text: str) -> List[float]: """Call out to Bedrock embedding endpoint.""" # replace newlines, which can negatively affect performance. text = text.replace(os.linesep, " ") _model_kwargs = self.model_kwargs or {} input_body = {**_model_kwargs} input_body["inputText"] = text body = json.dumps(input_body) content_type = "application/json" accepts = "application/json" embeddings = [] try: response = self.client.invoke_model( body=body, modelId=self.model_id, accept=accepts, contentType=content_type, ) response_body = json.loads(response.get("body").read()) embeddings = response_body.get("embedding") except Exception as e: raise ValueError(f"Error raised by inference endpoint: {e}") return embeddings [docs] def embed_documents( self, texts: List[str], chunk_size: int = 1 ) -> List[List[float]]: """Compute doc embeddings using a Bedrock model. Args: texts: The list of texts to embed. chunk_size: Bedrock currently only allows single string inputs, so chunk size is always 1. This input is here only for compatibility with the embeddings interface. Returns: List of embeddings, one for each text. """ results = [] for text in texts: response = self._embedding_func(text) results.append(response) return results [docs] def embed_query(self, text: str) -> List[float]:
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/bedrock.html
2554362e1fe6-3
[docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a Bedrock model. Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embedding_func(text)
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/bedrock.html
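A minimal usage sketch, assuming ``boto3`` is installed and the named profile can access Bedrock:

.. code-block:: python

    from langchain.embeddings import BedrockEmbeddings

    be = BedrockEmbeddings(
        region_name="us-east-1",
        credentials_profile_name="default",
        model_id="amazon.titan-e1t-medium",
    )

    # Bedrock accepts one string per request, so embed_documents simply
    # loops over the texts and issues one invoke_model call per text.
    vectors = be.embed_documents(["first document", "second document"])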
7582608e26e2-0
Source code for langchain.embeddings.openai """Wrapper around OpenAI embedding models.""" from __future__ import annotations import logging from typing import ( Any, Callable, Dict, List, Literal, Optional, Sequence, Set, Tuple, Union, ) import numpy as np from pydantic import BaseModel, Extra, root_validator from tenacity import ( AsyncRetrying, before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]: import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any: import openai
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
7582608e26e2-1
import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards async_retrying = AsyncRetrying( reraise=True, stop=stop_after_attempt(embeddings.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def wrap(func: Callable) -> Callable: async def wrapped_f(*args: Any, **kwargs: Any) -> Callable: async for _ in async_retrying: return await func(*args, **kwargs) raise AssertionError("this is unreachable") return wrapped_f return wrap def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" retry_decorator = _create_retry_decorator(embeddings) @retry_decorator def _embed_with_retry(**kwargs: Any) -> Any: return embeddings.client.create(**kwargs) return _embed_with_retry(**kwargs) async def async_embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any: """Use tenacity to retry the embedding call.""" @_async_retry_decorator(embeddings)
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
7582608e26e2-2
@_async_retry_decorator(embeddings) async def _async_embed_with_retry(**kwargs: Any) -> Any: return await embeddings.client.acreate(**kwargs) return await _async_embed_with_retry(**kwargs) [docs]class OpenAIEmbeddings(BaseModel, Embeddings): """Wrapper around OpenAI embedding models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import OpenAIEmbeddings openai = OpenAIEmbeddings(openai_api_key="my-api-key") In order to use the library with Microsoft Azure endpoints, you need to set the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and OPENAI_API_VERSION. The OPENAI_API_TYPE must be set to 'azure' and the others correspond to the properties of your endpoint. In addition, the deployment name must be passed as the model parameter. Example: .. code-block:: python import os os.environ["OPENAI_API_TYPE"] = "azure" os.environ["OPENAI_API_BASE"] = "https://<your-endpoint>.openai.azure.com/" os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key" os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview" os.environ["OPENAI_PROXY"] = "http://your-corporate-proxy:8080" from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings( deployment="your-embeddings-deployment-name",
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
7582608e26e2-3
deployment="your-embeddings-deployment-name", model="your-embeddings-model-name", openai_api_base="https://your-endpoint.openai.azure.com/", openai_api_type="azure", ) text = "This is a test query." query_result = embeddings.embed_query(text) """ client: Any #: :meta private: model: str = "text-embedding-ada-002" deployment: str = model # to support Azure OpenAI Service custom deployment names openai_api_version: Optional[str] = None # to support Azure OpenAI Service custom endpoints openai_api_base: Optional[str] = None # to support Azure OpenAI Service custom endpoints openai_api_type: Optional[str] = None # to support explicit proxy for OpenAI openai_proxy: Optional[str] = None embedding_ctx_length: int = 8191 openai_api_key: Optional[str] = None openai_organization: Optional[str] = None allowed_special: Union[Literal["all"], Set[str]] = set() disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all" chunk_size: int = 1000 """Maximum number of texts to embed in each batch""" max_retries: int = 6 """Maximum number of retries to make when generating.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout in seconds for the OpenAPI request.""" headers: Any = None tiktoken_model_name: Optional[str] = None """The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
7582608e26e2-4
Tiktoken is used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will be the same as the embedding model name. However, there are some cases where you may want to use this Embedding class with a model name not supported by tiktoken. This can include when using Azure embeddings or when using one of the many model providers that expose an OpenAI-like API but with different models. In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) values["openai_api_base"] = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) values["openai_api_type"] = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) if values["openai_api_type"] in ("azure", "azure_ad", "azuread"): default_api_version = "2022-12-01"
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
7582608e26e2-5
default_api_version = "2022-12-01" else: default_api_version = "" values["openai_api_version"] = get_from_dict_or_env( values, "openai_api_version", "OPENAI_API_VERSION", default=default_api_version, ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai values["client"] = openai.Embedding except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def _invocation_params(self) -> Dict: openai_args = { "engine": self.deployment, "request_timeout": self.request_timeout, "headers": self.headers, "api_key": self.openai_api_key, "organization": self.openai_organization, "api_base": self.openai_api_base, "api_type": self.openai_api_type, "api_version": self.openai_api_version, } if self.openai_proxy: import openai openai.proxy = { "http": self.openai_proxy, "https": self.openai_proxy, } # type: ignore[assignment] # noqa: E501 return openai_args # please refer to # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb def _get_len_safe_embeddings(
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
7582608e26e2-6
def _get_len_safe_embeddings( self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None ) -> List[List[float]]: embeddings: List[List[float]] = [[] for _ in range(len(texts))] try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order for OpenAIEmbeddings to work. " "Please install it with `pip install tiktoken`." ) tokens = [] indices = [] model_name = self.tiktoken_model_name or self.model try: encoding = tiktoken.encoding_for_model(model_name) except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") model = "cl100k_base" encoding = tiktoken.get_encoding(model) for i, text in enumerate(texts): if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") token = encoding.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, ) for j in range(0, len(token), self.embedding_ctx_length): tokens += [token[j : j + self.embedding_ctx_length]] indices += [i] batched_embeddings = [] _chunk_size = chunk_size or self.chunk_size for i in range(0, len(tokens), _chunk_size): response = embed_with_retry( self,
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
7582608e26e2-7
response = embed_with_retry( self, input=tokens[i : i + _chunk_size], **self._invocation_params, ) batched_embeddings += [r["embedding"] for r in response["data"]] results: List[List[List[float]]] = [[] for _ in range(len(texts))] num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))] for i in range(len(indices)): results[indices[i]].append(batched_embeddings[i]) num_tokens_in_batch[indices[i]].append(len(tokens[i])) for i in range(len(texts)): _result = results[i] if len(_result) == 0: average = embed_with_retry( self, input="", **self._invocation_params, )[ "data" ][0]["embedding"] else: average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) embeddings[i] = (average / np.linalg.norm(average)).tolist() return embeddings # please refer to # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb async def _aget_len_safe_embeddings( self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None ) -> List[List[float]]: embeddings: List[List[float]] = [[] for _ in range(len(texts))] try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order for OpenAIEmbeddings to work. " "Please install it with `pip install tiktoken`."
                "Please install it with `pip install tiktoken`."
            )

        tokens = []
        indices = []
        model_name = self.tiktoken_model_name or self.model
        try:
            encoding = tiktoken.encoding_for_model(model_name)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            encoding = tiktoken.get_encoding(model)
        for i, text in enumerate(texts):
            if self.model.endswith("001"):
                # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
                # replace newlines, which can negatively affect performance.
                text = text.replace("\n", " ")
            token = encoding.encode(
                text,
                allowed_special=self.allowed_special,
                disallowed_special=self.disallowed_special,
            )
            for j in range(0, len(token), self.embedding_ctx_length):
                tokens += [token[j : j + self.embedding_ctx_length]]
                indices += [i]

        batched_embeddings = []
        _chunk_size = chunk_size or self.chunk_size
        for i in range(0, len(tokens), _chunk_size):
            response = await async_embed_with_retry(
                self,
                input=tokens[i : i + _chunk_size],
                **self._invocation_params,
            )
            batched_embeddings += [r["embedding"] for r in response["data"]]

        results: List[List[List[float]]] = [[] for _ in range(len(texts))]
        num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
        for i in range(len(indices)):
            results[indices[i]].append(batched_embeddings[i])
            num_tokens_in_batch[indices[i]].append(len(tokens[i]))

        for i in range(len(texts)):
            _result = results[i]
            if len(_result) == 0:
                average = (
                    await async_embed_with_retry(
                        self,
                        input="",
                        **self._invocation_params,
                    )
                )["data"][0]["embedding"]
            else:
                average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
            embeddings[i] = (average / np.linalg.norm(average)).tolist()

        return embeddings

    def _embedding_func(self, text: str, *, engine: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint."""
        # handle large input text
        if len(text) > self.embedding_ctx_length:
            return self._get_len_safe_embeddings([text], engine=engine)[0]
        else:
            if self.model.endswith("001"):
                # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
                # replace newlines, which can negatively affect performance.
                text = text.replace("\n", " ")
            return embed_with_retry(
                self,
                input=[text],
                **self._invocation_params,
            )["data"][0]["embedding"]

    async def _aembedding_func(self, text: str, *, engine: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint."""
        # handle large input text
        if len(text) > self.embedding_ctx_length:
            return (await self._aget_len_safe_embeddings([text], engine=engine))[0]
        else:
            if self.model.endswith("001"):
                # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
                # replace newlines, which can negatively affect performance.
                text = text.replace("\n", " ")
            return (
                await async_embed_with_retry(
                    self,
                    input=[text],
                    **self._invocation_params,
                )
            )["data"][0]["embedding"]

    [docs] def embed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to OpenAI's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk
                size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # NOTE: to keep things simple, we assume the list may contain texts longer
        # than the maximum context and use length-safe embedding function.
        return self._get_len_safe_embeddings(texts, engine=self.deployment)

    [docs] async def aembed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to OpenAI's embedding endpoint async for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk
                size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # NOTE: to keep things simple, we assume the list may contain texts longer
        # than the maximum context and use length-safe embedding function.
        return await self._aget_len_safe_embeddings(texts, engine=self.deployment)

    [docs] def embed_query(self, text: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embedding = self._embedding_func(text, engine=self.deployment)
        return embedding

    [docs] async def aembed_query(self, text: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint async for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embedding = await self._aembedding_func(text, engine=self.deployment)
        return embedding
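The length-safe path above splits each over-long input into token windows of at most embedding_ctx_length, embeds the windows in batches, and then recombines the per-window vectors into one document vector by a token-count-weighted average followed by L2 normalization. A minimal standalone sketch of just that recombination step; the chunk vectors and token counts below are made-up values standing in for real API results:

import numpy as np

# Hypothetical per-window embeddings for one long document and the number
# of tokens each window contained; in the real code these come from the API.
chunk_vectors = [np.array([0.6, 0.8]), np.array([1.0, 0.0]), np.array([0.0, 1.0])]
chunk_token_counts = [512, 512, 100]

# Weighted average: longer windows contribute proportionally more.
average = np.average(chunk_vectors, axis=0, weights=chunk_token_counts)

# L2-normalize so the result is comparable with single-window embeddings.
document_embedding = (average / np.linalg.norm(average)).tolist()
print(document_embedding)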
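For orientation, a typical call into these public methods might look like the following sketch; the model name, API-key handling, and input strings are illustrative assumptions (a standard OpenAI, non-Azure setup), not part of the module above:

import os

from langchain.embeddings import OpenAIEmbeddings

# Illustrative only: assumes a real key is available in the environment.
os.environ.setdefault("OPENAI_API_KEY", "sk-...")

embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")

# embed_documents routes through the length-safe path shown above.
doc_vectors = embeddings.embed_documents(["first document", "second document"])
query_vector = embeddings.embed_query("a search query")
print(len(doc_vectors), len(query_vector))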
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/openai.html
6aeb61bc6e4a-0
Source code for langchain.embeddings.self_hosted
"""Running custom embedding models on self-hosted remote hardware."""
from typing import Any, Callable, List

from pydantic import Extra

from langchain.embeddings.base import Embeddings
from langchain.llms import SelfHostedPipeline


def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
    """Inference function to send to the remote hardware.

    Accepts the loaded pipeline and returns a list of
    embeddings for each document in the batch.
    """
    return pipeline(*args, **kwargs)


[docs]class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings):
    """Runs custom embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified by IP address and SSH credentials
    (such as on-prem, or another cloud like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example using a model load function:
        .. code-block:: python

            from langchain.embeddings import SelfHostedEmbeddings
            from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
            import runhouse as rh

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            def get_pipeline():
                model_id = "facebook/bart-large"
                tokenizer = AutoTokenizer.from_pretrained(model_id)
                model = AutoModelForCausalLM.from_pretrained(model_id)
                return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
            embeddings = SelfHostedEmbeddings(
                model_load_fn=get_pipeline,
                hardware=gpu,
                model_reqs=["./", "torch", "transformers"],
            )

    Example passing in a pipeline path:
        .. code-block:: python

            import pickle

            from langchain.embeddings import SelfHostedHFEmbeddings
            import runhouse as rh
            from transformers import pipeline

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            pipeline = pipeline(model="bert-base-uncased", task="feature-extraction")
            rh.blob(pickle.dumps(pipeline),
                path="models/pipeline.pkl").save().to(gpu, path="models")
            embeddings = SelfHostedHFEmbeddings.from_pipeline(
                pipeline="models/pipeline.pkl",
                hardware=gpu,
                model_reqs=["./", "torch", "transformers"],
            )
    """

    inference_fn: Callable = _embed_documents
    """Inference function to extract the embeddings on the remote hardware."""
    inference_kwargs: Any = None
    """Any kwargs to pass to the model's inference function."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        embeddings = self.client(self.pipeline_ref, texts)
        if not isinstance(embeddings, list):
            return embeddings.tolist()
        return embeddings
    [docs] def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embeddings = self.client(self.pipeline_ref, text)
        if not isinstance(embeddings, list):
            return embeddings.tolist()
        return embeddings
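As a hedged usage sketch: once an embeddings object has been constructed as in the docstring examples above (which requires a live runhouse cluster, so this is not runnable on its own), the two public methods are called like any other Embeddings implementation:

# Presumes `embeddings` was built as in the docstring examples above;
# the inputs are illustrative.
doc_vectors = embeddings.embed_documents(["first document", "second document"])
query_vector = embeddings.embed_query("a search query")

# Both methods strip newlines before inference and coerce array-like
# results from the remote pipeline into plain Python lists.
print(len(doc_vectors), len(query_vector))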
https://api.python.langchain.com/en/latest/_modules/langchain/embeddings/self_hosted.html
0396fe8ac2f6-0
Source code for langchain.callbacks.file
"""Callback Handler that writes to a file."""
from typing import Any, Dict, Optional, TextIO, cast

from langchain.callbacks.base import BaseCallbackHandler
from langchain.input import print_text
from langchain.schema import AgentAction, AgentFinish


[docs]class FileCallbackHandler(BaseCallbackHandler):
    """Callback Handler that writes to a file."""

    def __init__(
        self, filename: str, mode: str = "a", color: Optional[str] = None
    ) -> None:
        """Initialize callback handler."""
        self.file = cast(TextIO, open(filename, mode))
        self.color = color

    def __del__(self) -> None:
        """Destructor to clean up when done."""
        self.file.close()

    [docs] def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Print out that we are entering a chain."""
        class_name = serialized["name"]
        print_text(
            f"\n\n\033[1m> Entering new {class_name} chain...\033[0m",
            end="\n",
            file=self.file,
        )

    [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Print out that we finished a chain."""
        print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)

    [docs] def on_agent_action(
        self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
    ) -> Any:
        """Run on agent action."""
        print_text(action.log, color=color if color else self.color, file=self.file)

    [docs] def on_tool_end(
        self,
        output: str,
        color: Optional[str] = None,
        observation_prefix: Optional[str] = None,
        llm_prefix: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """If not the final action, print out observation."""
        if observation_prefix is not None:
            print_text(f"\n{observation_prefix}", file=self.file)
        print_text(output, color=color if color else self.color, file=self.file)
        if llm_prefix is not None:
            print_text(f"\n{llm_prefix}", file=self.file)

    [docs] def on_text(
        self,
        text: str,
        color: Optional[str] = None,
        end: str = "",
        **kwargs: Any,
    ) -> None:
        """Run on arbitrary text."""
        print_text(text, color=color if color else self.color, end=end, file=self.file)

    [docs] def on_agent_finish(
        self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
    ) -> None:
        """Run on agent end."""
        print_text(
            finish.log, color=color if color else self.color, end="\n", file=self.file
        )
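A short usage sketch: the handler is passed through the callbacks argument so that chain banners and agent output are appended to a log file instead of stdout. The file name, chain, and prompt below are illustrative, and an OpenAI API key is assumed to be configured in the environment:

from langchain.callbacks import FileCallbackHandler
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

# "output.log" is an illustrative file name; mode "a" (the default) appends.
handler = FileCallbackHandler("output.log")

llm = OpenAI()  # assumes OPENAI_API_KEY is set
prompt = PromptTemplate.from_template("1 + {number} = ")
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[handler])
chain.run(number=2)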
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/file.html