Source code for langchain.document_loaders.telegram
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/telegram.html

"""Loader that loads Telegram chat json dump."""
from __future__ import annotations

import asyncio
import json
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

if TYPE_CHECKING:
    import pandas as pd
    from telethon.hints import EntityLike


def concatenate_rows(row: dict) -> str:
    """Combine message information in a readable format ready to be used."""
    date = row["date"]
    sender = row["from"]
    text = row["text"]
    return f"{sender} on {date}: {text}\n\n"


class TelegramChatFileLoader(BaseLoader):
    """Loader that loads a Telegram chat json dump."""

    def __init__(self, path: str):
        """Initialize with path."""
        self.file_path = path

    def load(self) -> List[Document]:
        """Load documents."""
        p = Path(self.file_path)
        with open(p, encoding="utf8") as f:
            d = json.load(f)

        text = "".join(
            concatenate_rows(message)
            for message in d["messages"]
            if message["type"] == "message" and isinstance(message["text"], str)
        )
        metadata = {"source": str(p)}
        return [Document(page_content=text, metadata=metadata)]


def text_to_docs(text: Union[str, List[str]]) -> List[Document]:
    """Convert a string or list of strings to a list of Documents with metadata."""
    if isinstance(text, str):
        # Take a single string as one page
        text = [text]
    page_docs = [Document(page_content=page) for page in text]

    # Add page numbers as metadata
    for i, doc in enumerate(page_docs):
        doc.metadata["page"] = i + 1

    # Split pages into chunks
    doc_chunks = []
    for doc in page_docs:
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=800,
            separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
            chunk_overlap=20,
        )
        chunks = text_splitter.split_text(doc.page_content)
        for i, chunk in enumerate(chunks):
            doc = Document(
                page_content=chunk, metadata={"page": doc.metadata["page"], "chunk": i}
            )
            # Add sources as metadata
            doc.metadata["source"] = f"{doc.metadata['page']}-{doc.metadata['chunk']}"
            doc_chunks.append(doc)
    return doc_chunks


class TelegramChatApiLoader(BaseLoader):
    """Loader that loads Telegram chat data from the Telegram API."""

    def __init__(
        self,
        chat_entity: Optional[EntityLike] = None,
        api_id: Optional[int] = None,
        api_hash: Optional[str] = None,
        username: Optional[str] = None,
        file_path: str = "telegram_data.json",
    ):
        """Initialize with API parameters."""
        self.chat_entity = chat_entity
        self.api_id = api_id
        self.api_hash = api_hash
        self.username = username
        self.file_path = file_path

    async def fetch_data_from_telegram(self) -> None:
        """Fetch data from Telegram API and save it as a JSON file."""
        from telethon.sync import TelegramClient

        data = []
        async with TelegramClient(self.username, self.api_id, self.api_hash) as client:
            async for message in client.iter_messages(self.chat_entity):
                is_reply = message.reply_to is not None
                reply_to_id = message.reply_to.reply_to_msg_id if is_reply else None
                data.append(
                    {
                        "sender_id": message.sender_id,
                        "text": message.text,
                        "date": message.date.isoformat(),
                        "message.id": message.id,
                        "is_reply": is_reply,
                        "reply_to_id": reply_to_id,
                    }
                )

        with open(self.file_path, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=4)

    def _get_message_threads(self, data: pd.DataFrame) -> dict:
        """Create a dictionary of message threads from the given data.

        Args:
            data (pd.DataFrame): A DataFrame containing the conversation data
                with columns:
                - message.sender_id
                - text
                - date
                - message.id
                - is_reply
                - reply_to_id

        Returns:
            dict: A dictionary where the key is the parent message ID and
                the value is a list of message IDs in ascending order.
        """

        def find_replies(parent_id: int, reply_data: pd.DataFrame) -> List[int]:
            """Recursively find all replies to a given parent message ID.

            Args:
                parent_id (int): The parent message ID.
                reply_data (pd.DataFrame): A DataFrame containing reply messages.

            Returns:
                list: A list of message IDs that are replies to the parent message ID.
            """
            # Find direct replies to the parent message ID
            direct_replies = reply_data[reply_data["reply_to_id"] == parent_id][
                "message.id"
            ].tolist()

            # Recursively find replies to the direct replies
            all_replies = []
            for reply_id in direct_replies:
                all_replies += [reply_id] + find_replies(reply_id, reply_data)

            return all_replies

        # Filter out parent messages
        parent_messages = data[~data["is_reply"]]

        # Filter out reply messages and drop rows with NaN in 'reply_to_id'
        reply_messages = data[data["is_reply"]].dropna(subset=["reply_to_id"])

        # Convert 'reply_to_id' to integer
        reply_messages["reply_to_id"] = reply_messages["reply_to_id"].astype(int)

        # Create a dictionary of message threads with parent message IDs as keys
        # and lists of reply message IDs as values
        message_threads = {
            parent_id: [parent_id] + find_replies(parent_id, reply_messages)
            for parent_id in parent_messages["message.id"]
        }

        return message_threads

    def _combine_message_texts(
        self, message_threads: Dict[int, List[int]], data: pd.DataFrame
    ) -> str:
        """Combine the message texts for each parent message ID based
        on the list of message threads.

        Args:
            message_threads (dict): A dictionary where the key is the parent message
                ID and the value is a list of message IDs in ascending order.
            data (pd.DataFrame): A DataFrame containing the conversation data:
                - message.sender_id
                - text
                - date
                - message.id
                - is_reply
                - reply_to_id

        Returns:
            str: A combined string of message texts sorted by date.
        """
        combined_text = ""

        # Iterate through sorted parent message IDs
        for parent_id, message_ids in message_threads.items():
            # Get the message texts for the message IDs and sort them by date
            message_texts = (
                data[data["message.id"].isin(message_ids)]
                .sort_values(by="date")["text"]
                .tolist()
            )
            message_texts = [str(elem) for elem in message_texts]

            # Combine the message texts
            combined_text += " ".join(message_texts) + ".\n"

        return combined_text.strip()

    def load(self) -> List[Document]:
        """Load documents."""
        if self.chat_entity is not None:
            try:
                import nest_asyncio

                nest_asyncio.apply()
                asyncio.run(self.fetch_data_from_telegram())
            except ImportError:
                raise ImportError(
                    "`nest_asyncio` package not found, please install it with "
                    "`pip install nest_asyncio`"
                )

        p = Path(self.file_path)

        with open(p, encoding="utf8") as f:
            d = json.load(f)
        try:
            import pandas as pd
        except ImportError:
            raise ImportError(
                "`pandas` package not found, please install it with "
                "`pip install pandas`"
            )
        normalized_messages = pd.json_normalize(d)
        df = pd.DataFrame(normalized_messages)

        message_threads = self._get_message_threads(df)
        combined_texts = self._combine_message_texts(message_threads, df)

        return text_to_docs(combined_texts)
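A minimal usage sketch for the two loaders above. The export path, chat entity, and API credentials are placeholders, not values from the source; real api_id/api_hash values come from my.telegram.org.

# File-based: point the loader at a Telegram Desktop JSON export
from langchain.document_loaders.telegram import (
    TelegramChatApiLoader,
    TelegramChatFileLoader,
)

file_docs = TelegramChatFileLoader("example_chat.json").load()  # placeholder path

# API-based: fetches messages with telethon, then threads and combines them
api_loader = TelegramChatApiLoader(
    chat_entity="<chat_entity>",  # placeholder channel/chat identifier
    api_id=12345,                 # placeholder api_id
    api_hash="0123456789abcdef",  # placeholder api_hash
    username="session_name",      # telethon session name
)
api_docs = api_loader.load()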
Source code for langchain.document_loaders.email
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/email.html

"""Loader that loads email files."""
import os
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    satisfies_min_unstructured_version,
)


class UnstructuredEmailLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load email files."""

    def _get_elements(self) -> List:
        from unstructured.file_utils.filetype import FileType, detect_filetype

        filetype = detect_filetype(self.file_path)

        if filetype == FileType.EML:
            from unstructured.partition.email import partition_email

            return partition_email(filename=self.file_path, **self.unstructured_kwargs)
        elif satisfies_min_unstructured_version("0.5.8") and filetype == FileType.MSG:
            from unstructured.partition.msg import partition_msg

            return partition_msg(filename=self.file_path, **self.unstructured_kwargs)
        else:
            raise ValueError(
                f"Filetype {filetype} is not supported in UnstructuredEmailLoader."
            )


class OutlookMessageLoader(BaseLoader):
    """Loader that loads Outlook Message files using extract_msg.

    https://github.com/TeamMsgExtractor/msg-extractor
    """

    def __init__(self, file_path: str):
        """Initialize with file path."""
        self.file_path = file_path

        if not os.path.isfile(self.file_path):
            raise ValueError("File path %s is not a valid file" % self.file_path)

        try:
            import extract_msg  # noqa: F401
        except ImportError:
            raise ImportError(
                "extract_msg is not installed. Please install it with "
                "`pip install extract_msg`"
            )

    def load(self) -> List[Document]:
        """Load data into document objects."""
        import extract_msg

        msg = extract_msg.Message(self.file_path)
        return [
            Document(
                page_content=msg.body,
                metadata={
                    "subject": msg.subject,
                    "sender": msg.sender,
                    "date": msg.date,
                },
            )
        ]
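Both loaders take a single file path. A short sketch with hypothetical paths:

from langchain.document_loaders.email import (
    OutlookMessageLoader,
    UnstructuredEmailLoader,
)

eml_docs = UnstructuredEmailLoader("example.eml").load()  # .eml via unstructured
msg_docs = OutlookMessageLoader("example.msg").load()     # .msg via extract_msg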
Source code for langchain.document_loaders.blockchain
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blockchain.html

import os
import re
import time
from enum import Enum
from typing import List, Optional

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class BlockchainType(Enum):
    ETH_MAINNET = "eth-mainnet"
    ETH_GOERLI = "eth-goerli"
    POLYGON_MAINNET = "polygon-mainnet"
    POLYGON_MUMBAI = "polygon-mumbai"


class BlockchainDocumentLoader(BaseLoader):
    """Loads elements from a blockchain smart contract into Langchain documents.

    The supported blockchains are: Ethereum mainnet, Ethereum Goerli testnet,
    Polygon mainnet, and Polygon Mumbai testnet.

    If no BlockchainType is specified, the default is Ethereum mainnet.

    The Loader uses the Alchemy API to interact with the blockchain.
    The ALCHEMY_API_KEY environment variable must be set to use this loader.

    The API returns 100 NFTs per request and can be paginated using the
    startToken parameter.

    If get_all_tokens is set to True, the loader will get all tokens on the
    contract. Note that for contracts with a large number of tokens, this may
    take a long time (e.g. 10k tokens is 100 requests). The default value is
    False for this reason.

    The max_execution_time (sec) can be set to limit the execution time of the
    loader.

    Future versions of this loader can:
    - Support additional Alchemy APIs (e.g. getTransactions, etc.)
    - Support additional blockchain APIs (e.g. Infura, Opensea, etc.)
    """

    def __init__(
        self,
        contract_address: str,
        blockchainType: BlockchainType = BlockchainType.ETH_MAINNET,
        api_key: str = "docs-demo",
        startToken: str = "",
        get_all_tokens: bool = False,
        max_execution_time: Optional[int] = None,
    ):
        self.contract_address = contract_address
        self.blockchainType = blockchainType.value
        self.api_key = os.environ.get("ALCHEMY_API_KEY") or api_key
        self.startToken = startToken
        self.get_all_tokens = get_all_tokens
        self.max_execution_time = max_execution_time

        if not self.api_key:
            raise ValueError("Alchemy API key not provided.")

        if not re.match(r"^0x[a-fA-F0-9]{40}$", self.contract_address):
            raise ValueError(f"Invalid contract address {self.contract_address}")

    def load(self) -> List[Document]:
        result = []

        current_start_token = self.startToken
        start_time = time.time()

        while True:
            url = (
                f"https://{self.blockchainType}.g.alchemy.com/nft/v2/"
                f"{self.api_key}/getNFTsForCollection?withMetadata="
                f"True&contractAddress={self.contract_address}"
                f"&startToken={current_start_token}"
            )

            response = requests.get(url)

            if response.status_code != 200:
                raise ValueError(
                    f"Request failed with status code {response.status_code}"
                )

            items = response.json()["nfts"]

            if not items:
                break

            for item in items:
                content = str(item)
                tokenId = item["id"]["tokenId"]
                metadata = {
                    "source": self.contract_address,
                    "blockchain": self.blockchainType,
                    "tokenId": tokenId,
                }
                result.append(Document(page_content=content, metadata=metadata))

            # exit after the first API call if get_all_tokens is False
            if not self.get_all_tokens:
                break

            # get the start token for the next API call from the last item in array
            current_start_token = self._get_next_tokenId(result[-1].metadata["tokenId"])

            if (
                self.max_execution_time is not None
                and (time.time() - start_time) > self.max_execution_time
            ):
                raise RuntimeError("Execution time exceeded the allowed time limit.")

        if not result:
            raise ValueError(
                f"No NFTs found for contract address {self.contract_address}"
            )

        return result

    # add one to the tokenId, ensuring the correct tokenId format is used
    def _get_next_tokenId(self, tokenId: str) -> str:
        value_type = self._detect_value_type(tokenId)

        if value_type == "hex_0x":
            value_int = int(tokenId, 16)
        elif value_type == "hex_0xbf":
            value_int = int(tokenId[2:], 16)
        else:
            value_int = int(tokenId)

        result = value_int + 1

        if value_type == "hex_0x":
            return "0x" + format(result, "0" + str(len(tokenId) - 2) + "x")
        elif value_type == "hex_0xbf":
            return "0xbf" + format(result, "0" + str(len(tokenId) - 4) + "x")
        else:
            return str(result)

    # A smart contract can use different formats for the tokenId
    @staticmethod
    def _detect_value_type(tokenId: str) -> str:
        if isinstance(tokenId, int):
            return "int"
        # Check the more specific "0xbf" prefix before the generic "0x" prefix;
        # in the original ordering the "hex_0xbf" branch was unreachable.
        elif tokenId.startswith("0xbf"):
            return "hex_0xbf"
        elif tokenId.startswith("0x"):
            return "hex_0x"
        else:
            return "hex_0xbf"
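A hedged usage sketch; the contract address and API key below are placeholders that merely satisfy the loader's validation (a 0x-prefixed, 40-hex-character address), not a real contract:

from langchain.document_loaders.blockchain import (
    BlockchainDocumentLoader,
    BlockchainType,
)

loader = BlockchainDocumentLoader(
    contract_address="0x1234567890abcdef1234567890abcdef12345678",  # placeholder
    blockchainType=BlockchainType.ETH_MAINNET,
    api_key="<alchemy-api-key>",  # placeholder; the ALCHEMY_API_KEY env var also works
    get_all_tokens=False,         # one request of up to 100 NFTs
)
nft_docs = loader.load()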
Source code for langchain.document_loaders.url_playwright
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/url_playwright.html

"""Loader that uses Playwright to load a page, then uses unstructured to
parse the html.
"""
import logging
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class PlaywrightURLLoader(BaseLoader):
    """Loader that uses Playwright to load a page and unstructured to parse the
    html. This is useful for loading pages that require javascript to render.

    Attributes:
        urls (List[str]): List of URLs to load.
        continue_on_failure (bool): If True, continue loading other URLs on failure.
        headless (bool): If True, the browser will run in headless mode.
    """

    def __init__(
        self,
        urls: List[str],
        continue_on_failure: bool = True,
        headless: bool = True,
        remove_selectors: Optional[List[str]] = None,
    ):
        """Load a list of URLs using Playwright and unstructured."""
        try:
            import playwright  # noqa: F401
        except ImportError:
            raise ImportError(
                "playwright package not found, please install it with "
                "`pip install playwright`"
            )

        try:
            import unstructured  # noqa: F401
        except ImportError:
            raise ImportError(
                "unstructured package not found, please install it with "
                "`pip install unstructured`"
            )

        self.urls = urls
        self.continue_on_failure = continue_on_failure
        self.headless = headless
        self.remove_selectors = remove_selectors

    def load(self) -> List[Document]:
        """Load the specified URLs using Playwright and create Document instances.

        Returns:
            List[Document]: A list of Document instances with loaded content.
        """
        from playwright.sync_api import sync_playwright
        from unstructured.partition.html import partition_html

        docs: List[Document] = list()

        with sync_playwright() as p:
            browser = p.chromium.launch(headless=self.headless)
            for url in self.urls:
                try:
                    page = browser.new_page()
                    page.goto(url)

                    for selector in self.remove_selectors or []:
                        elements = page.locator(selector).all()
                        for element in elements:
                            if element.is_visible():
                                element.evaluate("element => element.remove()")

                    page_source = page.content()
                    elements = partition_html(text=page_source)
                    text = "\n\n".join([str(el) for el in elements])
                    metadata = {"source": url}
                    docs.append(Document(page_content=text, metadata=metadata))
                except Exception as e:
                    if self.continue_on_failure:
                        logger.error(
                            f"Error fetching or processing {url}, exception: {e}"
                        )
                    else:
                        raise e
            browser.close()
        return docs
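After `pip install playwright` you typically also run `playwright install` once to download browser binaries. A usage sketch with an illustrative URL and selectors:

from langchain.document_loaders.url_playwright import PlaywrightURLLoader

loader = PlaywrightURLLoader(
    urls=["https://example.com"],
    remove_selectors=["header", "footer"],  # strip page chrome before partitioning
)
docs = loader.load()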
Source code for langchain.document_loaders.airtable
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/airtable.html

from typing import Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class AirtableLoader(BaseLoader):
    """Loader that loads rows from an Airtable table."""

    def __init__(self, api_token: str, table_id: str, base_id: str):
        """Initialize with API token and the IDs for the table and base."""
        self.api_token = api_token
        self.table_id = table_id
        self.base_id = base_id

    def lazy_load(self) -> Iterator[Document]:
        """Lazily load records from the table."""
        from pyairtable import Table

        table = Table(self.api_token, self.base_id, self.table_id)
        records = table.all()
        for record in records:
            # Need to convert record from dict to str
            yield Document(
                page_content=str(record),
                metadata={
                    "source": self.base_id + "_" + self.table_id,
                    "base_id": self.base_id,
                    "table_id": self.table_id,
                },
            )

    def load(self) -> List[Document]:
        """Load all records from the table."""
        return list(self.lazy_load())
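A usage sketch with placeholder IDs (Airtable personal access tokens conventionally start with "pat", base IDs with "app", and table IDs with "tbl"):

from langchain.document_loaders.airtable import AirtableLoader

loader = AirtableLoader(
    api_token="patXXX",  # placeholder personal access token
    table_id="tblXXX",   # placeholder table ID
    base_id="appXXX",    # placeholder base ID
)
for doc in loader.lazy_load():  # one Document per record, streamed
    print(doc.metadata["source"])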
Source code for langchain.document_loaders.googledrive
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/googledrive.html

"""Loader that loads data from Google Drive."""

# Prerequisites:
# 1. Create a Google Cloud project
# 2. Enable the Google Drive API:
#    https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com
# 3. Authorize credentials for desktop app:
#    https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application  # noqa: E501
# 4. For service accounts visit
#    https://cloud.google.com/iam/docs/service-accounts-create

from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union

from pydantic import BaseModel, root_validator, validator

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

SCOPES = ["https://www.googleapis.com/auth/drive.readonly"]


class GoogleDriveLoader(BaseLoader, BaseModel):
    """Loader that loads Google Docs from Google Drive."""

    service_account_key: Path = Path.home() / ".credentials" / "keys.json"
    credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
    token_path: Path = Path.home() / ".credentials" / "token.json"
    folder_id: Optional[str] = None
    document_ids: Optional[List[str]] = None
    file_ids: Optional[List[str]] = None
    recursive: bool = False
    file_types: Optional[Sequence[str]] = None
    load_trashed_files: bool = False

    @root_validator
    def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate that exactly one of folder_id, document_ids, or file_ids is set."""
        if values.get("folder_id") and (
            values.get("document_ids") or values.get("file_ids")
        ):
            raise ValueError(
                "Cannot specify both folder_id and document_ids nor "
                "folder_id and file_ids"
            )
        if (
            not values.get("folder_id")
            and not values.get("document_ids")
            and not values.get("file_ids")
        ):
            raise ValueError("Must specify either folder_id, document_ids, or file_ids")

        file_types = values.get("file_types")
        if file_types:
            if values.get("document_ids") or values.get("file_ids"):
                raise ValueError(
                    "file_types can only be given when folder_id is given "
                    "(not when document_ids or file_ids are given)."
                )
            type_mapping = {
                "document": "application/vnd.google-apps.document",
                "sheet": "application/vnd.google-apps.spreadsheet",
                "pdf": "application/pdf",
            }
            allowed_types = list(type_mapping.keys()) + list(type_mapping.values())
            short_names = ", ".join([f"'{x}'" for x in type_mapping.keys()])
            full_names = ", ".join([f"'{x}'" for x in type_mapping.values()])
            for file_type in file_types:
                if file_type not in allowed_types:
                    raise ValueError(
                        f"Given file type {file_type} is not supported. "
                        f"Supported values are: {short_names}; and "
                        f"their full-form names: {full_names}"
                    )

            # replace short-form file types by full-form file types
            def full_form(x: str) -> str:
                return type_mapping[x] if x in type_mapping else x

            values["file_types"] = [full_form(file_type) for file_type in file_types]
        return values

    @validator("credentials_path")
    def validate_credentials_path(cls, v: Any, **kwargs: Any) -> Any:
        """Validate that credentials_path exists."""
        if not v.exists():
            raise ValueError(f"credentials_path {v} does not exist")
        return v

    def _load_credentials(self) -> Any:
        """Load credentials."""
        # Adapted from https://developers.google.com/drive/api/v3/quickstart/python
        try:
            from google.auth.transport.requests import Request
            from google.oauth2 import service_account
            from google.oauth2.credentials import Credentials
            from google_auth_oauthlib.flow import InstalledAppFlow
        except ImportError:
            raise ImportError(
                "You must run "
                "`pip install --upgrade "
                "google-api-python-client google-auth-httplib2 "
                "google-auth-oauthlib` "
                "to use the Google Drive loader."
            )

        creds = None
        if self.service_account_key.exists():
            return service_account.Credentials.from_service_account_file(
                str(self.service_account_key), scopes=SCOPES
            )

        if self.token_path.exists():
            creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)

        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(
                    str(self.credentials_path), SCOPES
                )
                creds = flow.run_local_server(port=0)
            with open(self.token_path, "w") as token:
                token.write(creds.to_json())

        return creds

    def _load_sheet_from_id(self, id: str) -> List[Document]:
        """Load a sheet and all tabs from an ID."""
        from googleapiclient.discovery import build

        creds = self._load_credentials()
        sheets_service = build("sheets", "v4", credentials=creds)
        spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute()
        sheets = spreadsheet.get("sheets", [])

        documents = []
        for sheet in sheets:
            sheet_name = sheet["properties"]["title"]
            result = (
                sheets_service.spreadsheets()
                .values()
                .get(spreadsheetId=id, range=sheet_name)
                .execute()
            )
            values = result.get("values", [])

            header = values[0]
            for i, row in enumerate(values[1:], start=1):
                metadata = {
                    "source": (
                        f"https://docs.google.com/spreadsheets/d/{id}/"
                        f"edit?gid={sheet['properties']['sheetId']}"
                    ),
                    "title": f"{spreadsheet['properties']['title']} - {sheet_name}",
                    "row": i,
                }
                content = []
                for j, v in enumerate(row):
                    title = header[j].strip() if len(header) > j else ""
                    content.append(f"{title}: {v.strip()}")

                page_content = "\n".join(content)
                documents.append(Document(page_content=page_content, metadata=metadata))

        return documents

    def _load_document_from_id(self, id: str) -> Document:
        """Load a document from an ID."""
        from io import BytesIO

        from googleapiclient.discovery import build
        from googleapiclient.errors import HttpError
        from googleapiclient.http import MediaIoBaseDownload

        creds = self._load_credentials()
        service = build("drive", "v3", credentials=creds)

        file = service.files().get(fileId=id, supportsAllDrives=True).execute()
        request = service.files().export_media(fileId=id, mimeType="text/plain")
        fh = BytesIO()
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        try:
            while done is False:
                status, done = downloader.next_chunk()
        except HttpError as e:
            if e.resp.status == 404:
                print("File not found: {}".format(id))
            else:
                print("An error occurred: {}".format(e))

        text = fh.getvalue().decode("utf-8")
        metadata = {
            "source": f"https://docs.google.com/document/d/{id}/edit",
            "title": f"{file.get('name')}",
        }
        return Document(page_content=text, metadata=metadata)

    def _load_documents_from_folder(
        self, folder_id: str, *, file_types: Optional[Sequence[str]] = None
    ) -> List[Document]:
        """Load documents from a folder."""
        from googleapiclient.discovery import build

        creds = self._load_credentials()
        service = build("drive", "v3", credentials=creds)
        files = self._fetch_files_recursive(service, folder_id)
        # If file types filter is provided, we'll filter by the file type.
        if file_types:
            _files = [f for f in files if f["mimeType"] in file_types]  # type: ignore
        else:
            _files = files

        returns = []
        # iterate over the filtered list; the original iterated over `files`,
        # which silently ignored the file_types filter
        for file in _files:
            if file["trashed"] and not self.load_trashed_files:
                continue
            elif file["mimeType"] == "application/vnd.google-apps.document":
                returns.append(self._load_document_from_id(file["id"]))  # type: ignore
            elif file["mimeType"] == "application/vnd.google-apps.spreadsheet":
                returns.extend(self._load_sheet_from_id(file["id"]))  # type: ignore
            elif file["mimeType"] == "application/pdf":
                returns.extend(self._load_file_from_id(file["id"]))  # type: ignore
            else:
                pass

        return returns

    def _fetch_files_recursive(
        self, service: Any, folder_id: str
    ) -> List[Dict[str, Union[str, List[str]]]]:
        """Fetch all files and subfolders recursively."""
        results = (
            service.files()
            .list(
                q=f"'{folder_id}' in parents",
                pageSize=1000,
                includeItemsFromAllDrives=True,
                supportsAllDrives=True,
                fields="nextPageToken, files(id, name, mimeType, parents, trashed)",
            )
            .execute()
        )
        files = results.get("files", [])
        returns = []
        for file in files:
            if file["mimeType"] == "application/vnd.google-apps.folder":
                if self.recursive:
                    returns.extend(self._fetch_files_recursive(service, file["id"]))
            else:
                returns.append(file)

        return returns

    def _load_documents_from_ids(self) -> List[Document]:
        """Load documents from a list of IDs."""
        if not self.document_ids:
            raise ValueError("document_ids must be set")

        return [self._load_document_from_id(doc_id) for doc_id in self.document_ids]

    def _load_file_from_id(self, id: str) -> List[Document]:
        """Load a file from an ID."""
        from io import BytesIO

        from googleapiclient.discovery import build
        from googleapiclient.http import MediaIoBaseDownload

        creds = self._load_credentials()
        service = build("drive", "v3", credentials=creds)

        file = service.files().get(fileId=id, supportsAllDrives=True).execute()
        request = service.files().get_media(fileId=id)
        fh = BytesIO()
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while done is False:
            status, done = downloader.next_chunk()

        content = fh.getvalue()

        from PyPDF2 import PdfReader

        pdf_reader = PdfReader(BytesIO(content))

        return [
            Document(
                page_content=page.extract_text(),
                metadata={
                    "source": f"https://drive.google.com/file/d/{id}/view",
                    "title": f"{file.get('name')}",
                    "page": i,
                },
            )
            for i, page in enumerate(pdf_reader.pages)
        ]

    def _load_file_from_ids(self) -> List[Document]:
        """Load files from a list of IDs."""
        if not self.file_ids:
            raise ValueError("file_ids must be set")
        docs = []
        for file_id in self.file_ids:
            docs.extend(self._load_file_from_id(file_id))
        return docs

    def load(self) -> List[Document]:
        """Load documents."""
        if self.folder_id:
            return self._load_documents_from_folder(
                self.folder_id, file_types=self.file_types
            )
        elif self.document_ids:
            return self._load_documents_from_ids()
        else:
            return self._load_file_from_ids()
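A usage sketch; the folder ID is a placeholder. Short-form file_types are expanded to full Google MIME types by the root validator above:

from langchain.document_loaders.googledrive import GoogleDriveLoader

loader = GoogleDriveLoader(
    folder_id="<folder-id>",           # placeholder Drive folder ID
    file_types=["document", "sheet"],  # expanded to Google MIME types
    recursive=False,
)
docs = loader.load()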
Source code for langchain.document_loaders.pdf
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/pdf.html

"""Loader that loads PDF files."""
import json
import logging
import os
import tempfile
import time
from abc import ABC
from io import StringIO
from pathlib import Path
from typing import Any, Iterator, List, Mapping, Optional
from urllib.parse import urlparse

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.pdf import (
    PDFMinerParser,
    PDFPlumberParser,
    PyMuPDFParser,
    PyPDFium2Parser,
    PyPDFParser,
)
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__file__)


class UnstructuredPDFLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load PDF files."""

    def _get_elements(self) -> List:
        from unstructured.partition.pdf import partition_pdf

        return partition_pdf(filename=self.file_path, **self.unstructured_kwargs)


class BasePDFLoader(BaseLoader, ABC):
    """Base loader class for PDF files.

    Defaults to checking for a local file, but if the file is a web path, it
    will download it to a temporary file, use that, and then clean up the
    temporary file after completion.
    """

    def __init__(self, file_path: str):
        """Initialize with file path."""
        self.file_path = file_path
        self.web_path = None
        if "~" in self.file_path:
            self.file_path = os.path.expanduser(self.file_path)

        # If the file is a web path, download it to a temporary file, and use that
        if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
            r = requests.get(self.file_path)

            if r.status_code != 200:
                raise ValueError(
                    "Check the url of your file; returned status code %s"
                    % r.status_code
                )

            self.web_path = self.file_path
            self.temp_file = tempfile.NamedTemporaryFile()
            self.temp_file.write(r.content)
            self.file_path = self.temp_file.name
        elif not os.path.isfile(self.file_path):
            raise ValueError("File path %s is not a valid file or url" % self.file_path)

    def __del__(self) -> None:
        if hasattr(self, "temp_file"):
            self.temp_file.close()

    @staticmethod
    def _is_valid_url(url: str) -> bool:
        """Check if the url is valid."""
        parsed = urlparse(url)
        return bool(parsed.netloc) and bool(parsed.scheme)

    @property
    def source(self) -> str:
        return self.web_path if self.web_path is not None else self.file_path


class OnlinePDFLoader(BasePDFLoader):
    """Loader that loads online PDFs."""

    def load(self) -> List[Document]:
        """Load documents."""
        loader = UnstructuredPDFLoader(str(self.file_path))
        return loader.load()


class PyPDFLoader(BasePDFLoader):
    """Loads a PDF with pypdf and chunks at character level.

    Loader also stores page numbers in metadatas.
    """

    def __init__(self, file_path: str) -> None:
        """Initialize with file path."""
        try:
            import pypdf  # noqa: F401
        except ImportError:
            raise ImportError(
                "pypdf package not found, please install it with "
                "`pip install pypdf`"
            )
        self.parser = PyPDFParser()
        super().__init__(file_path)

    def load(self) -> List[Document]:
        """Load given path as pages."""
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load given path as pages."""
        blob = Blob.from_path(self.file_path)
        yield from self.parser.parse(blob)


class PyPDFium2Loader(BasePDFLoader):
    """Loads a PDF with pypdfium2 and chunks at character level."""

    def __init__(self, file_path: str):
        """Initialize with file path."""
        super().__init__(file_path)
        self.parser = PyPDFium2Parser()

    def load(self) -> List[Document]:
        """Load given path as pages."""
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load given path as pages."""
        blob = Blob.from_path(self.file_path)
        yield from self.parser.parse(blob)


class PyPDFDirectoryLoader(BaseLoader):
    """Loads a directory with PDF files with pypdf and chunks at character level.

    Loader also stores page numbers in metadatas.
    """

    def __init__(
        self,
        path: str,
        glob: str = "**/[!.]*.pdf",
        silent_errors: bool = False,
        load_hidden: bool = False,
        recursive: bool = False,
    ):
        self.path = path
        self.glob = glob
        self.load_hidden = load_hidden
        self.recursive = recursive
        self.silent_errors = silent_errors

    @staticmethod
    def _is_visible(path: Path) -> bool:
        return not any(part.startswith(".") for part in path.parts)

    def load(self) -> List[Document]:
        p = Path(self.path)
        docs = []
        items = p.rglob(self.glob) if self.recursive else p.glob(self.glob)
        for i in items:
            if i.is_file():
                if self._is_visible(i.relative_to(p)) or self.load_hidden:
                    try:
                        loader = PyPDFLoader(str(i))
                        sub_docs = loader.load()
                        for doc in sub_docs:
                            doc.metadata["source"] = str(i)
                        docs.extend(sub_docs)
                    except Exception as e:
                        if self.silent_errors:
                            logger.warning(e)
                        else:
                            raise e
        return docs


class PDFMinerLoader(BasePDFLoader):
    """Loader that uses PDFMiner to load PDF files."""

    def __init__(self, file_path: str) -> None:
        """Initialize with file path."""
        try:
            from pdfminer.high_level import extract_text  # noqa: F401
        except ImportError:
            raise ImportError(
                "`pdfminer` package not found, please install it with "
                "`pip install pdfminer.six`"
            )

        super().__init__(file_path)
        self.parser = PDFMinerParser()

    def load(self) -> List[Document]:
        """Eagerly load the content."""
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        """Lazily load documents."""
        blob = Blob.from_path(self.file_path)
        yield from self.parser.parse(blob)


class PDFMinerPDFasHTMLLoader(BasePDFLoader):
    """Loader that uses PDFMiner to load PDF files as HTML content."""

    def __init__(self, file_path: str):
        """Initialize with file path."""
        try:
            from pdfminer.high_level import extract_text_to_fp  # noqa: F401
        except ImportError:
            raise ImportError(
                "`pdfminer` package not found, please install it with "
                "`pip install pdfminer.six`"
            )

        super().__init__(file_path)

    def load(self) -> List[Document]:
        """Load file."""
        from pdfminer.high_level import extract_text_to_fp
        from pdfminer.layout import LAParams
        from pdfminer.utils import open_filename

        output_string = StringIO()
        with open_filename(self.file_path, "rb") as fp:
            extract_text_to_fp(
                fp,  # type: ignore[arg-type]
                output_string,
                codec="",
                laparams=LAParams(),
                output_type="html",
            )
        metadata = {"source": self.file_path}
        return [Document(page_content=output_string.getvalue(), metadata=metadata)]


class PyMuPDFLoader(BasePDFLoader):
    """Loader that uses PyMuPDF to load PDF files."""

    def __init__(self, file_path: str) -> None:
        """Initialize with file path."""
        try:
            import fitz  # noqa: F401
        except ImportError:
            raise ImportError(
                "`PyMuPDF` package not found, please install it with "
                "`pip install pymupdf`"
            )

        super().__init__(file_path)

    def load(self, **kwargs: Optional[Any]) -> List[Document]:
        """Load file."""
        parser = PyMuPDFParser(text_kwargs=kwargs)
        blob = Blob.from_path(self.file_path)
        return parser.parse(blob)


# MathpixPDFLoader implementation taken largely from Daniel Gross's:
# https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21
class MathpixPDFLoader(BasePDFLoader):
    def __init__(
        self,
        file_path: str,
        processed_file_format: str = "mmd",
        max_wait_time_seconds: int = 500,
        should_clean_pdf: bool = False,
        **kwargs: Any,
    ) -> None:
        super().__init__(file_path)
        self.mathpix_api_key = get_from_dict_or_env(
            kwargs, "mathpix_api_key", "MATHPIX_API_KEY"
        )
        self.mathpix_api_id = get_from_dict_or_env(
            kwargs, "mathpix_api_id", "MATHPIX_API_ID"
        )
        self.processed_file_format = processed_file_format
        self.max_wait_time_seconds = max_wait_time_seconds
        self.should_clean_pdf = should_clean_pdf

    @property
    def headers(self) -> dict:
        return {"app_id": self.mathpix_api_id, "app_key": self.mathpix_api_key}

    @property
    def url(self) -> str:
        return "https://api.mathpix.com/v3/pdf"

    @property
    def data(self) -> dict:
        options = {"conversion_formats": {self.processed_file_format: True}}
        return {"options_json": json.dumps(options)}

    def send_pdf(self) -> str:
        with open(self.file_path, "rb") as f:
            files = {"file": f}
            response = requests.post(
                self.url, headers=self.headers, files=files, data=self.data
            )
        response_data = response.json()
        if "pdf_id" in response_data:
            pdf_id = response_data["pdf_id"]
            return pdf_id
        else:
            raise ValueError("Unable to send PDF to Mathpix.")

    def wait_for_processing(self, pdf_id: str) -> None:
        url = self.url + "/" + pdf_id
        for _ in range(0, self.max_wait_time_seconds, 5):
            response = requests.get(url, headers=self.headers)
            response_data = response.json()
            status = response_data.get("status", None)

            if status == "completed":
                return
            elif status == "error":
                raise ValueError("Unable to retrieve PDF from Mathpix")
            else:
                print(f"Status: {status}, waiting for processing to complete")
                time.sleep(5)
        raise TimeoutError

    def get_processed_pdf(self, pdf_id: str) -> str:
        self.wait_for_processing(pdf_id)
        url = f"{self.url}/{pdf_id}.{self.processed_file_format}"
        response = requests.get(url, headers=self.headers)
        return response.content.decode("utf-8")

    def clean_pdf(self, contents: str) -> str:
        contents = "\n".join(
            [line for line in contents.split("\n") if not line.startswith("![]")]
        )
        # replace \section{Title} with # Title
        contents = contents.replace("\\section{", "# ").replace("}", "")
        # replace the "\" slash that Mathpix adds to escape $, %, (, etc.
        contents = (
            contents.replace(r"\$", "$")
            .replace(r"\%", "%")
            .replace(r"\(", "(")
            .replace(r"\)", ")")
        )
        return contents

    def load(self) -> List[Document]:
        pdf_id = self.send_pdf()
        contents = self.get_processed_pdf(pdf_id)
        if self.should_clean_pdf:
            contents = self.clean_pdf(contents)
        metadata = {"source": self.source, "file_path": self.source}
        return [Document(page_content=contents, metadata=metadata)]


class PDFPlumberLoader(BasePDFLoader):
    """Loader that uses pdfplumber to load PDF files."""

    def __init__(
        self, file_path: str, text_kwargs: Optional[Mapping[str, Any]] = None
    ) -> None:
        """Initialize with file path."""
        try:
            import pdfplumber  # noqa: F401
        except ImportError:
            raise ImportError(
                "pdfplumber package not found, please install it with "
                "`pip install pdfplumber`"
            )

        super().__init__(file_path)
        self.text_kwargs = text_kwargs or {}

    def load(self) -> List[Document]:
        """Load file."""
        parser = PDFPlumberParser(text_kwargs=self.text_kwargs)
        blob = Blob.from_path(self.file_path)
        return parser.parse(blob)
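All of these loaders share BasePDFLoader, so each accepts either a local path or a URL (downloaded to a temporary file). A sketch with a hypothetical path:

from langchain.document_loaders.pdf import PyPDFLoader

loader = PyPDFLoader("example.pdf")  # placeholder path; an http(s) URL also works
pages = loader.load()                # one Document per page
for page in loader.lazy_load():      # or stream pages lazily
    print(page.metadata)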
Source code for langchain.document_loaders.odt
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/odt.html

"""Loader that loads Open Office ODT files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    validate_unstructured_version,
)


class UnstructuredODTLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load Open Office ODT files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        validate_unstructured_version(min_unstructured_version="0.6.3")
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.odt import partition_odt

        return partition_odt(filename=self.file_path, **self.unstructured_kwargs)
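A usage sketch with a hypothetical path; mode="elements" is the standard UnstructuredFileLoader option for one Document per parsed element:

from langchain.document_loaders.odt import UnstructuredODTLoader

docs = UnstructuredODTLoader("example.odt", mode="elements").load()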
Source code for langchain.document_loaders.azure_blob_storage_container
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/azure_blob_storage_container.html

"""Loading logic for loading documents from an Azure Blob Storage container."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.azure_blob_storage_file import (
    AzureBlobStorageFileLoader,
)
from langchain.document_loaders.base import BaseLoader


class AzureBlobStorageContainerLoader(BaseLoader):
    """Loading logic for loading documents from Azure Blob Storage."""

    def __init__(self, conn_str: str, container: str, prefix: str = ""):
        """Initialize with connection string, container and blob prefix."""
        self.conn_str = conn_str
        self.container = container
        self.prefix = prefix

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            from azure.storage.blob import ContainerClient
        except ImportError as exc:
            raise ImportError(
                "Could not import azure storage blob python package. "
                "Please install it with `pip install azure-storage-blob`."
            ) from exc

        container = ContainerClient.from_connection_string(
            conn_str=self.conn_str, container_name=self.container
        )
        docs = []
        blob_list = container.list_blobs(name_starts_with=self.prefix)
        for blob in blob_list:
            loader = AzureBlobStorageFileLoader(
                self.conn_str, self.container, blob.name  # type: ignore
            )
            docs.extend(loader.load())
        return docs
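A usage sketch with placeholder connection details:

from langchain.document_loaders.azure_blob_storage_container import (
    AzureBlobStorageContainerLoader,
)

loader = AzureBlobStorageContainerLoader(
    conn_str="<connection-string>",  # placeholder
    container="my-container",        # placeholder
    prefix="reports/",               # only load blobs under this prefix
)
docs = loader.load()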
Source code for langchain.document_loaders.gcs_directory
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/gcs_directory.html

"""Loading logic for loading documents from a GCS directory."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.gcs_file import GCSFileLoader


class GCSDirectoryLoader(BaseLoader):
    """Loading logic for loading documents from GCS."""

    def __init__(self, project_name: str, bucket: str, prefix: str = ""):
        """Initialize with project name, bucket and prefix."""
        self.project_name = project_name
        self.bucket = bucket
        self.prefix = prefix

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            from google.cloud import storage
        except ImportError:
            raise ImportError(
                "Could not import google-cloud-storage python package. "
                "Please install it with `pip install google-cloud-storage`."
            )
        client = storage.Client(project=self.project_name)
        docs = []
        for blob in client.list_blobs(self.bucket, prefix=self.prefix):
            # we shall just skip directories since GCSFileLoader creates
            # intermediate directories on the fly
            if blob.name.endswith("/"):
                continue
            loader = GCSFileLoader(self.project_name, self.bucket, blob.name)
            docs.extend(loader.load())
        return docs
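A usage sketch with placeholder project and bucket names:

from langchain.document_loaders.gcs_directory import GCSDirectoryLoader

loader = GCSDirectoryLoader(
    project_name="my-project",  # placeholder GCP project
    bucket="my-bucket",         # placeholder bucket name
    prefix="docs/",
)
docs = loader.load()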
Source code for langchain.document_loaders.trello
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/trello.html

"""Loader that loads cards from Trello."""
from __future__ import annotations

from typing import TYPE_CHECKING, Any, List, Literal, Optional, Tuple

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env

if TYPE_CHECKING:
    from trello import Board, Card, TrelloClient


class TrelloLoader(BaseLoader):
    """Trello loader. Reads all cards from a Trello board."""

    def __init__(
        self,
        client: TrelloClient,
        board_name: str,
        *,
        include_card_name: bool = True,
        include_comments: bool = True,
        include_checklist: bool = True,
        card_filter: Literal["closed", "open", "all"] = "all",
        extra_metadata: Tuple[str, ...] = ("due_date", "labels", "list", "closed"),
    ):
        """Initialize Trello loader.

        Args:
            client: Trello API client.
            board_name: The name of the Trello board.
            include_card_name: Whether to include the name of the card in the
                document.
            include_comments: Whether to include the comments on the card in the
                document.
            include_checklist: Whether to include the checklist on the card in
                the document.
            card_filter: Filter on card status. Valid values are "closed",
                "open", "all".
            extra_metadata: List of additional metadata fields to include as
                document metadata. Valid values are "due_date", "labels",
                "list", "closed".
        """
        self.client = client
        self.board_name = board_name
        self.include_card_name = include_card_name
        self.include_comments = include_comments
        self.include_checklist = include_checklist
        self.extra_metadata = extra_metadata
        self.card_filter = card_filter

    @classmethod
    def from_credentials(
        cls,
        board_name: str,
        *,
        api_key: Optional[str] = None,
        token: Optional[str] = None,
        **kwargs: Any,
    ) -> TrelloLoader:
        """Convenience constructor that builds the TrelloClient init param for you.

        Args:
            board_name: The name of the Trello board.
            api_key: Trello API key. Can also be specified as environment
                variable TRELLO_API_KEY.
            token: Trello token. Can also be specified as environment variable
                TRELLO_TOKEN.
            include_card_name: Whether to include the name of the card in the
                document.
            include_comments: Whether to include the comments on the card in the
                document.
            include_checklist: Whether to include the checklist on the card in
                the document.
            card_filter: Filter on card status. Valid values are "closed",
                "open", "all".
            extra_metadata: List of additional metadata fields to include as
                document metadata. Valid values are "due_date", "labels",
                "list", "closed".
        """
        try:
            from trello import TrelloClient  # type: ignore
        except ImportError as ex:
            raise ImportError(
                "Could not import trello python package. "
                "Please install it with `pip install py-trello`."
            ) from ex
        api_key = api_key or get_from_env("api_key", "TRELLO_API_KEY")
        token = token or get_from_env("token", "TRELLO_TOKEN")
        client = TrelloClient(api_key=api_key, token=token)
        return cls(client, board_name, **kwargs)

    def load(self) -> List[Document]:
        """Load all cards from the specified Trello board.

        You can filter the cards, metadata and text included by using the
        optional parameters.

        Returns:
            A list of documents, one for each card in the board.
        """
        try:
            from bs4 import BeautifulSoup  # noqa: F401
        except ImportError as ex:
            raise ImportError(
                "`beautifulsoup4` package not found, please run"
                " `pip install beautifulsoup4`"
            ) from ex

        board = self._get_board()
        # Create a dictionary with the list IDs as keys and the list names as values
        list_dict = {list_item.id: list_item.name for list_item in board.list_lists()}
        # Get Cards on the board
        cards = board.get_cards(card_filter=self.card_filter)
        return [self._card_to_doc(card, list_dict) for card in cards]

    def _get_board(self) -> Board:
        # Find the first board with a matching name
        board = next(
            (b for b in self.client.list_boards() if b.name == self.board_name), None
        )
        if not board:
            raise ValueError(f"Board `{self.board_name}` not found.")
        return board

    def _card_to_doc(self, card: Card, list_dict: dict) -> Document:
        from bs4 import BeautifulSoup  # type: ignore

        text_content = ""
        if self.include_card_name:
            text_content = card.name + "\n"
        if card.description.strip():
            text_content += BeautifulSoup(card.description, "lxml").get_text()
        if self.include_checklist:
            # Get all the checklist items on the card
            for checklist in card.checklists:
                if checklist.items:
                    items = [
                        f"{item['name']}:{item['state']}" for item in checklist.items
                    ]
                    text_content += f"\n{checklist.name}\n" + "\n".join(items)

        if self.include_comments:
            # Get all the comments on the card
            comments = [
                BeautifulSoup(comment["data"]["text"], "lxml").get_text()
                for comment in card.comments
            ]
            text_content += "Comments:" + "\n".join(comments)

        # Default metadata fields
        metadata = {
            "title": card.name,
            "id": card.id,
            "url": card.url,
        }

        # Extra metadata fields. Card object is not subscriptable.
        if "labels" in self.extra_metadata:
            metadata["labels"] = [label.name for label in card.labels]
        if "list" in self.extra_metadata:
            if card.list_id in list_dict:
                metadata["list"] = list_dict[card.list_id]
        if "closed" in self.extra_metadata:
            metadata["closed"] = card.closed
        if "due_date" in self.extra_metadata:
            metadata["due_date"] = card.due_date

        return Document(page_content=text_content, metadata=metadata)
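A usage sketch; the board name is a placeholder, and from_credentials falls back to the TRELLO_API_KEY / TRELLO_TOKEN environment variables when keys are not passed explicitly:

from langchain.document_loaders.trello import TrelloLoader

loader = TrelloLoader.from_credentials(
    "My Board",                           # placeholder board name
    card_filter="open",
    extra_metadata=("due_date", "list"),
)
docs = loader.load()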
Source code for langchain.document_loaders.max_compute
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/max_compute.html

from __future__ import annotations

from typing import Any, Iterator, List, Optional, Sequence

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.max_compute import MaxComputeAPIWrapper


class MaxComputeLoader(BaseLoader):
    """Loads a query result from an Alibaba Cloud MaxCompute table into documents."""

    def __init__(
        self,
        query: str,
        api_wrapper: MaxComputeAPIWrapper,
        *,
        page_content_columns: Optional[Sequence[str]] = None,
        metadata_columns: Optional[Sequence[str]] = None,
    ):
        """Initialize Alibaba Cloud MaxCompute document loader.

        Args:
            query: SQL query to execute.
            api_wrapper: MaxCompute API wrapper.
            page_content_columns: The columns to write into the `page_content`
                of the Document. If unspecified, all columns will be written to
                `page_content`.
            metadata_columns: The columns to write into the `metadata` of the
                Document. If unspecified, all columns not added to
                `page_content` will be written.
        """
        self.query = query
        self.api_wrapper = api_wrapper
        self.page_content_columns = page_content_columns
        self.metadata_columns = metadata_columns

    @classmethod
    def from_params(
        cls,
        query: str,
        endpoint: str,
        project: str,
        *,
        access_id: Optional[str] = None,
        secret_access_key: Optional[str] = None,
        **kwargs: Any,
    ) -> MaxComputeLoader:
        """Convenience constructor that builds the MaxCompute API wrapper from
        the given parameters.

        Args:
            query: SQL query to execute.
            endpoint: MaxCompute endpoint.
            project: A project is a basic organizational unit of MaxCompute,
                which is similar to a database.
            access_id: MaxCompute access ID. Should be passed in directly or
                set as the environment variable `MAX_COMPUTE_ACCESS_ID`.
            secret_access_key: MaxCompute secret access key. Should be passed
                in directly or set as the environment variable
                `MAX_COMPUTE_SECRET_ACCESS_KEY`.
        """
        api_wrapper = MaxComputeAPIWrapper.from_params(
            endpoint, project, access_id=access_id, secret_access_key=secret_access_key
        )
        return cls(query, api_wrapper, **kwargs)

    def lazy_load(self) -> Iterator[Document]:
        for row in self.api_wrapper.query(self.query):
            if self.page_content_columns:
                page_content_data = {
                    k: v for k, v in row.items() if k in self.page_content_columns
                }
            else:
                page_content_data = row
            page_content = "\n".join(f"{k}: {v}" for k, v in page_content_data.items())
            if self.metadata_columns:
                metadata = {k: v for k, v in row.items() if k in self.metadata_columns}
            else:
                metadata = {k: v for k, v in row.items() if k not in page_content_data}
            yield Document(page_content=page_content, metadata=metadata)

    def load(self) -> List[Document]:
        return list(self.lazy_load())
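A usage sketch with placeholder connection parameters; columns not selected into page_content land in metadata:

from langchain.document_loaders.max_compute import MaxComputeLoader

loader = MaxComputeLoader.from_params(
    query="SELECT id, content, author FROM example_table",  # placeholder SQL
    endpoint="<endpoint>",             # placeholder MaxCompute endpoint
    project="example_project",         # placeholder project name
    page_content_columns=["content"],
)
docs = loader.load()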
Source code for langchain.document_loaders.dataframe
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/dataframe.html

"""Load from a Pandas DataFrame object."""
from typing import Any, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class DataFrameLoader(BaseLoader):
    """Load Pandas DataFrames."""

    def __init__(self, data_frame: Any, page_content_column: str = "text"):
        """Initialize with dataframe object."""
        import pandas as pd

        if not isinstance(data_frame, pd.DataFrame):
            raise ValueError(
                f"Expected data_frame to be a pd.DataFrame, got {type(data_frame)}"
            )
        self.data_frame = data_frame
        self.page_content_column = page_content_column

    def load(self) -> List[Document]:
        """Load from the dataframe."""
        result = []
        # For very large dataframes, this needs to yield instead of building a
        # list, but that would require changing the return type to a generator
        # for BaseLoader and all its subclasses, which is a bigger refactor.
        # Marking as future TODO. This change will allow us to extend this to
        # Spark and Dask dataframes.
        for _, row in self.data_frame.iterrows():
            text = row[self.page_content_column]
            metadata = row.to_dict()
            metadata.pop(self.page_content_column)
            result.append(Document(page_content=text, metadata=metadata))
        return result
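A self-contained usage sketch; the column names are illustrative:

import pandas as pd

from langchain.document_loaders.dataframe import DataFrameLoader

df = pd.DataFrame({"text": ["hello", "world"], "author": ["a", "b"]})
loader = DataFrameLoader(df, page_content_column="text")
docs = loader.load()  # "author" ends up in each Document's metadata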
563dfbcd2b5f-0
Source code for langchain.document_loaders.web_base

"""Web base loader class."""
import asyncio
import logging
import warnings
from typing import Any, Dict, List, Optional, Union

import aiohttp
import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)

default_header_template = {
    "User-Agent": "",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
    ";q=0.8",
    "Accept-Language": "en-US,en;q=0.5",
    "Referer": "https://www.google.com/",
    "DNT": "1",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
}


def _build_metadata(soup: Any, url: str) -> dict:
    """Build metadata from BeautifulSoup output."""
    metadata = {"source": url}
    if title := soup.find("title"):
        metadata["title"] = title.get_text()
    if description := soup.find("meta", attrs={"name": "description"}):
        metadata["description"] = description.get("content", None)
    if html := soup.find("html"):
        metadata["language"] = html.get("lang", None)
    return metadata


[docs]class WebBaseLoader(BaseLoader):
    """Loader that uses requests and Beautiful Soup to load webpages."""

    web_paths: List[str]

    requests_per_second: int = 2
    """Max number of concurrent requests to make."""

    default_parser: str = "html.parser"
    """Default parser to use for BeautifulSoup."""

    requests_kwargs: Dict[str, Any] = {}
    """kwargs for requests"""

    def __init__(
        self, web_path: Union[str, List[str]], header_template: Optional[dict] = None
    ):
        """Initialize with webpage path."""
        # TODO: Deprecate web_path in favor of web_paths, and remove this.
        # Left like this because there are a number of loaders that expect single
        # urls.
        if isinstance(web_path, str):
            self.web_paths = [web_path]
        elif isinstance(web_path, List):
            self.web_paths = web_path

        self.session = requests.Session()
        try:
            import bs4  # noqa:F401
        except ImportError:
            raise ValueError(
                "bs4 package not found, please install it with " "`pip install bs4`"
            )

        headers = header_template or default_header_template
        if not headers.get("User-Agent"):
            try:
                from fake_useragent import UserAgent

                headers["User-Agent"] = UserAgent().random
            except ImportError:
                logger.info(
                    "fake_useragent not found, using default user agent. "
                    "To get a realistic header for requests, "
                    "`pip install fake_useragent`."
                )
        self.session.headers = dict(headers)

    @property
    def web_path(self) -> str:
        if len(self.web_paths) > 1:
            raise ValueError("Multiple webpaths found.")
        return self.web_paths[0]

    async def _fetch(
        self, url: str, retries: int = 3, cooldown: int = 2, backoff: float = 1.5
    ) -> str:
        async with aiohttp.ClientSession() as session:
            for i in range(retries):
                try:
                    async with session.get(
                        url, headers=self.session.headers
                    ) as response:
                        return await response.text()
                except aiohttp.ClientConnectionError as e:
                    if i == retries - 1:
                        raise
                    else:
                        logger.warning(
                            f"Error fetching {url} with attempt "
                            f"{i + 1}/{retries}: {e}. Retrying..."
                        )
                        await asyncio.sleep(cooldown * backoff**i)
        raise ValueError("retry count exceeded")

    async def _fetch_with_rate_limit(
        self, url: str, semaphore: asyncio.Semaphore
    ) -> str:
        async with semaphore:
            return await self._fetch(url)

[docs]    async def fetch_all(self, urls: List[str]) -> Any:
        """Fetch all urls concurrently with rate limiting."""
        semaphore = asyncio.Semaphore(self.requests_per_second)
        tasks = []
        for url in urls:
            task = asyncio.ensure_future(self._fetch_with_rate_limit(url, semaphore))
            tasks.append(task)
        try:
            from tqdm.asyncio import tqdm_asyncio

            return await tqdm_asyncio.gather(
                *tasks, desc="Fetching pages", ascii=True, mininterval=1
            )
        except ImportError:
            warnings.warn("For better logging of progress, `pip install tqdm`")
            return await asyncio.gather(*tasks)

    @staticmethod
    def _check_parser(parser: str) -> None:
        """Check that parser is valid for bs4."""
        valid_parsers = ["html.parser", "lxml", "xml", "lxml-xml", "html5lib"]
        if parser not in valid_parsers:
            raise ValueError(
                "`parser` must be one of " + ", ".join(valid_parsers) + "."
            )

[docs]    def scrape_all(self, urls: List[str], parser: Union[str, None] = None) -> List[Any]:
        """Fetch all urls, then return soups for all results."""
        from bs4 import BeautifulSoup

        results = asyncio.run(self.fetch_all(urls))
        final_results = []
        for i, result in enumerate(results):
            url = urls[i]
            # Resolve the parser per url, so that e.g. an .xml url in the middle of
            # the list does not change the parser used for the remaining urls.
            if parser is None:
                url_parser = "xml" if url.endswith(".xml") else self.default_parser
            else:
                url_parser = parser
            self._check_parser(url_parser)
            final_results.append(BeautifulSoup(result, url_parser))

        return final_results

    def _scrape(self, url: str, parser: Union[str, None] = None) -> Any:
        from bs4 import BeautifulSoup

        if parser is None:
            if url.endswith(".xml"):
                parser = "xml"
            else:
                parser = self.default_parser

        self._check_parser(parser)

        html_doc = self.session.get(url, **self.requests_kwargs)
        html_doc.encoding = html_doc.apparent_encoding
        return BeautifulSoup(html_doc.text, parser)

[docs]    def scrape(self, parser: Union[str, None] = None) -> Any:
        """Scrape data from webpage and return it in BeautifulSoup format."""
        if parser is None:
            parser = self.default_parser

        return self._scrape(self.web_path, parser)

[docs]    def load(self) -> List[Document]:
        """Load text from the url(s) in web_path."""
        docs = []
        for path in self.web_paths:
            soup = self._scrape(path)
            text = soup.get_text()
            metadata = _build_metadata(soup, path)
            docs.append(Document(page_content=text, metadata=metadata))

        return docs

[docs]    def aload(self) -> List[Document]:
        """Load text from the urls in web_path async into Documents."""
        results = self.scrape_all(self.web_paths)
        docs = []
        for i in range(len(results)):
            soup = results[i]
            text = soup.get_text()
            metadata = _build_metadata(soup, self.web_paths[i])
            docs.append(Document(page_content=text, metadata=metadata))

        return docs
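A brief usage sketch, not part of the module source; the urls are placeholders. `load()` fetches sequentially via requests, while `aload()` fetches concurrently via aiohttp with `requests_per_second` as the concurrency cap.

from langchain.document_loaders import WebBaseLoader

loader = WebBaseLoader(["https://example.com", "https://example.org"])
loader.requests_per_second = 1  # throttle the concurrent fetches

docs = loader.load()     # sequential
# docs = loader.aload()  # concurrent; uses asyncio.run under the hood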
Source code for langchain.document_loaders.image

"""Loader that loads image files."""
from typing import List

from langchain.document_loaders.unstructured import UnstructuredFileLoader


[docs]class UnstructuredImageLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load image files, such as PNGs and JPGs."""

    def _get_elements(self) -> List:
        from unstructured.partition.image import partition_image

        return partition_image(filename=self.file_path, **self.unstructured_kwargs)
Source code for langchain.document_loaders.docugami

"""Loader that loads processed documents from Docugami."""
import io
import logging
import os
import re
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union

import requests
from pydantic import BaseModel, root_validator

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

TD_NAME = "{http://www.w3.org/1999/xhtml}td"
TABLE_NAME = "{http://www.w3.org/1999/xhtml}table"

XPATH_KEY = "xpath"
DOCUMENT_ID_KEY = "id"
DOCUMENT_NAME_KEY = "name"
STRUCTURE_KEY = "structure"
TAG_KEY = "tag"
PROJECTS_KEY = "projects"

DEFAULT_API_ENDPOINT = "https://api.docugami.com/v1preview1"

logger = logging.getLogger(__name__)


[docs]class DocugamiLoader(BaseLoader, BaseModel):
    """Loader that loads processed docs from Docugami.

    To use, you should have the ``lxml`` python package installed.
    """

    api: str = DEFAULT_API_ENDPOINT

    access_token: Optional[str] = os.environ.get("DOCUGAMI_API_KEY")
    docset_id: Optional[str]
    document_ids: Optional[Sequence[str]]
    file_paths: Optional[Sequence[Union[Path, str]]]
    min_chunk_size: int = 32  # appended to the next chunk to avoid over-chunking

    @root_validator
    def validate_local_or_remote(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate that either local file paths are given, or remote API docset ID."""
        if values.get("file_paths") and values.get("docset_id"):
            raise ValueError("Cannot specify both file_paths and remote API docset_id")

        if not values.get("file_paths") and not values.get("docset_id"):
            raise ValueError("Must specify either file_paths or remote API docset_id")

        if values.get("docset_id") and not values.get("access_token"):
            raise ValueError("Must specify access token if using remote API docset_id")

        return values

    def _parse_dgml(
        self, document: Mapping, content: bytes, doc_metadata: Optional[Mapping] = None
    ) -> List[Document]:
        """Parse a single DGML document into a list of Documents."""
        try:
            from lxml import etree
        except ImportError:
            raise ImportError(
                "Could not import lxml python package. "
                "Please install it with `pip install lxml`."
            )

        # helpers
        def _xpath_qname_for_chunk(chunk: Any) -> str:
            """Get the xpath qname for a chunk."""
            qname = f"{chunk.prefix}:{chunk.tag.split('}')[-1]}"

            parent = chunk.getparent()
            if parent is not None:
                doppelgangers = [x for x in parent if x.tag == chunk.tag]
                if len(doppelgangers) > 1:
                    idx_of_self = doppelgangers.index(chunk)
                    qname = f"{qname}[{idx_of_self + 1}]"

            return qname

        def _xpath_for_chunk(chunk: Any) -> str:
            """Get the xpath for a chunk."""
            ancestor_chain = chunk.xpath("ancestor-or-self::*")
            return "/" + "/".join(_xpath_qname_for_chunk(x) for x in ancestor_chain)

        def _structure_value(node: Any) -> str:
            """Get the structure value for a node."""
            structure = (
                "table"
                if node.tag == TABLE_NAME
                else node.attrib["structure"]
                if "structure" in node.attrib
                else None
            )
            return structure

        def _is_structural(node: Any) -> bool:
            """Check if a node is structural."""
            return _structure_value(node) is not None

        def _is_heading(node: Any) -> bool:
            """Check if a node is a heading."""
            structure = _structure_value(node)
            return structure is not None and structure.lower().startswith("h")

        def _get_text(node: Any) -> str:
            """Get the text of a node."""
            return " ".join(node.itertext()).strip()

        def _has_structural_descendant(node: Any) -> bool:
            """Check if a node has a structural descendant."""
            for child in node:
                if _is_structural(child) or _has_structural_descendant(child):
                    return True
            return False

        def _leaf_structural_nodes(node: Any) -> List:
            """Get the leaf structural nodes of a node."""
            if _is_structural(node) and not _has_structural_descendant(node):
                return [node]
            else:
                leaf_nodes = []
                for child in node:
                    leaf_nodes.extend(_leaf_structural_nodes(child))
                return leaf_nodes

        def _create_doc(node: Any, text: str) -> Document:
            """Create a Document from a node and text."""
            metadata = {
                XPATH_KEY: _xpath_for_chunk(node),
                DOCUMENT_ID_KEY: document["id"],
                DOCUMENT_NAME_KEY: document["name"],
                STRUCTURE_KEY: node.attrib.get("structure", ""),
                TAG_KEY: re.sub(r"\{.*\}", "", node.tag),
            }

            if doc_metadata:
                metadata.update(doc_metadata)

            return Document(
                page_content=text,
                metadata=metadata,
            )

        # parse the tree and return chunks
        tree = etree.parse(io.BytesIO(content))
        root = tree.getroot()

        chunks: List[Document] = []
        prev_small_chunk_text = None
        for node in _leaf_structural_nodes(root):
            text = _get_text(node)
            if prev_small_chunk_text:
                text = prev_small_chunk_text + " " + text
                prev_small_chunk_text = None

            if _is_heading(node) or len(text) < self.min_chunk_size:
                # Save headings or other small chunks to be appended to the next chunk
                prev_small_chunk_text = text
            else:
                chunks.append(_create_doc(node, text))

        if prev_small_chunk_text and len(chunks) > 0:
            # small chunk at the end left over, just append to last chunk
            chunks[-1].page_content += " " + prev_small_chunk_text

        return chunks

    def _document_details_for_docset_id(self, docset_id: str) -> List[Dict]:
        """Gets all document details for the given docset ID"""
        url = f"{self.api}/docsets/{docset_id}/documents"
        all_documents = []

        while url:
            response = requests.get(
                url,
                headers={"Authorization": f"Bearer {self.access_token}"},
            )
            if response.ok:
                data = response.json()
                all_documents.extend(data["documents"])
                url = data.get("next", None)
            else:
                raise Exception(
                    f"Failed to download {url} (status: {response.status_code})"
                )

        return all_documents

    def _project_details_for_docset_id(self, docset_id: str) -> List[Dict]:
        """Gets all project details for the given docset ID"""
        url = f"{self.api}/projects?docset.id={docset_id}"
        all_projects = []

        while url:
            response = requests.request(
                "GET",
                url,
                headers={"Authorization": f"Bearer {self.access_token}"},
                data={},
            )
            if response.ok:
                data = response.json()
                all_projects.extend(data["projects"])
                url = data.get("next", None)
            else:
                raise Exception(
                    f"Failed to download {url} (status: {response.status_code})"
                )

        return all_projects

    def _metadata_for_project(self, project: Dict) -> Dict:
        """Gets project metadata for all files"""
        project_id = project.get("id")

        url = f"{self.api}/projects/{project_id}/artifacts/latest"
        all_artifacts = []

        while url:
            response = requests.request(
                "GET",
                url,
                headers={"Authorization": f"Bearer {self.access_token}"},
                data={},
            )
            if response.ok:
                data = response.json()
                all_artifacts.extend(data["artifacts"])
                url = data.get("next", None)
            else:
                raise Exception(
                    f"Failed to download {url} (status: {response.status_code})"
                )

        per_file_metadata = {}
        for artifact in all_artifacts:
            artifact_name = artifact.get("name")
            artifact_url = artifact.get("url")
            artifact_doc = artifact.get("document")

            if artifact_name == f"{project_id}.xml" and artifact_url and artifact_doc:
                doc_id = artifact_doc["id"]
                metadata: Dict = {}

                # the evaluated XML for each document is named after the project
                response = requests.request(
                    "GET",
                    f"{artifact_url}/content",
                    headers={"Authorization": f"Bearer {self.access_token}"},
                    data={},
                )

                if response.ok:
                    try:
                        from lxml import etree
                    except ImportError:
                        raise ImportError(
                            "Could not import lxml python package. "
                            "Please install it with `pip install lxml`."
                        )
                    artifact_tree = etree.parse(io.BytesIO(response.content))
                    artifact_root = artifact_tree.getroot()
                    ns = artifact_root.nsmap
                    entries = artifact_root.xpath("//wp:Entry", namespaces=ns)
                    for entry in entries:
                        heading = entry.xpath("./wp:Heading", namespaces=ns)[0].text
                        value = " ".join(
                            entry.xpath("./wp:Value", namespaces=ns)[0].itertext()
                        ).strip()
                        metadata[heading] = value
                    per_file_metadata[doc_id] = metadata
                else:
                    raise Exception(
                        f"Failed to download {artifact_url}/content "
                        f"(status: {response.status_code})"
                    )

        return per_file_metadata

    def _load_chunks_for_document(
        self, docset_id: str, document: Dict, doc_metadata: Optional[Dict] = None
    ) -> List[Document]:
        """Load chunks for a document."""
        document_id = document["id"]
        url = f"{self.api}/docsets/{docset_id}/documents/{document_id}/dgml"

        response = requests.request(
            "GET",
            url,
            headers={"Authorization": f"Bearer {self.access_token}"},
            data={},
        )

        if response.ok:
            return self._parse_dgml(document, response.content, doc_metadata)
        else:
            raise Exception(
                f"Failed to download {url} (status: {response.status_code})"
            )

[docs]    def load(self) -> List[Document]:
        """Load documents."""
        chunks: List[Document] = []

        if self.access_token and self.docset_id:
            # remote mode
            _document_details = self._document_details_for_docset_id(self.docset_id)
            if self.document_ids:
                _document_details = [
                    d for d in _document_details if d["id"] in self.document_ids
                ]

            _project_details = self._project_details_for_docset_id(self.docset_id)
            combined_project_metadata = {}
            if _project_details:
                # if there are any projects for this docset, load project metadata
                for project in _project_details:
                    metadata = self._metadata_for_project(project)
                    combined_project_metadata.update(metadata)

            for doc in _document_details:
                doc_metadata = combined_project_metadata.get(doc["id"])
                chunks += self._load_chunks_for_document(
                    self.docset_id, doc, doc_metadata
                )
        elif self.file_paths:
            # local mode (for integration testing, or pre-downloaded XML)
            for path in self.file_paths:
                path = Path(path)
                with open(path, "rb") as file:
                    chunks += self._parse_dgml(
                        {
                            DOCUMENT_ID_KEY: path.name,
                            DOCUMENT_NAME_KEY: path.name,
                        },
                        file.read(),
                    )

        return chunks
Source code for langchain.document_loaders.weather

"""Simple reader that reads weather data from OpenWeatherMap API"""
from __future__ import annotations

from datetime import datetime
from typing import Iterator, List, Optional, Sequence

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper


[docs]class WeatherDataLoader(BaseLoader):
    """Weather Reader.

    Reads the forecast & current weather of any location using OpenWeatherMap's free
    API. See 'https://openweathermap.org/appid' for instructions on how to generate a
    free OpenWeatherMap API key.
    """

    def __init__(
        self,
        client: OpenWeatherMapAPIWrapper,
        places: Sequence[str],
    ) -> None:
        """Initialize with parameters."""
        super().__init__()
        self.client = client
        self.places = places

[docs]    @classmethod
    def from_params(
        cls, places: Sequence[str], *, openweathermap_api_key: Optional[str] = None
    ) -> WeatherDataLoader:
        client = OpenWeatherMapAPIWrapper(
            openweathermap_api_key=openweathermap_api_key
        )
        return cls(client, places)

[docs]    def lazy_load(
        self,
    ) -> Iterator[Document]:
        """Lazily load weather data for the given locations."""
        for place in self.places:
            metadata = {"queried_at": datetime.now()}
            content = self.client.run(place)
            yield Document(page_content=content, metadata=metadata)

[docs]    def load(
        self,
    ) -> List[Document]:
        """Load weather data for the given locations."""
        return list(self.lazy_load())
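A brief usage sketch, not part of the module source; the API key and place names are placeholders.

from langchain.document_loaders import WeatherDataLoader

loader = WeatherDataLoader.from_params(
    ["Pune, IN", "London, GB"], openweathermap_api_key="YOUR_API_KEY"
)
docs = loader.load()  # one Document per place, with a `queried_at` timestamp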
Source code for langchain.document_loaders.bibtex

import logging
import re
from pathlib import Path
from typing import Any, Iterator, List, Mapping, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.bibtex import BibtexparserWrapper

logger = logging.getLogger(__name__)


[docs]class BibtexLoader(BaseLoader):
    """Loads a bibtex file into a list of Documents.

    Each document represents one entry from the bibtex file.

    If a PDF file is present in the `file` bibtex field, the original PDF
    is loaded into the document text. If no such file entry is present,
    the `abstract` field is used instead.
    """

    def __init__(
        self,
        file_path: str,
        *,
        parser: Optional[BibtexparserWrapper] = None,
        max_docs: Optional[int] = None,
        max_content_chars: Optional[int] = 4_000,
        load_extra_metadata: bool = False,
        file_pattern: str = r"[^:]+\.pdf",
    ):
        """Initialize the BibtexLoader.

        Args:
            file_path: Path to the bibtex file.
            max_docs: Max number of associated documents to load. Use None for
                no limit.
        """
        self.file_path = file_path
        self.parser = parser or BibtexparserWrapper()
        self.max_docs = max_docs
        self.max_content_chars = max_content_chars
        self.load_extra_metadata = load_extra_metadata
        self.file_regex = re.compile(file_pattern)

    def _load_entry(self, entry: Mapping[str, Any]) -> Optional[Document]:
        import fitz

        parent_dir = Path(self.file_path).parent
        # regex is useful for Zotero flavor bibtex files
        file_names = self.file_regex.findall(entry.get("file", ""))
        if not file_names:
            return None
        texts: List[str] = []
        for file_name in file_names:
            try:
                with fitz.open(parent_dir / file_name) as f:
                    texts.extend(page.get_text() for page in f)
            except FileNotFoundError as e:
                logger.debug(e)
        content = "\n".join(texts) or entry.get("abstract", "")
        if self.max_content_chars:
            content = content[: self.max_content_chars]
        metadata = self.parser.get_metadata(entry, load_extra=self.load_extra_metadata)
        return Document(
            page_content=content,
            metadata=metadata,
        )

[docs]    def lazy_load(self) -> Iterator[Document]:
        """Load bibtex file using bibtexparser and get the article texts plus the
        article metadata.

        See https://bibtexparser.readthedocs.io/en/master/

        Returns:
            a list of documents with the document.page_content in text format
        """
        try:
            import fitz  # noqa: F401
        except ImportError:
            raise ImportError(
                "PyMuPDF package not found, please install it with "
                "`pip install pymupdf`"
            )

        entries = self.parser.load_bibtex_entries(self.file_path)
        if self.max_docs:
            entries = entries[: self.max_docs]
        for entry in entries:
            doc = self._load_entry(entry)
            if doc:
                yield doc

[docs]    def load(self) -> List[Document]:
        """Load bibtex file documents from the given bibtex file path.

        See https://bibtexparser.readthedocs.io/en/master/

        Args:
            file_path: the path to the bibtex file

        Returns:
            a list of documents with the document.page_content in text format
        """
        return list(self.lazy_load())
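A brief usage sketch, not part of the module source; `references.bib` is a hypothetical path. Entries whose Zotero-style `file` field points at a PDF get the PDF text; others fall back to the abstract.

from langchain.document_loaders import BibtexLoader

loader = BibtexLoader("references.bib", max_content_chars=2_000)
docs = loader.load()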
Source code for langchain.document_loaders.chatgpt

"""Load conversations from ChatGPT data export"""
import datetime
import json
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def concatenate_rows(message: dict, title: str) -> str:
    if not message:
        return ""

    sender = message["author"]["role"] if message["author"] else "unknown"
    text = message["content"]["parts"][0]
    date = datetime.datetime.fromtimestamp(message["create_time"]).strftime(
        "%Y-%m-%d %H:%M:%S"
    )
    return f"{title} - {sender} on {date}: {text}\n\n"


[docs]class ChatGPTLoader(BaseLoader):
    """Loader that loads conversations from exported ChatGPT data."""

    def __init__(self, log_file: str, num_logs: int = -1):
        self.log_file = log_file
        self.num_logs = num_logs

[docs]    def load(self) -> List[Document]:
        with open(self.log_file, encoding="utf8") as f:
            data = json.load(f)
        # A positive num_logs limits how many conversations are loaded; any other
        # value (including the default -1) loads all of them. Slicing with the raw
        # value, as the original one-liner did, silently dropped the last
        # conversation when num_logs was -1.
        if self.num_logs > 0:
            data = data[: self.num_logs]

        documents = []
        for d in data:
            title = d["title"]
            messages = d["mapping"]
            text = "".join(
                [
                    concatenate_rows(messages[key]["message"], title)
                    for idx, key in enumerate(messages)
                    if not (
                        idx == 0
                        and messages[key]["message"]["author"]["role"] == "system"
                    )
                ]
            )
            metadata = {"source": str(self.log_file)}
            documents.append(Document(page_content=text, metadata=metadata))

        return documents
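A brief usage sketch, not part of the module source; `conversations.json` stands in for the file from a ChatGPT data export.

from langchain.document_loaders import ChatGPTLoader

loader = ChatGPTLoader(log_file="conversations.json", num_logs=5)
docs = loader.load()  # one Document per conversation, title + messages concatenated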
Source code for langchain.document_loaders.figma

"""Loader that loads Figma files json dump."""
import json
import urllib.request
from typing import Any, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import stringify_dict


[docs]class FigmaFileLoader(BaseLoader):
    """Loader that loads Figma file json."""

    def __init__(self, access_token: str, ids: str, key: str):
        """Initialize with access token, ids, and key."""
        self.access_token = access_token
        self.ids = ids
        self.key = key

    def _construct_figma_api_url(self) -> str:
        api_url = "https://api.figma.com/v1/files/%s/nodes?ids=%s" % (
            self.key,
            self.ids,
        )
        return api_url

    def _get_figma_file(self) -> Any:
        """Get Figma file from Figma REST API."""
        headers = {"X-Figma-Token": self.access_token}
        request = urllib.request.Request(
            self._construct_figma_api_url(), headers=headers
        )
        with urllib.request.urlopen(request) as response:
            json_data = json.loads(response.read().decode())
        return json_data

[docs]    def load(self) -> List[Document]:
        """Load file"""
        data = self._get_figma_file()
        text = stringify_dict(data)
        metadata = {"source": self._construct_figma_api_url()}
        return [Document(page_content=text, metadata=metadata)]
Source code for langchain.document_loaders.url_selenium

"""Loader that uses Selenium to load a page, then uses unstructured to load the html.
"""
import logging
from typing import TYPE_CHECKING, List, Literal, Optional, Union

if TYPE_CHECKING:
    from selenium.webdriver import Chrome, Firefox

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


[docs]class SeleniumURLLoader(BaseLoader):
    """Loader that uses Selenium to load a page, then uses unstructured to parse
    the html. This is useful for loading pages that require javascript to render.

    Attributes:
        urls (List[str]): List of URLs to load.
        continue_on_failure (bool): If True, continue loading other URLs on failure.
        browser (str): The browser to use, either 'chrome' or 'firefox'.
        binary_location (Optional[str]): The location of the browser binary.
        executable_path (Optional[str]): The path to the browser executable.
        headless (bool): If True, the browser will run in headless mode.
        arguments (List[str]): List of arguments to pass to the browser.
    """

    def __init__(
        self,
        urls: List[str],
        continue_on_failure: bool = True,
        browser: Literal["chrome", "firefox"] = "chrome",
        binary_location: Optional[str] = None,
        executable_path: Optional[str] = None,
        headless: bool = True,
        arguments: List[str] = [],
    ):
        """Load a list of URLs using Selenium and unstructured."""
        try:
            import selenium  # noqa:F401
        except ImportError:
            raise ImportError(
                "selenium package not found, please install it with "
                "`pip install selenium`"
            )

        try:
            import unstructured  # noqa:F401
        except ImportError:
            raise ImportError(
                "unstructured package not found, please install it with "
                "`pip install unstructured`"
            )

        self.urls = urls
        self.continue_on_failure = continue_on_failure
        self.browser = browser
        self.binary_location = binary_location
        self.executable_path = executable_path
        self.headless = headless
        self.arguments = arguments

    def _get_driver(self) -> Union["Chrome", "Firefox"]:
        """Create and return a WebDriver instance based on the specified browser.

        Raises:
            ValueError: If an invalid browser is specified.

        Returns:
            Union[Chrome, Firefox]: A WebDriver instance for the specified browser.
        """
        if self.browser.lower() == "chrome":
            from selenium.webdriver import Chrome
            from selenium.webdriver.chrome.options import Options as ChromeOptions

            chrome_options = ChromeOptions()

            for arg in self.arguments:
                chrome_options.add_argument(arg)

            if self.headless:
                chrome_options.add_argument("--headless")
                chrome_options.add_argument("--no-sandbox")
            if self.binary_location is not None:
                chrome_options.binary_location = self.binary_location
            if self.executable_path is None:
                return Chrome(options=chrome_options)
            return Chrome(executable_path=self.executable_path, options=chrome_options)
        elif self.browser.lower() == "firefox":
            from selenium.webdriver import Firefox
            from selenium.webdriver.firefox.options import Options as FirefoxOptions

            firefox_options = FirefoxOptions()

            for arg in self.arguments:
                firefox_options.add_argument(arg)

            if self.headless:
                firefox_options.add_argument("--headless")
            if self.binary_location is not None:
                firefox_options.binary_location = self.binary_location
            if self.executable_path is None:
                return Firefox(options=firefox_options)
            return Firefox(
                executable_path=self.executable_path, options=firefox_options
            )
        else:
            raise ValueError("Invalid browser specified. Use 'chrome' or 'firefox'.")

[docs]    def load(self) -> List[Document]:
        """Load the specified URLs using Selenium and create Document instances.

        Returns:
            List[Document]: A list of Document instances with loaded content.
        """
        from unstructured.partition.html import partition_html

        docs: List[Document] = list()
        driver = self._get_driver()

        for url in self.urls:
            try:
                driver.get(url)
                page_content = driver.page_source
                elements = partition_html(text=page_content)
                text = "\n\n".join([str(el) for el in elements])
                metadata = {"source": url}
                docs.append(Document(page_content=text, metadata=metadata))
            except Exception as e:
                if self.continue_on_failure:
                    logger.error(f"Error fetching or processing {url}, exception: {e}")
                else:
                    raise e

        driver.quit()
        return docs
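A brief usage sketch, not part of the module source; the url is a placeholder for a JavaScript-rendered page, and a matching browser/driver is assumed to be installed.

from langchain.document_loaders import SeleniumURLLoader

loader = SeleniumURLLoader(
    urls=["https://example.com/spa"],
    browser="firefox",
    headless=True,
)
docs = loader.load()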
Source code for langchain.document_loaders.html_bs

"""Loader that uses bs4 to load HTML files, enriching metadata with page title."""
import logging
from typing import Dict, List, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


[docs]class BSHTMLLoader(BaseLoader):
    """Loader that uses beautiful soup to parse HTML files."""

    def __init__(
        self,
        file_path: str,
        open_encoding: Union[str, None] = None,
        bs_kwargs: Union[dict, None] = None,
        get_text_separator: str = "",
    ) -> None:
        """Initialise with path, and optionally, file encoding to use, and any kwargs
        to pass to the BeautifulSoup object."""
        try:
            import bs4  # noqa:F401
        except ImportError:
            raise ValueError(
                "beautifulsoup4 package not found, please install it with "
                "`pip install beautifulsoup4`"
            )

        self.file_path = file_path
        self.open_encoding = open_encoding
        if bs_kwargs is None:
            bs_kwargs = {"features": "lxml"}
        self.bs_kwargs = bs_kwargs
        self.get_text_separator = get_text_separator

[docs]    def load(self) -> List[Document]:
        """Load HTML document into document objects."""
        from bs4 import BeautifulSoup

        with open(self.file_path, "r", encoding=self.open_encoding) as f:
            soup = BeautifulSoup(f, **self.bs_kwargs)

        text = soup.get_text(self.get_text_separator)

        if soup.title:
            title = str(soup.title.string)
        else:
            title = ""

        metadata: Dict[str, Union[str, None]] = {
            "source": self.file_path,
            "title": title,
        }
        return [Document(page_content=text, metadata=metadata)]
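A brief usage sketch, not part of the module source; `index.html` is a hypothetical local file. The page title lands in metadata alongside the source path.

from langchain.document_loaders import BSHTMLLoader

loader = BSHTMLLoader("index.html", get_text_separator="\n")
docs = loader.load()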
Source code for langchain.document_loaders.bilibili

import json
import re
import warnings
from typing import List, Tuple

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


[docs]class BiliBiliLoader(BaseLoader):
    """Loader that loads bilibili transcripts."""

    def __init__(self, video_urls: List[str]):
        """Initialize with bilibili url."""
        self.video_urls = video_urls

[docs]    def load(self) -> List[Document]:
        """Load from bilibili url."""
        results = []
        for url in self.video_urls:
            transcript, video_info = self._get_bilibili_subs_and_info(url)
            doc = Document(page_content=transcript, metadata=video_info)
            results.append(doc)

        return results

    def _get_bilibili_subs_and_info(self, url: str) -> Tuple[str, dict]:
        try:
            from bilibili_api import sync, video
        except ImportError:
            raise ValueError(
                "bilibili_api package not found, please install it with "
                "`pip install bilibili-api-python`"
            )

        bvid = re.search(r"BV\w+", url)
        if bvid is not None:
            v = video.Video(bvid=bvid.group())
        else:
            aid = re.search(r"av[0-9]+", url)
            if aid is not None:
                try:
                    v = video.Video(aid=int(aid.group()[2:]))
                except AttributeError:
                    raise ValueError(f"{url} is not bilibili url.")
            else:
                raise ValueError(f"{url} is not bilibili url.")

        video_info = sync(v.get_info())
        video_info.update({"url": url})

        # Get subtitle url
        subtitle = video_info.pop("subtitle")
        sub_list = subtitle["list"]
        if sub_list:
            sub_url = sub_list[0]["subtitle_url"]
            result = requests.get(sub_url)
            raw_sub_titles = json.loads(result.content)["body"]
            raw_transcript = " ".join([c["content"] for c in raw_sub_titles])

            raw_transcript_with_meta_info = (
                f"Video Title: {video_info['title']},"
                f"description: {video_info['desc']}\n\n"
                f"Transcript: {raw_transcript}"
            )
            return raw_transcript_with_meta_info, video_info
        else:
            raw_transcript = ""
            warnings.warn(
                f"""
                No subtitles found for video: {url}.
                Return Empty transcript.
                """
            )
            return raw_transcript, video_info
Source code for langchain.document_loaders.git

import os
from typing import Callable, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


[docs]class GitLoader(BaseLoader):
    """Loads files from a Git repository into a list of documents.

    Repository can be local on disk available at `repo_path`,
    or remote at `clone_url` that will be cloned to `repo_path`.
    Currently supports only text files.

    Each document represents one file in the repository. The `path` points to
    the local Git repository, and the `branch` specifies the branch to load
    files from. By default, it loads from the `main` branch.
    """

    def __init__(
        self,
        repo_path: str,
        clone_url: Optional[str] = None,
        branch: Optional[str] = "main",
        file_filter: Optional[Callable[[str], bool]] = None,
    ):
        self.repo_path = repo_path
        self.clone_url = clone_url
        self.branch = branch
        self.file_filter = file_filter

[docs]    def load(self) -> List[Document]:
        try:
            from git import Blob, Repo  # type: ignore
        except ImportError as ex:
            raise ImportError(
                "Could not import git python package. "
                "Please install it with `pip install GitPython`."
            ) from ex

        if not os.path.exists(self.repo_path) and self.clone_url is None:
            raise ValueError(f"Path {self.repo_path} does not exist")
        elif self.clone_url:
            repo = Repo.clone_from(self.clone_url, self.repo_path)
            repo.git.checkout(self.branch)
        else:
            repo = Repo(self.repo_path)
            repo.git.checkout(self.branch)

        docs: List[Document] = []

        for item in repo.tree().traverse():
            if not isinstance(item, Blob):
                continue

            file_path = os.path.join(self.repo_path, item.path)

            ignored_files = repo.ignored([file_path])  # type: ignore
            if len(ignored_files):
                continue

            # uses filter to skip files
            if self.file_filter and not self.file_filter(file_path):
                continue

            rel_file_path = os.path.relpath(file_path, self.repo_path)
            try:
                with open(file_path, "rb") as f:
                    content = f.read()
                    file_type = os.path.splitext(item.name)[1]

                    # loads only text files
                    try:
                        text_content = content.decode("utf-8")
                    except UnicodeDecodeError:
                        continue

                    metadata = {
                        "source": rel_file_path,
                        "file_path": rel_file_path,
                        "file_name": item.name,
                        "file_type": file_type,
                    }
                    doc = Document(page_content=text_content, metadata=metadata)
                    docs.append(doc)
            except Exception as e:
                print(f"Error reading file {file_path}: {e}")

        return docs
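A brief usage sketch, not part of the module source; the clone URL, branch, and local path are examples. The `file_filter` keeps only Python files.

from langchain.document_loaders import GitLoader

loader = GitLoader(
    repo_path="./example_data/test_repo",
    clone_url="https://github.com/hwchase17/langchain",
    branch="master",
    file_filter=lambda file_path: file_path.endswith(".py"),
)
docs = loader.load()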
Source code for langchain.document_loaders.directory

"""Loading logic for loading documents from a directory."""
import concurrent.futures
import logging
from pathlib import Path
from typing import Any, List, Optional, Type, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.html_bs import BSHTMLLoader
from langchain.document_loaders.text import TextLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader

FILE_LOADER_TYPE = Union[
    Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader]
]
logger = logging.getLogger(__name__)


def _is_visible(p: Path) -> bool:
    parts = p.parts
    for _p in parts:
        if _p.startswith("."):
            return False
    return True


[docs]class DirectoryLoader(BaseLoader):
    """Loading logic for loading documents from a directory."""

    def __init__(
        self,
        path: str,
        glob: str = "**/[!.]*",
        silent_errors: bool = False,
        load_hidden: bool = False,
        loader_cls: FILE_LOADER_TYPE = UnstructuredFileLoader,
        loader_kwargs: Union[dict, None] = None,
        recursive: bool = False,
        show_progress: bool = False,
        use_multithreading: bool = False,
        max_concurrency: int = 4,
    ):
        """Initialize with path to directory and how to glob over it."""
        if loader_kwargs is None:
            loader_kwargs = {}
        self.path = path
        self.glob = glob
        self.load_hidden = load_hidden
        self.loader_cls = loader_cls
        self.loader_kwargs = loader_kwargs
        self.silent_errors = silent_errors
        self.recursive = recursive
        self.show_progress = show_progress
        self.use_multithreading = use_multithreading
        self.max_concurrency = max_concurrency

[docs]    def load_file(
        self, item: Path, path: Path, docs: List[Document], pbar: Optional[Any]
    ) -> None:
        if item.is_file():
            if _is_visible(item.relative_to(path)) or self.load_hidden:
                try:
                    sub_docs = self.loader_cls(str(item), **self.loader_kwargs).load()
                    docs.extend(sub_docs)
                except Exception as e:
                    if self.silent_errors:
                        logger.warning(e)
                    else:
                        raise e
                finally:
                    if pbar:
                        pbar.update(1)

[docs]    def load(self) -> List[Document]:
        """Load documents."""
        p = Path(self.path)
        if not p.exists():
            raise FileNotFoundError(f"Directory not found: '{self.path}'")
        if not p.is_dir():
            raise ValueError(f"Expected directory, got file: '{self.path}'")

        docs: List[Document] = []
        items = list(p.rglob(self.glob) if self.recursive else p.glob(self.glob))

        pbar = None
        if self.show_progress:
            try:
                from tqdm import tqdm

                pbar = tqdm(total=len(items))
            except ImportError as e:
                logger.warning(
                    "To log the progress of DirectoryLoader you need to install tqdm, "
                    "`pip install tqdm`"
                )
                if self.silent_errors:
                    logger.warning(e)
                else:
                    raise e

        if self.use_multithreading:
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=self.max_concurrency
            ) as executor:
                executor.map(lambda i: self.load_file(i, p, docs, pbar), items)
        else:
            for i in items:
                self.load_file(i, p, docs, pbar)

        if pbar:
            pbar.close()

        return docs
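A brief usage sketch, not part of the module source; `./docs` and the glob are examples. This loads all markdown files with TextLoader, four files at a time, with a progress bar if tqdm is installed.

from langchain.document_loaders import DirectoryLoader, TextLoader

loader = DirectoryLoader(
    "./docs",
    glob="**/*.md",
    loader_cls=TextLoader,
    show_progress=True,
    use_multithreading=True,
)
docs = loader.load()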
Source code for langchain.document_loaders.arxiv

from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.arxiv import ArxivAPIWrapper


[docs]class ArxivLoader(BaseLoader):
    """Loads a query result from arxiv.org into a list of Documents.

    Each Document represents one arXiv paper.
    The loader converts the original PDF into text.
    """

    def __init__(
        self,
        query: str,
        load_max_docs: Optional[int] = 100,
        load_all_available_meta: Optional[bool] = False,
    ):
        self.query = query
        self.load_max_docs = load_max_docs
        self.load_all_available_meta = load_all_available_meta

[docs]    def load(self) -> List[Document]:
        arxiv_client = ArxivAPIWrapper(
            load_max_docs=self.load_max_docs,
            load_all_available_meta=self.load_all_available_meta,
        )
        docs = arxiv_client.load(self.query)
        return docs
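A brief usage sketch, not part of the module source; the arXiv id is an example query.

from langchain.document_loaders import ArxivLoader

docs = ArxivLoader(query="1706.03762", load_max_docs=2).load()
print(docs[0].metadata)  # set load_all_available_meta=True for the full metadata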
Source code for langchain.document_loaders.notiondb

"""Notion DB loader for langchain"""
from typing import Any, Dict, List, Optional

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

NOTION_BASE_URL = "https://api.notion.com/v1"
DATABASE_URL = NOTION_BASE_URL + "/databases/{database_id}/query"
PAGE_URL = NOTION_BASE_URL + "/pages/{page_id}"
BLOCK_URL = NOTION_BASE_URL + "/blocks/{block_id}/children"


[docs]class NotionDBLoader(BaseLoader):
    """Notion DB Loader.

    Reads content from pages within a Notion Database.

    Args:
        integration_token (str): Notion integration token.
        database_id (str): Notion database id.
        request_timeout_sec (int): Timeout for Notion requests in seconds.
    """

    def __init__(
        self,
        integration_token: str,
        database_id: str,
        request_timeout_sec: Optional[int] = 10,
    ) -> None:
        """Initialize with parameters."""
        if not integration_token:
            raise ValueError("integration_token must be provided")
        if not database_id:
            raise ValueError("database_id must be provided")

        self.token = integration_token
        self.database_id = database_id
        self.headers = {
            "Authorization": "Bearer " + self.token,
            "Content-Type": "application/json",
            "Notion-Version": "2022-06-28",
        }
        self.request_timeout_sec = request_timeout_sec

[docs]    def load(self) -> List[Document]:
        """Load documents from the Notion database.

        Returns:
            List[Document]: List of documents.
        """
        page_ids = self._retrieve_page_ids()
        return list(self.load_page(page_id) for page_id in page_ids)

    def _retrieve_page_ids(
        self, query_dict: Dict[str, Any] = {"page_size": 100}
    ) -> List[str]:
        """Get all the pages from a Notion database."""
        pages: List[Dict[str, Any]] = []

        while True:
            data = self._request(
                DATABASE_URL.format(database_id=self.database_id),
                method="POST",
                query_dict=query_dict,
            )

            pages.extend(data.get("results"))

            if not data.get("has_more"):
                break

            query_dict["start_cursor"] = data.get("next_cursor")

        page_ids = [page["id"] for page in pages]

        return page_ids

[docs]    def load_page(self, page_id: str) -> Document:
        """Read a page."""
        data = self._request(PAGE_URL.format(page_id=page_id))

        # load properties as metadata
        metadata: Dict[str, Any] = {}

        for prop_name, prop_data in data["properties"].items():
            prop_type = prop_data["type"]

            if prop_type == "rich_text":
                value = (
                    prop_data["rich_text"][0]["plain_text"]
                    if prop_data["rich_text"]
                    else None
                )
            elif prop_type == "title":
                value = (
                    prop_data["title"][0]["plain_text"] if prop_data["title"] else None
                )
            elif prop_type == "multi_select":
                value = (
                    [item["name"] for item in prop_data["multi_select"]]
                    if prop_data["multi_select"]
                    else []
                )
            elif prop_type == "url":
                value = prop_data["url"]
            else:
                value = None

            metadata[prop_name.lower()] = value

        metadata["id"] = page_id

        return Document(page_content=self._load_blocks(page_id), metadata=metadata)

    def _load_blocks(self, block_id: str, num_tabs: int = 0) -> str:
        """Read a block and its children."""
        result_lines_arr: List[str] = []
        cur_block_id: str = block_id

        while cur_block_id:
            data = self._request(BLOCK_URL.format(block_id=cur_block_id))

            for result in data["results"]:
                result_obj = result[result["type"]]

                if "rich_text" not in result_obj:
                    continue

                cur_result_text_arr: List[str] = []

                for rich_text in result_obj["rich_text"]:
                    if "text" in rich_text:
                        cur_result_text_arr.append(
                            "\t" * num_tabs + rich_text["text"]["content"]
                        )

                if result["has_children"]:
                    children_text = self._load_blocks(
                        result["id"], num_tabs=num_tabs + 1
                    )
                    cur_result_text_arr.append(children_text)

                result_lines_arr.append("\n".join(cur_result_text_arr))

            cur_block_id = data.get("next_cursor")

        return "\n".join(result_lines_arr)

    def _request(
        self, url: str, method: str = "GET", query_dict: Dict[str, Any] = {}
    ) -> Any:
        res = requests.request(
            method,
            url,
            headers=self.headers,
            json=query_dict,
            timeout=self.request_timeout_sec,
        )
        res.raise_for_status()
        return res.json()
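A brief usage sketch, not part of the module source; both credentials are placeholders, and the integration must be shared with the database in Notion.

from langchain.document_loaders import NotionDBLoader

loader = NotionDBLoader(
    integration_token="NOTION_INTEGRATION_TOKEN",
    database_id="NOTION_DATABASE_ID",
    request_timeout_sec=30,
)
docs = loader.load()  # one Document per page; properties are lower-cased into metadata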
Source code for langchain.document_loaders.wikipedia

from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.wikipedia import WikipediaAPIWrapper


[docs]class WikipediaLoader(BaseLoader):
    """Loads a query result from www.wikipedia.org into a list of Documents.

    The hard limit on the number of downloaded Documents is 300 for now.
    Each wiki page represents one Document.
    """

    def __init__(
        self,
        query: str,
        lang: str = "en",
        load_max_docs: Optional[int] = 100,
        load_all_available_meta: Optional[bool] = False,
    ):
        self.query = query
        self.lang = lang
        self.load_max_docs = load_max_docs
        self.load_all_available_meta = load_all_available_meta

[docs]    def load(self) -> List[Document]:
        client = WikipediaAPIWrapper(
            lang=self.lang,
            top_k_results=self.load_max_docs,
            load_all_available_meta=self.load_all_available_meta,
        )
        docs = client.load(self.query)
        return docs
Source code for langchain.document_loaders.imsdb

"""Loader that loads IMSDb."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader


[docs]class IMSDbLoader(WebBaseLoader):
    """Loader that loads IMSDb webpages."""

[docs]    def load(self) -> List[Document]:
        """Load webpage."""
        soup = self.scrape()
        text = soup.select_one("td[class='scrtext']").text
        metadata = {"source": self.web_path}
        return [Document(page_content=text, metadata=metadata)]
Source code for langchain.document_loaders.gutenberg

"""Loader that loads .txt web files."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


[docs]class GutenbergLoader(BaseLoader):
    """Loader that uses urllib to load .txt web files."""

    def __init__(self, file_path: str):
        """Initialize with file path."""
        if not file_path.startswith("https://www.gutenberg.org"):
            raise ValueError("file path must start with 'https://www.gutenberg.org'")

        if not file_path.endswith(".txt"):
            raise ValueError("file path must end with '.txt'")

        self.file_path = file_path

[docs]    def load(self) -> List[Document]:
        """Load file."""
        from urllib.request import urlopen

        elements = urlopen(self.file_path)
        text = "\n\n".join([str(el.decode("utf-8-sig")) for el in elements])
        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]
Source code for langchain.document_loaders.word_document

"""Loader that loads word documents."""
import os
import tempfile
from abc import ABC
from typing import List
from urllib.parse import urlparse

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader


[docs]class Docx2txtLoader(BaseLoader, ABC):
    """Loads a DOCX with docx2txt and chunks at character level.

    Defaults to check for local file, but if the file is a web path, it will download
    it to a temporary file, and use that, then clean up the temporary file after
    completion
    """

    def __init__(self, file_path: str):
        """Initialize with file path."""
        self.file_path = file_path
        if "~" in self.file_path:
            self.file_path = os.path.expanduser(self.file_path)

        # If the file is a web path, download it to a temporary file, and use that
        if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
            r = requests.get(self.file_path)

            if r.status_code != 200:
                raise ValueError(
                    "Check the url of your file; returned status code %s"
                    % r.status_code
                )

            self.web_path = self.file_path
            self.temp_file = tempfile.NamedTemporaryFile()
            self.temp_file.write(r.content)
            self.file_path = self.temp_file.name
        elif not os.path.isfile(self.file_path):
            raise ValueError("File path %s is not a valid file or url" % self.file_path)

    def __del__(self) -> None:
        if hasattr(self, "temp_file"):
            self.temp_file.close()

[docs]    def load(self) -> List[Document]:
        """Load given path as single page."""
        import docx2txt

        return [
            Document(
                page_content=docx2txt.process(self.file_path),
                metadata={"source": self.file_path},
            )
        ]

    @staticmethod
    def _is_valid_url(url: str) -> bool:
        """Check if the url is valid."""
        parsed = urlparse(url)
        return bool(parsed.netloc) and bool(parsed.scheme)


[docs]class UnstructuredWordDocumentLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load word documents."""

    def _get_elements(self) -> List:
        from unstructured.__version__ import __version__ as __unstructured_version__
        from unstructured.file_utils.filetype import FileType, detect_filetype

        unstructured_version = tuple(
            [int(x) for x in __unstructured_version__.split(".")]
        )

        # NOTE(MthwRobinson) - magic will raise an import error if the libmagic
        # system dependency isn't installed. If it's not installed, we'll just
        # check the file extension
        try:
            import magic  # noqa: F401

            is_doc = detect_filetype(self.file_path) == FileType.DOC
        except ImportError:
            _, extension = os.path.splitext(str(self.file_path))
            is_doc = extension == ".doc"

        if is_doc and unstructured_version < (0, 4, 11):
            raise ValueError(
                f"You are on unstructured version {__unstructured_version__}. "
                "Partitioning .doc files is only supported in unstructured>=0.4.11. "
                "Please upgrade the unstructured package and try again."
            )

        if is_doc:
            from unstructured.partition.doc import partition_doc

            return partition_doc(filename=self.file_path, **self.unstructured_kwargs)
        else:
            from unstructured.partition.docx import partition_docx

            return partition_docx(filename=self.file_path, **self.unstructured_kwargs)
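A brief usage sketch, not part of the module source; the path is an example, and a direct URL to a .docx file would also work since the loader downloads web paths to a temporary file.

from langchain.document_loaders import Docx2txtLoader

docs = Docx2txtLoader("example_data/fake.docx").load()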
Source code for langchain.memory.summary_buffer

from typing import Any, Dict, List

from pydantic import root_validator

from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.summary import SummarizerMixin
from langchain.schema import BaseMessage, get_buffer_string


[docs]class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
    """Buffer with summarizer for storing conversation memory."""

    max_token_limit: int = 2000
    moving_summary_buffer: str = ""
    memory_key: str = "history"

    @property
    def buffer(self) -> List[BaseMessage]:
        return self.chat_memory.messages

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

[docs]    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""
        buffer = self.buffer
        if self.moving_summary_buffer != "":
            first_messages: List[BaseMessage] = [
                self.summary_message_cls(content=self.moving_summary_buffer)
            ]
            buffer = first_messages + buffer
        if self.return_messages:
            final_buffer: Any = buffer
        else:
            final_buffer = get_buffer_string(
                buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix
            )
        return {self.memory_key: final_buffer}

    @root_validator()
    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
        """Validate that prompt input variables are consistent."""
        prompt_variables = values["prompt"].input_variables
        expected_keys = {"summary", "new_lines"}
        if expected_keys != set(prompt_variables):
            raise ValueError(
                "Got unexpected prompt input variables. The prompt expects "
                f"{prompt_variables}, but it should have {expected_keys}."
            )
        return values

[docs]    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        super().save_context(inputs, outputs)
        self.prune()

[docs]    def prune(self) -> None:
        """Prune buffer if it exceeds max token limit"""
        buffer = self.chat_memory.messages
        curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
        if curr_buffer_length > self.max_token_limit:
            pruned_memory = []
            while curr_buffer_length > self.max_token_limit:
                pruned_memory.append(buffer.pop(0))
                curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
            self.moving_summary_buffer = self.predict_new_summary(
                pruned_memory, self.moving_summary_buffer
            )

[docs]    def clear(self) -> None:
        """Clear memory contents."""
        super().clear()
        self.moving_summary_buffer = ""
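A brief usage sketch, not part of the module source; OpenAI is one possible LLM and the tiny token limit is chosen just to force summarization.

from langchain.llms import OpenAI
from langchain.memory import ConversationSummaryBufferMemory

memory = ConversationSummaryBufferMemory(llm=OpenAI(), max_token_limit=40)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much you"}, {"output": "not much"})
# Older turns beyond the token limit are condensed into a leading system summary.
print(memory.load_memory_variables({}))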
Source code for langchain.memory.kg from typing import Any, Dict, List, Type, Union from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.graphs import NetworkxEntityGraph from langchain.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( ENTITY_EXTRACTION_PROMPT, KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT, ) from langchain.memory.utils import get_prompt_input_key from langchain.prompts.base import BasePromptTemplate from langchain.schema import ( BaseMessage, SystemMessage, get_buffer_string, ) [docs]class ConversationKGMemory(BaseChatMemory): """Knowledge graph memory for storing conversation memory. Integrates with external knowledge graph to store and retrieve information about knowledge triples in the conversation. """ k: int = 2 human_prefix: str = "Human" ai_prefix: str = "AI" kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph) knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT llm: BaseLanguageModel summary_message_cls: Type[BaseMessage] = SystemMessage """Number of previous utterances to include in the context.""" memory_key: str = "history" #: :meta private: [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" entities = self._get_current_entities(inputs) summary_strings = []
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/memory/kg.html
b86c4976d853-1
entities = self._get_current_entities(inputs) summary_strings = [] for entity in entities: knowledge = self.kg.get_entity_knowledge(entity) if knowledge: summary = f"On {entity}: {'. '.join(knowledge)}." summary_strings.append(summary) context: Union[str, List] if not summary_strings: context = [] if self.return_messages else "" elif self.return_messages: context = [ self.summary_message_cls(content=text) for text in summary_strings ] else: context = "\n".join(summary_strings) return {self.memory_key: context} @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str: """Get the input key for the prompt.""" if self.input_key is None: return get_prompt_input_key(inputs, self.memory_variables) return self.input_key def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str: """Get the output key for the prompt.""" if self.output_key is None: if len(outputs) != 1: raise ValueError(f"One output key expected, got {outputs.keys()}") return list(outputs.keys())[0] return self.output_key [docs] def get_current_entities(self, input_string: str) -> List[str]: chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) buffer_string = get_buffer_string( self.chat_memory.messages[-self.k * 2 :], human_prefix=self.human_prefix,
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/memory/kg.html
b86c4976d853-2
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
)
return get_entities(output)
def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
"""Get the current entities in the conversation."""
prompt_input_key = self._get_prompt_input_key(inputs)
return self.get_current_entities(inputs[prompt_input_key])
[docs] def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]:
"""Extract knowledge triples from the given input string, using the LLM."""
chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
)
knowledge = parse_triples(output)
return knowledge
def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
"""Get and update knowledge graph from the conversation history."""
prompt_input_key = self._get_prompt_input_key(inputs)
knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
for triple in knowledge:
self.kg.add_triple(triple)
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self._get_and_update_kg(inputs)
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/memory/kg.html
b86c4976d853-3
[docs] def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.kg.clear()
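A brief usage sketch of the class above (not part of the original module; it assumes an OpenAI API key is configured, and the example facts are illustrative):

from langchain.llms import OpenAI
from langchain.memory import ConversationKGMemory

memory = ConversationKGMemory(llm=OpenAI(temperature=0))
memory.save_context({"input": "Sam's favorite color is blue"}, {"output": "Good to know!"})

# Entities found in the new input are looked up in the graph and their
# known triples are summarized into the returned context.
print(memory.load_memory_variables({"input": "Tell me about Sam"}))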
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/memory/kg.html
2890634ff7fb-0
Source code for langchain.memory.token_buffer
from typing import Any, Dict, List
from langchain.base_language import BaseLanguageModel
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import BaseMessage, get_buffer_string
[docs]class ConversationTokenBufferMemory(BaseChatMemory):
"""Conversation memory buffer that is pruned to a maximum token length."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
memory_key: str = "history"
max_token_limit: int = 2000
@property
def buffer(self) -> List[BaseMessage]:
"""List of messages in the memory buffer."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
buffer: Any = self.buffer
if self.return_messages:
final_buffer: Any = buffer
else:
final_buffer = get_buffer_string(
buffer,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: final_buffer}
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer, pruning if needed."""
super().save_context(inputs, outputs)
# Prune buffer if it exceeds max token limit
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/memory/token_buffer.html
2890634ff7fb-1
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
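A minimal usage sketch of the class above (not part of the original module; it assumes an OpenAI API key is configured, and the 60-token limit is illustrative):

from langchain.llms import OpenAI
from langchain.memory import ConversationTokenBufferMemory

memory = ConversationTokenBufferMemory(llm=OpenAI(), max_token_limit=60)
memory.save_context({"input": "Hello"}, {"output": "Hi! How can I help?"})
memory.save_context({"input": "What's LangChain?"}, {"output": "A framework for LLM apps."})

# Unlike the summary buffer, pruned messages are simply dropped;
# only the most recent turns that fit within max_token_limit remain.
print(memory.load_memory_variables({}))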
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/memory/token_buffer.html
f754198f078c-0
Source code for langchain.memory.entity import logging from abc import ABC, abstractmethod from itertools import islice from typing import Any, Dict, Iterable, List, Optional from pydantic import BaseModel, Field from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( ENTITY_EXTRACTION_PROMPT, ENTITY_SUMMARIZATION_PROMPT, ) from langchain.memory.utils import get_prompt_input_key from langchain.prompts.base import BasePromptTemplate from langchain.schema import BaseMessage, get_buffer_string logger = logging.getLogger(__name__) class BaseEntityStore(BaseModel, ABC): @abstractmethod def get(self, key: str, default: Optional[str] = None) -> Optional[str]: """Get entity value from store.""" pass @abstractmethod def set(self, key: str, value: Optional[str]) -> None: """Set entity value in store.""" pass @abstractmethod def delete(self, key: str) -> None: """Delete entity value from store.""" pass @abstractmethod def exists(self, key: str) -> bool: """Check if entity exists in store.""" pass @abstractmethod def clear(self) -> None: """Delete all entities from store.""" pass [docs]class InMemoryEntityStore(BaseEntityStore): """Basic in-memory entity store.""" store: Dict[str, Optional[str]] = {} [docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]: return self.store.get(key, default)
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/memory/entity.html
f754198f078c-1
[docs] def set(self, key: str, value: Optional[str]) -> None:
self.store[key] = value
[docs] def delete(self, key: str) -> None:
del self.store[key]
[docs] def exists(self, key: str) -> bool:
return key in self.store
[docs] def clear(self) -> None:
return self.store.clear()
[docs]class RedisEntityStore(BaseEntityStore):
"""Redis-backed Entity store.
Entities get a TTL of 1 day by default, and
that TTL is extended by 3 days every time the entity is read back.
"""
redis_client: Any
session_id: str = "default"
key_prefix: str = "memory_store"
ttl: Optional[int] = 60 * 60 * 24
recall_ttl: Optional[int] = 60 * 60 * 24 * 3
def __init__(
self,
session_id: str = "default",
url: str = "redis://localhost:6379/0",
key_prefix: str = "memory_store",
ttl: Optional[int] = 60 * 60 * 24,
recall_ttl: Optional[int] = 60 * 60 * 24 * 3,
*args: Any,
**kwargs: Any,
):
try:
import redis
except ImportError:
raise ImportError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
super().__init__(*args, **kwargs)
try:
self.redis_client = redis.Redis.from_url(url=url, decode_responses=True)
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/memory/entity.html
f754198f078c-2
except redis.exceptions.ConnectionError as error:
logger.error(error)
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
self.recall_ttl = recall_ttl or ttl
@property
def full_key_prefix(self) -> str:
return f"{self.key_prefix}:{self.session_id}"
[docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
res = (
self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl)
or default
or ""
)
logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
[docs] def set(self, key: str, value: Optional[str]) -> None:
if not value:
return self.delete(key)
self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl)
logger.debug(
f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}"
)
[docs] def delete(self, key: str) -> None:
self.redis_client.delete(f"{self.full_key_prefix}:{key}")
[docs] def exists(self, key: str) -> bool:
return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1
[docs] def clear(self) -> None:
# iterate a list in batches of size batch_size
def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]:
iterator = iter(iterable)
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/memory/entity.html
f754198f078c-3
while batch := list(islice(iterator, batch_size)):
yield batch
for keybatch in batched(
self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500
):
self.redis_client.delete(*keybatch)
[docs]class SQLiteEntityStore(BaseEntityStore):
"""SQLite-backed entity store."""
session_id: str = "default"
table_name: str = "memory_store"
def __init__(
self,
session_id: str = "default",
db_file: str = "entities.db",
table_name: str = "memory_store",
*args: Any,
**kwargs: Any,
):
try:
import sqlite3
except ImportError:
raise ImportError(
"Could not import the sqlite3 module. "
"sqlite3 is part of the Python standard library, so this usually "
"means your Python interpreter was built without SQLite support."
)
super().__init__(*args, **kwargs)
self.conn = sqlite3.connect(db_file)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
@property
def full_table_name(self) -> str:
return f"{self.table_name}_{self.session_id}"
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""
CREATE TABLE IF NOT EXISTS {self.full_table_name} (
key TEXT PRIMARY KEY,
value TEXT
)
"""
with self.conn:
self.conn.execute(create_table_query)
[docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
query = f"""
SELECT value
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/memory/entity.html
f754198f078c-4
query = f""" SELECT value FROM {self.full_table_name} WHERE key = ? """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() if result is not None: value = result[0] return value return default [docs] def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) query = f""" INSERT OR REPLACE INTO {self.full_table_name} (key, value) VALUES (?, ?) """ with self.conn: self.conn.execute(query, (key, value)) [docs] def delete(self, key: str) -> None: query = f""" DELETE FROM {self.full_table_name} WHERE key = ? """ with self.conn: self.conn.execute(query, (key,)) [docs] def exists(self, key: str) -> bool: query = f""" SELECT 1 FROM {self.full_table_name} WHERE key = ? LIMIT 1 """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() return result is not None [docs] def clear(self) -> None: query = f""" DELETE FROM {self.full_table_name} """ with self.conn: self.conn.execute(query) [docs]class ConversationEntityMemory(BaseChatMemory): """Entity extractor & summarizer to memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/memory/entity.html
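A short usage sketch of the entity stores defined above (not part of the original module; the file name entities.db, the session id, and the example values are all illustrative):

from langchain.memory.entity import InMemoryEntityStore, SQLiteEntityStore

# Ephemeral store: contents vanish when the process exits.
store = InMemoryEntityStore()
store.set("Alice", "Alice is a software engineer based in Berlin.")
assert store.exists("Alice")
print(store.get("Alice"))
store.delete("Alice")

# SQLite-backed store: persists across runs, one table per session_id.
sqlite_store = SQLiteEntityStore(session_id="demo", db_file="entities.db")
sqlite_store.set("Bob", "Bob prefers tea over coffee.")
print(sqlite_store.get("Bob", default="unknown"))
sqlite_store.clear()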