        payload["mime_type"] = blob.mimetype
        return payload

    def _handle_request(
        self, payload: EmbaasDocumentExtractionPayload
    ) -> List[Document]:
        """Sends a request to the embaas API and handles the response."""
        headers = {
            "Authorization": f"Bearer {self.embaas_api_key}",
            "Content-Type": "application/json",
        }

        response = requests.post(self.api_url, headers=headers, json=payload)
        response.raise_for_status()

        parsed_response = response.json()
        return EmbaasBlobLoader._api_response_to_documents(
            chunks=parsed_response["data"]["chunks"]
        )

    def _get_documents(self, blob: Blob) -> Iterator[Document]:
        """Get the documents from the blob."""
        payload = self._generate_payload(blob=blob)
        try:
            documents = self._handle_request(payload=payload)
        except requests.exceptions.RequestException as e:
            if e.response is None or not e.response.text:
                raise ValueError(
                    f"Error raised by embaas document text extraction API: {e}"
                )

            parsed_response = e.response.json()
            if "message" in parsed_response:
                raise ValueError(
                    f"Validation Error raised by embaas document text extraction API:"
                    f" {parsed_response['message']}"
                )
            raise

        yield from documents


class EmbaasLoader(BaseEmbaasLoader, BaseLoader):
    """Wrapper around embaas's document loader service.

    To use, you should have the environment variable ``EMBAAS_API_KEY`` set
    with your API key, or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            # Default parsing
            from langchain.document_loaders.embaas import EmbaasLoader
            loader = EmbaasLoader(file_path="example.mp3")
            documents = loader.load()

            # Custom api parameters (create embeddings automatically)
            from langchain.document_loaders.embaas import EmbaasBlobLoader
            loader = EmbaasBlobLoader(
                file_path="example.pdf",
                params={
                    "should_embed": True,
                    "model": "e5-large-v2",
                    "chunk_size": 256,
                    "chunk_splitter": "CharacterTextSplitter"
                }
            )
            documents = loader.load()
    """

    file_path: str
    """The path to the file to load."""
    blob_loader: Optional[EmbaasBlobLoader]
    """The blob loader to use. If not provided, a default one will be created."""

    @validator("blob_loader", always=True)
    def validate_blob_loader(
        cls, v: EmbaasBlobLoader, values: Dict
    ) -> EmbaasBlobLoader:
        return v or EmbaasBlobLoader(
            embaas_api_key=values["embaas_api_key"],
            api_url=values["api_url"],
            params=values["params"],
        )

    def lazy_load(self) -> Iterator[Document]:
        """Load the documents from the file path lazily."""
        blob = Blob.from_path(path=self.file_path)

        assert self.blob_loader is not None
        # Should never be None, but mypy doesn't know that.
        yield from self.blob_loader.lazy_parse(blob=blob)

    def load(self) -> List[Document]:
        return list(self.lazy_load())

    def load_and_split(
        self, text_splitter: Optional[TextSplitter] = None
    ) -> List[Document]:
        if self.params.get("should_embed", False):
            warnings.warn(
                "Embeddings are not supported with load_and_split."
                " Use the API splitter to properly generate embeddings."
                " For more information see embaas.io docs."
            )
        return super().load_and_split(text_splitter=text_splitter)
Source code for langchain.document_loaders.rtf

"""Loader that loads rich text files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    satisfies_min_unstructured_version,
)


class UnstructuredRTFLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load rtf files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        min_unstructured_version = "0.5.12"
        if not satisfies_min_unstructured_version(min_unstructured_version):
            raise ValueError(
                "Partitioning rtf files is only supported in "
                f"unstructured>={min_unstructured_version}."
            )
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.rtf import partition_rtf

        return partition_rtf(filename=self.file_path, **self.unstructured_kwargs)
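A minimal usage sketch (not part of the original listing; the file name is a placeholder and unstructured>=0.5.12 must be installed):

    # Hypothetical usage: "notes.rtf" stands in for any local RTF file.
    from langchain.document_loaders.rtf import UnstructuredRTFLoader

    loader = UnstructuredRTFLoader("notes.rtf", mode="elements")
    docs = loader.load()  # one Document per detected element in "elements" mode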
Source code for langchain.document_loaders.mediawikidump

"""Load Data from a MediaWiki dump xml."""
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class MWDumpLoader(BaseLoader):
    """Load MediaWiki dump from XML file.

    Example:
        .. code-block:: python

            from langchain.document_loaders import MWDumpLoader

            loader = MWDumpLoader(
                file_path="myWiki.xml",
                encoding="utf8"
            )
            docs = loader.load()
            from langchain.text_splitter import RecursiveCharacterTextSplitter
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=1000, chunk_overlap=0
            )
            texts = text_splitter.split_documents(docs)

    :param file_path: XML local file path
    :type file_path: str
    :param encoding: Charset encoding, defaults to "utf8"
    :type encoding: str, optional
    """

    def __init__(self, file_path: str, encoding: Optional[str] = "utf8"):
        """Initialize with file path."""
        self.file_path = file_path
        self.encoding = encoding

    def load(self) -> List[Document]:
        """Load from file path."""
        import mwparserfromhell
        import mwxml

        dump = mwxml.Dump.from_file(open(self.file_path, encoding=self.encoding))

        docs = []
        for page in dump.pages:
            for revision in page:
                code = mwparserfromhell.parse(revision.text)
                text = code.strip_code(
                    normalize=True, collapse=True, keep_template_params=False
                )
                metadata = {"source": page.title}
                docs.append(Document(page_content=text, metadata=metadata))

        return docs
Source code for langchain.document_loaders.ifixit

"""Loader that loads iFixit data."""
from typing import List, Optional

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.web_base import WebBaseLoader

IFIXIT_BASE_URL = "https://www.ifixit.com/api/2.0"


class IFixitLoader(BaseLoader):
    """Load iFixit repair guides, device wikis and answers.

    iFixit is the largest, open repair community on the web. The site contains
    nearly 100k repair manuals, 200k Questions & Answers on 42k devices, and
    all the data is licensed under CC-BY.

    This loader will allow you to download the text of a repair guide, text of
    Q&A's and wikis from devices on iFixit using their open APIs and web
    scraping.
    """

    def __init__(self, web_path: str):
        """Initialize with web path."""
        if not web_path.startswith("https://www.ifixit.com"):
            raise ValueError("web path must start with 'https://www.ifixit.com'")

        path = web_path.replace("https://www.ifixit.com", "")

        allowed_paths = ["/Device", "/Guide", "/Answers", "/Teardown"]

        """ TODO: Add /Wiki """

        if not any(path.startswith(allowed_path) for allowed_path in allowed_paths):
            raise ValueError(
                "web path must start with /Device, /Guide, /Teardown or /Answers"
            )

        pieces = [x for x in path.split("/") if x]

        """Teardowns are just guides by a different name"""
        self.page_type = pieces[0] if pieces[0] != "Teardown" else "Guide"

        if self.page_type == "Guide" or self.page_type == "Answers":
            self.id = pieces[2]
        else:
            self.id = pieces[1]

        self.web_path = web_path

    def load(self) -> List[Document]:
        if self.page_type == "Device":
            return self.load_device()
        elif self.page_type == "Guide" or self.page_type == "Teardown":
            return self.load_guide()
        elif self.page_type == "Answers":
            return self.load_questions_and_answers()
        else:
            raise ValueError("Unknown page type: " + self.page_type)

    @staticmethod
    def load_suggestions(query: str = "", doc_type: str = "all") -> List[Document]:
        res = requests.get(
            IFIXIT_BASE_URL + "/suggest/" + query + "?doctypes=" + doc_type
        )

        if res.status_code != 200:
            # str() guards against concatenating the decoded JSON dict directly
            raise ValueError(
                'Could not load suggestions for "' + query + '"\n' + str(res.json())
            )

        data = res.json()

        results = data["results"]
        output = []

        for result in results:
            try:
                loader = IFixitLoader(result["url"])
                if loader.page_type == "Device":
                    output += loader.load_device(include_guides=False)
                else:
                    output += loader.load()
            except ValueError:
                continue

        return output

    def load_questions_and_answers(
        self, url_override: Optional[str] = None
    ) -> List[Document]:
        loader = WebBaseLoader(self.web_path if url_override is None else url_override)
        soup = loader.scrape()

        output = []

        title = soup.find("h1", "post-title").text

        output.append("# " + title)
        output.append(soup.select_one(".post-content .post-text").text.strip())

        answersHeader = soup.find("div", "post-answers-header")
        if answersHeader:
            output.append("\n## " + answersHeader.text.strip())

        for answer in soup.select(".js-answers-list .post.post-answer"):
            if answer.has_attr("itemprop") and "acceptedAnswer" in answer["itemprop"]:
                output.append("\n### Accepted Answer")
            elif "post-helpful" in answer["class"]:
                output.append("\n### Most Helpful Answer")
            else:
                output.append("\n### Other Answer")
            output += [
                a.text.strip() for a in answer.select(".post-content .post-text")
            ]
            output.append("\n")

        text = "\n".join(output).strip()

        metadata = {"source": self.web_path, "title": title}

        return [Document(page_content=text, metadata=metadata)]

    def load_device(
        self, url_override: Optional[str] = None, include_guides: bool = True
    ) -> List[Document]:
        documents = []
        if url_override is None:
            url = IFIXIT_BASE_URL + "/wikis/CATEGORY/" + self.id
        else:
            url = url_override

        res = requests.get(url)
        data = res.json()
        text = "\n".join(
            [
                data[key]
                for key in ["title", "description", "contents_raw"]
                if key in data
            ]
        ).strip()

        metadata = {"source": self.web_path, "title": data["title"]}
        documents.append(Document(page_content=text, metadata=metadata))

        if include_guides:
            """Load and return documents for each guide linked to from the device"""
            guide_urls = [guide["url"] for guide in data["guides"]]
            for guide_url in guide_urls:
                documents.append(IFixitLoader(guide_url).load()[0])

        return documents

    def load_guide(self, url_override: Optional[str] = None) -> List[Document]:
        if url_override is None:
            url = IFIXIT_BASE_URL + "/guides/" + self.id
        else:
            url = url_override

        res = requests.get(url)

        if res.status_code != 200:
            # str() guards against concatenating the decoded JSON dict directly
            raise ValueError(
                "Could not load guide: " + self.web_path + "\n" + str(res.json())
            )

        data = res.json()

        doc_parts = ["# " + data["title"], data["introduction_raw"]]

        doc_parts.append("\n\n###Tools Required:")
        if len(data["tools"]) == 0:
            doc_parts.append("\n - None")
        else:
            for tool in data["tools"]:
                doc_parts.append("\n - " + tool["text"])

        doc_parts.append("\n\n###Parts Required:")
        if len(data["parts"]) == 0:
            doc_parts.append("\n - None")
        else:
            for part in data["parts"]:
                doc_parts.append("\n - " + part["text"])

        for row in data["steps"]:
            doc_parts.append(
                "\n\n## "
                + (
                    row["title"]
                    if row["title"] != ""
                    else "Step {}".format(row["orderby"])
                )
            )

            for line in row["lines"]:
                doc_parts.append(line["text_raw"])

        doc_parts.append(data["conclusion_raw"])

        text = "\n".join(doc_parts)

        metadata = {"source": self.web_path, "title": data["title"]}

        return [Document(page_content=text, metadata=metadata)]
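A minimal usage sketch (not part of the original listing; the guide URL below is illustrative and must be a real ifixit.com page at load time):

    from langchain.document_loaders.ifixit import IFixitLoader

    # Placeholder guide URL; any /Device, /Guide, /Teardown or /Answers path works.
    loader = IFixitLoader("https://www.ifixit.com/Guide/iPhone+6+Battery+Replacement/29363")
    docs = loader.load()

    # Keyword search across devices, guides and answers:
    hits = IFixitLoader.load_suggestions("iphone", doc_type="guide")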
Source code for langchain.document_loaders.snowflake_loader

from __future__ import annotations

from typing import Any, Dict, Iterator, List, Optional, Tuple

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class SnowflakeLoader(BaseLoader):
    """Loads a query result from Snowflake into a list of documents.

    Each document represents one row of the result. The `page_content_columns`
    are written into the `page_content` of the document. The `metadata_columns`
    are written into the `metadata` of the document. By default, all columns
    are written into the `page_content` and none into the `metadata`.
    """

    def __init__(
        self,
        query: str,
        user: str,
        password: str,
        account: str,
        warehouse: str,
        role: str,
        database: str,
        schema: str,
        parameters: Optional[Dict[str, Any]] = None,
        page_content_columns: Optional[List[str]] = None,
        metadata_columns: Optional[List[str]] = None,
    ):
        """Initialize Snowflake document loader.

        Args:
            query: The query to run in Snowflake.
            user: Snowflake user.
            password: Snowflake password.
            account: Snowflake account.
            warehouse: Snowflake warehouse.
            role: Snowflake role.
            database: Snowflake database.
            schema: Snowflake schema.
            page_content_columns: Optional. Columns written to Document `page_content`.
            metadata_columns: Optional. Columns written to Document `metadata`.
        """
        self.query = query
        self.user = user
        self.password = password
        self.account = account
        self.warehouse = warehouse
        self.role = role
        self.database = database
        self.schema = schema
        self.parameters = parameters
        self.page_content_columns = (
            page_content_columns if page_content_columns is not None else ["*"]
        )
        self.metadata_columns = metadata_columns if metadata_columns is not None else []

    def _execute_query(self) -> List[Dict[str, Any]]:
        try:
            import snowflake.connector
        except ImportError as ex:
            raise ValueError(
                "Could not import snowflake-connector-python package. "
                "Please install it with `pip install snowflake-connector-python`."
            ) from ex

        conn = snowflake.connector.connect(
            user=self.user,
            password=self.password,
            account=self.account,
            warehouse=self.warehouse,
            role=self.role,
            database=self.database,
            schema=self.schema,
            parameters=self.parameters,
        )
        try:
            cur = conn.cursor()
            cur.execute("USE DATABASE " + self.database)
            cur.execute("USE SCHEMA " + self.schema)
            cur.execute(self.query, self.parameters)
            query_result = cur.fetchall()
            column_names = [column[0] for column in cur.description]
            query_result = [dict(zip(column_names, row)) for row in query_result]
        except Exception as e:
            print(f"An error occurred: {e}")
            query_result = []
        finally:
            cur.close()
        return query_result

    def _get_columns(
        self, query_result: List[Dict[str, Any]]
    ) -> Tuple[List[str], List[str]]:
        page_content_columns = (
            self.page_content_columns if self.page_content_columns else []
        )
        metadata_columns = self.metadata_columns if self.metadata_columns else []
        if page_content_columns is None and query_result:
            page_content_columns = list(query_result[0].keys())
        if metadata_columns is None:
            metadata_columns = []
        return page_content_columns or [], metadata_columns

    def lazy_load(self) -> Iterator[Document]:
        query_result = self._execute_query()
        if isinstance(query_result, Exception):
            print(f"An error occurred during the query: {query_result}")
            return []
        page_content_columns, metadata_columns = self._get_columns(query_result)
        if "*" in page_content_columns:
            page_content_columns = list(query_result[0].keys())
        for row in query_result:
            page_content = "\n".join(
                f"{k}: {v}" for k, v in row.items() if k in page_content_columns
            )
            metadata = {k: v for k, v in row.items() if k in metadata_columns}
            doc = Document(page_content=page_content, metadata=metadata)
            yield doc

    def load(self) -> List[Document]:
        """Load data into document objects."""
        return list(self.lazy_load())
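A minimal construction sketch (not from the original page; every connection value below is a placeholder, and column names are matched case-sensitively against the result, which Snowflake typically returns in uppercase):

    from langchain.document_loaders.snowflake_loader import SnowflakeLoader

    loader = SnowflakeLoader(
        query="SELECT TEXT, SURVEY_ID FROM MY_TABLE LIMIT 10",  # placeholder query
        user="<user>",
        password="<password>",
        account="<account-identifier>",
        warehouse="<warehouse>",
        role="<role>",
        database="<database>",
        schema="<schema>",
        page_content_columns=["TEXT"],
        metadata_columns=["SURVEY_ID"],
    )
    docs = loader.load()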
Source code for langchain.document_loaders.s3_file

"""Loading logic for loading documents from an s3 file."""
import os
import tempfile
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader


class S3FileLoader(BaseLoader):
    """Loading logic for loading documents from s3."""

    def __init__(self, bucket: str, key: str):
        """Initialize with bucket and key name."""
        self.bucket = bucket
        self.key = key

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            import boto3
        except ImportError:
            raise ImportError(
                "Could not import `boto3` python package. "
                "Please install it with `pip install boto3`."
            )
        s3 = boto3.client("s3")
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.key}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            s3.download_file(self.bucket, self.key, file_path)
            loader = UnstructuredFileLoader(file_path)
            return loader.load()
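A minimal usage sketch (not from the original page; bucket and key are placeholders, and boto3 must be able to find AWS credentials, e.g. via environment variables or ~/.aws/credentials):

    from langchain.document_loaders.s3_file import S3FileLoader

    loader = S3FileLoader(bucket="my-bucket", key="reports/q1.pdf")
    docs = loader.load()  # downloads to a temp dir, then parses with unstructured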
Source code for langchain.document_loaders.bigquery

from __future__ import annotations

from typing import TYPE_CHECKING, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

if TYPE_CHECKING:
    from google.auth.credentials import Credentials


class BigQueryLoader(BaseLoader):
    """Loads a query result from BigQuery into a list of documents.

    Each document represents one row of the result. The `page_content_columns`
    are written into the `page_content` of the document. The `metadata_columns`
    are written into the `metadata` of the document. By default, all columns
    are written into the `page_content` and none into the `metadata`.
    """

    def __init__(
        self,
        query: str,
        project: Optional[str] = None,
        page_content_columns: Optional[List[str]] = None,
        metadata_columns: Optional[List[str]] = None,
        credentials: Optional[Credentials] = None,
    ):
        """Initialize BigQuery document loader.

        Args:
            query: The query to run in BigQuery.
            project: Optional. The project to run the query in.
            page_content_columns: Optional. The columns to write into the
                `page_content` of the document.
            metadata_columns: Optional. The columns to write into the
                `metadata` of the document.
            credentials : google.auth.credentials.Credentials, optional
                Credentials for accessing Google APIs. Use this parameter to
                override default credentials, such as to use Compute Engine
                (`google.auth.compute_engine.Credentials`) or Service Account
                (`google.oauth2.service_account.Credentials`) credentials
                directly.
        """
        self.query = query
        self.project = project
        self.page_content_columns = page_content_columns
        self.metadata_columns = metadata_columns
        self.credentials = credentials

    def load(self) -> List[Document]:
        try:
            from google.cloud import bigquery
        except ImportError as ex:
            raise ValueError(
                "Could not import google-cloud-bigquery python package. "
                "Please install it with `pip install google-cloud-bigquery`."
            ) from ex

        bq_client = bigquery.Client(credentials=self.credentials, project=self.project)
        query_result = bq_client.query(self.query).result()
        docs: List[Document] = []

        page_content_columns = self.page_content_columns
        metadata_columns = self.metadata_columns

        if page_content_columns is None:
            page_content_columns = [column.name for column in query_result.schema]
        if metadata_columns is None:
            metadata_columns = []

        for row in query_result:
            page_content = "\n".join(
                f"{k}: {v}" for k, v in row.items() if k in page_content_columns
            )
            metadata = {k: v for k, v in row.items() if k in metadata_columns}
            doc = Document(page_content=page_content, metadata=metadata)
            docs.append(doc)

        return docs
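A minimal usage sketch (not from the original page; the query, table, and project are placeholders, and application-default credentials are used when none are passed):

    from langchain.document_loaders.bigquery import BigQueryLoader

    loader = BigQueryLoader(
        query="SELECT title, body FROM `my-project.my_dataset.articles` LIMIT 10",
        project="my-project",
        metadata_columns=["title"],  # "title" lands in metadata, "body" in page_content
    )
    docs = loader.load()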
Source code for langchain.document_loaders.hugging_face_dataset

"""Loader that loads HuggingFace datasets."""
from typing import Iterator, List, Mapping, Optional, Sequence, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class HuggingFaceDatasetLoader(BaseLoader):
    """Loading logic for loading documents from the Hugging Face Hub."""

    def __init__(
        self,
        path: str,
        page_content_column: str = "text",
        name: Optional[str] = None,
        data_dir: Optional[str] = None,
        data_files: Optional[
            Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]
        ] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: Optional[bool] = None,
        save_infos: bool = False,
        use_auth_token: Optional[Union[bool, str]] = None,
        num_proc: Optional[int] = None,
    ):
        """Initialize the HuggingFaceDatasetLoader.

        Args:
            path: Path or name of the dataset.
            page_content_column: Page content column name.
            name: Name of the dataset configuration.
            data_dir: Data directory of the dataset configuration.
            data_files: Path(s) to source data file(s).
            cache_dir: Directory to read/write data.
            keep_in_memory: Whether to copy the dataset in-memory.
            save_infos: Save the dataset information (checksums/size/splits/...).
            use_auth_token: Bearer token for remote files on the Datasets Hub.
            num_proc: Number of processes.
        """
        self.path = path
        self.page_content_column = page_content_column
        self.name = name
        self.data_dir = data_dir
        self.data_files = data_files
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.save_infos = save_infos
        self.use_auth_token = use_auth_token
        self.num_proc = num_proc

    def lazy_load(
        self,
    ) -> Iterator[Document]:
        """Load documents lazily."""
        try:
            from datasets import load_dataset
        except ImportError:
            raise ImportError(
                "Could not import datasets python package. "
                "Please install it with `pip install datasets`."
            )

        dataset = load_dataset(
            path=self.path,
            name=self.name,
            data_dir=self.data_dir,
            data_files=self.data_files,
            cache_dir=self.cache_dir,
            keep_in_memory=self.keep_in_memory,
            save_infos=self.save_infos,
            use_auth_token=self.use_auth_token,
            num_proc=self.num_proc,
        )

        yield from (
            Document(
                page_content=row.pop(self.page_content_column),
                metadata=row,
            )
            for key in dataset.keys()
            for row in dataset[key]
        )

    def load(self) -> List[Document]:
        """Load documents."""
        return list(self.lazy_load())
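A minimal usage sketch (not from the original page; "imdb" is one public dataset whose rows have a "text" column, and the remaining columns become Document metadata):

    from langchain.document_loaders.hugging_face_dataset import HuggingFaceDatasetLoader

    loader = HuggingFaceDatasetLoader(path="imdb", page_content_column="text")
    docs = loader.load()  # iterates every split; "label" etc. land in metadata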
Source code for langchain.document_loaders.slack_directory

"""Loader for documents from a Slack export."""
import json
import zipfile
from pathlib import Path
from typing import Dict, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class SlackDirectoryLoader(BaseLoader):
    """Loader for loading documents from a Slack directory dump."""

    def __init__(self, zip_path: str, workspace_url: Optional[str] = None):
        """Initialize the SlackDirectoryLoader.

        Args:
            zip_path (str): The path to the Slack directory dump zip file.
            workspace_url (Optional[str]): The Slack workspace URL.
                Including the URL will turn sources into links. Defaults to None.
        """
        self.zip_path = Path(zip_path)
        self.workspace_url = workspace_url
        self.channel_id_map = self._get_channel_id_map(self.zip_path)

    @staticmethod
    def _get_channel_id_map(zip_path: Path) -> Dict[str, str]:
        """Get a dictionary mapping channel names to their respective IDs."""
        with zipfile.ZipFile(zip_path, "r") as zip_file:
            try:
                with zip_file.open("channels.json", "r") as f:
                    channels = json.load(f)
                return {channel["name"]: channel["id"] for channel in channels}
            except KeyError:
                return {}

    def load(self) -> List[Document]:
        """Load and return documents from the Slack directory dump."""
        docs = []
        with zipfile.ZipFile(self.zip_path, "r") as zip_file:
            for channel_path in zip_file.namelist():
                channel_name = Path(channel_path).parent.name
                if not channel_name:
                    continue
                if channel_path.endswith(".json"):
                    messages = self._read_json(zip_file, channel_path)
                    for message in messages:
                        document = self._convert_message_to_document(
                            message, channel_name
                        )
                        docs.append(document)
        return docs

    def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) -> List[dict]:
        """Read JSON data from a zip subfile."""
        with zip_file.open(file_path, "r") as f:
            data = json.load(f)
        return data

    def _convert_message_to_document(
        self, message: dict, channel_name: str
    ) -> Document:
        """
        Convert a message to a Document object.

        Args:
            message (dict): A message in the form of a dictionary.
            channel_name (str): The name of the channel the message belongs to.

        Returns:
            Document: A Document object representing the message.
        """
        text = message.get("text", "")
        metadata = self._get_message_metadata(message, channel_name)
        return Document(
            page_content=text,
            metadata=metadata,
        )

    def _get_message_metadata(self, message: dict, channel_name: str) -> dict:
        """Create and return metadata for a given message and channel."""
        timestamp = message.get("ts", "")
        user = message.get("user", "")
        source = self._get_message_source(channel_name, user, timestamp)
        return {
            "source": source,
            "channel": channel_name,
            "timestamp": timestamp,
            "user": user,
        }

    def _get_message_source(self, channel_name: str, user: str, timestamp: str) -> str:
        """
        Get the message source as a string.

        Args:
            channel_name (str): The name of the channel the message belongs to.
            user (str): The user ID who sent the message.
            timestamp (str): The timestamp of the message.

        Returns:
            str: The message source.
        """
        if self.workspace_url:
            channel_id = self.channel_id_map.get(channel_name, "")
            return (
                f"{self.workspace_url}/archives/{channel_id}"
                + f"/p{timestamp.replace('.', '')}"
            )
        else:
            return f"{channel_name} - {user} - {timestamp}"
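A minimal usage sketch (not from the original page; the zip path and workspace URL are placeholders):

    from langchain.document_loaders.slack_directory import SlackDirectoryLoader

    # Passing workspace_url turns each document's "source" into an archive link.
    loader = SlackDirectoryLoader(
        "slack_export.zip", workspace_url="https://myworkspace.slack.com"
    )
    docs = loader.load()  # one Document per message, channel/user/timestamp in metadata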
Source code for langchain.document_loaders.discord

"""Load from Discord chat dump"""
from __future__ import annotations

from typing import TYPE_CHECKING, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

if TYPE_CHECKING:
    import pandas as pd


class DiscordChatLoader(BaseLoader):
    """Load Discord chat logs."""

    def __init__(self, chat_log: pd.DataFrame, user_id_col: str = "ID"):
        """Initialize with a Pandas DataFrame containing chat logs."""
        # Runtime import: the module-level pandas import is type-checking only.
        import pandas as pd

        if not isinstance(chat_log, pd.DataFrame):
            raise ValueError(
                f"Expected chat_log to be a pd.DataFrame, got {type(chat_log)}"
            )
        self.chat_log = chat_log
        self.user_id_col = user_id_col

    def load(self) -> List[Document]:
        """Load all chat messages."""
        result = []
        for _, row in self.chat_log.iterrows():
            user_id = row[self.user_id_col]
            metadata = row.to_dict()
            metadata.pop(self.user_id_col)
            result.append(Document(page_content=user_id, metadata=metadata))
        return result
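A minimal usage sketch (not from the original page; the toy frame stands in for a real chat export, and only the "ID" column name is significant):

    import pandas as pd

    from langchain.document_loaders.discord import DiscordChatLoader

    chat_log = pd.DataFrame({"ID": ["user_1", "user_2"], "Content": ["hi", "hello"]})
    docs = DiscordChatLoader(chat_log).load()  # other columns become metadata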
Source code for langchain.document_loaders.evernote

"""Load documents from Evernote.

https://gist.github.com/foxmask/7b29c43a161e001ff04afdb2f181e31c
"""
import hashlib
import logging
from base64 import b64decode
from time import strptime
from typing import Any, Dict, Iterator, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class EverNoteLoader(BaseLoader):
    """EverNote Loader.

    Loads an EverNote notebook export file e.g. my_notebook.enex into Documents.
    Instructions on producing this file can be found at
    https://help.evernote.com/hc/en-us/articles/209005557-Export-notes-and-notebooks-as-ENEX-or-HTML

    Currently only the plain text in the note is extracted and stored as the
    contents of the Document; any non-content metadata (e.g. 'author',
    'created', 'updated' etc., but not 'content-raw' or 'resource') tags on
    the note will be extracted and stored as metadata on the Document.

    Args:
        file_path (str): The path to the notebook export with a .enex extension
        load_single_document (bool): Whether or not to concatenate the content
            of all notes into a single long Document. If this is set to True
            (default) then the only metadata on the document will be the
            'source' which contains the file name of the export.
    """  # noqa: E501

    def __init__(self, file_path: str, load_single_document: bool = True):
        """Initialize with file path."""
        self.file_path = file_path
        self.load_single_document = load_single_document

    def load(self) -> List[Document]:
        """Load documents from EverNote export file."""
        documents = [
            Document(
                page_content=note["content"],
                metadata={
                    **{
                        key: value
                        for key, value in note.items()
                        if key not in ["content", "content-raw", "resource"]
                    },
                    **{"source": self.file_path},
                },
            )
            for note in self._parse_note_xml(self.file_path)
            if note.get("content") is not None
        ]

        if not self.load_single_document:
            return documents

        return [
            Document(
                page_content="".join([document.page_content for document in documents]),
                metadata={"source": self.file_path},
            )
        ]

    @staticmethod
    def _parse_content(content: str) -> str:
        try:
            import html2text

            return html2text.html2text(content).strip()
        except ImportError as e:
            logging.error(
                "Could not import `html2text`. Although it is not a required package "
                "to use Langchain, using the EverNote loader requires `html2text`. "
                "Please install `html2text` via `pip install html2text` and try again."
            )
            raise e

    @staticmethod
    def _parse_resource(resource: list) -> dict:
        rsc_dict: Dict[str, Any] = {}
        for elem in resource:
            if elem.tag == "data":
                # Sometimes elem.text is None
                rsc_dict[elem.tag] = b64decode(elem.text) if elem.text else b""
                rsc_dict["hash"] = hashlib.md5(rsc_dict[elem.tag]).hexdigest()
            else:
                rsc_dict[elem.tag] = elem.text

        return rsc_dict

    @staticmethod
    def _parse_note(note: List, prefix: Optional[str] = None) -> dict:
        note_dict: Dict[str, Any] = {}
        resources = []

        def add_prefix(element_tag: str) -> str:
            if prefix is None:
                return element_tag
            return f"{prefix}.{element_tag}"

        for elem in note:
            if elem.tag == "content":
                note_dict[elem.tag] = EverNoteLoader._parse_content(elem.text)
                # A copy of original content
                note_dict["content-raw"] = elem.text
            elif elem.tag == "resource":
                resources.append(EverNoteLoader._parse_resource(elem))
            elif elem.tag == "created" or elem.tag == "updated":
                note_dict[elem.tag] = strptime(elem.text, "%Y%m%dT%H%M%SZ")
            elif elem.tag == "note-attributes":
                additional_attributes = EverNoteLoader._parse_note(
                    elem, elem.tag
                )  # Recursively enter the note-attributes tag
                note_dict.update(additional_attributes)
            else:
                note_dict[elem.tag] = elem.text

        if len(resources) > 0:
            note_dict["resource"] = resources

        return {add_prefix(key): value for key, value in note_dict.items()}

    @staticmethod
    def _parse_note_xml(xml_file: str) -> Iterator[Dict[str, Any]]:
        """Parse Evernote xml."""
        # Without huge_tree set to True, parser may complain about huge text node
        # Try to recover, because there may be "&nbsp;", which will cause
        # "XMLSyntaxError: Entity 'nbsp' not defined"
        try:
            from lxml import etree
        except ImportError as e:
            logging.error(
                "Could not import `lxml`. Although it is not a required package to use "
                "Langchain, using the EverNote loader requires `lxml`. Please install "
                "`lxml` via `pip install lxml` and try again."
            )
            raise e

        context = etree.iterparse(
            xml_file, encoding="utf-8", strip_cdata=False, huge_tree=True, recover=True
        )
        for action, elem in context:
            if elem.tag == "note":
                yield EverNoteLoader._parse_note(elem)
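A minimal usage sketch (not from the original page; "my_notebook.enex" is a placeholder export, and both lxml and html2text must be installed):

    from langchain.document_loaders.evernote import EverNoteLoader

    loader = EverNoteLoader("my_notebook.enex", load_single_document=False)
    docs = loader.load()  # one Document per note; note attributes become metadata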
Source code for langchain.document_loaders.spreedly

"""Loader that fetches data from Spreedly API."""
import json
import urllib.request
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import stringify_dict

SPREEDLY_ENDPOINTS = {
    "gateways_options": "https://core.spreedly.com/v1/gateways_options.json",
    "gateways": "https://core.spreedly.com/v1/gateways.json",
    "receivers_options": "https://core.spreedly.com/v1/receivers_options.json",
    "receivers": "https://core.spreedly.com/v1/receivers.json",
    "payment_methods": "https://core.spreedly.com/v1/payment_methods.json",
    "certificates": "https://core.spreedly.com/v1/certificates.json",
    "transactions": "https://core.spreedly.com/v1/transactions.json",
    "environments": "https://core.spreedly.com/v1/environments.json",
}


class SpreedlyLoader(BaseLoader):
    def __init__(self, access_token: str, resource: str) -> None:
        self.access_token = access_token
        self.resource = resource
        self.headers = {
            "Authorization": f"Bearer {self.access_token}",
            "Accept": "application/json",
        }

    def _make_request(self, url: str) -> List[Document]:
        request = urllib.request.Request(url, headers=self.headers)

        with urllib.request.urlopen(request) as response:
            json_data = json.loads(response.read().decode())
            text = stringify_dict(json_data)
            metadata = {"source": url}
            return [Document(page_content=text, metadata=metadata)]

    def _get_resource(self) -> List[Document]:
        endpoint = SPREEDLY_ENDPOINTS.get(self.resource)
        if endpoint is None:
            return []
        return self._make_request(endpoint)

    def load(self) -> List[Document]:
        return self._get_resource()
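A minimal usage sketch (not from the original page; the token value is a placeholder, and resource must be one of the SPREEDLY_ENDPOINTS keys, otherwise an empty list is returned):

    from langchain.document_loaders.spreedly import SpreedlyLoader

    loader = SpreedlyLoader(
        access_token="<SPREEDLY_ACCESS_TOKEN>", resource="gateways_options"
    )
    docs = loader.load()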
Source code for langchain.document_loaders.json_loader

"""Loader that loads data from JSON."""
import json
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class JSONLoader(BaseLoader):
    """Loads a JSON file and references a jq schema provided to load the text
    into documents.

    Example:
        [{"text": ...}, {"text": ...}, {"text": ...}] -> schema = .[].text
        {"key": [{"text": ...}, {"text": ...}, {"text": ...}]} -> schema = .key[].text
        ["", "", ""] -> schema = .[]
    """

    def __init__(
        self,
        file_path: Union[str, Path],
        jq_schema: str,
        content_key: Optional[str] = None,
        metadata_func: Optional[Callable[[Dict, Dict], Dict]] = None,
        text_content: bool = True,
    ):
        """Initialize the JSONLoader.

        Args:
            file_path (Union[str, Path]): The path to the JSON file.
            jq_schema (str): The jq schema to use to extract the data or text
                from the JSON.
            content_key (str): The key to use to extract the content from the
                JSON if the jq_schema results to a list of objects (dict).
            metadata_func (Callable[Dict, Dict]): A function that takes in the
                JSON object extracted by the jq_schema and the default metadata
                and returns a dict of the updated metadata.
            text_content (bool): Boolean flag to indicate whether the content
                is in string format, defaults to True.
        """
        try:
            import jq  # noqa:F401
        except ImportError:
            raise ImportError(
                "jq package not found, please install it with `pip install jq`"
            )

        self.file_path = Path(file_path).resolve()
        self._jq_schema = jq.compile(jq_schema)
        self._content_key = content_key
        self._metadata_func = metadata_func
        self._text_content = text_content

    def load(self) -> List[Document]:
        """Load and return documents from the JSON file."""
        data = self._jq_schema.input(json.loads(self.file_path.read_text()))

        # Perform some validation
        # This is not a perfect validation, but it should catch most cases
        # and prevent the user from getting a cryptic error later on.
        if self._content_key is not None:
            self._validate_content_key(data)

        docs = []
        for i, sample in enumerate(data, 1):
            metadata = dict(
                source=str(self.file_path),
                seq_num=i,
            )
            text = self._get_text(sample=sample, metadata=metadata)
            docs.append(Document(page_content=text, metadata=metadata))

        return docs

    def _get_text(self, sample: Any, metadata: dict) -> str:
        """Convert sample to string format."""
        if self._content_key is not None:
            content = sample.get(self._content_key)
            if self._metadata_func is not None:
                # We pass in the metadata dict to the metadata_func
                # so that the user can customize the default metadata
                # based on the content of the JSON object.
                metadata = self._metadata_func(sample, metadata)
        else:
            content = sample

        if self._text_content and not isinstance(content, str):
            raise ValueError(
                f"Expected page_content is string, got {type(content)} instead. "
                "Set `text_content=False` if the desired input for "
                "`page_content` is not a string"
            )
        # In case the text is None, set it to an empty string
        elif isinstance(content, str):
            return content
        elif isinstance(content, dict):
            return json.dumps(content) if content else ""
        else:
            return str(content) if content is not None else ""

    def _validate_content_key(self, data: Any) -> None:
        """Check if the content key is valid."""
        sample = data.first()
        if not isinstance(sample, dict):
            raise ValueError(
                "Expected the jq schema to result in a list of objects (dict), "
                f"so sample must be a dict but got `{type(sample)}`"
            )

        if sample.get(self._content_key) is None:
            raise ValueError(
                "Expected the jq schema to result in a list of objects (dict) "
                f"with the key `{self._content_key}`"
            )

        if self._metadata_func is not None:
            sample_metadata = self._metadata_func(sample, {})
            if not isinstance(sample_metadata, dict):
                raise ValueError(
                    "Expected the metadata_func to return a dict but got "
                    f"`{type(sample_metadata)}`"
                )
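A minimal usage sketch (not from the original page; "chat.json" and its shape are hypothetical, assumed to look like {"messages": [{"text": "...", "sender": "..."}, ...]}):

    from langchain.document_loaders.json_loader import JSONLoader

    loader = JSONLoader(
        file_path="chat.json",
        jq_schema=".messages[]",       # one object per message
        content_key="text",            # message body becomes page_content
        metadata_func=lambda record, metadata: {
            **metadata, "sender": record.get("sender")
        },
    )
    docs = loader.load()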
Source code for langchain.document_loaders.onedrive

"""Loader that loads data from OneDrive"""
from __future__ import annotations

import logging
import os
import tempfile
from enum import Enum
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Type, Union

from pydantic import BaseModel, BaseSettings, Field, FilePath, SecretStr

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.onedrive_file import OneDriveFileLoader

if TYPE_CHECKING:
    from O365 import Account
    from O365.drive import Drive, Folder

SCOPES = ["offline_access", "Files.Read.All"]
logger = logging.getLogger(__name__)


class _OneDriveSettings(BaseSettings):
    client_id: str = Field(..., env="O365_CLIENT_ID")
    client_secret: SecretStr = Field(..., env="O365_CLIENT_SECRET")

    class Config:
        env_prefix = ""
        case_sensitive = False
        env_file = ".env"


class _OneDriveTokenStorage(BaseSettings):
    token_path: FilePath = Field(Path.home() / ".credentials" / "o365_token.txt")


class _FileType(str, Enum):
    DOC = "doc"
    DOCX = "docx"
    PDF = "pdf"


class _SupportedFileTypes(BaseModel):
    file_types: List[_FileType]

    def fetch_mime_types(self) -> Dict[str, str]:
        mime_types_mapping = {}
        for file_type in self.file_types:
            if file_type.value == "doc":
                mime_types_mapping[file_type.value] = "application/msword"
            elif file_type.value == "docx":
                mime_types_mapping[
                    file_type.value
                ] = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"  # noqa: E501
            elif file_type.value == "pdf":
                mime_types_mapping[file_type.value] = "application/pdf"
        return mime_types_mapping


class OneDriveLoader(BaseLoader, BaseModel):
    settings: _OneDriveSettings = Field(default_factory=_OneDriveSettings)
    drive_id: str = Field(...)
    folder_path: Optional[str] = None
    object_ids: Optional[List[str]] = None
    auth_with_token: bool = False

    def _auth(self) -> Type[Account]:
        """
        Authenticates the OneDrive API client using the specified
        authentication method and returns the Account object.

        Returns:
            Type[Account]: The authenticated Account object.
        """
        try:
            # Account is needed at runtime; the module-level import is
            # type-checking only.
            from O365 import Account, FileSystemTokenBackend
        except ImportError:
            raise ImportError(
                "O365 package not found, please install it with `pip install o365`"
            )
        if self.auth_with_token:
            token_storage = _OneDriveTokenStorage()
            token_path = token_storage.token_path
            token_backend = FileSystemTokenBackend(
                token_path=token_path.parent, token_filename=token_path.name
            )
            account = Account(
                credentials=(
                    self.settings.client_id,
                    self.settings.client_secret.get_secret_value(),
                ),
                scopes=SCOPES,
                token_backend=token_backend,
                **{"raise_http_errors": False},
            )
        else:
            token_backend = FileSystemTokenBackend(
                token_path=Path.home() / ".credentials"
            )
            account = Account(
                credentials=(
                    self.settings.client_id,
                    self.settings.client_secret.get_secret_value(),
                ),
                scopes=SCOPES,
                token_backend=token_backend,
                **{"raise_http_errors": False},
            )
            # make the auth
            account.authenticate()
        return account

    def _get_folder_from_path(self, drive: Type[Drive]) -> Union[Folder, Drive]:
        """
        Returns the folder or drive object located at the specified path
        relative to the given drive.

        Args:
            drive (Type[Drive]): The root drive from which the folder path is
                relative.

        Returns:
            Union[Folder, Drive]: The folder or drive object located at the
                specified path.

        Raises:
            FileNotFoundError: If the path does not exist.
        """
        subfolder_drive = drive
        if self.folder_path is None:
            return subfolder_drive

        subfolders = [f for f in self.folder_path.split("/") if f != ""]
        if len(subfolders) == 0:
            return subfolder_drive

        items = subfolder_drive.get_items()
        for subfolder in subfolders:
            try:
                subfolder_drive = list(filter(lambda x: subfolder in x.name, items))[0]
                items = subfolder_drive.get_items()
            except (IndexError, AttributeError):
                raise FileNotFoundError("Path {} not exist.".format(self.folder_path))
        return subfolder_drive

    def _load_from_folder(self, folder: Type[Folder]) -> List[Document]:
        """
        Loads all supported document files from the specified folder
        and returns a list of Document objects.

        Args:
            folder (Type[Folder]): The folder object to load the documents from.

        Returns:
            List[Document]: A list of Document objects representing the loaded
                documents.
        """
        docs = []
        file_types = _SupportedFileTypes(file_types=["doc", "docx", "pdf"])
        file_mime_types = file_types.fetch_mime_types()
        items = folder.get_items()
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            for file in items:
                if file.is_file:
                    if file.mime_type in list(file_mime_types.values()):
                        loader = OneDriveFileLoader(file=file)
                        docs.extend(loader.load())
        return docs

    def _load_from_object_ids(self, drive: Type[Drive]) -> List[Document]:
        """
        Loads all supported document files from the specified OneDrive drive
        based on their object IDs and returns a list of Document objects.

        Args:
            drive (Type[Drive]): The OneDrive drive object to load the
                documents from.

        Returns:
            List[Document]: A list of Document objects representing the loaded
                documents.
        """
        docs = []
        file_types = _SupportedFileTypes(file_types=["doc", "docx", "pdf"])
        file_mime_types = file_types.fetch_mime_types()
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)

            for object_id in self.object_ids if self.object_ids else [""]:
                file = drive.get_item(object_id)
                if not file:
                    logging.warning(
                        "There isn't a file with "
                        f"object_id {object_id} in drive {drive}."
                    )
                    continue
                if file.is_file:
                    if file.mime_type in list(file_mime_types.values()):
                        loader = OneDriveFileLoader(file=file)
                        docs.extend(loader.load())
        return docs

    def load(self) -> List[Document]:
        """
        Loads all supported document files from the specified OneDrive drive
        and returns a list of Document objects.

        Returns:
            List[Document]: A list of Document objects representing the loaded
                documents.

        Raises:
            ValueError: If the specified drive ID does not correspond to a
                drive in the OneDrive storage.
        """
        account = self._auth()
        storage = account.storage()
        drive = storage.get_drive(self.drive_id)
        docs: List[Document] = []
        if not drive:
            raise ValueError(f"There isn't a drive with id {self.drive_id}.")
        if self.folder_path:
            folder = self._get_folder_from_path(drive=drive)
            docs.extend(self._load_from_folder(folder=folder))
        elif self.object_ids:
            docs.extend(self._load_from_object_ids(drive=drive))
        return docs
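A minimal construction sketch (not from the original page; the drive ID and folder path are placeholders, O365_CLIENT_ID and O365_CLIENT_SECRET must be set in the environment, and auth_with_token=True reuses a cached o365 token file):

    from langchain.document_loaders.onedrive import OneDriveLoader

    loader = OneDriveLoader(
        drive_id="<drive-id>",
        folder_path="Documents/reports",
        auth_with_token=True,
    )
    docs = loader.load()  # doc/docx/pdf files under the folder become Documents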
Source code for langchain.document_loaders.stripe

"""Loader that fetches data from Stripe"""
import json
import urllib.request
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env, stringify_dict

STRIPE_ENDPOINTS = {
    "balance_transactions": "https://api.stripe.com/v1/balance_transactions",
    "charges": "https://api.stripe.com/v1/charges",
    "customers": "https://api.stripe.com/v1/customers",
    "events": "https://api.stripe.com/v1/events",
    "refunds": "https://api.stripe.com/v1/refunds",
    "disputes": "https://api.stripe.com/v1/disputes",
}


class StripeLoader(BaseLoader):
    def __init__(self, resource: str, access_token: Optional[str] = None) -> None:
        self.resource = resource
        access_token = access_token or get_from_env(
            "access_token", "STRIPE_ACCESS_TOKEN"
        )
        self.headers = {"Authorization": f"Bearer {access_token}"}

    def _make_request(self, url: str) -> List[Document]:
        request = urllib.request.Request(url, headers=self.headers)

        with urllib.request.urlopen(request) as response:
            json_data = json.loads(response.read().decode())
            text = stringify_dict(json_data)
            metadata = {"source": url}
            return [Document(page_content=text, metadata=metadata)]

    def _get_resource(self) -> List[Document]:
        endpoint = STRIPE_ENDPOINTS.get(self.resource)
        if endpoint is None:
            return []
        return self._make_request(endpoint)

    def load(self) -> List[Document]:
        return self._get_resource()
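A minimal usage sketch (not from the original page; STRIPE_ACCESS_TOKEN is read from the environment when no token is passed, and resource must be a STRIPE_ENDPOINTS key):

    from langchain.document_loaders.stripe import StripeLoader

    loader = StripeLoader("charges")
    docs = loader.load()  # one Document with the JSON flattened to "key: value" lines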
Source code for langchain.document_loaders.acreom

"""Loader that loads acreom vault from a directory."""
import re
from pathlib import Path
from typing import Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class AcreomLoader(BaseLoader):
    FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.MULTILINE | re.DOTALL)

    def __init__(
        self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True
    ):
        """Initialize with path."""
        self.file_path = path
        self.encoding = encoding
        self.collect_metadata = collect_metadata

    def _parse_front_matter(self, content: str) -> dict:
        """Parse front matter metadata from the content and return it as a dict."""
        if not self.collect_metadata:
            return {}
        match = self.FRONT_MATTER_REGEX.search(content)
        front_matter = {}
        if match:
            lines = match.group(1).split("\n")
            for line in lines:
                if ":" in line:
                    key, value = line.split(":", 1)
                    front_matter[key.strip()] = value.strip()
                else:
                    # Skip lines without a colon
                    continue
        return front_matter

    def _remove_front_matter(self, content: str) -> str:
        """Remove front matter metadata from the given content."""
        if not self.collect_metadata:
            return content
        return self.FRONT_MATTER_REGEX.sub("", content)

    def _process_acreom_content(self, content: str) -> str:
        # remove acreom specific elements from content that
        # do not contribute to the context of current document
        content = re.sub(r"\s*-\s\[\s\]\s.*|\s*\[\s\]\s.*", "", content)  # rm tasks
        content = re.sub(r"#", "", content)  # rm hashtags
        content = re.sub(r"\[\[.*?\]\]", "", content)  # rm doclinks
        return content

    def lazy_load(self) -> Iterator[Document]:
        ps = list(Path(self.file_path).glob("**/*.md"))

        for p in ps:
            with open(p, encoding=self.encoding) as f:
                text = f.read()

            front_matter = self._parse_front_matter(text)
            text = self._remove_front_matter(text)
            text = self._process_acreom_content(text)

            metadata = {
                "source": str(p.name),
                "path": str(p),
                **front_matter,
            }

            yield Document(page_content=text, metadata=metadata)

    def load(self) -> List[Document]:
        return list(self.lazy_load())
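A minimal usage sketch (not from the original page; "acreom_vault" is a placeholder directory of .md files):

    from langchain.document_loaders.acreom import AcreomLoader

    loader = AcreomLoader("acreom_vault", collect_metadata=True)
    docs = list(loader.lazy_load())  # front matter keys appear in each metadata dict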
Source code for langchain.document_loaders.apify_dataset

"""Logic for loading documents from Apify datasets."""
from typing import Any, Callable, Dict, List

from pydantic import BaseModel, root_validator

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class ApifyDatasetLoader(BaseLoader, BaseModel):
    """Logic for loading documents from Apify datasets."""

    apify_client: Any
    dataset_id: str
    """The ID of the dataset on the Apify platform."""
    dataset_mapping_function: Callable[[Dict], Document]
    """A custom function that takes a single dictionary (an Apify dataset item)
    and converts it to an instance of the Document class."""

    def __init__(
        self, dataset_id: str, dataset_mapping_function: Callable[[Dict], Document]
    ):
        """Initialize the loader with an Apify dataset ID and a mapping function.

        Args:
            dataset_id (str): The ID of the dataset on the Apify platform.
            dataset_mapping_function (Callable): A function that takes a single
                dictionary (an Apify dataset item) and converts it to an
                instance of the Document class.
        """
        super().__init__(
            dataset_id=dataset_id, dataset_mapping_function=dataset_mapping_function
        )

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate environment."""
        try:
            from apify_client import ApifyClient

            values["apify_client"] = ApifyClient()
        except ImportError:
            raise ImportError(
                "Could not import apify-client Python package. "
                "Please install it with `pip install apify-client`."
            )
        return values

    def load(self) -> List[Document]:
        """Load documents."""
        dataset_items = self.apify_client.dataset(self.dataset_id).list_items().items
        return list(map(self.dataset_mapping_function, dataset_items))
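A minimal usage sketch (not from the original page; the dataset ID is a placeholder, and the "text"/"url" item fields depend on which Apify actor produced the dataset):

    from langchain.docstore.document import Document
    from langchain.document_loaders.apify_dataset import ApifyDatasetLoader

    loader = ApifyDatasetLoader(
        dataset_id="<dataset-id>",
        dataset_mapping_function=lambda item: Document(
            page_content=item["text"], metadata={"source": item["url"]}
        ),
    )
    docs = loader.load()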
fe09ac003038-0
Source code for langchain.document_loaders.unstructured

"""Loader that uses unstructured to load files."""
import collections
from abc import ABC, abstractmethod
from typing import IO, Any, List, Sequence, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def satisfies_min_unstructured_version(min_version: str) -> bool:
    """Check whether the installed unstructured version is at least the
    minimum version required for the feature in question."""
    from unstructured.__version__ import __version__ as __unstructured_version__

    min_version_tuple = tuple([int(x) for x in min_version.split(".")])

    # NOTE(MthwRobinson) - enables the loader to work when you're using pre-release
    # versions of unstructured like 0.4.17-dev1
    _unstructured_version = __unstructured_version__.split("-")[0]
    unstructured_version_tuple = tuple(
        [int(x) for x in _unstructured_version.split(".")]
    )

    return unstructured_version_tuple >= min_version_tuple


def validate_unstructured_version(min_unstructured_version: str) -> None:
    """Raise an error if the installed unstructured version is below the
    specified minimum."""
    if not satisfies_min_unstructured_version(min_unstructured_version):
        raise ValueError(
            f"unstructured>={min_unstructured_version} is required in this loader."
        )


class UnstructuredBaseLoader(BaseLoader, ABC):
    """Loader that uses unstructured to load files."""

    def __init__(self, mode: str = "single", **unstructured_kwargs: Any):
        """Initialize with partitioning mode and unstructured kwargs."""
        try:
            import unstructured  # noqa:F401
        except ImportError:
            raise ValueError(
                "unstructured package not found, please install it with "
                "`pip install unstructured`"
            )
        _valid_modes = {"single", "elements"}
        if mode not in _valid_modes:
            raise ValueError(
                f"Got {mode} for `mode`, but should be one of `{_valid_modes}`"
            )
        self.mode = mode

        if not satisfies_min_unstructured_version("0.5.4"):
            if "strategy" in unstructured_kwargs:
                unstructured_kwargs.pop("strategy")

        self.unstructured_kwargs = unstructured_kwargs

    @abstractmethod
    def _get_elements(self) -> List:
        """Get elements."""

    @abstractmethod
    def _get_metadata(self) -> dict:
        """Get metadata."""

    def load(self) -> List[Document]:
        """Load file."""
        elements = self._get_elements()
        if self.mode == "elements":
            docs: List[Document] = list()
            for element in elements:
                metadata = self._get_metadata()
                # NOTE(MthwRobinson) - the attribute check is for backward
                # compatibility with unstructured<0.4.9. The metadata attribute
                # was added in 0.4.9.
                if hasattr(element, "metadata"):
                    metadata.update(element.metadata.to_dict())
                if hasattr(element, "category"):
                    metadata["category"] = element.category
                docs.append(Document(page_content=str(element), metadata=metadata))
        elif self.mode == "single":
            metadata = self._get_metadata()
            text = "\n\n".join([str(el) for el in elements])
            docs = [Document(page_content=text, metadata=metadata)]
        else:
            raise ValueError(f"mode of {self.mode} not supported.")
        return docs


class UnstructuredFileLoader(UnstructuredBaseLoader):
    """Loader that uses unstructured to load files."""

    def __init__(
        self,
        file_path: Union[str, List[str]],
        mode: str = "single",
        **unstructured_kwargs: Any,
    ):
        """Initialize with file path."""
        self.file_path = file_path
        super().__init__(mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.auto import partition

        return partition(filename=self.file_path, **self.unstructured_kwargs)

    def _get_metadata(self) -> dict:
        return {"source": self.file_path}


def get_elements_from_api(
    file_path: Union[str, List[str], None] = None,
    file: Union[IO, Sequence[IO], None] = None,
    api_url: str = "https://api.unstructured.io/general/v0/general",
    api_key: str = "",
    **unstructured_kwargs: Any,
) -> List:
    """Retrieve a list of elements from the Unstructured API."""
    if isinstance(file, collections.abc.Sequence) or isinstance(file_path, list):
        from unstructured.partition.api import partition_multiple_via_api

        _doc_elements = partition_multiple_via_api(
            filenames=file_path,
            files=file,
            api_key=api_key,
            api_url=api_url,
            **unstructured_kwargs,
        )

        elements = []
        for _elements in _doc_elements:
            elements.extend(_elements)

        return elements
    else:
        from unstructured.partition.api import partition_via_api

        return partition_via_api(
            filename=file_path,
            file=file,
            api_key=api_key,
            api_url=api_url,
            **unstructured_kwargs,
        )


class UnstructuredAPIFileLoader(UnstructuredFileLoader):
    """Loader that uses the unstructured web API to load files."""

    def __init__(
        self,
        file_path: Union[str, List[str]] = "",
        mode: str = "single",
        url: str = "https://api.unstructured.io/general/v0/general",
        api_key: str = "",
        **unstructured_kwargs: Any,
    ):
        """Initialize with file path."""
        if isinstance(file_path, str):
            validate_unstructured_version(min_unstructured_version="0.6.2")
        else:
            validate_unstructured_version(min_unstructured_version="0.6.3")

        self.url = url
        self.api_key = api_key

        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_metadata(self) -> dict:
        return {"source": self.file_path}

    def _get_elements(self) -> List:
        return get_elements_from_api(
            file_path=self.file_path,
            api_key=self.api_key,
            api_url=self.url,
            **self.unstructured_kwargs,
        )


class UnstructuredFileIOLoader(UnstructuredBaseLoader):
    """Loader that uses unstructured to load file IO objects."""

    def __init__(
        self,
        file: Union[IO, Sequence[IO]],
        mode: str = "single",
        **unstructured_kwargs: Any,
    ):
        """Initialize with file IO object."""
        self.file = file
        super().__init__(mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.auto import partition

        return partition(file=self.file, **self.unstructured_kwargs)

    def _get_metadata(self) -> dict:
        return {}


class UnstructuredAPIFileIOLoader(UnstructuredFileIOLoader):
    """Loader that uses the unstructured web API to load file IO objects."""

    def __init__(
        self,
        file: Union[IO, Sequence[IO]],
        mode: str = "single",
        url: str = "https://api.unstructured.io/general/v0/general",
        api_key: str = "",
        **unstructured_kwargs: Any,
    ):
        """Initialize with file IO object."""
        if isinstance(file, collections.abc.Sequence):
            validate_unstructured_version(min_unstructured_version="0.6.3")
        if file:
            validate_unstructured_version(min_unstructured_version="0.6.2")

        self.url = url
        self.api_key = api_key

        super().__init__(file=file, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        return get_elements_from_api(
            file=self.file,
            api_key=self.api_key,
            api_url=self.url,
            **self.unstructured_kwargs,
        )
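A short usage sketch of the two partitioning modes; "example.pdf" is a placeholder path, not a file referenced by this page.

from langchain.document_loaders.unstructured import UnstructuredFileLoader

# mode="single" (the default) concatenates all elements into one Document;
# mode="elements" keeps one Document per partitioned element.
loader = UnstructuredFileLoader("example.pdf", mode="elements")
docs = loader.load()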
Source code for langchain.document_loaders.joplin

import json
import urllib
from datetime import datetime
from typing import Iterator, List, Optional

from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
from langchain.utils import get_from_env

LINK_NOTE_TEMPLATE = "joplin://x-callback-url/openNote?id={id}"


class JoplinLoader(BaseLoader):
    """Loader that fetches notes from Joplin.

    To use this loader, you need to have Joplin running with the Web Clipper
    service enabled (look for "Web Clipper" in the app settings). The access
    token is shown in the Web Clipper options under "Advanced Options".

    You can find more information about the Web Clipper service here:
    https://joplinapp.org/clipper/
    """

    def __init__(
        self,
        access_token: Optional[str] = None,
        port: int = 41184,
        host: str = "localhost",
    ) -> None:
        access_token = access_token or get_from_env(
            "access_token", "JOPLIN_ACCESS_TOKEN"
        )
        base_url = f"http://{host}:{port}"

        self._get_note_url = (
            f"{base_url}/notes?token={access_token}"
            f"&fields=id,parent_id,title,body,created_time,updated_time&page={{page}}"
        )
        self._get_folder_url = (
            f"{base_url}/folders/{{id}}?token={access_token}&fields=title"
        )
        self._get_tag_url = (
            f"{base_url}/notes/{{id}}/tags?token={access_token}&fields=title"
        )

    def _get_notes(self) -> Iterator[Document]:
        has_more = True
        page = 1
        while has_more:
            req_note = urllib.request.Request(self._get_note_url.format(page=page))
            with urllib.request.urlopen(req_note) as response:
                json_data = json.loads(response.read().decode())
                for note in json_data["items"]:
                    metadata = {
                        "source": LINK_NOTE_TEMPLATE.format(id=note["id"]),
                        "folder": self._get_folder(note["parent_id"]),
                        "tags": self._get_tags(note["id"]),
                        "title": note["title"],
                        "created_time": self._convert_date(note["created_time"]),
                        "updated_time": self._convert_date(note["updated_time"]),
                    }
                    yield Document(page_content=note["body"], metadata=metadata)
                has_more = json_data["has_more"]
                page += 1

    def _get_folder(self, folder_id: str) -> str:
        req_folder = urllib.request.Request(self._get_folder_url.format(id=folder_id))
        with urllib.request.urlopen(req_folder) as response:
            json_data = json.loads(response.read().decode())
            return json_data["title"]

    def _get_tags(self, note_id: str) -> List[str]:
        req_tag = urllib.request.Request(self._get_tag_url.format(id=note_id))
        with urllib.request.urlopen(req_tag) as response:
            json_data = json.loads(response.read().decode())
            return [tag["title"] for tag in json_data["items"]]

    def _convert_date(self, date: int) -> str:
        return datetime.fromtimestamp(date / 1000).strftime("%Y-%m-%d %H:%M:%S")

    def lazy_load(self) -> Iterator[Document]:
        yield from self._get_notes()

    def load(self) -> List[Document]:
        return list(self.lazy_load())
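A minimal usage sketch, assuming Joplin is running locally with the Web Clipper service enabled; the token below is a placeholder (it can also come from the JOPLIN_ACCESS_TOKEN environment variable).

from langchain.document_loaders.joplin import JoplinLoader

# Placeholder token; host and port default to localhost:41184.
loader = JoplinLoader(access_token="<joplin-access-token>")
docs = loader.load()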
Source code for langchain.document_loaders.gitbook

"""Loader that loads GitBook."""
from typing import Any, List, Optional
from urllib.parse import urljoin, urlparse

from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader


class GitbookLoader(WebBaseLoader):
    """Load GitBook data.

    1. load from either a single page, or
    2. load all (relative) paths in the navbar.
    """

    def __init__(
        self,
        web_page: str,
        load_all_paths: bool = False,
        base_url: Optional[str] = None,
        content_selector: str = "main",
    ):
        """Initialize with web page and whether to load all paths.

        Args:
            web_page: The web page to load or the starting point from where
                relative paths are discovered.
            load_all_paths: If set to True, all relative paths in the navbar
                are loaded instead of only `web_page`.
            base_url: If `load_all_paths` is True, the relative paths are
                appended to this base url. Defaults to `web_page` if not set.
        """
        self.base_url = base_url or web_page
        if self.base_url.endswith("/"):
            self.base_url = self.base_url[:-1]
        if load_all_paths:
            # set web_path to the sitemap if we want to crawl all paths
            web_paths = f"{self.base_url}/sitemap.xml"
        else:
            web_paths = web_page
        super().__init__(web_paths)
        self.load_all_paths = load_all_paths
        self.content_selector = content_selector

    def load(self) -> List[Document]:
        """Fetch text from one single GitBook page, or from all pages in the navbar."""
        if self.load_all_paths:
            soup_info = self.scrape()
            relative_paths = self._get_paths(soup_info)
            documents = []
            for path in relative_paths:
                url = urljoin(self.base_url, path)
                print(f"Fetching text from {url}")
                soup_info = self._scrape(url)
                documents.append(self._get_document(soup_info, url))
            return [d for d in documents if d]
        else:
            soup_info = self.scrape()
            documents = [self._get_document(soup_info, self.web_path)]
            return [d for d in documents if d]

    def _get_document(
        self, soup: Any, custom_url: Optional[str] = None
    ) -> Optional[Document]:
        """Fetch content from page and return Document."""
        page_content_raw = soup.find(self.content_selector)
        if not page_content_raw:
            return None
        content = page_content_raw.get_text(separator="\n").strip()
        title_if_exists = page_content_raw.find("h1")
        title = title_if_exists.text if title_if_exists else ""
        metadata = {"source": custom_url or self.web_path, "title": title}
        return Document(page_content=content, metadata=metadata)

    def _get_paths(self, soup: Any) -> List[str]:
        """Fetch all relative paths in the navbar."""
        return [urlparse(loc.text).path for loc in soup.find_all("loc")]
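A usage sketch with a placeholder URL; with load_all_paths=True the loader reads the site's sitemap.xml and then fetches every page listed in it.

from langchain.document_loaders.gitbook import GitbookLoader

# Placeholder site; one Document is returned per discovered page.
loader = GitbookLoader("https://docs.gitbook.com", load_all_paths=True)
docs = loader.load()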
Source code for langchain.document_loaders.diffbot

"""Loader that uses Diffbot to load webpages in text format."""
import logging
from typing import Any, List

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class DiffbotLoader(BaseLoader):
    """Loader that loads Diffbot json results."""

    def __init__(
        self, api_token: str, urls: List[str], continue_on_failure: bool = True
    ):
        """Initialize with API token and the urls to load."""
        self.api_token = api_token
        self.urls = urls
        self.continue_on_failure = continue_on_failure

    def _diffbot_api_url(self, diffbot_api: str) -> str:
        return f"https://api.diffbot.com/v3/{diffbot_api}"

    def _get_diffbot_data(self, url: str) -> Any:
        """Get Diffbot file from Diffbot REST API."""
        # TODO: Add support for other Diffbot APIs
        diffbot_url = self._diffbot_api_url("article")
        params = {
            "token": self.api_token,
            "url": url,
        }
        response = requests.get(diffbot_url, params=params, timeout=10)
        # TODO: handle non-ok errors
        return response.json() if response.ok else {}

    def load(self) -> List[Document]:
        """Extract text from Diffbot on all the URLs and return Document instances."""
        docs: List[Document] = list()
        for url in self.urls:
            try:
                data = self._get_diffbot_data(url)
                text = data["objects"][0]["text"] if "objects" in data else ""
                metadata = {"source": url}
                docs.append(Document(page_content=text, metadata=metadata))
            except Exception as e:
                if self.continue_on_failure:
                    logger.error(f"Error fetching or processing {url}, exception: {e}")
                else:
                    raise e
        return docs
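A usage sketch; the token and URL are placeholders, and one Document is produced per input URL.

from langchain.document_loaders.diffbot import DiffbotLoader

# Placeholder credentials and article URL.
loader = DiffbotLoader(
    api_token="<diffbot-api-token>",
    urls=["https://example.com/some-article"],
)
docs = loader.load()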
Source code for langchain.document_loaders.xml

"""Loader that loads XML files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    validate_unstructured_version,
)


class UnstructuredXMLLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load XML files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        validate_unstructured_version(min_unstructured_version="0.6.7")
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.xml import partition_xml

        return partition_xml(filename=self.file_path, **self.unstructured_kwargs)
Source code for langchain.document_loaders.modern_treasury

"""Loader that fetches data from Modern Treasury."""
import json
import urllib.request
from base64 import b64encode
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env, stringify_value

MODERN_TREASURY_ENDPOINTS = {
    "payment_orders": "https://app.moderntreasury.com/api/payment_orders",
    "expected_payments": "https://app.moderntreasury.com/api/expected_payments",
    "returns": "https://app.moderntreasury.com/api/returns",
    "incoming_payment_details": "https://app.moderntreasury.com/api/incoming_payment_details",
    "counterparties": "https://app.moderntreasury.com/api/counterparties",
    "internal_accounts": "https://app.moderntreasury.com/api/internal_accounts",
    "external_accounts": "https://app.moderntreasury.com/api/external_accounts",
    "transactions": "https://app.moderntreasury.com/api/transactions",
    "ledgers": "https://app.moderntreasury.com/api/ledgers",
    "ledger_accounts": "https://app.moderntreasury.com/api/ledger_accounts",
    "ledger_transactions": "https://app.moderntreasury.com/api/ledger_transactions",
    "events": "https://app.moderntreasury.com/api/events",
    "invoices": "https://app.moderntreasury.com/api/invoices",
}


class ModernTreasuryLoader(BaseLoader):
    def __init__(
        self,
        resource: str,
        organization_id: Optional[str] = None,
        api_key: Optional[str] = None,
    ) -> None:
        self.resource = resource
        organization_id = organization_id or get_from_env(
            "organization_id", "MODERN_TREASURY_ORGANIZATION_ID"
        )
        api_key = api_key or get_from_env("api_key", "MODERN_TREASURY_API_KEY")
        credentials = f"{organization_id}:{api_key}".encode("utf-8")
        basic_auth_token = b64encode(credentials).decode("utf-8")
        self.headers = {"Authorization": f"Basic {basic_auth_token}"}

    def _make_request(self, url: str) -> List[Document]:
        request = urllib.request.Request(url, headers=self.headers)
        with urllib.request.urlopen(request) as response:
            json_data = json.loads(response.read().decode())
            text = stringify_value(json_data)
            metadata = {"source": url}
            return [Document(page_content=text, metadata=metadata)]

    def _get_resource(self) -> List[Document]:
        endpoint = MODERN_TREASURY_ENDPOINTS.get(self.resource)
        if endpoint is None:
            return []
        return self._make_request(endpoint)

    def load(self) -> List[Document]:
        return self._get_resource()
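A usage sketch; credentials are placeholders and may also be supplied via the MODERN_TREASURY_ORGANIZATION_ID and MODERN_TREASURY_API_KEY environment variables.

from langchain.document_loaders.modern_treasury import ModernTreasuryLoader

# "payment_orders" is one of the resource keys in MODERN_TREASURY_ENDPOINTS.
loader = ModernTreasuryLoader(
    resource="payment_orders",
    organization_id="<organization-id>",
    api_key="<api-key>",
)
docs = loader.load()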
Source code for langchain.document_loaders.obsidian

"""Loader that loads an Obsidian directory dump."""
import re
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class ObsidianLoader(BaseLoader):
    """Loader that loads Obsidian files from disk."""

    FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.MULTILINE | re.DOTALL)

    def __init__(
        self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True
    ):
        """Initialize with path."""
        self.file_path = path
        self.encoding = encoding
        self.collect_metadata = collect_metadata

    def _parse_front_matter(self, content: str) -> dict:
        """Parse front matter metadata from the content and return it as a dict."""
        if not self.collect_metadata:
            return {}
        match = self.FRONT_MATTER_REGEX.search(content)
        front_matter = {}
        if match:
            lines = match.group(1).split("\n")
            for line in lines:
                if ":" in line:
                    key, value = line.split(":", 1)
                    front_matter[key.strip()] = value.strip()
                else:
                    # Skip lines without a colon
                    continue
        return front_matter

    def _remove_front_matter(self, content: str) -> str:
        """Remove front matter metadata from the given content."""
        if not self.collect_metadata:
            return content
        return self.FRONT_MATTER_REGEX.sub("", content)

    def load(self) -> List[Document]:
        """Load documents."""
        ps = list(Path(self.file_path).glob("**/*.md"))
        docs = []
        for p in ps:
            with open(p, encoding=self.encoding) as f:
                text = f.read()
            front_matter = self._parse_front_matter(text)
            text = self._remove_front_matter(text)
            metadata = {
                "source": str(p.name),
                "path": str(p),
                "created": p.stat().st_ctime,
                "last_modified": p.stat().st_mtime,
                "last_accessed": p.stat().st_atime,
                **front_matter,
            }
            docs.append(Document(page_content=text, metadata=metadata))
        return docs
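A usage sketch with a placeholder vault path; YAML-style front matter keys are merged into each Document's metadata unless collect_metadata=False.

from langchain.document_loaders.obsidian import ObsidianLoader

# Placeholder path to an Obsidian vault directory.
loader = ObsidianLoader("/path/to/vault")
docs = loader.load()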
Source code for langchain.document_loaders.airbyte_json

"""Loader that loads local airbyte json files."""
import json
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import stringify_dict


class AirbyteJSONLoader(BaseLoader):
    """Loader that loads local airbyte json files."""

    def __init__(self, file_path: str):
        """Initialize with file path. This should start with '/tmp/airbyte_local/'."""
        self.file_path = file_path

    def load(self) -> List[Document]:
        """Load file."""
        text = ""
        with open(self.file_path, "r") as f:
            for line in f:
                data = json.loads(line)["_airbyte_data"]
                text += stringify_dict(data)
        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]
Source code for langchain.document_loaders.notion

"""Loader that loads a Notion directory dump."""
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class NotionDirectoryLoader(BaseLoader):
    """Loader that loads a Notion directory dump."""

    def __init__(self, path: str):
        """Initialize with path."""
        self.file_path = path

    def load(self) -> List[Document]:
        """Load documents."""
        ps = list(Path(self.file_path).glob("**/*.md"))
        docs = []
        for p in ps:
            with open(p) as f:
                text = f.read()
            metadata = {"source": str(p)}
            docs.append(Document(page_content=text, metadata=metadata))
        return docs
Source code for langchain.document_loaders.python

import tokenize

from langchain.document_loaders.text import TextLoader


class PythonLoader(TextLoader):
    """Load Python files, respecting any non-default encoding if specified."""

    def __init__(self, file_path: str):
        # Use the tokenize module to honor PEP 263 encoding declarations.
        with open(file_path, "rb") as f:
            encoding, _ = tokenize.detect_encoding(f.readline)
        super().__init__(file_path=file_path, encoding=encoding)
Source code for langchain.document_loaders.azlyrics

"""Loader that loads AZLyrics."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader


class AZLyricsLoader(WebBaseLoader):
    """Loader that loads AZLyrics webpages."""

    def load(self) -> List[Document]:
        """Load webpage."""
        soup = self.scrape()
        title = soup.title.text
        lyrics = soup.find_all("div", {"class": ""})[2].text
        text = title + lyrics
        metadata = {"source": self.web_path}
        return [Document(page_content=text, metadata=metadata)]
Source code for langchain.document_loaders.roam

"""Loader that loads a Roam directory dump."""
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class RoamLoader(BaseLoader):
    """Loader that loads Roam files from disk."""

    def __init__(self, path: str):
        """Initialize with path."""
        self.file_path = path

    def load(self) -> List[Document]:
        """Load documents."""
        ps = list(Path(self.file_path).glob("**/*.md"))
        docs = []
        for p in ps:
            with open(p) as f:
                text = f.read()
            metadata = {"source": str(p)}
            docs.append(Document(page_content=text, metadata=metadata))
        return docs
Source code for langchain.document_loaders.toml

import json
from pathlib import Path
from typing import Iterator, List, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class TomlLoader(BaseLoader):
    """A TOML document loader that inherits from the BaseLoader class.

    This class can be initialized with either a single source file or a source
    directory containing TOML files.
    """

    def __init__(self, source: Union[str, Path]):
        """Initialize the TomlLoader with a source file or directory."""
        self.source = Path(source)

    def load(self) -> List[Document]:
        """Load and return all documents."""
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        """Lazily load the TOML documents from the source file or directory."""
        import tomli

        if self.source.is_file() and self.source.suffix == ".toml":
            files = [self.source]
        elif self.source.is_dir():
            files = list(self.source.glob("**/*.toml"))
        else:
            raise ValueError("Invalid source path or file type")

        for file_path in files:
            with file_path.open("r", encoding="utf-8") as file:
                content = file.read()
                try:
                    data = tomli.loads(content)
                    doc = Document(
                        page_content=json.dumps(data),
                        metadata={"source": str(file_path)},
                    )
                    yield doc
                except tomli.TOMLDecodeError as e:
                    print(f"Error parsing TOML file {file_path}: {e}")
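A usage sketch with a placeholder path; a directory source is searched recursively for *.toml files, and each file's parsed content is serialized to JSON as the page content.

from langchain.document_loaders.toml import TomlLoader

# Placeholder directory; a single .toml file path also works.
loader = TomlLoader("config/")
for doc in loader.lazy_load():
    print(doc.metadata["source"])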
Source code for langchain.document_loaders.readthedocs

"""Loader that loads a ReadTheDocs documentation directory dump."""
from pathlib import Path
from typing import Any, List, Optional, Tuple, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class ReadTheDocsLoader(BaseLoader):
    """Loader that loads a ReadTheDocs documentation directory dump."""

    def __init__(
        self,
        path: Union[str, Path],
        encoding: Optional[str] = None,
        errors: Optional[str] = None,
        custom_html_tag: Optional[Tuple[str, dict]] = None,
        **kwargs: Optional[Any],
    ):
        """Initialize ReadTheDocsLoader.

        The loader loops over all files under `path` and extracts the actual
        content of each file by retrieving the main html tags. The default
        main html tags are `<main id="main-content">` and `<div role="main">`.
        You can also define your own html tag by passing `custom_html_tag`,
        e.g. `("div", {"class": "main"})`. The loader iterates over the custom
        html tag (if given) and the default html tags; the first tag that is
        found non-empty ends the loop, and its content is retrieved.

        Args:
            path: The location of the pulled readthedocs folder.
            encoding: The encoding with which to open the documents.
            errors: Specifies how encoding and decoding errors are to be
                handled; this cannot be used in binary mode.
            custom_html_tag: Optional custom html tag to retrieve the content
                from files.
        """
        try:
            from bs4 import BeautifulSoup
        except ImportError:
            raise ImportError(
                "Could not import the beautifulsoup4 python package. "
                "Please install it with `pip install beautifulsoup4`."
            )

        try:
            _ = BeautifulSoup(
                "<html><body>Parser builder library test.</body></html>", **kwargs
            )
        except Exception as e:
            raise ValueError("Parsing kwargs do not appear valid") from e

        self.file_path = Path(path)
        self.encoding = encoding
        self.errors = errors
        self.custom_html_tag = custom_html_tag
        self.bs_kwargs = kwargs

    def load(self) -> List[Document]:
        """Load documents."""
        docs = []
        for p in self.file_path.rglob("*"):
            if p.is_dir():
                continue
            with open(p, encoding=self.encoding, errors=self.errors) as f:
                text = self._clean_data(f.read())
            metadata = {"source": str(p)}
            docs.append(Document(page_content=text, metadata=metadata))
        return docs

    def _clean_data(self, data: str) -> str:
        from bs4 import BeautifulSoup

        soup = BeautifulSoup(data, **self.bs_kwargs)

        # default tags
        html_tags = [
            ("div", {"role": "main"}),
            ("main", {"id": "main-content"}),
        ]

        if self.custom_html_tag is not None:
            html_tags.append(self.custom_html_tag)

        text = None

        # reversed order. check the custom one first
        for tag, attrs in html_tags[::-1]:
            text = soup.find(tag, attrs)
            # if found, break
            if text is not None:
                break

        if text is not None:
            text = text.get_text()
        else:
            text = ""

        # trim empty lines
        return "\n".join([t for t in text.split("\n") if t])
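A usage sketch; "rtdocs/" is a placeholder for a directory of pre-downloaded HTML pages, and extra keyword arguments such as features="html.parser" are forwarded to BeautifulSoup.

from langchain.document_loaders.readthedocs import ReadTheDocsLoader

# Placeholder directory produced by e.g. a recursive wget of a docs site.
loader = ReadTheDocsLoader("rtdocs/", features="html.parser")
docs = loader.load()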
Source code for langchain.document_loaders.duckdb_loader

from typing import Dict, List, Optional, cast

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class DuckDBLoader(BaseLoader):
    """Loads a query result from DuckDB into a list of documents.

    Each document represents one row of the result. The `page_content_columns`
    are written into the `page_content` of the document. The `metadata_columns`
    are written into the `metadata` of the document. By default, all columns
    are written into the `page_content` and none into the `metadata`.
    """

    def __init__(
        self,
        query: str,
        database: str = ":memory:",
        read_only: bool = False,
        config: Optional[Dict[str, str]] = None,
        page_content_columns: Optional[List[str]] = None,
        metadata_columns: Optional[List[str]] = None,
    ):
        self.query = query
        self.database = database
        self.read_only = read_only
        self.config = config or {}
        self.page_content_columns = page_content_columns
        self.metadata_columns = metadata_columns

    def load(self) -> List[Document]:
        try:
            import duckdb
        except ImportError:
            raise ImportError(
                "Could not import duckdb python package. "
                "Please install it with `pip install duckdb`."
            )

        docs = []
        with duckdb.connect(
            database=self.database, read_only=self.read_only, config=self.config
        ) as con:
            query_result = con.execute(self.query)
            results = query_result.fetchall()
            description = cast(list, query_result.description)
            field_names = [c[0] for c in description]

            if self.page_content_columns is None:
                page_content_columns = field_names
            else:
                page_content_columns = self.page_content_columns

            if self.metadata_columns is None:
                metadata_columns = []
            else:
                metadata_columns = self.metadata_columns

            for result in results:
                page_content = "\n".join(
                    f"{column}: {result[field_names.index(column)]}"
                    for column in page_content_columns
                )
                metadata = {
                    column: result[field_names.index(column)]
                    for column in metadata_columns
                }
                doc = Document(page_content=page_content, metadata=metadata)
                docs.append(doc)

        return docs
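A self-contained sketch against an in-memory database, so no external files are required; the "text" column lands in page_content and "id" in metadata.

from langchain.document_loaders.duckdb_loader import DuckDBLoader

loader = DuckDBLoader(
    "SELECT 1 AS id, 'hello world' AS text",
    page_content_columns=["text"],
    metadata_columns=["id"],
)
docs = loader.load()  # one Document: page_content="text: hello world"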
Source code for langchain.document_loaders.sitemap

"""Loader that fetches a sitemap and loads those URLs."""
import itertools
import re
from typing import Any, Callable, Generator, Iterable, List, Optional

from langchain.document_loaders.web_base import WebBaseLoader
from langchain.schema import Document


def _default_parsing_function(content: Any) -> str:
    return str(content.get_text())


def _default_meta_function(meta: dict, _content: Any) -> dict:
    return {"source": meta["loc"], **meta}


def _batch_block(iterable: Iterable, size: int) -> Generator[List[dict], None, None]:
    it = iter(iterable)
    while item := list(itertools.islice(it, size)):
        yield item


class SitemapLoader(WebBaseLoader):
    """Loader that fetches a sitemap and loads those URLs."""

    def __init__(
        self,
        web_path: str,
        filter_urls: Optional[List[str]] = None,
        parsing_function: Optional[Callable] = None,
        blocksize: Optional[int] = None,
        blocknum: int = 0,
        meta_function: Optional[Callable] = None,
        is_local: bool = False,
    ):
        """Initialize with webpage path and optional filter URLs.

        Args:
            web_path: url of the sitemap. can also be a local path
            filter_urls: list of strings or regexes that will be applied to
                filter the urls that are parsed and loaded
            parsing_function: Function to parse bs4.Soup output
            blocksize: number of sitemap locations per block
            blocknum: the number of the block that should be loaded - zero indexed
            meta_function: Function to parse bs4.Soup output for metadata;
                remember when setting this method to also copy metadata["loc"]
                to metadata["source"] if you are using this field
            is_local: whether the sitemap is a local file
        """
        if blocksize is not None and blocksize < 1:
            raise ValueError("Sitemap blocksize should be at least 1")

        if blocknum < 0:
            raise ValueError("Sitemap blocknum cannot be lower than 0")

        try:
            import lxml  # noqa:F401
        except ImportError:
            raise ImportError(
                "lxml package not found, please install it with `pip install lxml`"
            )

        super().__init__(web_path)

        self.filter_urls = filter_urls
        self.parsing_function = parsing_function or _default_parsing_function
        self.meta_function = meta_function or _default_meta_function
        self.blocksize = blocksize
        self.blocknum = blocknum
        self.is_local = is_local

    def parse_sitemap(self, soup: Any) -> List[dict]:
        """Parse sitemap xml and load into a list of dicts."""
        els = []
        for url in soup.find_all("url"):
            loc = url.find("loc")
            if not loc:
                continue

            # Strip leading and trailing whitespace and newlines
            loc_text = loc.text.strip()

            if self.filter_urls and not any(
                re.match(r, loc_text) for r in self.filter_urls
            ):
                continue

            els.append(
                {
                    tag: prop.text
                    for tag in ["loc", "lastmod", "changefreq", "priority"]
                    if (prop := url.find(tag))
                }
            )

        for sitemap in soup.find_all("sitemap"):
            loc = sitemap.find("loc")
            if not loc:
                continue
            soup_child = self.scrape_all([loc.text], "xml")[0]
            els.extend(self.parse_sitemap(soup_child))

        return els

    def load(self) -> List[Document]:
        """Load sitemap."""
        if self.is_local:
            try:
                import bs4
            except ImportError:
                raise ImportError(
                    "beautifulsoup4 package not found, please install it"
                    " with `pip install beautifulsoup4`"
                )
            fp = open(self.web_path)
            soup = bs4.BeautifulSoup(fp, "xml")
        else:
            soup = self.scrape("xml")

        els = self.parse_sitemap(soup)

        if self.blocksize is not None:
            elblocks = list(_batch_block(els, self.blocksize))
            blockcount = len(elblocks)
            if blockcount - 1 < self.blocknum:
                raise ValueError(
                    "Selected sitemap does not contain enough blocks for given blocknum"
                )
            else:
                els = elblocks[self.blocknum]

        results = self.scrape_all([el["loc"].strip() for el in els if "loc" in el])

        return [
            Document(
                page_content=self.parsing_function(results[i]),
                metadata=self.meta_function(els[i], results[i]),
            )
            for i in range(len(results))
        ]
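A usage sketch with a placeholder sitemap URL; each filter_urls entry is treated as a regex matched against the <loc> of every sitemap entry before the page is fetched.

from langchain.document_loaders.sitemap import SitemapLoader

loader = SitemapLoader(
    "https://example.com/sitemap.xml",
    filter_urls=["https://example.com/blog/.*"],
)
docs = loader.load()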
Source code for langchain.document_loaders.s3_directory

"""Loading logic for loading documents from an s3 directory."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.s3_file import S3FileLoader


class S3DirectoryLoader(BaseLoader):
    """Loading logic for loading documents from s3."""

    def __init__(self, bucket: str, prefix: str = ""):
        """Initialize with bucket name and key prefix."""
        self.bucket = bucket
        self.prefix = prefix

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            import boto3
        except ImportError:
            raise ImportError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        s3 = boto3.resource("s3")
        bucket = s3.Bucket(self.bucket)
        docs = []
        for obj in bucket.objects.filter(Prefix=self.prefix):
            loader = S3FileLoader(self.bucket, obj.key)
            docs.extend(loader.load())
        return docs
Source code for langchain.document_loaders.azure_blob_storage_file

"""Loading logic for loading documents from an Azure Blob Storage file."""
import os
import tempfile
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader


class AzureBlobStorageFileLoader(BaseLoader):
    """Loading logic for loading documents from Azure Blob Storage."""

    def __init__(self, conn_str: str, container: str, blob_name: str):
        """Initialize with connection string, container and blob name."""
        self.conn_str = conn_str
        self.container = container
        self.blob = blob_name

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            from azure.storage.blob import BlobClient
        except ImportError as exc:
            raise ValueError(
                "Could not import azure storage blob python package. "
                "Please install it with `pip install azure-storage-blob`."
            ) from exc

        client = BlobClient.from_connection_string(
            conn_str=self.conn_str, container_name=self.container, blob_name=self.blob
        )

        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.container}/{self.blob}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            with open(f"{file_path}", "wb") as file:
                blob_data = client.download_blob()
                blob_data.readinto(file)
            loader = UnstructuredFileLoader(file_path)
            return loader.load()
Source code for langchain.document_loaders.excel

"""Loader that loads Microsoft Excel files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    validate_unstructured_version,
)


class UnstructuredExcelLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load Microsoft Excel files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        validate_unstructured_version(min_unstructured_version="0.6.7")
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.xlsx import partition_xlsx

        return partition_xlsx(filename=self.file_path, **self.unstructured_kwargs)
Source code for langchain.document_loaders.epub

"""Loader that loads EPub files."""
from typing import List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    satisfies_min_unstructured_version,
)


class UnstructuredEPubLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load epub files."""

    def _get_elements(self) -> List:
        min_unstructured_version = "0.5.4"
        if not satisfies_min_unstructured_version(min_unstructured_version):
            raise ValueError(
                "Partitioning epub files is only supported in "
                f"unstructured>={min_unstructured_version}."
            )
        from unstructured.partition.epub import partition_epub

        return partition_epub(filename=self.file_path, **self.unstructured_kwargs)
Source code for langchain.document_loaders.college_confidential

"""Loader that loads College Confidential."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader


class CollegeConfidentialLoader(WebBaseLoader):
    """Loader that loads College Confidential webpages."""

    def load(self) -> List[Document]:
        """Load webpage."""
        soup = self.scrape()
        text = soup.select_one("main[class='skin-handler']").text
        metadata = {"source": self.web_path}
        return [Document(page_content=text, metadata=metadata)]
Source code for langchain.document_loaders.whatsapp_chat

import re
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def concatenate_rows(date: str, sender: str, text: str) -> str:
    """Combine message information in a readable format ready to be used."""
    return f"{sender} on {date}: {text}\n\n"


class WhatsAppChatLoader(BaseLoader):
    """Loader that loads a WhatsApp messages text file."""

    def __init__(self, path: str):
        """Initialize with path."""
        self.file_path = path

    def load(self) -> List[Document]:
        """Load documents."""
        p = Path(self.file_path)
        text_content = ""

        with open(p, encoding="utf8") as f:
            lines = f.readlines()

        message_line_regex = r"""
            \[?
            (
                \d{1,2}
                [\/.]
                \d{1,2}
                [\/.]
                \d{2,4}
                ,\s
                \d{1,2}
                :\d{2}
                (?:
                    :\d{2}
                )?
                (?:[ _](?:AM|PM))?
            )
            \]?
            [\s-]*
            ([~\w\s]+)
            [:]+
            \s
            (.+)
        """
        for line in lines:
            result = re.match(message_line_regex, line.strip(), flags=re.VERBOSE)
            if result:
                date, sender, text = result.groups()
                text_content += concatenate_rows(date, sender, text)

        metadata = {"source": str(p)}

        return [Document(page_content=text_content, metadata=metadata)]
Source code for langchain.document_loaders.blackboard

"""Loader that loads all documents from a Blackboard course."""
import contextlib
import re
from pathlib import Path
from typing import Any, List, Optional, Tuple
from urllib.parse import unquote

from langchain.docstore.document import Document
from langchain.document_loaders.directory import DirectoryLoader
from langchain.document_loaders.pdf import PyPDFLoader
from langchain.document_loaders.web_base import WebBaseLoader


class BlackboardLoader(WebBaseLoader):
    """Loader that loads all documents from a Blackboard course.

    This loader is not compatible with all Blackboard courses. It is only
    compatible with courses that use the new Blackboard interface.

    To use this loader, you must have the BbRouter cookie. You can get this
    cookie by logging into the course and then copying the value of the
    BbRouter cookie from the browser's developer tools.

    Example:
        .. code-block:: python

            from langchain.document_loaders import BlackboardLoader

            loader = BlackboardLoader(
                blackboard_course_url="https://blackboard.example.com/webapps/blackboard/execute/announcement?method=search&context=course_entry&course_id=_123456_1",
                bbrouter="expires:12345...",
            )
            documents = loader.load()
    """

    base_url: str
    folder_path: str
    load_all_recursively: bool

    def __init__(
        self,
        blackboard_course_url: str,
        bbrouter: str,
        load_all_recursively: bool = True,
        basic_auth: Optional[Tuple[str, str]] = None,
        cookies: Optional[dict] = None,
    ):
        """Initialize with blackboard course url.

        The BbRouter cookie is required for most blackboard courses.

        Args:
            blackboard_course_url: Blackboard course url.
            bbrouter: BbRouter cookie.
            load_all_recursively: If True, load all documents recursively.
            basic_auth: Basic auth credentials.
            cookies: Cookies.

        Raises:
            ValueError: If blackboard course url is invalid.
        """
        super().__init__(blackboard_course_url)
        # Get base url
        try:
            self.base_url = blackboard_course_url.split("/webapps/blackboard")[0]
        except IndexError:
            raise ValueError(
                "Invalid blackboard course url. "
                "Please provide a url that starts with "
                "https://<blackboard_url>/webapps/blackboard"
            )
        if basic_auth is not None:
            self.session.auth = basic_auth
        # Combine cookies
        if cookies is None:
            cookies = {}
        cookies.update({"BbRouter": bbrouter})
        self.session.cookies.update(cookies)
        self.load_all_recursively = load_all_recursively
        self.check_bs4()

    def check_bs4(self) -> None:
        """Check if BeautifulSoup4 is installed.

        Raises:
            ImportError: If BeautifulSoup4 is not installed.
        """
        try:
            import bs4  # noqa: F401
        except ImportError:
            raise ImportError(
                "BeautifulSoup4 is required for BlackboardLoader. "
                "Please install it with `pip install beautifulsoup4`."
            )

    def load(self) -> List[Document]:
        """Load data into document objects.

        Returns:
            List of documents.
        """
        if self.load_all_recursively:
            soup_info = self.scrape()
            self.folder_path = self._get_folder_path(soup_info)
            relative_paths = self._get_paths(soup_info)
            documents = []
            for path in relative_paths:
                url = self.base_url + path
                print(f"Fetching documents from {url}")
                soup_info = self._scrape(url)
                with contextlib.suppress(ValueError):
                    documents.extend(self._get_documents(soup_info))
            return documents
        else:
            print(f"Fetching documents from {self.web_path}")
            soup_info = self.scrape()
            self.folder_path = self._get_folder_path(soup_info)
            return self._get_documents(soup_info)

    def _get_folder_path(self, soup: Any) -> str:
        """Get the folder path to save the documents in.

        Args:
            soup: BeautifulSoup4 soup object.

        Returns:
            Folder path.
        """
        # Get the course name
        course_name = soup.find("span", {"id": "crumb_1"})
        if course_name is None:
            raise ValueError("No course name found.")
        course_name = course_name.text.strip()
        # Prepare the folder path
        course_name_clean = (
            unquote(course_name)
            .replace(" ", "_")
            .replace("/", "_")
            .replace(":", "_")
            .replace(",", "_")
            .replace("?", "_")
            .replace("'", "_")
            .replace("!", "_")
            .replace('"', "_")
        )
        # Get the folder path
        folder_path = Path(".") / course_name_clean
        return str(folder_path)

    def _get_documents(self, soup: Any) -> List[Document]:
        """Fetch content from page and return Documents.

        Args:
            soup: BeautifulSoup4 soup object.

        Returns:
            List of documents.
        """
        attachments = self._get_attachments(soup)
        self._download_attachments(attachments)
        documents = self._load_documents()
        return documents

    def _get_attachments(self, soup: Any) -> List[str]:
        """Get all attachments from a page.

        Args:
            soup: BeautifulSoup4 soup object.

        Returns:
            List of attachments.
        """
        from bs4 import BeautifulSoup, Tag

        # Get content list
        content_list = soup.find("ul", {"class": "contentList"})
        if content_list is None:
            raise ValueError("No content list found.")
        content_list: BeautifulSoup  # type: ignore
        # Get all attachments
        attachments = []
        for attachment in content_list.find_all("ul", {"class": "attachments"}):
            attachment: Tag  # type: ignore
            for link in attachment.find_all("a"):
                link: Tag  # type: ignore
                href = link.get("href")
                # Only add if href is not None and does not start with #
                if href is not None and not href.startswith("#"):
                    attachments.append(href)
        return attachments

    def _download_attachments(self, attachments: List[str]) -> None:
        """Download all attachments.

        Args:
            attachments: List of attachments.
        """
        # Make sure the folder exists
        Path(self.folder_path).mkdir(parents=True, exist_ok=True)
        # Download all attachments
        for attachment in attachments:
            self.download(attachment)

    def _load_documents(self) -> List[Document]:
        """Load all documents in the folder.

        Returns:
            List of documents.
        """
        # Create the document loader
        loader = DirectoryLoader(
            path=self.folder_path,
            glob="*.pdf",
            loader_cls=PyPDFLoader,  # type: ignore
        )
        # Load the documents
        documents = loader.load()
        # Return all documents
        return documents

    def _get_paths(self, soup: Any) -> List[str]:
        """Get all relative paths in the navbar."""
        relative_paths = []
        course_menu = soup.find("ul", {"class": "courseMenu"})
        if course_menu is None:
            raise ValueError("No course menu found.")
        for link in course_menu.find_all("a"):
            href = link.get("href")
            if href is not None and href.startswith("/"):
                relative_paths.append(href)
        return relative_paths

    def download(self, path: str) -> None:
        """Download a file from a url.

        Args:
            path: Path to the file.
        """
        # Get the file content
        response = self.session.get(self.base_url + path, allow_redirects=True)
        # Get the filename
        filename = self.parse_filename(response.url)
        # Write the file to disk
        with open(Path(self.folder_path) / filename, "wb") as f:
            f.write(response.content)

    def parse_filename(self, url: str) -> str:
        """Parse the filename from a url.

        Args:
            url: Url to parse the filename from.

        Returns:
            The filename.
        """
        if (url_path := Path(url)) and url_path.suffix == ".pdf":
            return url_path.name
        else:
            return self._parse_filename_from_url(url)

    def _parse_filename_from_url(self, url: str) -> str:
        """Parse the filename from a url.

        Args:
            url: Url to parse the filename from.

        Returns:
            The filename.

        Raises:
            ValueError: If the filename could not be parsed.
        """
        filename_matches = re.search(r"filename%2A%3DUTF-8%27%27(.+)", url)
        if filename_matches:
            filename = filename_matches.group(1)
        else:
            raise ValueError(f"Could not parse filename from {url}")
        if ".pdf" not in filename:
            raise ValueError(f"Incorrect file type: {filename}")
        filename = filename.split(".pdf")[0] + ".pdf"
        filename = unquote(filename)
        filename = filename.replace("%20", " ")
        return filename


if __name__ == "__main__":
    loader = BlackboardLoader(
        "https://<YOUR BLACKBOARD URL"
        " HERE>/webapps/blackboard/content/listContent.jsp?course_id=_<YOUR COURSE ID"
        " HERE>_1&content_id=_<YOUR CONTENT ID HERE>_1&mode=reset",
        "<YOUR BBROUTER COOKIE HERE>",
        load_all_recursively=True,
    )
    documents = loader.load()
    print(f"Loaded {len(documents)} pages of PDFs from {loader.web_path}")
Source code for langchain.document_loaders.gcs_file

"""Loading logic for loading documents from a GCS file."""
import os
import tempfile
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader


class GCSFileLoader(BaseLoader):
    """Loading logic for loading documents from GCS."""

    def __init__(self, project_name: str, bucket: str, blob: str):
        """Initialize with project name, bucket and blob name."""
        self.bucket = bucket
        self.blob = blob
        self.project_name = project_name

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            from google.cloud import storage
        except ImportError:
            raise ValueError(
                "Could not import google-cloud-storage python package. "
                "Please install it with `pip install google-cloud-storage`."
            )
        # Initialise a client
        storage_client = storage.Client(self.project_name)
        # Create a bucket object for our bucket
        bucket = storage_client.get_bucket(self.bucket)
        # Create a blob object from the filepath
        blob = bucket.blob(self.blob)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.blob}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            # Download the file to a destination
            blob.download_to_filename(file_path)
            loader = UnstructuredFileLoader(file_path)
            return loader.load()
Source code for langchain.document_loaders.srt

"""Loader for .srt (subtitle) files."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class SRTLoader(BaseLoader):
    """Loader for .srt (subtitle) files."""

    def __init__(self, file_path: str):
        """Initialize with file path."""
        try:
            import pysrt  # noqa:F401
        except ImportError:
            raise ImportError(
                "package `pysrt` not found, please install it with `pip install pysrt`"
            )
        self.file_path = file_path

    def load(self) -> List[Document]:
        """Load using pysrt file."""
        import pysrt

        parsed_info = pysrt.open(self.file_path)
        text = " ".join([t.text for t in parsed_info])
        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]
Source code for langchain.document_loaders.text

import logging
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.helpers import detect_file_encodings

logger = logging.getLogger(__name__)


class TextLoader(BaseLoader):
    """Load text files.

    Args:
        file_path: Path to the file to load.
        encoding: File encoding to use. If `None`, the file will be loaded with
            the default system encoding.
        autodetect_encoding: Whether to try to autodetect the file encoding if
            the specified encoding fails.
    """

    def __init__(
        self,
        file_path: str,
        encoding: Optional[str] = None,
        autodetect_encoding: bool = False,
    ):
        """Initialize with file path."""
        self.file_path = file_path
        self.encoding = encoding
        self.autodetect_encoding = autodetect_encoding

    def load(self) -> List[Document]:
        """Load from file path."""
        text = ""
        try:
            with open(self.file_path, encoding=self.encoding) as f:
                text = f.read()
        except UnicodeDecodeError as e:
            if self.autodetect_encoding:
                detected_encodings = detect_file_encodings(self.file_path)
                for encoding in detected_encodings:
                    logger.debug("Trying encoding: %s", encoding.encoding)
                    try:
                        with open(self.file_path, encoding=encoding.encoding) as f:
                            text = f.read()
                        break
                    except UnicodeDecodeError:
                        continue
            else:
                raise RuntimeError(f"Error loading {self.file_path}") from e
        except Exception as e:
            raise RuntimeError(f"Error loading {self.file_path}") from e

        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]
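A usage sketch with a placeholder path; autodetect_encoding retries with the encodings returned by detect_file_encodings when the default (or given) encoding raises UnicodeDecodeError.

from langchain.document_loaders.text import TextLoader

loader = TextLoader("notes.txt", autodetect_encoding=True)
docs = loader.load()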
Source code for langchain.document_loaders.fauna

from typing import Iterator, List, Optional, Sequence

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class FaunaLoader(BaseLoader):
    """Loader that fetches documents from FaunaDB.

    Attributes:
        query (str): The FQL query string to execute.
        page_content_field (str): The field that contains the content of each page.
        secret (str): The secret key for authenticating to FaunaDB.
        metadata_fields (Optional[Sequence[str]]):
            Optional list of field names to include in metadata.
    """

    def __init__(
        self,
        query: str,
        page_content_field: str,
        secret: str,
        metadata_fields: Optional[Sequence[str]] = None,
    ):
        self.query = query
        self.page_content_field = page_content_field
        self.secret = secret
        self.metadata_fields = metadata_fields

    def load(self) -> List[Document]:
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        try:
            from fauna import Page, fql
            from fauna.client import Client
            from fauna.encoding import QuerySuccess
        except ImportError:
            raise ImportError(
                "Could not import fauna python package. "
                "Please install it with `pip install fauna`."
            )
        # Create Fauna Client
        client = Client(secret=self.secret)
        # Run FQL Query
        response: QuerySuccess = client.query(fql(self.query))
        page: Page = response.data
        for result in page:
            if result is not None:
                document_dict = dict(result.items())
                page_content = ""
                for key, value in document_dict.items():
                    if key == self.page_content_field:
                        page_content = value
                document: Document = Document(
                    page_content=page_content,
                    metadata={"id": result.id, "ts": result.ts},
                )
                yield document
        if page.after is not None:
            yield Document(
                page_content="Next Page Exists",
                metadata={"after": page.after},
            )
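A usage sketch; the query, collection name, and field name are placeholder assumptions about your own Fauna schema, and the secret is a placeholder key.

from langchain.document_loaders.fauna import FaunaLoader

loader = FaunaLoader(
    query="Item.all()",  # hypothetical collection named "Item"
    page_content_field="text",  # hypothetical field holding the content
    secret="<fauna-secret-key>",
)
docs = loader.load()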
Source code for langchain.document_loaders.twitter """Twitter document loader.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader if TYPE_CHECKING: import tweepy from tweepy import OAuth2BearerHandler, OAuthHandler def _dependable_tweepy_import() -> tweepy: try: import tweepy except ImportError: raise ImportError( "tweepy package not found, please install it with `pip install tweepy`" ) return tweepy [docs]class TwitterTweetLoader(BaseLoader): """Twitter tweets loader. Read tweets of user twitter handle. First you need to go to `https://developer.twitter.com/en/docs/twitter-api /getting-started/getting-access-to-the-twitter-api` to get your token. And create a v2 version of the app. """ def __init__( self, auth_handler: Union[OAuthHandler, OAuth2BearerHandler], twitter_users: Sequence[str], number_tweets: Optional[int] = 100, ): self.auth = auth_handler self.twitter_users = twitter_users self.number_tweets = number_tweets [docs] def load(self) -> List[Document]: """Load tweets.""" tweepy = _dependable_tweepy_import() api = tweepy.API(self.auth, parser=tweepy.parsers.JSONParser()) results: List[Document] = [] for username in self.twitter_users: tweets = api.user_timeline(screen_name=username, count=self.number_tweets) user = api.get_user(screen_name=username)
            user = api.get_user(screen_name=username)
            docs = self._format_tweets(tweets, user)
            results.extend(docs)
        return results

    def _format_tweets(
        self, tweets: List[Dict[str, Any]], user_info: dict
    ) -> Iterable[Document]:
        """Format tweets into Documents."""
        for tweet in tweets:
            metadata = {
                "created_at": tweet["created_at"],
                "user_info": user_info,
            }
            yield Document(
                page_content=tweet["text"],
                metadata=metadata,
            )

    [docs] @classmethod
    def from_bearer_token(
        cls,
        oauth2_bearer_token: str,
        twitter_users: Sequence[str],
        number_tweets: Optional[int] = 100,
    ) -> TwitterTweetLoader:
        """Create a TwitterTweetLoader from an OAuth2 bearer token."""
        tweepy = _dependable_tweepy_import()
        auth = tweepy.OAuth2BearerHandler(oauth2_bearer_token)
        return cls(
            auth_handler=auth,
            twitter_users=twitter_users,
            number_tweets=number_tweets,
        )

    [docs] @classmethod
    def from_secrets(
        cls,
        access_token: str,
        access_token_secret: str,
        consumer_key: str,
        consumer_secret: str,
        twitter_users: Sequence[str],
        number_tweets: Optional[int] = 100,
    ) -> TwitterTweetLoader:
        """Create a TwitterTweetLoader from access tokens and secrets."""
        tweepy = _dependable_tweepy_import()
        auth = tweepy.OAuthHandler(
            access_token=access_token,
            access_token_secret=access_token_secret,
            consumer_key=consumer_key,
            consumer_secret=consumer_secret,
        )
        return cls(
            auth_handler=auth,
            twitter_users=twitter_users,
            number_tweets=number_tweets,
        )
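A minimal usage sketch, using the ``from_bearer_token`` constructor defined above; the token and handle are placeholders:

from langchain.document_loaders.twitter import TwitterTweetLoader

loader = TwitterTweetLoader.from_bearer_token(
    oauth2_bearer_token="<BEARER_TOKEN>",  # placeholder OAuth2 bearer token
    twitter_users=["hwchase17"],  # any sequence of Twitter handles
    number_tweets=50,  # tweets to fetch per handle
)
documents = loader.load()
# page_content is the tweet text; metadata holds created_at and the user profile.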
Source code for langchain.document_loaders.pyspark_dataframe

"""Load from a PySpark DataFrame object."""
import itertools
import logging
import sys
from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Tuple

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__file__)

if TYPE_CHECKING:
    from pyspark.sql import SparkSession


[docs]class PySparkDataFrameLoader(BaseLoader):
    """Load PySpark DataFrames."""

    def __init__(
        self,
        spark_session: Optional["SparkSession"] = None,
        df: Optional[Any] = None,
        page_content_column: str = "text",
        fraction_of_memory: float = 0.1,
    ):
        """Initialize with a Spark DataFrame object."""
        try:
            from pyspark.sql import DataFrame, SparkSession
        except ImportError:
            raise ImportError(
                "pyspark is not installed. "
                "Please install it with `pip install pyspark`"
            )

        self.spark = (
            spark_session if spark_session else SparkSession.builder.getOrCreate()
        )

        if not isinstance(df, DataFrame):
            raise ValueError(f"Expected df to be a PySpark DataFrame, got {type(df)}")
        self.df = df
        self.page_content_column = page_content_column
        self.fraction_of_memory = fraction_of_memory
        self.num_rows, self.max_num_rows = self.get_num_rows()
        self.rdd_df = self.df.rdd.map(list)
        self.column_names = self.df.columns

    [docs] def get_num_rows(self) -> Tuple[int, int]:
"""Gets the amount of "feasible" rows for the DataFrame""" try: import psutil except ImportError as e: raise ImportError( "psutil not installed. Please install it with `pip install psutil`." ) from e row = self.df.limit(1).collect()[0] estimated_row_size = sys.getsizeof(row) mem_info = psutil.virtual_memory() available_memory = mem_info.available max_num_rows = int( (available_memory / estimated_row_size) * self.fraction_of_memory ) return min(max_num_rows, self.df.count()), max_num_rows [docs] def lazy_load(self) -> Iterator[Document]: """A lazy loader for document content.""" for row in self.rdd_df.toLocalIterator(): metadata = {self.column_names[i]: row[i] for i in range(len(row))} text = metadata[self.page_content_column] metadata.pop(self.page_content_column) yield Document(page_content=text, metadata=metadata) [docs] def load(self) -> List[Document]: """Load from the dataframe.""" if self.df.count() > self.max_num_rows: logger.warning( f"The number of DataFrame rows is {self.df.count()}, " f"but we will only include the amount " f"of rows that can reasonably fit in memory: {self.num_rows}." ) lazy_load_iterator = self.lazy_load() return list(itertools.islice(lazy_load_iterator, self.num_rows)) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 16, 2023.