"""POST to the URL and return the text."""
return self.requests.post(url, data, **kwargs).text
[docs] def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PATCH the URL and return the text."""
return self.requests.patch(url, data, **kwargs).text
[docs] def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PUT the URL and return the text."""
return self.requests.put(url, data, **kwargs).text
[docs] def delete(self, url: str, **kwargs: Any) -> str:
"""DELETE the URL and return the text."""
return self.requests.delete(url, **kwargs).text
[docs] async def aget(self, url: str, **kwargs: Any) -> str:
"""GET the URL and return the text asynchronously."""
async with self.requests.aget(url, **kwargs) as response:
return await response.text()
[docs] async def apost(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""POST to the URL and return the text asynchronously."""
async with self.requests.apost(url, **kwargs) as response:
return await response.text()
[docs] async def apatch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
"""PATCH the URL and return the text asynchronously."""
async with self.requests.apatch(url, **kwargs) as response:
return await response.text()
[docs] async def aput(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
|
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/requests.html
|
21d345307eec-4
|
"""PUT the URL and return the text asynchronously."""
async with self.requests.aput(url, **kwargs) as response:
return await response.text()
[docs] async def adelete(self, url: str, **kwargs: Any) -> str:
"""DELETE the URL and return the text asynchronously."""
async with self.requests.adelete(url, **kwargs) as response:
return await response.text()
# For backwards compatibility
RequestsWrapper = TextRequestsWrapper
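A minimal usage sketch (the URL and payload below are placeholders, and default construction with no custom headers is assumed):

from langchain.requests import TextRequestsWrapper

requests_wrapper = TextRequestsWrapper()  # optionally pass headers={...}
page_text = requests_wrapper.get("https://example.com")  # response body as str
created = requests_wrapper.post("https://example.com/items", data={"name": "foo"})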
Source code for langchain.document_transformers
"""Transform documents"""
from typing import Any, Callable, List, Sequence
import numpy as np
from pydantic import BaseModel, Field
from langchain.embeddings.base import Embeddings
from langchain.math_utils import cosine_similarity
from langchain.schema import BaseDocumentTransformer, Document
class _DocumentWithState(Document):
"""Wrapper for a document that includes arbitrary state."""
state: dict = Field(default_factory=dict)
"""State associated with the document."""
def to_document(self) -> Document:
"""Convert the DocumentWithState to a Document."""
return Document(page_content=self.page_content, metadata=self.metadata)
@classmethod
def from_document(cls, doc: Document) -> "_DocumentWithState":
"""Create a DocumentWithState from a Document."""
if isinstance(doc, cls):
return doc
return cls(page_content=doc.page_content, metadata=doc.metadata)
[docs]def get_stateful_documents(
documents: Sequence[Document],
) -> Sequence[_DocumentWithState]:
return [_DocumentWithState.from_document(doc) for doc in documents]
def _filter_similar_embeddings(
embedded_documents: List[List[float]], similarity_fn: Callable, threshold: float
) -> List[int]:
"""Filter redundant documents based on the similarity of their embeddings."""
similarity = np.tril(similarity_fn(embedded_documents, embedded_documents), k=-1)
redundant = np.where(similarity > threshold)
redundant_stacked = np.column_stack(redundant)
redundant_sorted = np.argsort(similarity[redundant])[::-1]
included_idxs = set(range(len(embedded_documents)))
for first_idx, second_idx in redundant_stacked[redundant_sorted]:
|
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_transformers.html
|
f71fb3f033d5-1
|
for first_idx, second_idx in redundant_stacked[redundant_sorted]:
if first_idx in included_idxs and second_idx in included_idxs:
# Default to dropping the second document of any highly similar pair.
included_idxs.remove(second_idx)
return list(sorted(included_idxs))
def _get_embeddings_from_stateful_docs(
    embeddings: Embeddings, documents: Sequence[_DocumentWithState]
) -> List[List[float]]:
    if len(documents) and "embedded_doc" in documents[0].state:
        embedded_documents = [doc.state["embedded_doc"] for doc in documents]
    else:
        embedded_documents = embeddings.embed_documents(
            [d.page_content for d in documents]
        )
        for doc, embedding in zip(documents, embedded_documents):
            doc.state["embedded_doc"] = embedding
    return embedded_documents


class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):
    """Filter that drops redundant documents by comparing their embeddings."""

    embeddings: Embeddings
    """Embeddings to use for embedding document contents."""
    similarity_fn: Callable = cosine_similarity
    """Similarity function for comparing documents. Function expected to take as input
    two matrices (List[List[float]]) and return a matrix of scores where higher values
    indicate greater similarity."""
    similarity_threshold: float = 0.95
    """Threshold for determining when two documents are similar enough
    to be considered redundant."""

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        """Filter down documents."""
        stateful_documents = get_stateful_documents(documents)
        embedded_documents = _get_embeddings_from_stateful_docs(
            self.embeddings, stateful_documents
        )
        included_idxs = _filter_similar_embeddings(
            embedded_documents, self.similarity_fn, self.similarity_threshold
        )
        return [stateful_documents[i] for i in sorted(included_idxs)]

    async def atransform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        raise NotImplementedError
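A minimal sketch of the filter in use. OpenAIEmbeddings stands in for any Embeddings implementation and is an assumption here, as are the document contents:

from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.embeddings import OpenAIEmbeddings  # any Embeddings implementation works
from langchain.schema import Document

docs = [
    Document(page_content="LangChain helps you build LLM apps."),
    Document(page_content="LangChain helps you build LLM applications."),  # near-duplicate
    Document(page_content="Today's weather is sunny."),
]
redundant_filter = EmbeddingsRedundantFilter(embeddings=OpenAIEmbeddings())
unique_docs = redundant_filter.transform_documents(docs)  # the near-duplicate is dropped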
Source code for langchain.text_splitter
"""Functionality for splitting text."""
from __future__ import annotations
import copy
import logging
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Dict,
Iterable,
List,
Literal,
Optional,
Sequence,
Tuple,
Type,
TypedDict,
TypeVar,
Union,
cast,
)
from langchain.docstore.document import Document
from langchain.schema import BaseDocumentTransformer
logger = logging.getLogger(__name__)
TS = TypeVar("TS", bound="TextSplitter")
def _split_text_with_regex(
text: str, separator: str, keep_separator: bool
) -> List[str]:
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
splits = [_splits[i] + _splits[i + 1] for i in range(1, len(_splits), 2)]
if len(_splits) % 2 == 0:
splits += _splits[-1:]
splits = [_splits[0]] + splits
else:
splits = text.split(separator)
else:
splits = list(text)
return [s for s in splits if s != ""]
class TextSplitter(BaseDocumentTransformer, ABC):
    """Interface for splitting text into chunks."""

    def __init__(
        self,
        chunk_size: int = 4000,
        chunk_overlap: int = 200,
        length_function: Callable[[str], int] = len,
        keep_separator: bool = False,
        add_start_index: bool = False,
    ) -> None:
        """Create a new TextSplitter.

        Args:
            chunk_size: Maximum size of chunks to return
            chunk_overlap: Overlap in characters between chunks
            length_function: Function that measures the length of given chunks
            keep_separator: Whether or not to keep the separator in the chunks
            add_start_index: If `True`, includes chunk's start index in metadata
        """
        if chunk_overlap > chunk_size:
            raise ValueError(
                f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
                f"({chunk_size}), should be smaller."
            )
        self._chunk_size = chunk_size
        self._chunk_overlap = chunk_overlap
        self._length_function = length_function
        self._keep_separator = keep_separator
        self._add_start_index = add_start_index

    @abstractmethod
    def split_text(self, text: str) -> List[str]:
        """Split text into multiple components."""

    def create_documents(
        self, texts: List[str], metadatas: Optional[List[dict]] = None
    ) -> List[Document]:
        """Create documents from a list of texts."""
        _metadatas = metadatas or [{}] * len(texts)
        documents = []
        for i, text in enumerate(texts):
            index = -1
            for chunk in self.split_text(text):
                metadata = copy.deepcopy(_metadatas[i])
                if self._add_start_index:
                    index = text.find(chunk, index + 1)
                    metadata["start_index"] = index
                new_doc = Document(page_content=chunk, metadata=metadata)
                documents.append(new_doc)
        return documents
    def split_documents(self, documents: Iterable[Document]) -> List[Document]:
        """Split documents."""
        texts, metadatas = [], []
        for doc in documents:
            texts.append(doc.page_content)
            metadatas.append(doc.metadata)
        return self.create_documents(texts, metadatas=metadatas)

    def _join_docs(self, docs: List[str], separator: str) -> Optional[str]:
        text = separator.join(docs)
        text = text.strip()
        if text == "":
            return None
        else:
            return text

    def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]:
        # We now want to combine these smaller pieces into medium size
        # chunks to send to the LLM.
        separator_len = self._length_function(separator)

        docs = []
        current_doc: List[str] = []
        total = 0
        for d in splits:
            _len = self._length_function(d)
            if (
                total + _len + (separator_len if len(current_doc) > 0 else 0)
                > self._chunk_size
            ):
                if total > self._chunk_size:
                    logger.warning(
                        f"Created a chunk of size {total}, "
                        f"which is longer than the specified {self._chunk_size}"
                    )
                if len(current_doc) > 0:
                    doc = self._join_docs(current_doc, separator)
                    if doc is not None:
                        docs.append(doc)
                    # Keep on popping if:
                    # - we have a larger chunk than in the chunk overlap
                    # - or if we still have any chunks and the length is long
                    while total > self._chunk_overlap or (
                        total + _len + (separator_len if len(current_doc) > 0 else 0)
                        > self._chunk_size
                        and total > 0
                    ):
                        total -= self._length_function(current_doc[0]) + (
                            separator_len if len(current_doc) > 1 else 0
                        )
                        current_doc = current_doc[1:]
            current_doc.append(d)
            total += _len + (separator_len if len(current_doc) > 1 else 0)
        doc = self._join_docs(current_doc, separator)
        if doc is not None:
            docs.append(doc)
        return docs
    @classmethod
    def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter:
        """Text splitter that uses HuggingFace tokenizer to count length."""
        try:
            from transformers import PreTrainedTokenizerBase

            if not isinstance(tokenizer, PreTrainedTokenizerBase):
                raise ValueError(
                    "Tokenizer received was not an instance of PreTrainedTokenizerBase"
                )

            def _huggingface_tokenizer_length(text: str) -> int:
                return len(tokenizer.encode(text))

        except ImportError:
            raise ValueError(
                "Could not import transformers python package. "
                "Please install it with `pip install transformers`."
            )

        return cls(length_function=_huggingface_tokenizer_length, **kwargs)
    @classmethod
    def from_tiktoken_encoder(
        cls: Type[TS],
        encoding_name: str = "gpt2",
        model_name: Optional[str] = None,
        allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
        disallowed_special: Union[Literal["all"], Collection[str]] = "all",
        **kwargs: Any,
    ) -> TS:
        """Text splitter that uses tiktoken encoder to count length."""
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order to calculate max_tokens_for_prompt. "
                "Please install it with `pip install tiktoken`."
            )

        if model_name is not None:
            enc = tiktoken.encoding_for_model(model_name)
        else:
            enc = tiktoken.get_encoding(encoding_name)

        def _tiktoken_encoder(text: str) -> int:
            return len(
                enc.encode(
                    text,
                    allowed_special=allowed_special,
                    disallowed_special=disallowed_special,
                )
            )

        if issubclass(cls, TokenTextSplitter):
            extra_kwargs = {
                "encoding_name": encoding_name,
                "model_name": model_name,
                "allowed_special": allowed_special,
                "disallowed_special": disallowed_special,
            }
            kwargs = {**kwargs, **extra_kwargs}

        return cls(length_function=_tiktoken_encoder, **kwargs)

    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        """Transform sequence of documents by splitting them."""
        return self.split_documents(list(documents))

    async def atransform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        """Asynchronously transform a sequence of documents by splitting them."""
        raise NotImplementedError
class CharacterTextSplitter(TextSplitter):
    """Implementation of splitting text that looks at characters."""

    def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None:
        """Create a new TextSplitter."""
        super().__init__(**kwargs)
        self._separator = separator

    def split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks."""
        # First we naively split the large input into a bunch of smaller ones.
        splits = _split_text_with_regex(text, self._separator, self._keep_separator)
        _separator = "" if self._keep_separator else self._separator
        return self._merge_splits(splits, _separator)
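A short usage sketch (the separator and chunk sizes are arbitrary illustration values):

text = "First paragraph.\n\nSecond paragraph.\n\nThird paragraph."
splitter = CharacterTextSplitter(separator="\n\n", chunk_size=40, chunk_overlap=0)
chunks = splitter.split_text(text)        # List[str], merged up to the chunk size
docs = splitter.create_documents([text])  # the same chunks wrapped as Documents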
class LineType(TypedDict):
    metadata: Dict[str, str]
    content: str


class HeaderType(TypedDict):
    level: int
    name: str
    data: str


class MarkdownHeaderTextSplitter:
    """Implementation of splitting markdown files based on specified headers."""

    def __init__(
        self, headers_to_split_on: List[Tuple[str, str]], return_each_line: bool = False
    ):
        """Create a new MarkdownHeaderTextSplitter.

        Args:
            headers_to_split_on: Headers we want to track
            return_each_line: Return each line w/ associated headers
        """
        # Output line-by-line or aggregated into chunks w/ common headers
        self.return_each_line = return_each_line
        # Given the headers we want to split on,
        # (e.g., "#, ##, etc") order by length
        self.headers_to_split_on = sorted(
            headers_to_split_on, key=lambda split: len(split[0]), reverse=True
        )
    def aggregate_lines_to_chunks(self, lines: List[LineType]) -> List[LineType]:
        """Combine lines with common metadata into chunks.

        Args:
            lines: Line of text / associated header metadata
        """
        aggregated_chunks: List[LineType] = []

        for line in lines:
            if (
                aggregated_chunks
                and aggregated_chunks[-1]["metadata"] == line["metadata"]
            ):
                # If the last line in the aggregated list
                # has the same metadata as the current line,
                # append the current content to the last line's content
                aggregated_chunks[-1]["content"] += " \n" + line["content"]
            else:
                # Otherwise, append the current line to the aggregated list
                aggregated_chunks.append(line)

        return aggregated_chunks
    def split_text(self, text: str) -> List[LineType]:
        """Split markdown file.

        Args:
            text: Markdown file
        """
        # Split the input text by newline character ("\n").
        lines = text.split("\n")
        # Final output
        lines_with_metadata: List[LineType] = []
        # Content and metadata of the chunk currently being processed
        current_content: List[str] = []
        current_metadata: Dict[str, str] = {}
        # Keep track of the nested header structure
        # header_stack: List[Dict[str, Union[int, str]]] = []
        header_stack: List[HeaderType] = []
        initial_metadata: Dict[str, str] = {}

        for line in lines:
            stripped_line = line.strip()
            # Check each line against each of the header types (e.g., #, ##)
            for sep, name in self.headers_to_split_on:
                # Check if line starts with a header that we intend to split on
                if stripped_line.startswith(sep) and (
                    # Header with no text OR header is followed by space
                    # Both are valid conditions that sep is being used as a header
                    len(stripped_line) == len(sep)
                    or stripped_line[len(sep)] == " "
                ):
                    # Ensure we are tracking the header as metadata
                    if name is not None:
                        # Get the current header level
                        current_header_level = sep.count("#")
                        # Pop out headers of lower or same level from the stack
                        while (
                            header_stack
                            and header_stack[-1]["level"] >= current_header_level
                        ):
                            # We have encountered a new header
                            # at the same or higher level
                            popped_header = header_stack.pop()
                            # Clear the metadata for the
                            # popped header in initial_metadata
                            if popped_header["name"] in initial_metadata:
                                initial_metadata.pop(popped_header["name"])

                        # Push the current header to the stack
                        header: HeaderType = {
                            "level": current_header_level,
                            "name": name,
                            "data": stripped_line[len(sep) :].strip(),
                        }
                        header_stack.append(header)
                        # Update initial_metadata with the current header
                        initial_metadata[name] = header["data"]

                    # Add the previous line to the lines_with_metadata
                    # only if current_content is not empty
                    if current_content:
                        lines_with_metadata.append(
                            {
                                "content": "\n".join(current_content),
                                "metadata": current_metadata.copy(),
                            }
                        )
                        current_content.clear()

                    break
            else:
                if stripped_line:
                    current_content.append(stripped_line)
                elif current_content:
                    lines_with_metadata.append(
                        {
                            "content": "\n".join(current_content),
                            "metadata": current_metadata.copy(),
                        }
                    )
                    current_content.clear()

            current_metadata = initial_metadata.copy()

        if current_content:
            lines_with_metadata.append(
                {"content": "\n".join(current_content), "metadata": current_metadata}
            )

        # lines_with_metadata has each line with associated header metadata
        # aggregate these into chunks based on common metadata
        if not self.return_each_line:
            return self.aggregate_lines_to_chunks(lines_with_metadata)
        else:
            return lines_with_metadata
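A brief usage sketch of the splitter above (the sample markdown is illustrative):

markdown_text = "# Title\n\nIntro paragraph.\n\n## Section A\n\nBody of A."
md_splitter = MarkdownHeaderTextSplitter(
    headers_to_split_on=[("#", "Header 1"), ("##", "Header 2")]
)
for chunk in md_splitter.split_text(markdown_text):
    print(chunk["metadata"], "->", chunk["content"])
# {'Header 1': 'Title'} -> Intro paragraph.
# {'Header 1': 'Title', 'Header 2': 'Section A'} -> Body of A.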
# should be in newer Python versions (3.10+)
# @dataclass(frozen=True, kw_only=True, slots=True)
@dataclass(frozen=True)
class Tokenizer:
    chunk_overlap: int
    tokens_per_chunk: int
    decode: Callable[[list[int]], str]
    encode: Callable[[str], List[int]]


def split_text_on_tokens(*, text: str, tokenizer: Tokenizer) -> List[str]:
    """Split incoming text and return chunks."""
    splits: List[str] = []
    input_ids = tokenizer.encode(text)
    start_idx = 0
    cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids))
    chunk_ids = input_ids[start_idx:cur_idx]
    while start_idx < len(input_ids):
        splits.append(tokenizer.decode(chunk_ids))
        start_idx += tokenizer.tokens_per_chunk - tokenizer.chunk_overlap
        cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids))
        chunk_ids = input_ids[start_idx:cur_idx]
    return splits
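A small worked example of split_text_on_tokens with a toy word-level tokenizer; the vocabulary and window sizes are assumptions for illustration only:

# Toy tokenizer: one "token" per whitespace-separated word (illustrative only).
words = "one two three four five six".split()
toy = Tokenizer(
    chunk_overlap=1,
    tokens_per_chunk=3,
    decode=lambda ids: " ".join(words[i] for i in ids),
    encode=lambda text: [words.index(w) for w in text.split()],
)
print(split_text_on_tokens(text="one two three four five six", tokenizer=toy))
# ['one two three', 'three four five', 'five six']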
class TokenTextSplitter(TextSplitter):
    """Implementation of splitting text that looks at tokens."""

    def __init__(
        self,
        encoding_name: str = "gpt2",
        model_name: Optional[str] = None,
        allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
        disallowed_special: Union[Literal["all"], Collection[str]] = "all",
        **kwargs: Any,
    ) -> None:
        """Create a new TextSplitter."""
        super().__init__(**kwargs)
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed for TokenTextSplitter. "
                "Please install it with `pip install tiktoken`."
            )

        if model_name is not None:
            enc = tiktoken.encoding_for_model(model_name)
        else:
            enc = tiktoken.get_encoding(encoding_name)
        self._tokenizer = enc
        self._allowed_special = allowed_special
        self._disallowed_special = disallowed_special

    def split_text(self, text: str) -> List[str]:
        def _encode(_text: str) -> List[int]:
            return self._tokenizer.encode(
                _text,
                allowed_special=self._allowed_special,
                disallowed_special=self._disallowed_special,
            )

        tokenizer = Tokenizer(
            chunk_overlap=self._chunk_overlap,
            tokens_per_chunk=self._chunk_size,
            decode=self._tokenizer.decode,
            encode=_encode,
        )

        return split_text_on_tokens(text=text, tokenizer=tokenizer)
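A usage sketch (requires `pip install tiktoken`; the chunk sizes are arbitrary illustration values):

token_splitter = TokenTextSplitter(encoding_name="gpt2", chunk_size=10, chunk_overlap=2)
chunks = token_splitter.split_text(
    "Some long text whose length is measured in tokens rather than characters."
)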
class SentenceTransformersTokenTextSplitter(TextSplitter):
    """Implementation of splitting text that looks at tokens."""

    def __init__(
        self,
        chunk_overlap: int = 50,
        model_name: str = "sentence-transformers/all-mpnet-base-v2",
        tokens_per_chunk: Optional[int] = None,
        **kwargs: Any,
    ) -> None:
        """Create a new TextSplitter."""
        super().__init__(**kwargs, chunk_overlap=chunk_overlap)

        try:
            from sentence_transformers import SentenceTransformer
        except ImportError:
            raise ImportError(
                "Could not import sentence_transformers python package. "
                "This is needed for SentenceTransformersTokenTextSplitter. "
                "Please install it with `pip install sentence-transformers`."
            )

        self.model_name = model_name
        self._model = SentenceTransformer(self.model_name)
        self.tokenizer = self._model.tokenizer
        self._initialize_chunk_configuration(tokens_per_chunk=tokens_per_chunk)

    def _initialize_chunk_configuration(
        self, *, tokens_per_chunk: Optional[int]
    ) -> None:
        self.maximum_tokens_per_chunk = cast(int, self._model.max_seq_length)

        if tokens_per_chunk is None:
            self.tokens_per_chunk = self.maximum_tokens_per_chunk
        else:
            self.tokens_per_chunk = tokens_per_chunk

        if self.tokens_per_chunk > self.maximum_tokens_per_chunk:
            raise ValueError(
                f"The token limit of the model '{self.model_name}'"
                f" is: {self.maximum_tokens_per_chunk}."
                f" Argument tokens_per_chunk={self.tokens_per_chunk}"
                f" > maximum token limit."
            )

    def split_text(self, text: str) -> List[str]:
        def encode_strip_start_and_stop_token_ids(text: str) -> List[int]:
            return self._encode(text)[1:-1]

        tokenizer = Tokenizer(
            chunk_overlap=self._chunk_overlap,
            tokens_per_chunk=self.tokens_per_chunk,
            decode=self.tokenizer.decode,
            encode=encode_strip_start_and_stop_token_ids,
        )

        return split_text_on_tokens(text=text, tokenizer=tokenizer)

    def count_tokens(self, *, text: str) -> int:
        return len(self._encode(text))

    _max_length_equal_32_bit_integer = 2**32

    def _encode(self, text: str) -> List[int]:
        token_ids_with_start_and_end_token_ids = self.tokenizer.encode(
            text,
            max_length=self._max_length_equal_32_bit_integer,
            truncation="do_not_truncate",
        )
        return token_ids_with_start_and_end_token_ids
class Language(str, Enum):
    CPP = "cpp"
    GO = "go"
    JAVA = "java"
    JS = "js"
    PHP = "php"
    PROTO = "proto"
    PYTHON = "python"
    RST = "rst"
    RUBY = "ruby"
    RUST = "rust"
    SCALA = "scala"
    SWIFT = "swift"
    MARKDOWN = "markdown"
    LATEX = "latex"
    HTML = "html"
    SOL = "sol"


class RecursiveCharacterTextSplitter(TextSplitter):
    """Implementation of splitting text that looks at characters.

    Recursively tries to split by different characters to find one
    that works.
    """

    def __init__(
        self,
        separators: Optional[List[str]] = None,
        keep_separator: bool = True,
        **kwargs: Any,
    ) -> None:
        """Create a new TextSplitter."""
        super().__init__(keep_separator=keep_separator, **kwargs)
        self._separators = separators or ["\n\n", "\n", " ", ""]
    def _split_text(self, text: str, separators: List[str]) -> List[str]:
        """Split incoming text and return chunks."""
        final_chunks = []
        # Get appropriate separator to use
        separator = separators[-1]
        new_separators = []
        for i, _s in enumerate(separators):
            if _s == "":
                separator = _s
                break
            if re.search(_s, text):
                separator = _s
                new_separators = separators[i + 1 :]
                break

        splits = _split_text_with_regex(text, separator, self._keep_separator)
        # Now go merging things, recursively splitting longer texts.
        _good_splits = []
        _separator = "" if self._keep_separator else separator
        for s in splits:
            if self._length_function(s) < self._chunk_size:
                _good_splits.append(s)
            else:
                if _good_splits:
                    merged_text = self._merge_splits(_good_splits, _separator)
                    final_chunks.extend(merged_text)
                    _good_splits = []
                if not new_separators:
                    final_chunks.append(s)
                else:
                    other_info = self._split_text(s, new_separators)
                    final_chunks.extend(other_info)
        if _good_splits:
            merged_text = self._merge_splits(_good_splits, _separator)
            final_chunks.extend(merged_text)
        return final_chunks

    def split_text(self, text: str) -> List[str]:
        return self._split_text(text, self._separators)
    @classmethod
    def from_language(
        cls, language: Language, **kwargs: Any
    ) -> RecursiveCharacterTextSplitter:
        separators = cls.get_separators_for_language(language)
        return cls(separators=separators, **kwargs)

    @staticmethod
    def get_separators_for_language(language: Language) -> List[str]:
        if language == Language.CPP:
            return [
                # Split along class definitions
                "\nclass ",
                # Split along function definitions
                "\nvoid ",
                "\nint ",
                "\nfloat ",
                "\ndouble ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nwhile ",
                "\nswitch ",
                "\ncase ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.GO:
            return [
                # Split along function definitions
                "\nfunc ",
                "\nvar ",
                "\nconst ",
                "\ntype ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nswitch ",
                "\ncase ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.JAVA:
            return [
                # Split along class definitions
                "\nclass ",
                # Split along method definitions
                "\npublic ",
                "\nprotected ",
                "\nprivate ",
                "\nstatic ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nwhile ",
                "\nswitch ",
                "\ncase ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.JS:
            return [
                # Split along function definitions
                "\nfunction ",
                "\nconst ",
                "\nlet ",
                "\nvar ",
                "\nclass ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nwhile ",
                "\nswitch ",
                "\ncase ",
                "\ndefault ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.PHP:
            return [
                # Split along function definitions
                "\nfunction ",
                # Split along class definitions
                "\nclass ",
                # Split along control flow statements
                "\nif ",
                "\nforeach ",
                "\nwhile ",
                "\ndo ",
                "\nswitch ",
                "\ncase ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.PROTO:
            return [
                # Split along message definitions
                "\nmessage ",
                # Split along service definitions
                "\nservice ",
                # Split along enum definitions
                "\nenum ",
                # Split along option definitions
                "\noption ",
                # Split along import statements
                "\nimport ",
                # Split along syntax declarations
                "\nsyntax ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.PYTHON:
            return [
                # First, try to split along class definitions
                "\nclass ",
                "\ndef ",
                "\n\tdef ",
                # Now split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.RST:
            return [
                # Split along section titles
                "\n=+\n",
                "\n-+\n",
                "\n\*+\n",
                # Split along directive markers
                "\n\n.. *\n\n",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.RUBY:
            return [
                # Split along method definitions
                "\ndef ",
                "\nclass ",
                # Split along control flow statements
                "\nif ",
                "\nunless ",
                "\nwhile ",
                "\nfor ",
                "\ndo ",
                "\nbegin ",
                "\nrescue ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.RUST:
            return [
                # Split along function definitions
                "\nfn ",
                "\nconst ",
                "\nlet ",
                # Split along control flow statements
                "\nif ",
                "\nwhile ",
                "\nfor ",
                "\nloop ",
                "\nmatch ",
                "\nconst ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.SCALA:
            return [
                # Split along class definitions
                "\nclass ",
                "\nobject ",
                # Split along method definitions
                "\ndef ",
                "\nval ",
                "\nvar ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nwhile ",
                "\nmatch ",
                "\ncase ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.SWIFT:
            return [
                # Split along function definitions
                "\nfunc ",
                # Split along class definitions
                "\nclass ",
                "\nstruct ",
                "\nenum ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nwhile ",
                "\ndo ",
                "\nswitch ",
                "\ncase ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.MARKDOWN:
            return [
                # First, try to split along Markdown headings (starting with level 2)
                "\n#{1,6} ",
                # Note the alternative syntax for headings (below) is not handled here
                # Heading level 2
                # ---------------
                # End of code block
                "```\n",
                # Horizontal lines
                "\n\*\*\*+\n",
                "\n---+\n",
                "\n___+\n",
                # Note that this splitter doesn't handle horizontal lines defined
                # by *three or more* of ***, ---, or ___
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.LATEX:
            return [
                # First, try to split along Latex sections
                "\n\\\chapter{",
                "\n\\\section{",
                "\n\\\subsection{",
                "\n\\\subsubsection{",
                # Now split by environments
                "\n\\\begin{enumerate}",
                "\n\\\begin{itemize}",
                "\n\\\begin{description}",
                "\n\\\begin{list}",
                "\n\\\begin{quote}",
                "\n\\\begin{quotation}",
                "\n\\\begin{verse}",
                "\n\\\begin{verbatim}",
                # Now split by math environments
                "\n\\\begin{align}",
                "$$",
                "$",
                # Now split by the normal type of lines
                " ",
                "",
            ]
        elif language == Language.HTML:
            return [
                # First, try to split along HTML tags
                "<body",
                "<div",
                "<p",
                "<br",
                "<li",
                "<h1",
                "<h2",
                "<h3",
                "<h4",
                "<h5",
                "<h6",
                "<span",
                "<table",
                "<tr",
                "<td",
                "<th",
                "<ul",
                "<ol",
                "<header",
                "<footer",
                "<nav",
                # Head
                "<head",
                "<style",
                "<script",
                "<meta",
                "<title",
                "",
            ]
        elif language == Language.SOL:
            return [
                # Split along compiler information definitions
                "\npragma ",
                "\nusing ",
                # Split along contract definitions
                "\ncontract ",
                "\ninterface ",
                "\nlibrary ",
                # Split along method definitions
                "\nconstructor ",
                "\ntype ",
                "\nfunction ",
                "\nevent ",
                "\nmodifier ",
                "\nerror ",
                "\nstruct ",
                "\nenum ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nwhile ",
                "\ndo while ",
                "\nassembly ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        else:
            raise ValueError(
                f"Language {language} is not supported! "
                f"Please choose from {list(Language)}"
            )
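A usage sketch of the language-aware constructor; the sample source and chunk size are illustrative:

python_source = '''
def add(a, b):
    return a + b

def sub(a, b):
    return a - b
'''
py_splitter = RecursiveCharacterTextSplitter.from_language(
    Language.PYTHON, chunk_size=50, chunk_overlap=0
)
chunks = py_splitter.split_text(python_source)  # tries "\nclass ", "\ndef ", ... before falling back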
class NLTKTextSplitter(TextSplitter):
    """Implementation of splitting text that looks at sentences using NLTK."""

    def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None:
        """Initialize the NLTK splitter."""
        super().__init__(**kwargs)
        try:
            from nltk.tokenize import sent_tokenize

            self._tokenizer = sent_tokenize
        except ImportError:
            raise ImportError(
                "NLTK is not installed, please install it with `pip install nltk`."
            )
        self._separator = separator

    def split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks."""
        # First we naively split the large input into a bunch of smaller ones.
        splits = self._tokenizer(text)
        return self._merge_splits(splits, self._separator)
class SpacyTextSplitter(TextSplitter):
    """Implementation of splitting text that looks at sentences using Spacy."""

    def __init__(
        self, separator: str = "\n\n", pipeline: str = "en_core_web_sm", **kwargs: Any
    ) -> None:
        """Initialize the spacy text splitter."""
        super().__init__(**kwargs)
        try:
            import spacy
        except ImportError:
            raise ImportError(
                "Spacy is not installed, please install it with `pip install spacy`."
            )
        self._tokenizer = spacy.load(pipeline)
        self._separator = separator

    def split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks."""
        splits = (str(s) for s in self._tokenizer(text).sents)
        return self._merge_splits(splits, self._separator)
# For backwards compatibility
class PythonCodeTextSplitter(RecursiveCharacterTextSplitter):
    """Attempts to split the text along Python syntax."""

    def __init__(self, **kwargs: Any) -> None:
        """Initialize a PythonCodeTextSplitter."""
        separators = self.get_separators_for_language(Language.PYTHON)
        super().__init__(separators=separators, **kwargs)


class MarkdownTextSplitter(RecursiveCharacterTextSplitter):
    """Attempts to split the text along Markdown-formatted headings."""

    def __init__(self, **kwargs: Any) -> None:
        """Initialize a MarkdownTextSplitter."""
        separators = self.get_separators_for_language(Language.MARKDOWN)
        super().__init__(separators=separators, **kwargs)


class LatexTextSplitter(RecursiveCharacterTextSplitter):
    """Attempts to split the text along Latex-formatted layout elements."""

    def __init__(self, **kwargs: Any) -> None:
        """Initialize a LatexTextSplitter."""
        separators = self.get_separators_for_language(Language.LATEX)
        super().__init__(separators=separators, **kwargs)
Source code for langchain.output_parsers.retry
from __future__ import annotations

from typing import TypeVar

from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import (
    BaseOutputParser,
    OutputParserException,
    PromptValue,
)

NAIVE_COMPLETION_RETRY = """Prompt:
{prompt}
Completion:
{completion}
Above, the Completion did not satisfy the constraints given in the Prompt.
Please try again:"""

NAIVE_COMPLETION_RETRY_WITH_ERROR = """Prompt:
{prompt}
Completion:
{completion}
Above, the Completion did not satisfy the constraints given in the Prompt.
Details: {error}
Please try again:"""

NAIVE_RETRY_PROMPT = PromptTemplate.from_template(NAIVE_COMPLETION_RETRY)
NAIVE_RETRY_WITH_ERROR_PROMPT = PromptTemplate.from_template(
    NAIVE_COMPLETION_RETRY_WITH_ERROR
)

T = TypeVar("T")


class RetryOutputParser(BaseOutputParser[T]):
    """Wraps a parser and tries to fix parsing errors.

    Does this by passing the original prompt and the completion to another
    LLM, and telling it the completion did not satisfy criteria in the prompt.
    """

    parser: BaseOutputParser[T]
    retry_chain: LLMChain

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        parser: BaseOutputParser[T],
        prompt: BasePromptTemplate = NAIVE_RETRY_PROMPT,
    ) -> RetryOutputParser[T]:
        chain = LLMChain(llm=llm, prompt=prompt)
        return cls(parser=parser, retry_chain=chain)

    def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
        try:
            parsed_completion = self.parser.parse(completion)
        except OutputParserException:
            new_completion = self.retry_chain.run(
                prompt=prompt_value.to_string(), completion=completion
            )
            parsed_completion = self.parser.parse(new_completion)
        return parsed_completion

    def parse(self, completion: str) -> T:
        raise NotImplementedError(
            "This OutputParser can only be called by the `parse_with_prompt` method."
        )

    def get_format_instructions(self) -> str:
        return self.parser.get_format_instructions()

    @property
    def _type(self) -> str:
        return "retry"
class RetryWithErrorOutputParser(BaseOutputParser[T]):
    """Wraps a parser and tries to fix parsing errors.

    Does this by passing the original prompt, the completion, AND the error
    that was raised to another language model and telling it that the completion
    did not work, and raised the given error. Differs from RetryOutputParser
    in that this implementation provides the error that was raised back to the
    LLM, which in theory should give it more information on how to fix it.
    """

    parser: BaseOutputParser[T]
    retry_chain: LLMChain

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        parser: BaseOutputParser[T],
        prompt: BasePromptTemplate = NAIVE_RETRY_WITH_ERROR_PROMPT,
    ) -> RetryWithErrorOutputParser[T]:
        chain = LLMChain(llm=llm, prompt=prompt)
        return cls(parser=parser, retry_chain=chain)

    def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
        try:
            parsed_completion = self.parser.parse(completion)
        except OutputParserException as e:
            new_completion = self.retry_chain.run(
                prompt=prompt_value.to_string(), completion=completion, error=repr(e)
            )
            parsed_completion = self.parser.parse(new_completion)
        return parsed_completion

    def parse(self, completion: str) -> T:
        raise NotImplementedError(
            "This OutputParser can only be called by the `parse_with_prompt` method."
        )

    def get_format_instructions(self) -> str:
        return self.parser.get_format_instructions()

    @property
    def _type(self) -> str:
        return "retry_with_error"
Source code for langchain.output_parsers.list
from __future__ import annotations

from abc import abstractmethod
from typing import List

from langchain.schema import BaseOutputParser


class ListOutputParser(BaseOutputParser):
    """Class to parse the output of an LLM call to a list."""

    @property
    def _type(self) -> str:
        return "list"

    @abstractmethod
    def parse(self, text: str) -> List[str]:
        """Parse the output of an LLM call."""


class CommaSeparatedListOutputParser(ListOutputParser):
    """Parse out comma separated lists."""

    def get_format_instructions(self) -> str:
        return (
            "Your response should be a list of comma separated values, "
            "eg: `foo, bar, baz`"
        )

    def parse(self, text: str) -> List[str]:
        """Parse the output of an LLM call."""
        return text.strip().split(", ")
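A quick usage sketch:

parser = CommaSeparatedListOutputParser()
parser.parse("foo, bar, baz")  # -> ['foo', 'bar', 'baz']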
Source code for langchain.output_parsers.datetime
import random
from datetime import datetime, timedelta
from typing import List

from langchain.schema import BaseOutputParser, OutputParserException
from langchain.utils import comma_list


def _generate_random_datetime_strings(
    pattern: str,
    n: int = 3,
    start_date: datetime = datetime(1, 1, 1),
    end_date: datetime = datetime.now() + timedelta(days=3650),
) -> List[str]:
    """
    Generates n random datetime strings conforming to the
    given pattern within the specified date range.

    Pattern should be a string containing the desired format codes.
    start_date and end_date should be datetime objects representing
    the start and end of the date range.
    """
    examples = []
    delta = end_date - start_date
    for i in range(n):
        random_delta = random.uniform(0, delta.total_seconds())
        dt = start_date + timedelta(seconds=random_delta)
        date_string = dt.strftime(pattern)
        examples.append(date_string)
    return examples


class DatetimeOutputParser(BaseOutputParser[datetime]):
    format: str = "%Y-%m-%dT%H:%M:%S.%fZ"

    def get_format_instructions(self) -> str:
        examples = comma_list(_generate_random_datetime_strings(self.format))
        return f"""Write a datetime string that matches the
            following pattern: "{self.format}". Examples: {examples}"""

    def parse(self, response: str) -> datetime:
        try:
            return datetime.strptime(response.strip(), self.format)
        except ValueError as e:
            raise OutputParserException(
                f"Could not parse datetime string: {response}"
            ) from e

    @property
    def _type(self) -> str:
        return "datetime"
Source code for langchain.output_parsers.rail_parser
from __future__ import annotations

from typing import Any, Dict

from langchain.schema import BaseOutputParser


class GuardrailsOutputParser(BaseOutputParser):
    guard: Any

    @property
    def _type(self) -> str:
        return "guardrails"

    @classmethod
    def from_rail(cls, rail_file: str, num_reasks: int = 1) -> GuardrailsOutputParser:
        try:
            from guardrails import Guard
        except ImportError:
            raise ValueError(
                "guardrails-ai package not installed. "
                "Install it by running `pip install guardrails-ai`."
            )
        return cls(guard=Guard.from_rail(rail_file, num_reasks=num_reasks))

    @classmethod
    def from_rail_string(
        cls, rail_str: str, num_reasks: int = 1
    ) -> GuardrailsOutputParser:
        try:
            from guardrails import Guard
        except ImportError:
            raise ValueError(
                "guardrails-ai package not installed. "
                "Install it by running `pip install guardrails-ai`."
            )
        return cls(guard=Guard.from_rail_string(rail_str, num_reasks=num_reasks))

    def get_format_instructions(self) -> str:
        return self.guard.raw_prompt.format_instructions

    def parse(self, text: str) -> Dict:
        return self.guard.parse(text)
Source code for langchain.output_parsers.regex
from __future__ import annotations

import re
from typing import Dict, List, Optional

from langchain.schema import BaseOutputParser


class RegexParser(BaseOutputParser):
    """Class to parse the output into a dictionary."""

    regex: str
    output_keys: List[str]
    default_output_key: Optional[str] = None

    @property
    def _type(self) -> str:
        """Return the type key."""
        return "regex_parser"

    def parse(self, text: str) -> Dict[str, str]:
        """Parse the output of an LLM call."""
        match = re.search(self.regex, text)
        if match:
            return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)}
        else:
            if self.default_output_key is None:
                raise ValueError(f"Could not parse output: {text}")
            else:
                return {
                    key: text if key == self.default_output_key else ""
                    for key in self.output_keys
                }
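A usage sketch; the regex and keys are illustrative:

parser = RegexParser(
    regex=r"Score: (\d+)\nReason: (.*)",
    output_keys=["score", "reason"],
)
parser.parse("Score: 8\nReason: Clear and concise.")
# -> {'score': '8', 'reason': 'Clear and concise.'}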
Source code for langchain.output_parsers.structured
from __future__ import annotations

from typing import Any, List

from pydantic import BaseModel

from langchain.output_parsers.format_instructions import STRUCTURED_FORMAT_INSTRUCTIONS
from langchain.output_parsers.json import parse_and_check_json_markdown
from langchain.schema import BaseOutputParser

line_template = '\t"{name}": {type} // {description}'


class ResponseSchema(BaseModel):
    name: str
    description: str
    type: str = "string"


def _get_sub_string(schema: ResponseSchema) -> str:
    return line_template.format(
        name=schema.name, description=schema.description, type=schema.type
    )


class StructuredOutputParser(BaseOutputParser):
    response_schemas: List[ResponseSchema]

    @classmethod
    def from_response_schemas(
        cls, response_schemas: List[ResponseSchema]
    ) -> StructuredOutputParser:
        return cls(response_schemas=response_schemas)

    def get_format_instructions(self) -> str:
        schema_str = "\n".join(
            [_get_sub_string(schema) for schema in self.response_schemas]
        )
        return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str)

    def parse(self, text: str) -> Any:
        expected_keys = [rs.name for rs in self.response_schemas]
        return parse_and_check_json_markdown(text, expected_keys)

    @property
    def _type(self) -> str:
        return "structured"
Source code for langchain.output_parsers.regex_dict
from __future__ import annotations

import re
from typing import Dict, Optional

from langchain.schema import BaseOutputParser


class RegexDictParser(BaseOutputParser):
    """Class to parse the output into a dictionary."""

    regex_pattern: str = r"{}:\s?([^.'\n']*)\.?"  # : :meta private:
    output_key_to_format: Dict[str, str]
    no_update_value: Optional[str] = None

    @property
    def _type(self) -> str:
        """Return the type key."""
        return "regex_dict_parser"

    def parse(self, text: str) -> Dict[str, str]:
        """Parse the output of an LLM call."""
        result = {}
        for output_key, expected_format in self.output_key_to_format.items():
            specific_regex = self.regex_pattern.format(re.escape(expected_format))
            matches = re.findall(specific_regex, text)
            if not matches:
                raise ValueError(
                    f"No match found for output key: {output_key} with expected "
                    f"format {expected_format} on text {text}"
                )
            elif len(matches) > 1:
                raise ValueError(
                    f"Multiple matches found for output key: {output_key} with "
                    f"expected format {expected_format} on text {text}"
                )
            elif (
                self.no_update_value is not None and matches[0] == self.no_update_value
            ):
                continue
            else:
                result[output_key] = matches[0]
        return result
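A usage sketch; the keys and text are illustrative:

parser = RegexDictParser(
    output_key_to_format={"action": "Action", "action_input": "Action Input"}
)
parser.parse("Action: search. Action Input: current weather.")
# -> {'action': 'search', 'action_input': 'current weather'}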
Source code for langchain.output_parsers.fix
from __future__ import annotations

from typing import TypeVar

from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseOutputParser, OutputParserException

T = TypeVar("T")


class OutputFixingParser(BaseOutputParser[T]):
    """Wraps a parser and tries to fix parsing errors."""

    parser: BaseOutputParser[T]
    retry_chain: LLMChain

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        parser: BaseOutputParser[T],
        prompt: BasePromptTemplate = NAIVE_FIX_PROMPT,
    ) -> OutputFixingParser[T]:
        chain = LLMChain(llm=llm, prompt=prompt)
        return cls(parser=parser, retry_chain=chain)

    def parse(self, completion: str) -> T:
        try:
            parsed_completion = self.parser.parse(completion)
        except OutputParserException as e:
            new_completion = self.retry_chain.run(
                instructions=self.parser.get_format_instructions(),
                completion=completion,
                error=repr(e),
            )
            parsed_completion = self.parser.parse(new_completion)
        return parsed_completion

    def get_format_instructions(self) -> str:
        return self.parser.get_format_instructions()

    @property
    def _type(self) -> str:
        return "output_fixing"
Source code for langchain.output_parsers.pydantic
import json
import re
from typing import Type, TypeVar

from pydantic import BaseModel, ValidationError

from langchain.output_parsers.format_instructions import PYDANTIC_FORMAT_INSTRUCTIONS
from langchain.schema import BaseOutputParser, OutputParserException

T = TypeVar("T", bound=BaseModel)


class PydanticOutputParser(BaseOutputParser[T]):
    pydantic_object: Type[T]

    def parse(self, text: str) -> T:
        try:
            # Greedy search for 1st json candidate.
            match = re.search(
                r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL
            )
            json_str = ""
            if match:
                json_str = match.group()
            json_object = json.loads(json_str, strict=False)
            return self.pydantic_object.parse_obj(json_object)

        except (json.JSONDecodeError, ValidationError) as e:
            name = self.pydantic_object.__name__
            msg = f"Failed to parse {name} from completion {text}. Got: {e}"
            raise OutputParserException(msg)

    def get_format_instructions(self) -> str:
        schema = self.pydantic_object.schema()

        # Remove extraneous fields.
        reduced_schema = schema
        if "title" in reduced_schema:
            del reduced_schema["title"]
        if "type" in reduced_schema:
            del reduced_schema["type"]
        # Ensure json in context is well-formed with double quotes.
        schema_str = json.dumps(reduced_schema)

        return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)

    @property
    def _type(self) -> str:
        return "pydantic"
Source code for langchain.document_loaders.url
"""Loader that uses unstructured to load HTML files."""
import logging
from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
[docs]class UnstructuredURLLoader(BaseLoader):
"""Loader that uses unstructured to load HTML files."""
def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
mode: str = "single",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
try:
import unstructured # noqa:F401
from unstructured.__version__ import __version__ as __unstructured_version__
self.__version = __unstructured_version__
except ImportError:
raise ValueError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
self._validate_mode(mode)
self.mode = mode
headers = unstructured_kwargs.pop("headers", {})
if len(headers.keys()) != 0:
warn_about_headers = False
if self.__is_non_html_available():
warn_about_headers = not self.__is_headers_available_for_non_html()
else:
warn_about_headers = not self.__is_headers_available_for_html()
if warn_about_headers:
logger.warning(
"You are using an old version of unstructured. "
"The headers parameter is ignored"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.headers = headers
self.unstructured_kwargs = unstructured_kwargs
def _validate_mode(self, mode: str) -> None:
|
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/url.html
|
369bc24c53df-1
|
def _validate_mode(self, mode: str) -> None:
_valid_modes = {"single", "elements"}
if mode not in _valid_modes:
raise ValueError(
f"Got {mode} for `mode`, but should be one of `{_valid_modes}`"
)
def __is_headers_available_for_html(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 7)
def __is_headers_available_for_non_html(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 13)
def __is_non_html_available(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 12)
    def load(self) -> List[Document]:
        """Load file."""
        from unstructured.partition.auto import partition
        from unstructured.partition.html import partition_html

        docs: List[Document] = list()
        for url in self.urls:
            try:
                if self.__is_non_html_available():
                    if self.__is_headers_available_for_non_html():
                        elements = partition(
                            url=url, headers=self.headers, **self.unstructured_kwargs
                        )
                    else:
                        elements = partition(url=url, **self.unstructured_kwargs)
                else:
                    if self.__is_headers_available_for_html():
                        elements = partition_html(
                            url=url, headers=self.headers, **self.unstructured_kwargs
                        )
                    else:
                        elements = partition_html(url=url, **self.unstructured_kwargs)
            except Exception as e:
                if self.continue_on_failure:
                    logger.error(f"Error fetching or processing {url}, exception: {e}")
                    continue
                else:
                    raise e

            if self.mode == "single":
                text = "\n\n".join([str(el) for el in elements])
                metadata = {"source": url}
                docs.append(Document(page_content=text, metadata=metadata))
            elif self.mode == "elements":
                for element in elements:
                    metadata = element.metadata.to_dict()
                    metadata["category"] = element.category
                    docs.append(Document(page_content=str(element), metadata=metadata))

        return docs
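A usage sketch (the URL is a placeholder):

loader = UnstructuredURLLoader(
    urls=["https://example.com"],
    mode="elements",           # one Document per detected element
    continue_on_failure=True,  # log and skip URLs that fail
)
docs = loader.load()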
Source code for langchain.document_loaders.hn
"""Loader that loads HN."""
from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader
[docs]class HNLoader(WebBaseLoader):
"""Load Hacker News data from either main page results or the comments page."""
[docs] def load(self) -> List[Document]:
"""Get important HN webpage information.
Components are:
- title
- content
- source url,
- time of post
- author of the post
- number of comments
- rank of the post
"""
soup_info = self.scrape()
if "item" in self.web_path:
return self.load_comments(soup_info)
else:
return self.load_results(soup_info)
[docs] def load_comments(self, soup_info: Any) -> List[Document]:
"""Load comments from a HN post."""
comments = soup_info.select("tr[class='athing comtr']")
title = soup_info.select_one("tr[id='pagespace']").get("title")
return [
Document(
page_content=comment.text.strip(),
metadata={"source": self.web_path, "title": title},
)
for comment in comments
]
[docs] def load_results(self, soup: Any) -> List[Document]:
"""Load items from an HN page."""
items = soup.select("tr[class='athing']")
documents = []
for lineItem in items:
ranking = lineItem.select_one("span[class='rank']").text
link = lineItem.find("span", {"class": "titleline"}).find("a").get("href")
|
rtdocs_stable/api.python.langchain.com/en/stable/_modules/langchain/document_loaders/hn.html
|
cdf1588ca23c-1
|
title = lineItem.find("span", {"class": "titleline"}).text.strip()
metadata = {
"source": self.web_path,
"title": title,
"link": link,
"ranking": ranking,
}
documents.append(
Document(
page_content=title, link=link, ranking=ranking, metadata=metadata
)
)
return documents
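A usage sketch; the item id below is a made-up example, and WebBaseLoader is assumed to accept the URL as its first constructor argument:

loader = HNLoader("https://news.ycombinator.com/item?id=1")
comments = loader.load()  # "item" in the URL routes to load_comments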
Source code for langchain.document_loaders.csv_loader
import csv
from typing import Any, Dict, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
[docs]class CSVLoader(BaseLoader):
"""Loads a CSV file into a list of documents.
Each document represents one row of the CSV file. Every row is converted into a
    key/value pair and output to a new line in the document's page_content.
    The source for each document loaded from csv is set to the value of the
    `file_path` argument for all documents by default.
You can override this by setting the `source_column` argument to the
name of a column in the CSV file.
The source of each document will then be set to the value of the column
with the name specified in `source_column`.
Output Example:
.. code-block:: txt
column1: value1
column2: value2
column3: value3
"""
def __init__(
self,
file_path: str,
source_column: Optional[str] = None,
csv_args: Optional[Dict] = None,
encoding: Optional[str] = None,
):
self.file_path = file_path
self.source_column = source_column
self.encoding = encoding
self.csv_args = csv_args or {}
[docs] def load(self) -> List[Document]:
"""Load data into document objects."""
docs = []
with open(self.file_path, newline="", encoding=self.encoding) as csvfile:
csv_reader = csv.DictReader(csvfile, **self.csv_args) # type: ignore
for i, row in enumerate(csv_reader):
content = "\n".join(f"{k.strip()}: {v.strip()}" for k, v in row.items())
try:
source = (
row[self.source_column]
if self.source_column is not None
else self.file_path
)
except KeyError:
raise ValueError(
f"Source column '{self.source_column}' not found in CSV file."
)
metadata = {"source": source, "row": i}
doc = Document(page_content=content, metadata=metadata)
docs.append(doc)
return docs
[docs]class UnstructuredCSVLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load CSV files."""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
validate_unstructured_version(min_unstructured_version="0.6.8")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.csv import partition_csv
return partition_csv(filename=self.file_path, **self.unstructured_kwargs)
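A hedged usage sketch for both CSV loaders above (the file path and column name are placeholders):

from langchain.document_loaders import CSVLoader, UnstructuredCSVLoader

loader = CSVLoader(
    file_path="data.csv",
    source_column="column1",        # optional: use this column as each doc's source
    csv_args={"delimiter": ","},
)
docs = loader.load()                # one Document per CSV row

# Alternative that partitions via unstructured (requires unstructured>=0.6.8):
elements = UnstructuredCSVLoader("data.csv", mode="elements").load()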
Source code for langchain.document_loaders.youtube
"""Loader that loads YouTube transcript."""
from __future__ import annotations
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
from urllib.parse import parse_qs, urlparse
from pydantic import root_validator
from pydantic.dataclasses import dataclass
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
SCOPES = ["https://www.googleapis.com/auth/youtube.readonly"]
[docs]@dataclass
class GoogleApiClient:
"""A Generic Google Api Client.
    To use, you should have the ``google-auth-oauthlib``, ``youtube-transcript-api``
    and ``google`` python packages installed.
    As the Google API expects credentials, you need to set up a Google account and
    register your service: https://developers.google.com/docs/api/quickstart/python
Example:
.. code-block:: python
from langchain.document_loaders import GoogleApiClient
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
"""
credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
service_account_path: Path = Path.home() / ".credentials" / "credentials.json"
token_path: Path = Path.home() / ".credentials" / "token.json"
def __post_init__(self) -> None:
self.creds = self._load_credentials()
[docs] @root_validator
def validate_channel_or_videoIds_is_set(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get("credentials_path") and not values.get(
"service_account_path"
):
raise ValueError("Must specify either channel_name or video_ids")
return values
def _load_credentials(self) -> Any:
"""Load credentials."""
# Adapted from https://developers.google.com/drive/api/v3/quickstart/python
try:
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib "
"youtube-transcript-api` "
"to use the Google Drive loader"
)
creds = None
if self.service_account_path.exists():
return service_account.Credentials.from_service_account_file(
str(self.service_account_path)
)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
str(self.credentials_path), SCOPES
)
creds = flow.run_local_server(port=0)
with open(self.token_path, "w") as token:
token.write(creds.to_json())
return creds
ALLOWED_SCHEMAS = {"http", "https"}
ALLOWED_NETLOCK = {
"youtu.be",
"m.youtube.com",
"youtube.com",
"www.youtube.com",
"www.youtube-nocookie.com",
"vid.plus",
}
def _parse_video_id(url: str) -> Optional[str]:
"""Parse a youtube url and return the video id if valid, otherwise None."""
parsed_url = urlparse(url)
if parsed_url.scheme not in ALLOWED_SCHEMAS:
return None
if parsed_url.netloc not in ALLOWED_NETLOCK:
return None
path = parsed_url.path
if path.endswith("/watch"):
query = parsed_url.query
parsed_query = parse_qs(query)
if "v" in parsed_query:
ids = parsed_query["v"]
video_id = ids if isinstance(ids, str) else ids[0]
else:
return None
else:
path = parsed_url.path.lstrip("/")
video_id = path.split("/")[-1]
if len(video_id) != 11: # Video IDs are 11 characters long
return None
return video_id
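For reference, a few illustrative calls and the values the helper above returns (the video id is a placeholder):

_parse_video_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ")  # -> "dQw4w9WgXcQ"
_parse_video_id("https://youtu.be/dQw4w9WgXcQ")                 # -> "dQw4w9WgXcQ"
_parse_video_id("ftp://youtube.com/watch?v=dQw4w9WgXcQ")        # -> None (disallowed scheme)
_parse_video_id("https://www.youtube.com/watch")                # -> None (no "v" parameter)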
[docs]class YoutubeLoader(BaseLoader):
"""Loader that loads Youtube transcripts."""
def __init__(
self,
video_id: str,
add_video_info: bool = False,
language: Union[str, Sequence[str]] = "en",
translation: str = "en",
continue_on_failure: bool = False,
):
"""Initialize with YouTube video ID."""
self.video_id = video_id
self.add_video_info = add_video_info
if isinstance(language, str):
self.language = [language]
else:
self.language = language
self.translation = translation
self.continue_on_failure = continue_on_failure
[docs] @staticmethod
def extract_video_id(youtube_url: str) -> str:
"""Extract video id from common YT urls."""
video_id = _parse_video_id(youtube_url)
if not video_id:
raise ValueError(
f"Could not determine the video ID for the URL {youtube_url}"
)
return video_id
[docs] @classmethod
def from_youtube_url(cls, youtube_url: str, **kwargs: Any) -> YoutubeLoader:
"""Given youtube URL, load video."""
video_id = cls.extract_video_id(youtube_url)
return cls(video_id, **kwargs)
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
from youtube_transcript_api import (
NoTranscriptFound,
TranscriptsDisabled,
YouTubeTranscriptApi,
)
except ImportError:
raise ImportError(
"Could not import youtube_transcript_api python package. "
"Please install it with `pip install youtube-transcript-api`."
)
metadata = {"source": self.video_id}
if self.add_video_info:
# Get more video meta info
# Such as title, description, thumbnail url, publish_date
video_info = self._get_video_info()
metadata.update(video_info)
try:
transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
except TranscriptsDisabled:
return []
try:
transcript = transcript_list.find_transcript(self.language)
except NoTranscriptFound:
en_transcript = transcript_list.find_transcript(["en"])
transcript = en_transcript.translate(self.translation)
transcript_pieces = transcript.fetch()
transcript = " ".join([t["text"].strip(" ") for t in transcript_pieces])
return [Document(page_content=transcript, metadata=metadata)]
def _get_video_info(self) -> dict:
"""Get important video information.
Components are:
- title
- description
- thumbnail url,
- publish_date
- channel_author
- and more.
"""
try:
from pytube import YouTube
except ImportError:
raise ImportError(
"Could not import pytube python package. "
"Please install it with `pip install pytube`."
)
yt = YouTube(f"https://www.youtube.com/watch?v={self.video_id}")
video_info = {
"title": yt.title or "Unknown",
"description": yt.description or "Unknown",
"view_count": yt.views or 0,
"thumbnail_url": yt.thumbnail_url or "Unknown",
"publish_date": yt.publish_date.strftime("%Y-%m-%d %H:%M:%S")
if yt.publish_date
else "Unknown",
"length": yt.length or 0,
"author": yt.author or "Unknown",
}
return video_info
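A minimal usage sketch for YoutubeLoader (the video URL is a placeholder; add_video_info needs `pytube`):

from langchain.document_loaders import YoutubeLoader

loader = YoutubeLoader.from_youtube_url(
    "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
    add_video_info=True,      # pulls title, description, etc. via pytube
    language=["en", "de"],    # preferred transcript languages, in order
)
docs = loader.load()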
[docs]@dataclass
class GoogleApiYoutubeLoader(BaseLoader):
"""Loader that loads all Videos from a Channel
To use, you should have the ``googleapiclient,youtube_transcript_api``
python package installed.
As the service needs a google_api_client, you first have to initialize
the GoogleApiClient.
Additionally you have to either provide a channel name or a list of videoids
"https://developers.google.com/docs/api/quickstart/python"
Example:
.. code-block:: python
from langchain.document_loaders import GoogleApiClient
from langchain.document_loaders import GoogleApiYoutubeLoader
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
loader = GoogleApiYoutubeLoader(
google_api_client=google_api_client,
channel_name = "CodeAesthetic"
)
            loader.load()
"""
google_api_client: GoogleApiClient
channel_name: Optional[str] = None
video_ids: Optional[List[str]] = None
add_video_info: bool = True
captions_language: str = "en"
continue_on_failure: bool = False
def __post_init__(self) -> None:
self.youtube_client = self._build_youtube_client(self.google_api_client.creds)
def _build_youtube_client(self, creds: Any) -> Any:
try:
from googleapiclient.discovery import build
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib "
"youtube-transcript-api` "
"to use the Google Drive loader"
)
return build("youtube", "v3", credentials=creds)
[docs] @root_validator
def validate_channel_or_videoIds_is_set(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get("channel_name") and not values.get("video_ids"):
raise ValueError("Must specify either channel_name or video_ids")
return values
    def _get_transcript_for_video_id(self, video_id: str) -> str:
from youtube_transcript_api import NoTranscriptFound, YouTubeTranscriptApi
transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
try:
transcript = transcript_list.find_transcript([self.captions_language])
except NoTranscriptFound:
            for available_transcript in transcript_list:
                # translate the first available transcript and stop
                transcript = available_transcript.translate(self.captions_language)
                break
transcript_pieces = transcript.fetch()
return " ".join([t["text"].strip(" ") for t in transcript_pieces])
def _get_document_for_video_id(self, video_id: str, **kwargs: Any) -> Document:
        captions = self._get_transcript_for_video_id(video_id)
video_response = (
self.youtube_client.videos()
.list(
part="id,snippet",
id=video_id,
)
.execute()
)
return Document(
page_content=captions,
metadata=video_response.get("items")[0],
)
def _get_channel_id(self, channel_name: str) -> str:
request = self.youtube_client.search().list(
part="id",
q=channel_name,
type="channel",
maxResults=1, # we only need one result since channel names are unique
)
response = request.execute()
channel_id = response["items"][0]["id"]["channelId"]
return channel_id
def _get_document_for_channel(self, channel: str, **kwargs: Any) -> List[Document]:
try:
from youtube_transcript_api import (
NoTranscriptFound,
TranscriptsDisabled,
)
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"youtube-transcript-api` "
"to use the youtube loader"
)
channel_id = self._get_channel_id(channel)
request = self.youtube_client.search().list(
part="id,snippet",
channelId=channel_id,
maxResults=50, # adjust this value to retrieve more or fewer videos
)
video_ids = []
while request is not None:
response = request.execute()
# Add each video ID to the list
for item in response["items"]:
if not item["id"].get("videoId"):
continue
meta_data = {"videoId": item["id"]["videoId"]}
if self.add_video_info:
item["snippet"].pop("thumbnails")
meta_data.update(item["snippet"])
try:
                    page_content = self._get_transcript_for_video_id(
item["id"]["videoId"]
)
video_ids.append(
Document(
page_content=page_content,
metadata=meta_data,
)
)
except (TranscriptsDisabled, NoTranscriptFound) as e:
if self.continue_on_failure:
logger.error(
"Error fetching transscript "
+ f" {item['id']['videoId']}, exception: {e}"
)
else:
raise e
request = self.youtube_client.search().list_next(request, response)
return video_ids
[docs] def load(self) -> List[Document]:
"""Load documents."""
document_list = []
if self.channel_name:
document_list.extend(self._get_document_for_channel(self.channel_name))
elif self.video_ids:
document_list.extend(
[
self._get_document_for_video_id(video_id)
for video_id in self.video_ids
]
)
else:
raise ValueError("Must specify either channel_name or video_ids")
return document_list
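A sketch of the video_ids mode, complementing the channel_name example in the docstring (paths and ids are placeholders):

from pathlib import Path
from langchain.document_loaders import GoogleApiClient, GoogleApiYoutubeLoader

google_api_client = GoogleApiClient(
    service_account_path=Path("path_to_your_sec_file.json")
)
loader = GoogleApiYoutubeLoader(
    google_api_client=google_api_client,
    video_ids=["dQw4w9WgXcQ"],   # alternative to channel_name
)
docs = loader.load()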
Source code for langchain.document_loaders.confluence
"""Load Data from a Confluence Space"""
import logging
from enum import Enum
from io import BytesIO
from typing import Any, Callable, Dict, List, Optional, Union
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class ContentFormat(str, Enum):
STORAGE = "body.storage"
VIEW = "body.view"
def get_content(self, page: dict) -> str:
if self == ContentFormat.STORAGE:
return page["body"]["storage"]["value"]
elif self == ContentFormat.VIEW:
return page["body"]["view"]["value"]
raise ValueError("unknown content format")
[docs]class ConfluenceLoader(BaseLoader):
"""
Load Confluence pages. Port of https://llamahub.ai/l/confluence
    This currently supports username/api_key, OAuth2 login, or personal access token
    authentication.
    Specify a list of page_ids and/or a space_key to load the corresponding pages into
    Document objects; if both are specified, the union of both sets is returned.
    You can also specify a boolean `include_attachments` to include attachments. This
    is set to False by default; if set to True, all attachments are downloaded and
    the loader extracts the text from the attachments and adds it to the
    Document object. Currently supported attachment types are: PDF, PNG, JPEG/JPG,
    SVG, Word and Excel.
    The Confluence API supports different formats of page content. The storage format is
    the raw XML representation for storage. The view format is the HTML representation for
    viewing, with macros rendered as they appear to users. You can pass
    an enum `content_format` argument to `load()` to specify the content format; this
    is set to `ContentFormat.STORAGE` by default.
Hint: space_key and page_id can both be found in the URL of a page in Confluence
- https://yoursite.atlassian.com/wiki/spaces/<space_key>/pages/<page_id>
Example:
.. code-block:: python
from langchain.document_loaders import ConfluenceLoader
loader = ConfluenceLoader(
url="https://yoursite.atlassian.com/wiki",
username="me",
api_key="12345"
)
documents = loader.load(space_key="SPACE",limit=50)
    :param url: Base URL of the Confluence instance
    :type url: str
    :param api_key: Confluence API key, defaults to None
    :type api_key: str, optional
    :param username: Confluence username, defaults to None
    :type username: str, optional
    :param oauth2: OAuth2 credentials dictionary, defaults to {}
    :type oauth2: dict, optional
    :param token: Personal access token, defaults to None
    :type token: str, optional
    :param cloud: Whether this is a Confluence Cloud instance, defaults to True
    :type cloud: bool, optional
:param number_of_retries: How many times to retry, defaults to 3
:type number_of_retries: Optional[int], optional
    :param min_retry_seconds: Minimum seconds to wait between retries, defaults to 2
    :type min_retry_seconds: Optional[int], optional
    :param max_retry_seconds: Maximum seconds to wait between retries, defaults to 10
    :type max_retry_seconds: Optional[int], optional
:param confluence_kwargs: additional kwargs to initialize confluence with
:type confluence_kwargs: dict, optional
:raises ValueError: Errors while validating input
:raises ImportError: Required dependencies not installed.
"""
def __init__(
self,
url: str,
api_key: Optional[str] = None,
username: Optional[str] = None,
oauth2: Optional[dict] = None,
token: Optional[str] = None,
cloud: Optional[bool] = True,
number_of_retries: Optional[int] = 3,
min_retry_seconds: Optional[int] = 2,
max_retry_seconds: Optional[int] = 10,
confluence_kwargs: Optional[dict] = None,
):
confluence_kwargs = confluence_kwargs or {}
errors = ConfluenceLoader.validate_init_args(
url, api_key, username, oauth2, token
)
if errors:
raise ValueError(f"Error(s) while validating input: {errors}")
self.base_url = url
self.number_of_retries = number_of_retries
self.min_retry_seconds = min_retry_seconds
self.max_retry_seconds = max_retry_seconds
try:
from atlassian import Confluence # noqa: F401
except ImportError:
raise ImportError(
"`atlassian` package not found, please run "
"`pip install atlassian-python-api`"
)
if oauth2:
self.confluence = Confluence(
url=url, oauth2=oauth2, cloud=cloud, **confluence_kwargs
)
elif token:
self.confluence = Confluence(
url=url, token=token, cloud=cloud, **confluence_kwargs
)
else:
self.confluence = Confluence(
url=url,
username=username,
password=api_key,
cloud=cloud,
**confluence_kwargs,
)
[docs] @staticmethod
def validate_init_args(
url: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
oauth2: Optional[dict] = None,
token: Optional[str] = None,
) -> Union[List, None]:
"""Validates proper combinations of init arguments"""
errors = []
if url is None:
errors.append("Must provide `base_url`")
if (api_key and not username) or (username and not api_key):
errors.append(
"If one of `api_key` or `username` is provided, "
"the other must be as well."
)
if (api_key or username) and oauth2:
errors.append(
"Cannot provide a value for `api_key` and/or "
"`username` and provide a value for `oauth2`"
)
        if oauth2 and set(oauth2.keys()) != {
            "access_token",
            "access_token_secret",
            "consumer_key",
            "key_cert",
        }:
errors.append(
"You have either ommited require keys or added extra "
"keys to the oauth2 dictionary. key values should be "
"`['access_token', 'access_token_secret', 'consumer_key', 'key_cert']`"
)
if token and (api_key or username or oauth2):
errors.append(
"Cannot provide a value for `token` and a value for `api_key`, "
"`username` or `oauth2`"
)
if errors:
return errors
return None
[docs] def load(
self,
space_key: Optional[str] = None,
page_ids: Optional[List[str]] = None,
label: Optional[str] = None,
cql: Optional[str] = None,
include_restricted_content: bool = False,
include_archived_content: bool = False,
include_attachments: bool = False,
include_comments: bool = False,
content_format: ContentFormat = ContentFormat.STORAGE,
limit: Optional[int] = 50,
max_pages: Optional[int] = 1000,
ocr_languages: Optional[str] = None,
) -> List[Document]:
"""
:param space_key: Space key retrieved from a confluence URL, defaults to None
:type space_key: Optional[str], optional
:param page_ids: List of specific page IDs to load, defaults to None
:type page_ids: Optional[List[str]], optional
:param label: Get all pages with this label, defaults to None
:type label: Optional[str], optional
:param cql: CQL Expression, defaults to None
:type cql: Optional[str], optional
        :param include_restricted_content: Whether to include restricted content,
            defaults to False
:type include_restricted_content: bool, optional
:param include_archived_content: Whether to include archived content,
defaults to False
:type include_archived_content: bool, optional
        :param include_attachments: Whether to include attachments, defaults to False
        :type include_attachments: bool, optional
        :param include_comments: Whether to include comments, defaults to False
        :type include_comments: bool, optional
:param content_format: Specify content format, defaults to ContentFormat.STORAGE
:type content_format: ContentFormat
:param limit: Maximum number of pages to retrieve per request, defaults to 50
:type limit: int, optional
        :param max_pages: Maximum number of pages to retrieve in total, defaults to 1000
:type max_pages: int, optional
:param ocr_languages: The languages to use for the Tesseract agent. To use a
language, you'll first need to install the appropriate
Tesseract language pack.
:type ocr_languages: str, optional
        :raises ValueError: If none of `space_key`, `page_ids`, `label`, or `cql`
            is provided
        :raises ImportError: If a required dependency is not installed
        :return: The loaded documents
        :rtype: List[Document]
"""
if not space_key and not page_ids and not label and not cql:
raise ValueError(
"Must specify at least one among `space_key`, `page_ids`, "
"`label`, `cql` parameters."
)
docs = []
if space_key:
pages = self.paginate_request(
self.confluence.get_all_pages_from_space,
space=space_key,
limit=limit,
max_pages=max_pages,
status="any" if include_archived_content else "current",
expand=content_format.value,
)
docs += self.process_pages(
pages,
include_restricted_content,
include_attachments,
include_comments,
content_format,
ocr_languages,
)
if label:
pages = self.paginate_request(
self.confluence.get_all_pages_by_label,
label=label,
limit=limit,
max_pages=max_pages,
)
ids_by_label = [page["id"] for page in pages]
if page_ids:
page_ids = list(set(page_ids + ids_by_label))
else:
page_ids = list(set(ids_by_label))
if cql:
pages = self.paginate_request(
self._search_content_by_cql,
cql=cql,
limit=limit,
max_pages=max_pages,
include_archived_spaces=include_archived_content,
expand=content_format.value,
)
docs += self.process_pages(
pages,
include_restricted_content,
include_attachments,
include_comments,
content_format,
ocr_languages,
)
if page_ids:
for page_id in page_ids:
get_page = retry(
reraise=True,
stop=stop_after_attempt(
self.number_of_retries # type: ignore[arg-type]
),
wait=wait_exponential(
multiplier=1, # type: ignore[arg-type]
min=self.min_retry_seconds, # type: ignore[arg-type]
max=self.max_retry_seconds, # type: ignore[arg-type]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)(self.confluence.get_page_by_id)
page = get_page(page_id=page_id, expand=content_format.value)
if not include_restricted_content and not self.is_public_page(page):
continue
doc = self.process_page(
page,
include_attachments,
include_comments,
content_format,
ocr_languages,
)
docs.append(doc)
return docs
def _search_content_by_cql(
self, cql: str, include_archived_spaces: Optional[bool] = None, **kwargs: Any
) -> List[dict]:
url = "rest/api/content/search"
params: Dict[str, Any] = {"cql": cql}
params.update(kwargs)
if include_archived_spaces is not None:
params["includeArchivedSpaces"] = include_archived_spaces
response = self.confluence.get(url, params=params)
return response.get("results", [])
[docs] def paginate_request(self, retrieval_method: Callable, **kwargs: Any) -> List:
"""Paginate the various methods to retrieve groups of pages.
        Unfortunately, due to page size, sometimes the Confluence API
        doesn't match the limit value. If `limit` is >100, Confluence
        seems to cap the response at 100. Also, due to the Atlassian Python
        package, we don't get the "next" values from the "_links" key because
        it only returns the value from the results key. So here, the pagination
        starts from 0 and goes until max_pages, getting `limit` pages
        with each request. We have to manually check if there
        are more docs based on the length of the returned list of pages, rather than
just checking for the presence of a `next` key in the response like this page
would have you do:
https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/
:param retrieval_method: Function used to retrieve docs
:type retrieval_method: callable
:return: List of documents
:rtype: List
"""
max_pages = kwargs.pop("max_pages")
docs: List[dict] = []
while len(docs) < max_pages:
get_pages = retry(
reraise=True,
stop=stop_after_attempt(
self.number_of_retries # type: ignore[arg-type]
),
wait=wait_exponential(
multiplier=1,
min=self.min_retry_seconds, # type: ignore[arg-type]
max=self.max_retry_seconds, # type: ignore[arg-type]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)(retrieval_method)
batch = get_pages(**kwargs, start=len(docs))
if not batch:
break
docs.extend(batch)
return docs[:max_pages]
[docs] def is_public_page(self, page: dict) -> bool:
"""Check if a page is publicly accessible."""
restrictions = self.confluence.get_all_restrictions_for_content(page["id"])
return (
page["status"] == "current"
and not restrictions["read"]["restrictions"]["user"]["results"]
and not restrictions["read"]["restrictions"]["group"]["results"]
)
[docs] def process_pages(
self,
pages: List[dict],
include_restricted_content: bool,
include_attachments: bool,
include_comments: bool,
content_format: ContentFormat,
ocr_languages: Optional[str] = None,
) -> List[Document]:
"""Process a list of pages into a list of documents."""
docs = []
for page in pages:
if not include_restricted_content and not self.is_public_page(page):
continue
doc = self.process_page(
page,
include_attachments,
include_comments,
content_format,
ocr_languages,
)
docs.append(doc)
return docs
[docs] def process_page(
self,
page: dict,
include_attachments: bool,
include_comments: bool,
content_format: ContentFormat,
ocr_languages: Optional[str] = None,
) -> Document:
try:
from bs4 import BeautifulSoup # type: ignore
except ImportError:
raise ImportError(
"`beautifulsoup4` package not found, please run "
"`pip install beautifulsoup4`"
)
if include_attachments:
attachment_texts = self.process_attachment(page["id"], ocr_languages)
else:
attachment_texts = []
content = content_format.get_content(page)
text = BeautifulSoup(content, "lxml").get_text(" ", strip=True) + "".join(
attachment_texts
)
if include_comments:
comments = self.confluence.get_page_comments(
page["id"], expand="body.view.value", depth="all"
)["results"]
comment_texts = [
BeautifulSoup(comment["body"]["view"]["value"], "lxml").get_text(
" ", strip=True
)
for comment in comments
]
text = text + "".join(comment_texts)
return Document(
page_content=text,
metadata={
"title": page["title"],
"id": page["id"],
"source": self.base_url.strip("/") + page["_links"]["webui"],
},
)
[docs] def process_attachment(
self,
page_id: str,
ocr_languages: Optional[str] = None,
) -> List[str]:
try:
from PIL import Image # noqa: F401
except ImportError:
raise ImportError(
"`Pillow` package not found, " "please run `pip install Pillow`"
)
# depending on setup you may also need to set the correct path for
# poppler and tesseract
attachments = self.confluence.get_attachments_from_content(page_id)["results"]
texts = []
for attachment in attachments:
media_type = attachment["metadata"]["mediaType"]
absolute_url = self.base_url + attachment["_links"]["download"]
title = attachment["title"]
if media_type == "application/pdf":
text = title + self.process_pdf(absolute_url, ocr_languages)
elif (
media_type == "image/png"
or media_type == "image/jpg"
or media_type == "image/jpeg"
):
text = title + self.process_image(absolute_url, ocr_languages)
elif (
media_type == "application/vnd.openxmlformats-officedocument"
".wordprocessingml.document"
):
text = title + self.process_doc(absolute_url)
elif media_type == "application/vnd.ms-excel":
text = title + self.process_xls(absolute_url)
elif media_type == "image/svg+xml":
text = title + self.process_svg(absolute_url, ocr_languages)
else:
continue
texts.append(text)
return texts
[docs] def process_pdf(
self,
link: str,
ocr_languages: Optional[str] = None,
) -> str:
try:
import pytesseract # noqa: F401
from pdf2image import convert_from_bytes # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract` or `pdf2image` package not found, "
"please run `pip install pytesseract pdf2image`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
try:
images = convert_from_bytes(response.content)
except ValueError:
return text
for i, image in enumerate(images):
image_text = pytesseract.image_to_string(image, lang=ocr_languages)
text += f"Page {i + 1}:\n{image_text}\n\n"
return text
[docs] def process_image(
self,
link: str,
ocr_languages: Optional[str] = None,
) -> str:
try:
import pytesseract # noqa: F401
from PIL import Image # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract` or `Pillow` package not found, "
"please run `pip install pytesseract Pillow`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
try:
image = Image.open(BytesIO(response.content))
except OSError:
return text
return pytesseract.image_to_string(image, lang=ocr_languages)
[docs] def process_doc(self, link: str) -> str:
try:
import docx2txt # noqa: F401
except ImportError:
raise ImportError(
"`docx2txt` package not found, please run `pip install docx2txt`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
file_data = BytesIO(response.content)
return docx2txt.process(file_data)
[docs] def process_xls(self, link: str) -> str:
try:
import xlrd # noqa: F401
except ImportError:
raise ImportError("`xlrd` package not found, please run `pip install xlrd`")
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
workbook = xlrd.open_workbook(file_contents=response.content)
for sheet in workbook.sheets():
text += f"{sheet.name}:\n"
for row in range(sheet.nrows):
for col in range(sheet.ncols):
text += f"{sheet.cell_value(row, col)}\t"
text += "\n"
text += "\n"
return text
[docs] def process_svg(
self,
link: str,
ocr_languages: Optional[str] = None,
) -> str:
try:
import pytesseract # noqa: F401
from PIL import Image # noqa: F401
from reportlab.graphics import renderPM # noqa: F401
from svglib.svglib import svg2rlg # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract`, `Pillow`, `reportlab` or `svglib` package not found, "
"please run `pip install pytesseract Pillow reportlab svglib`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
drawing = svg2rlg(BytesIO(response.content))
img_data = BytesIO()
renderPM.drawToFile(drawing, img_data, fmt="PNG")
img_data.seek(0)
image = Image.open(img_data)
return pytesseract.image_to_string(image, lang=ocr_languages)
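A minimal usage sketch for ConfluenceLoader covering the page_ids path of load() (the site, credentials, and ids are placeholders):

from langchain.document_loaders import ConfluenceLoader
from langchain.document_loaders.confluence import ContentFormat

loader = ConfluenceLoader(
    url="https://yoursite.atlassian.com/wiki",
    username="me",
    api_key="12345",
)
docs = loader.load(
    page_ids=["123456"],
    content_format=ContentFormat.VIEW,  # rendered HTML instead of storage XML
    include_attachments=False,
    max_pages=100,
)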
Source code for langchain.document_loaders.onedrive_file
from __future__ import annotations
import tempfile
from typing import TYPE_CHECKING, List
from pydantic import BaseModel, Field
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
if TYPE_CHECKING:
from O365.drive import File
CHUNK_SIZE = 1024 * 1024 * 5
[docs]class OneDriveFileLoader(BaseLoader, BaseModel):
file: File = Field(...)
class Config:
arbitrary_types_allowed = True
[docs] def load(self) -> List[Document]:
"""Load Documents"""
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.file.name}"
self.file.download(to_path=temp_dir, chunk_size=CHUNK_SIZE)
loader = UnstructuredFileLoader(file_path)
return loader.load()
Source code for langchain.document_loaders.conllu
"""Load CoNLL-U files."""
import csv
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class CoNLLULoader(BaseLoader):
"""Load CoNLL-U files."""
def __init__(self, file_path: str):
"""Initialize with file path."""
self.file_path = file_path
[docs] def load(self) -> List[Document]:
"""Load from file path."""
with open(self.file_path, encoding="utf8") as f:
tsv = list(csv.reader(f, delimiter="\t"))
# If len(line) > 1, the line is not a comment
lines = [line for line in tsv if len(line) > 1]
text = ""
for i, line in enumerate(lines):
# Do not add a space after a punctuation mark or at the end of the sentence
if line[9] == "SpaceAfter=No" or i == len(lines) - 1:
text += line[1]
else:
text += line[1] + " "
metadata = {"source": self.file_path}
return [Document(page_content=text, metadata=metadata)]
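A short usage sketch (the file path is a placeholder; the file must contain standard 10-column CoNLL-U rows):

from langchain.document_loaders import CoNLLULoader

loader = CoNLLULoader("example.conllu")
docs = loader.load()
print(docs[0].page_content)   # the reconstructed sentence text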
Source code for langchain.document_loaders.psychic
"""Loader that loads documents from Psychic.dev."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class PsychicLoader(BaseLoader):
"""Loader that loads documents from Psychic.dev."""
def __init__(self, api_key: str, connector_id: str, connection_id: str):
"""Initialize with API key, connector id, and connection id."""
try:
from psychicapi import ConnectorId, Psychic # noqa: F401
except ImportError:
raise ImportError(
"`psychicapi` package not found, please run `pip install psychicapi`"
)
self.psychic = Psychic(secret_key=api_key)
self.connector_id = ConnectorId(connector_id)
self.connection_id = connection_id
[docs] def load(self) -> List[Document]:
"""Load documents."""
psychic_docs = self.psychic.get_documents(self.connector_id, self.connection_id)
return [
Document(
page_content=doc["content"],
metadata={"title": doc["title"], "source": doc["uri"]},
)
for doc in psychic_docs
]
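A hedged usage sketch (credentials are placeholders; the connector id must be a valid psychicapi ConnectorId value, "notion" is assumed here):

from langchain.document_loaders import PsychicLoader

loader = PsychicLoader(
    api_key="psychic-secret-key",   # placeholder credentials
    connector_id="notion",          # assumed ConnectorId value
    connection_id="my-connection",
)
docs = loader.load()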
Source code for langchain.document_loaders.notebook
"""Loader that loads .ipynb notebook files."""
import json
from pathlib import Path
from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
def concatenate_cells(
cell: dict, include_outputs: bool, max_output_length: int, traceback: bool
) -> str:
"""Combine cells information in a readable format ready to be used."""
cell_type = cell["cell_type"]
source = cell["source"]
output = cell["outputs"]
if include_outputs and cell_type == "code" and output:
if "ename" in output[0].keys():
error_name = output[0]["ename"]
error_value = output[0]["evalue"]
if traceback:
traceback = output[0]["traceback"]
return (
f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}',"
f" with description '{error_value}'\n"
f"and traceback '{traceback}'\n\n"
)
else:
return (
f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}',"
f"with description '{error_value}'\n\n"
)
elif output[0]["output_type"] == "stream":
output = output[0]["text"]
min_output = min(max_output_length, len(output))
return (
f"'{cell_type}' cell: '{source}'\n with "
f"output: '{output[:min_output]}'\n\n"
)
else:
return f"'{cell_type}' cell: '{source}'\n\n"
return ""
def remove_newlines(x: Any) -> Any:
"""Remove recursively newlines, no matter the data structure they are stored in."""
import pandas as pd
if isinstance(x, str):
return x.replace("\n", "")
elif isinstance(x, list):
return [remove_newlines(elem) for elem in x]
elif isinstance(x, pd.DataFrame):
return x.applymap(remove_newlines)
else:
return x
[docs]class NotebookLoader(BaseLoader):
"""Loader that loads .ipynb notebook files."""
def __init__(
self,
path: str,
include_outputs: bool = False,
max_output_length: int = 10,
remove_newline: bool = False,
traceback: bool = False,
):
"""Initialize with path."""
self.file_path = path
self.include_outputs = include_outputs
self.max_output_length = max_output_length
self.remove_newline = remove_newline
self.traceback = traceback
[docs] def load(
self,
) -> List[Document]:
"""Load documents."""
try:
import pandas as pd
except ImportError:
raise ImportError(
"pandas is needed for Notebook Loader, "
"please install with `pip install pandas`"
)
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
data = pd.json_normalize(d["cells"])
filtered_data = data[["cell_type", "source", "outputs"]]
if self.remove_newline:
filtered_data = filtered_data.applymap(remove_newlines)
text = filtered_data.apply(
lambda x: concatenate_cells(
x, self.include_outputs, self.max_output_length, self.traceback
),
axis=1,
).str.cat(sep=" ")
metadata = {"source": str(p)}
return [Document(page_content=text, metadata=metadata)]
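A minimal usage sketch for NotebookLoader (the notebook path is a placeholder):

from langchain.document_loaders import NotebookLoader

loader = NotebookLoader(
    "analysis.ipynb",
    include_outputs=True,     # append code cell outputs
    max_output_length=20,     # truncate long stream outputs
    remove_newline=True,
)
docs = loader.load()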
Source code for langchain.document_loaders.reddit
"""Reddit document loader."""
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import praw
def _dependable_praw_import() -> praw:
try:
import praw
except ImportError:
raise ValueError(
"praw package not found, please install it with `pip install praw`"
)
return praw
[docs]class RedditPostsLoader(BaseLoader):
"""Reddit posts loader.
Read posts on a subreddit.
    First, you need to go to
    https://www.reddit.com/prefs/apps/
    and create your application.
    """
def __init__(
self,
client_id: str,
client_secret: str,
user_agent: str,
search_queries: Sequence[str],
mode: str,
categories: Sequence[str] = ["new"],
number_posts: Optional[int] = 10,
):
self.client_id = client_id
self.client_secret = client_secret
self.user_agent = user_agent
self.search_queries = search_queries
self.mode = mode
self.categories = categories
self.number_posts = number_posts
[docs] def load(self) -> List[Document]:
"""Load reddits."""
praw = _dependable_praw_import()
reddit = praw.Reddit(
client_id=self.client_id,
client_secret=self.client_secret,
user_agent=self.user_agent,
)
results: List[Document] = []
if self.mode == "subreddit":
for search_query in self.search_queries:
for category in self.categories:
docs = self._subreddit_posts_loader(
search_query=search_query, category=category, reddit=reddit
)
results.extend(docs)
elif self.mode == "username":
for search_query in self.search_queries:
for category in self.categories:
docs = self._user_posts_loader(
search_query=search_query, category=category, reddit=reddit
)
results.extend(docs)
else:
raise ValueError(
"mode not correct, please enter 'username' or 'subreddit' as mode"
)
return results
def _subreddit_posts_loader(
self, search_query: str, category: str, reddit: praw.reddit.Reddit
) -> Iterable[Document]:
subreddit = reddit.subreddit(search_query)
method = getattr(subreddit, category)
cat_posts = method(limit=self.number_posts)
"""Format reddit posts into a string."""
for post in cat_posts:
metadata = {
"post_subreddit": post.subreddit_name_prefixed,
"post_category": category,
"post_title": post.title,
"post_score": post.score,
"post_id": post.id,
"post_url": post.url,
"post_author": post.author,
}
yield Document(
page_content=post.selftext,
metadata=metadata,
)
def _user_posts_loader(
self, search_query: str, category: str, reddit: praw.reddit.Reddit
) -> Iterable[Document]:
user = reddit.redditor(search_query)
method = getattr(user.submissions, category)
cat_posts = method(limit=self.number_posts)
"""Format reddit posts into a string."""
for post in cat_posts:
metadata = {
"post_subreddit": post.subreddit_name_prefixed,
"post_category": category,
"post_title": post.title,
"post_score": post.score,
"post_id": post.id,
"post_url": post.url,
"post_author": post.author,
}
yield Document(
page_content=post.selftext,
metadata=metadata,
)
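A usage sketch for RedditPostsLoader (app credentials are placeholders; in "subreddit" mode, search_queries are subreddit names):

from langchain.document_loaders import RedditPostsLoader

loader = RedditPostsLoader(
    client_id="YOUR_CLIENT_ID",
    client_secret="YOUR_CLIENT_SECRET",
    user_agent="extractor by u/yourname",
    search_queries=["investing"],
    mode="subreddit",
    categories=["new", "hot"],
    number_posts=10,
)
docs = loader.load()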
Source code for langchain.document_loaders.iugu
"""Loader that fetches data from IUGU"""
import json
import urllib.request
from typing import List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env, stringify_dict
IUGU_ENDPOINTS = {
"invoices": "https://api.iugu.com/v1/invoices",
"customers": "https://api.iugu.com/v1/customers",
"charges": "https://api.iugu.com/v1/charges",
"subscriptions": "https://api.iugu.com/v1/subscriptions",
"plans": "https://api.iugu.com/v1/plans",
}
[docs]class IuguLoader(BaseLoader):
def __init__(self, resource: str, api_token: Optional[str] = None) -> None:
self.resource = resource
api_token = api_token or get_from_env("api_token", "IUGU_API_TOKEN")
self.headers = {"Authorization": f"Bearer {api_token}"}
def _make_request(self, url: str) -> List[Document]:
request = urllib.request.Request(url, headers=self.headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
text = stringify_dict(json_data)
metadata = {"source": url}
return [Document(page_content=text, metadata=metadata)]
def _get_resource(self) -> List[Document]:
endpoint = IUGU_ENDPOINTS.get(self.resource)
if endpoint is None:
return []
return self._make_request(endpoint)
[docs] def load(self) -> List[Document]:
return self._get_resource()
Source code for langchain.document_loaders.powerpoint
"""Loader that loads powerpoint files."""
import os
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class UnstructuredPowerPointLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load powerpoint files."""
def _get_elements(self) -> List:
from unstructured.__version__ import __version__ as __unstructured_version__
from unstructured.file_utils.filetype import FileType, detect_filetype
unstructured_version = tuple(
[int(x) for x in __unstructured_version__.split(".")]
)
# NOTE(MthwRobinson) - magic will raise an import error if the libmagic
# system dependency isn't installed. If it's not installed, we'll just
# check the file extension
try:
import magic # noqa: F401
is_ppt = detect_filetype(self.file_path) == FileType.PPT
except ImportError:
_, extension = os.path.splitext(str(self.file_path))
is_ppt = extension == ".ppt"
if is_ppt and unstructured_version < (0, 4, 11):
raise ValueError(
f"You are on unstructured version {__unstructured_version__}. "
"Partitioning .ppt files is only supported in unstructured>=0.4.11. "
"Please upgrade the unstructured package and try again."
)
if is_ppt:
from unstructured.partition.ppt import partition_ppt
return partition_ppt(filename=self.file_path, **self.unstructured_kwargs)
else:
from unstructured.partition.pptx import partition_pptx
return partition_pptx(filename=self.file_path, **self.unstructured_kwargs)
Source code for langchain.document_loaders.mastodon
"""Mastodon document loader."""
from __future__ import annotations
import os
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import mastodon
def _dependable_mastodon_import() -> mastodon:
try:
import mastodon
except ImportError:
raise ValueError(
"Mastodon.py package not found, "
"please install it with `pip install Mastodon.py`"
)
return mastodon
[docs]class MastodonTootsLoader(BaseLoader):
"""Mastodon toots loader."""
def __init__(
self,
mastodon_accounts: Sequence[str],
number_toots: Optional[int] = 100,
exclude_replies: bool = False,
access_token: Optional[str] = None,
api_base_url: str = "https://mastodon.social",
):
"""Instantiate Mastodon toots loader.
Args:
mastodon_accounts: The list of Mastodon accounts to query.
number_toots: How many toots to pull for each account.
exclude_replies: Whether to exclude reply toots from the load.
access_token: An access token if toots are loaded as a Mastodon app. Can
            also be specified via the environment variable "MASTODON_ACCESS_TOKEN".
api_base_url: A Mastodon API base URL to talk to, if not using the default.
"""
mastodon = _dependable_mastodon_import()
access_token = access_token or os.environ.get("MASTODON_ACCESS_TOKEN")
self.api = mastodon.Mastodon(
access_token=access_token, api_base_url=api_base_url
)
self.mastodon_accounts = mastodon_accounts
self.number_toots = number_toots
self.exclude_replies = exclude_replies
[docs] def load(self) -> List[Document]:
"""Load toots into documents."""
results: List[Document] = []
for account in self.mastodon_accounts:
user = self.api.account_lookup(account)
toots = self.api.account_statuses(
user.id,
only_media=False,
pinned=False,
exclude_replies=self.exclude_replies,
exclude_reblogs=True,
limit=self.number_toots,
)
docs = self._format_toots(toots, user)
results.extend(docs)
return results
def _format_toots(
self, toots: List[Dict[str, Any]], user_info: dict
) -> Iterable[Document]:
"""Format toots into documents.
Adding user info, and selected toot fields into the metadata.
"""
for toot in toots:
metadata = {
"created_at": toot["created_at"],
"user_info": user_info,
"is_reply": toot["in_reply_to_id"] is not None,
}
yield Document(
page_content=toot["content"],
metadata=metadata,
)
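A minimal usage sketch (the account handle is a placeholder; for public toots an access token may not be required):

from langchain.document_loaders import MastodonTootsLoader

loader = MastodonTootsLoader(
    mastodon_accounts=["@Gargron@mastodon.social"],
    number_toots=50,
)
docs = loader.load()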
Source code for langchain.document_loaders.facebook_chat
"""Loader that loads Facebook chat json dump."""
import datetime
import json
from pathlib import Path
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
def concatenate_rows(row: dict) -> str:
"""Combine message information in a readable format ready to be used."""
sender = row["sender_name"]
text = row["content"]
date = datetime.datetime.fromtimestamp(row["timestamp_ms"] / 1000).strftime(
"%Y-%m-%d %H:%M:%S"
)
return f"{sender} on {date}: {text}\n\n"
[docs]class FacebookChatLoader(BaseLoader):
"""Loader that loads Facebook messages json directory dump."""
def __init__(self, path: str):
"""Initialize with path."""
self.file_path = path
[docs] def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
text = "".join(
concatenate_rows(message)
for message in d["messages"]
if message.get("content") and isinstance(message["content"], str)
)
metadata = {"source": str(p)}
return [Document(page_content=text, metadata=metadata)]
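A short usage sketch (the path is a placeholder pointing at one message_*.json file from a Facebook data export):

from langchain.document_loaders import FacebookChatLoader

loader = FacebookChatLoader("path/to/message_1.json")
docs = loader.load()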
Source code for langchain.document_loaders.tomarkdown
"""Loader that loads HTML to markdown using 2markdown."""
from __future__ import annotations
from typing import Iterator, List
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class ToMarkdownLoader(BaseLoader):
"""Loader that loads HTML to markdown using 2markdown."""
def __init__(self, url: str, api_key: str):
"""Initialize with url and api key."""
self.url = url
self.api_key = api_key
[docs] def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily load the file."""
response = requests.post(
"https://2markdown.com/api/2md",
headers={"X-Api-Key": self.api_key},
json={"url": self.url},
)
text = response.json()["article"]
metadata = {"source": self.url}
yield Document(page_content=text, metadata=metadata)
[docs] def load(self) -> List[Document]:
"""Load file."""
return list(self.lazy_load())
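A minimal usage sketch (the URL and API key are placeholders):

from langchain.document_loaders import ToMarkdownLoader

loader = ToMarkdownLoader(
    url="https://www.example.com",
    api_key="2markdown-api-key",
)
docs = loader.load()    # one Document whose page_content is markdown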
Source code for langchain.document_loaders.markdown
"""Loader that loads Markdown files."""
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class UnstructuredMarkdownLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load markdown files."""
def _get_elements(self) -> List:
from unstructured.__version__ import __version__ as __unstructured_version__
from unstructured.partition.md import partition_md
# NOTE(MthwRobinson) - enables the loader to work when you're using pre-release
# versions of unstructured like 0.4.17-dev1
_unstructured_version = __unstructured_version__.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
if unstructured_version < (0, 4, 16):
raise ValueError(
f"You are on unstructured version {__unstructured_version__}. "
"Partitioning markdown files is only supported in unstructured>=0.4.16."
)
return partition_md(filename=self.file_path, **self.unstructured_kwargs)
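A short usage sketch (the file path is a placeholder; requires unstructured>=0.4.16):

from langchain.document_loaders import UnstructuredMarkdownLoader

loader = UnstructuredMarkdownLoader("README.md", mode="elements")
docs = loader.load()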
Source code for langchain.document_loaders.image_captions
"""
Loader that loads image captions
By default, the loader utilizes the pre-trained BLIP image captioning model.
https://huggingface.co/Salesforce/blip-image-captioning-base
"""
from typing import Any, List, Tuple, Union
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class ImageCaptionLoader(BaseLoader):
"""Loader that loads the captions of an image"""
def __init__(
self,
path_images: Union[str, List[str]],
blip_processor: str = "Salesforce/blip-image-captioning-base",
blip_model: str = "Salesforce/blip-image-captioning-base",
):
"""
Initialize with a list of image paths
"""
if isinstance(path_images, str):
self.image_paths = [path_images]
else:
self.image_paths = path_images
self.blip_processor = blip_processor
self.blip_model = blip_model
[docs] def load(self) -> List[Document]:
"""
Load from a list of image files
"""
try:
from transformers import BlipForConditionalGeneration, BlipProcessor
except ImportError:
raise ImportError(
"`transformers` package not found, please install with "
"`pip install transformers`."
)
processor = BlipProcessor.from_pretrained(self.blip_processor)
model = BlipForConditionalGeneration.from_pretrained(self.blip_model)
results = []
for path_image in self.image_paths:
caption, metadata = self._get_captions_and_metadata(
model=model, processor=processor, path_image=path_image
)
doc = Document(page_content=caption, metadata=metadata)
results.append(doc)
return results
def _get_captions_and_metadata(
self, model: Any, processor: Any, path_image: str
) -> Tuple[str, dict]:
"""
Helper function for getting the captions and metadata of an image
"""
try:
from PIL import Image
except ImportError:
raise ImportError(
"`PIL` package not found, please install with `pip install pillow`"
)
try:
if path_image.startswith("http://") or path_image.startswith("https://"):
image = Image.open(requests.get(path_image, stream=True).raw).convert(
"RGB"
)
else:
image = Image.open(path_image).convert("RGB")
except Exception:
raise ValueError(f"Could not get image data for {path_image}")
inputs = processor(image, "an image of", return_tensors="pt")
output = model.generate(**inputs)
caption: str = processor.decode(output[0])
metadata: dict = {"image_path": path_image}
return caption, metadata
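A minimal usage sketch; it requires the transformers and pillow packages, and the image path below is a placeholder. The default BLIP checkpoint is downloaded on first use.
from langchain.document_loaders.image_captions import ImageCaptionLoader

# Hypothetical local image path; URLs starting with http(s):// also work.
loader = ImageCaptionLoader(path_images=["photos/cat.png"])
docs = loader.load()  # one Document per image; page_content is the caption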
Source code for langchain.document_loaders.html
"""Loader that uses unstructured to load HTML files."""
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class UnstructuredHTMLLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load HTML files."""
def _get_elements(self) -> List:
from unstructured.partition.html import partition_html
return partition_html(filename=self.file_path, **self.unstructured_kwargs)
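A minimal usage sketch; it requires the unstructured package, and the file path is a placeholder.
from langchain.document_loaders.html import UnstructuredHTMLLoader

loader = UnstructuredHTMLLoader("page.html")  # placeholder path
docs = loader.load()  # one Document in the default "single" mode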
Source code for langchain.document_loaders.github
from abc import ABC
from datetime import datetime
from typing import Dict, Iterator, List, Literal, Optional, Union
import requests
from pydantic import BaseModel, root_validator, validator
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_dict_or_env
class BaseGitHubLoader(BaseLoader, BaseModel, ABC):
"""Load issues of a GitHub repository."""
repo: str
"""Name of repository"""
access_token: str
"""Personal access token - see https://github.com/settings/tokens?type=beta"""
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that access token exists in environment."""
values["access_token"] = get_from_dict_or_env(
values, "access_token", "GITHUB_PERSONAL_ACCESS_TOKEN"
)
return values
@property
def headers(self) -> Dict[str, str]:
return {
"Accept": "application/vnd.github+json",
"Authorization": f"Bearer {self.access_token}",
}
[docs]class GitHubIssuesLoader(BaseGitHubLoader):
include_prs: bool = True
"""If True include Pull Requests in results, otherwise ignore them."""
milestone: Union[int, Literal["*", "none"], None] = None
"""If integer is passed, it should be a milestone's number field.
If the string '*' is passed, issues with any milestone are accepted.
If the string 'none' is passed, issues without milestones are returned.
"""
state: Optional[Literal["open", "closed", "all"]] = None
"""Filter on issue state. Can be one of: 'open', 'closed', 'all'."""
assignee: Optional[str] = None
"""Filter on assigned user. Pass 'none' for no user and '*' for any user."""
creator: Optional[str] = None
"""Filter on the user that created the issue."""
mentioned: Optional[str] = None
"""Filter on a user that's mentioned in the issue."""
labels: Optional[List[str]] = None
"""Label names to filter one. Example: bug,ui,@high."""
sort: Optional[Literal["created", "updated", "comments"]] = None
"""What to sort results by. Can be one of: 'created', 'updated', 'comments'.
Default is 'created'."""
direction: Optional[Literal["asc", "desc"]] = None
"""The direction to sort the results by. Can be one of: 'asc', 'desc'."""
since: Optional[str] = None
"""Only show notifications updated after the given time.
This is a timestamp in ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ."""
@validator("since")
def validate_since(cls, v: Optional[str]) -> Optional[str]:
if v:
try:
datetime.strptime(v, "%Y-%m-%dT%H:%M:%SZ")
except ValueError:
raise ValueError(
"Invalid value for 'since'. Expected a date string in "
f"YYYY-MM-DDTHH:MM:SSZ format. Received: {v}"
)
return v
[docs] def lazy_load(self) -> Iterator[Document]:
"""
Get issues of a GitHub repository.
Returns:
An iterator over Documents with attributes:
- page_content
- metadata
- url
- title
- creator
- created_at
- last_update_time
- closed_time
- number of comments
- state
- labels
- assignee
- assignees
- milestone
- locked
- number
- is_pull_request
"""
url: Optional[str] = self.url
while url:
response = requests.get(url, headers=self.headers)
response.raise_for_status()
issues = response.json()
for issue in issues:
doc = self.parse_issue(issue)
if not self.include_prs and doc.metadata["is_pull_request"]:
continue
yield doc
if response.links and response.links.get("next"):
url = response.links["next"]["url"]
else:
url = None
[docs] def load(self) -> List[Document]:
"""
Get issues of a GitHub repository.
Returns:
A list of Documents with attributes:
- page_content
- metadata
- url
- title
- creator
- created_at
- last_update_time
- closed_time
- number of comments
- state
- labels
- assignee
- assignees
- milestone
- locked
- number
- is_pull_request
"""
return list(self.lazy_load())
[docs] def parse_issue(self, issue: dict) -> Document:
"""Create Document objects from a list of GitHub issues."""
metadata = {
"url": issue["html_url"],
"title": issue["title"],
"creator": issue["user"]["login"],
"created_at": issue["created_at"],
"comments": issue["comments"],
"state": issue["state"],
"labels": [label["name"] for label in issue["labels"]],
"assignee": issue["assignee"]["login"] if issue["assignee"] else None,
"milestone": issue["milestone"]["title"] if issue["milestone"] else None,
"locked": issue["locked"],
"number": issue["number"],
"is_pull_request": "pull_request" in issue,
}
content = issue["body"] if issue["body"] is not None else ""
return Document(page_content=content, metadata=metadata)
@property
def query_params(self) -> str:
labels = ",".join(self.labels) if self.labels else self.labels
query_params_dict = {
"milestone": self.milestone,
"state": self.state,
"assignee": self.assignee,
"creator": self.creator,
"mentioned": self.mentioned,
"labels": labels,
"sort": self.sort,
"direction": self.direction,
"since": self.since,
}
query_params_list = [
f"{k}={v}" for k, v in query_params_dict.items() if v is not None
]
query_params = "&".join(query_params_list)
return query_params
@property
def url(self) -> str:
return f"https://api.github.com/repos/{self.repo}/issues?{self.query_params}"
Source code for langchain.document_loaders.embaas
import base64
import warnings
from typing import Any, Dict, Iterator, List, Optional
import requests
from pydantic import BaseModel, root_validator, validator
from typing_extensions import NotRequired, TypedDict
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseBlobParser, BaseLoader
from langchain.document_loaders.blob_loaders import Blob
from langchain.text_splitter import TextSplitter
from langchain.utils import get_from_dict_or_env
EMBAAS_DOC_API_URL = "https://api.embaas.io/v1/document/extract-text/bytes/"
class EmbaasDocumentExtractionParameters(TypedDict):
"""Parameters for the embaas document extraction API."""
mime_type: NotRequired[str]
"""The mime type of the document."""
file_extension: NotRequired[str]
"""The file extension of the document."""
file_name: NotRequired[str]
"""The file name of the document."""
should_chunk: NotRequired[bool]
"""Whether to chunk the document into pages."""
chunk_size: NotRequired[int]
"""The maximum size of the text chunks."""
chunk_overlap: NotRequired[int]
"""The maximum overlap allowed between chunks."""
chunk_splitter: NotRequired[str]
"""The text splitter class name for creating chunks."""
separators: NotRequired[List[str]]
"""The separators for chunks."""
should_embed: NotRequired[bool]
"""Whether to create embeddings for the document in the response."""
model: NotRequired[str]
"""The model to pass to the Embaas document extraction API."""
instruction: NotRequired[str]
"""The instruction to pass to the Embaas document extraction API."""
class EmbaasDocumentExtractionPayload(EmbaasDocumentExtractionParameters):
bytes: str
"""The base64 encoded bytes of the document to extract text from."""
class BaseEmbaasLoader(BaseModel):
embaas_api_key: Optional[str] = None
api_url: str = EMBAAS_DOC_API_URL
"""The URL of the embaas document extraction API."""
params: EmbaasDocumentExtractionParameters = EmbaasDocumentExtractionParameters()
"""Additional parameters to pass to the embaas document extraction API."""
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
embaas_api_key = get_from_dict_or_env(
values, "embaas_api_key", "EMBAAS_API_KEY"
)
values["embaas_api_key"] = embaas_api_key
return values
[docs]class EmbaasBlobLoader(BaseEmbaasLoader, BaseBlobParser):
"""Wrapper around embaas's document byte loader service.
To use, you should have the
environment variable ``EMBAAS_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
# Default parsing
from langchain.document_loaders.embaas import EmbaasBlobLoader
loader = EmbaasBlobLoader()
blob = Blob.from_path(path="example.mp3")
documents = loader.parse(blob=blob)
# Custom api parameters (create embeddings automatically)
from langchain.document_loaders.embaas import EmbaasBlobLoader
loader = EmbaasBlobLoader(
params={
"should_embed": True,
"model": "e5-large-v2",
"chunk_size": 256,
"chunk_splitter": "CharacterTextSplitter"
}
)
blob = Blob.from_path(path="example.pdf")
documents = loader.parse(blob=blob)
"""
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
yield from self._get_documents(blob=blob)
@staticmethod
def _api_response_to_documents(chunks: List[Dict[str, Any]]) -> List[Document]:
"""Convert the API response to a list of documents."""
docs = []
for chunk in chunks:
metadata = chunk["metadata"]
if chunk.get("embedding", None) is not None:
metadata["embedding"] = chunk["embedding"]
doc = Document(page_content=chunk["text"], metadata=metadata)
docs.append(doc)
return docs
def _generate_payload(self, blob: Blob) -> EmbaasDocumentExtractionPayload:
"""Generates payload for the API request."""
base64_byte_str = base64.b64encode(blob.as_bytes()).decode()
payload: EmbaasDocumentExtractionPayload = EmbaasDocumentExtractionPayload(
bytes=base64_byte_str,
# Workaround for mypy issue: https://github.com/python/mypy/issues/9408
**self.params,  # type: ignore
)
if blob.mimetype is not None and payload.get("mime_type", None) is None:
payload["mime_type"] = blob.mimetype
return payload