id
stringlengths 14
16
| text
stringlengths 31
2.41k
| source
stringlengths 53
121
|
---|---|---|
cee2f501b181-0 | Source code for langchain.utilities.bash
"""Wrapper around subprocess to run commands."""
from __future__ import annotations
import platform
import re
import subprocess
from typing import TYPE_CHECKING, List, Union
from uuid import uuid4
if TYPE_CHECKING:
import pexpect
def _lazy_import_pexpect() -> pexpect:
    """Import pexpect only when needed.

    The import is deferred because ``pexpect`` is an optional dependency,
    required only when a persistent bash process is requested.

    Raises:
        ValueError: on Windows, where pexpect-backed persistent shells
            are not supported.
        ImportError: if the ``pexpect`` package is not installed.
    """
    if platform.system() == "Windows":
        raise ValueError("Persistent bash processes are not yet supported on Windows.")
    try:
        import pexpect
    except ImportError:
        raise ImportError(
            "pexpect required for persistent bash processes."
            " To install, run `pip install pexpect`."
        )
    return pexpect
class BashProcess:
    """Executes bash commands and returns the output."""

    def __init__(
        self,
        strip_newlines: bool = False,
        return_err_output: bool = False,
        persistent: bool = False,
    ):
        """Initialize with stripping newlines.

        Args:
            strip_newlines: strip leading/trailing whitespace from output.
            return_err_output: on failure, return the process's combined
                stdout/stderr instead of the error's string form.
            persistent: keep a single long-lived bash session between calls.
        """
        self.strip_newlines = strip_newlines
        self.return_err_output = return_err_output
        self.prompt = ""
        self.process = None
        if not persistent:
            return
        # A random UUID makes the shell prompt unambiguous to match on.
        self.prompt = str(uuid4())
        self.process = self._initialize_persistent_process(self.prompt)

    @staticmethod
    def _initialize_persistent_process(prompt: str) -> pexpect.spawn:
        """Spawn a clean persistent bash shell with a unique prompt.

        Does not work on Windows (pexpect limitation).
        """
        pexpect = _lazy_import_pexpect()
        # ``env -i`` gives bash an empty environment; --norc/--noprofile
        # skip startup files so the session behaves predictably.
        shell = pexpect.spawn(
            "env", ["-i", "bash", "--norc", "--noprofile"], encoding="utf-8"
        )
        # Install the custom prompt, then wait for bash to echo it back.
        shell.sendline("PS1=" + prompt)
        shell.expect_exact(prompt, timeout=10)
        return shell

    def run(self, commands: Union[str, List[str]]) -> str:
        """Run commands and return final output.

        A list of commands is joined with ``;`` and executed as one line.
        """
        command_line = commands if isinstance(commands, str) else ";".join(commands)
        if self.process is None:
            return self._run(command_line)
        return self._run_persistent(command_line)

    def _run(self, command: str) -> str:
        """Execute ``command`` in a one-off subshell and return its output."""
        try:
            completed = subprocess.run(
                command,
                shell=True,
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )
        except subprocess.CalledProcessError as error:
            # Non-zero exit status: either surface the captured output or
            # the exception's string form, depending on configuration.
            return error.stdout.decode() if self.return_err_output else str(error)
        text = completed.stdout.decode()
        return text.strip() if self.strip_newlines else text

    def process_output(self, output: str, command: str) -> str:
        """Strip the echoed ``command`` (and surrounding whitespace) from output."""
        echoed_command = re.escape(command) + r"\s*\n"
        return re.sub(echoed_command, "", output, count=1).strip()

    def _run_persistent(self, command: str) -> str:
        """Run ``command`` inside the long-lived bash session."""
        pexpect = _lazy_import_pexpect()
        if self.process is None:
            raise ValueError("Process not initialized")
        self.process.sendline(command)
        # Consume up to the next prompt, then send an empty line so a fresh
        # prompt cleanly delimits the command's output.
        self.process.expect(self.prompt, timeout=10)
        self.process.sendline("")
        try:
            self.process.expect([self.prompt, pexpect.EOF], timeout=10)
        except pexpect.TIMEOUT:
            return f"Timeout error while executing command {command}"
        if self.process.after == pexpect.EOF:
            return f"Exited with error status: {self.process.exitstatus}"
        cleaned = self.process_output(self.process.before, command)
        return cleaned.strip() if self.strip_newlines else cleaned
fc7d36900803-0 | Source code for langchain.utilities.brave_search
import json
import requests
from pydantic import BaseModel, Field
class BraveSearchWrapper(BaseModel):
    """Wrapper around the Brave web-search REST API."""

    # Brave subscription token used to authenticate requests.
    api_key: str
    # Extra query-string parameters forwarded to the search endpoint.
    search_kwargs: dict = Field(default_factory=dict)

    def run(self, query: str) -> str:
        """Search Brave for ``query`` and return the results as a JSON string.

        Returns a JSON array of ``{"title", "link", "snippet"}`` objects.

        Raises:
            ValueError: if the prepared request URL is unexpectedly None.
            Exception: on any non-2xx/3xx HTTP response.
        """
        headers = {
            "X-Subscription-Token": self.api_key,
            "Accept": "application/json",
        }
        base_url = "https://api.search.brave.com/res/v1/web/search"
        # Let requests handle the query-string encoding for us.
        prepared = requests.PreparedRequest()
        params = {**self.search_kwargs, **{"q": query}}
        prepared.prepare_url(base_url, params)
        if prepared.url is None:
            raise ValueError("prepared url is None, this should not happen")

        response = requests.get(prepared.url, headers=headers)
        if not response.ok:
            raise Exception(f"HTTP error {response.status_code}")

        payload = response.json()
        raw_results = payload.get("web", {}).get("results", [])
        cleaned_results: list = []
        if isinstance(raw_results, list):
            cleaned_results = [
                {
                    "title": item.get("title"),
                    "link": item.get("url"),
                    "snippet": item.get("description"),
                }
                for item in raw_results
            ]
        return json.dumps(cleaned_results)
a46e4d1fecb1-0 | Source code for langchain.utilities.wikipedia
"""Util that calls Wikipedia."""
import logging
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
WIKIPEDIA_MAX_QUERY_LENGTH = 300
class WikipediaAPIWrapper(BaseModel):
    """Wrapper around WikipediaAPI.

    To use, you should have the ``wikipedia`` python package installed.
    This wrapper will use the Wikipedia API to conduct searches and
    fetch page summaries. By default, it will return the page summaries
    of the top-k results.
    It limits the Document content by doc_content_chars_max.
    """

    wiki_client: Any  #: :meta private:
    # Number of top search hits to fetch and summarize.
    top_k_results: int = 3
    # Wikipedia language edition to query.
    lang: str = "en"
    # When True, include the full set of page metadata in Documents.
    load_all_available_meta: bool = False
    # Hard cap on returned text length (characters).
    doc_content_chars_max: int = 4000

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the python package exists in environment."""
        try:
            import wikipedia

            wikipedia.set_lang(values["lang"])
            values["wiki_client"] = wikipedia
        except ImportError:
            raise ImportError(
                "Could not import wikipedia python package. "
                "Please install it with `pip install wikipedia`."
            )
        return values

    def run(self, query: str) -> str:
        """Run Wikipedia search and get page summaries."""
        # Wikipedia rejects overly long queries, so truncate defensively.
        page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH])
        summaries = []
        for page_title in page_titles[: self.top_k_results]:
            if wiki_page := self._fetch_page(page_title):
                if summary := self._formatted_page_summary(page_title, wiki_page):
                    summaries.append(summary)
        if not summaries:
            return "No good Wikipedia Search Result was found"
        return "\n\n".join(summaries)[: self.doc_content_chars_max]

    @staticmethod
    def _formatted_page_summary(page_title: str, wiki_page: Any) -> Optional[str]:
        """Render one page as a 'Page: .../Summary: ...' text block."""
        return f"Page: {page_title}\nSummary: {wiki_page.summary}"

    def _page_to_document(self, page_title: str, wiki_page: Any) -> Document:
        """Convert a wikipedia page object into a langchain Document."""
        main_meta = {
            "title": page_title,
            "summary": wiki_page.summary,
            "source": wiki_page.url,
        }
        add_meta = (
            {
                "categories": wiki_page.categories,
                "page_url": wiki_page.url,
                "image_urls": wiki_page.images,
                "related_titles": wiki_page.links,
                "parent_id": wiki_page.parent_id,
                "references": wiki_page.references,
                "revision_id": wiki_page.revision_id,
                "sections": wiki_page.sections,
            }
            if self.load_all_available_meta
            else {}
        )
        doc = Document(
            page_content=wiki_page.content[: self.doc_content_chars_max],
            metadata={
                **main_meta,
                **add_meta,
            },
        )
        return doc

    # BUG FIX: the return annotation was ``Optional[str]``, but this method
    # returns the wikipedia page *object* (callers access ``.summary`` and
    # ``.content`` on it), not a string.
    def _fetch_page(self, page: str) -> Optional[Any]:
        """Fetch the wikipedia page object for ``page``.

        Returns None when the page is missing or the title is ambiguous.
        """
        try:
            return self.wiki_client.page(title=page, auto_suggest=False)
        except (
            self.wiki_client.exceptions.PageError,
            self.wiki_client.exceptions.DisambiguationError,
        ):
            return None

    def load(self, query: str) -> List[Document]:
        """Run Wikipedia search and get the article text plus the meta information.

        Returns: a list of documents.
        """
        page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH])
        docs = []
        for page_title in page_titles[: self.top_k_results]:
            if wiki_page := self._fetch_page(page_title):
                if doc := self._page_to_document(page_title, wiki_page):
                    docs.append(doc)
        return docs
fa84a49ec24e-0 | Source code for langchain.utilities.zapier
"""Util that can interact with Zapier NLA.
Full docs here: https://nla.zapier.com/start/
Note: this wrapper currently only implemented the `api_key` auth method for testing
and server-side production use cases (using the developer's connected accounts on
Zapier.com)
For use-cases where LangChain + Zapier NLA is powering a user-facing application, and
LangChain needs access to the end-user's connected accounts on Zapier.com, you'll need
to use oauth. Review the full docs above and reach out to nla@zapier.com for
developer support.
"""
import json
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from pydantic import BaseModel, Extra, root_validator
from requests import Request, Session
from langchain.utils import get_from_dict_or_env
class ZapierNLAWrapper(BaseModel):
    """Wrapper for Zapier NLA.

    Full docs here: https://nla.zapier.com/start/

    This wrapper supports both API Key and OAuth Credential auth methods. API Key
    is the fastest way to get started using this wrapper.

    Call this wrapper with either `zapier_nla_api_key` or
    `zapier_nla_oauth_access_token` arguments, or set the `ZAPIER_NLA_API_KEY`
    environment variable. If both arguments are set, the Access Token will take
    precedence.

    For use-cases where LangChain + Zapier NLA is powering a user-facing application,
    and LangChain needs access to the end-user's connected accounts on Zapier.com,
    you'll need to use OAuth. Review the full docs above to learn how to create
    your own provider and generate credentials.
    """

    # Filled in by ``validate_environment`` from args or the environment.
    zapier_nla_api_key: str
    # Set to "" by the validator when OAuth is not used.
    zapier_nla_oauth_access_token: str
    zapier_nla_api_base: str = "https://nla.zapier.com/api/v1/"

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def _format_headers(self) -> Dict[str, str]:
        """Format headers for requests.

        Prefers the OAuth bearer token; falls back to the API key header.
        """
        headers = {
            "Accept": "application/json",
            "Content-Type": "application/json",
        }
        if self.zapier_nla_oauth_access_token:
            headers.update(
                {"Authorization": f"Bearer {self.zapier_nla_oauth_access_token}"}
            )
        else:
            headers.update({"X-API-Key": self.zapier_nla_api_key})
        return headers

    def _get_session(self) -> Session:
        """Create a requests Session pre-loaded with the auth headers."""
        session = requests.Session()
        session.headers.update(self._format_headers())
        return session

    async def _arequest(self, method: str, url: str, **kwargs: Any) -> Dict[str, Any]:
        """Make an async request."""
        async with aiohttp.ClientSession(headers=self._format_headers()) as session:
            async with session.request(method, url, **kwargs) as response:
                response.raise_for_status()
                return await response.json()

    def _create_action_payload(  # type: ignore[no-untyped-def]
        self, instructions: str, params: Optional[Dict] = None, preview_only: bool = False
    ) -> Dict:
        """Create a payload for an action.

        NOTE(review): mutates the caller-supplied ``params`` dict in place
        when one is provided — confirm callers do not reuse it.
        """
        data = params if params else {}
        data.update(
            {
                "instructions": instructions,
            }
        )
        if preview_only:
            data.update({"preview_only": True})
        return data

    def _create_action_url(self, action_id: str) -> str:
        """Create a url for an action."""
        return self.zapier_nla_api_base + f"exposed/{action_id}/execute/"

    def _create_action_request(  # type: ignore[no-untyped-def]
        self,
        action_id: str,
        instructions: str,
        params: Optional[Dict] = None,
        preview_only: bool = False,
    ) -> Request:
        """Build an (unprepared) POST Request for executing an action."""
        data = self._create_action_payload(instructions, params, preview_only)
        return Request(
            "POST",
            self._create_action_url(action_id),
            json=data,
        )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        zapier_nla_api_key_default = None

        # If there is a oauth_access_key passed in the values
        # we don't need a nla_api_key it can be blank
        if "zapier_nla_oauth_access_token" in values:
            zapier_nla_api_key_default = ""
        else:
            values["zapier_nla_oauth_access_token"] = ""

        # we require at least one API Key
        zapier_nla_api_key = get_from_dict_or_env(
            values,
            "zapier_nla_api_key",
            "ZAPIER_NLA_API_KEY",
            zapier_nla_api_key_default,
        )
        values["zapier_nla_api_key"] = zapier_nla_api_key
        return values

    async def alist(self) -> List[Dict]:
        """Returns a list of all exposed (enabled) actions associated with
        current user (associated with the set api_key). Change your exposed
        actions here: https://nla.zapier.com/demo/start/

        The return list can be empty if no actions exposed. Else will contain
        a list of action objects:

        [{
            "id": str,
            "description": str,
            "params": Dict[str, str]
        }]

        `params` will always contain an `instructions` key, the only required
        param. All others optional and if provided will override any AI guesses
        (see "understanding the AI guessing flow" here:
        https://nla.zapier.com/api/v1/docs)
        """
        response = await self._arequest("GET", self.zapier_nla_api_base + "exposed/")
        return response["results"]

    def list(self) -> List[Dict]:
        """Returns a list of all exposed (enabled) actions associated with
        current user (associated with the set api_key). Change your exposed
        actions here: https://nla.zapier.com/demo/start/

        The return list can be empty if no actions exposed. Else will contain
        a list of action objects:

        [{
            "id": str,
            "description": str,
            "params": Dict[str, str]
        }]

        `params` will always contain an `instructions` key, the only required
        param. All others optional and if provided will override any AI guesses
        (see "understanding the AI guessing flow" here:
        https://nla.zapier.com/docs/using-the-api#ai-guessing)
        """
        session = self._get_session()
        try:
            response = session.get(self.zapier_nla_api_base + "exposed/")
            response.raise_for_status()
        except requests.HTTPError as http_err:
            # ``response`` is bound here because requests.HTTPError is only
            # raised by raise_for_status() above, after the GET succeeded.
            if response.status_code == 401:
                if self.zapier_nla_oauth_access_token:
                    raise requests.HTTPError(
                        f"An unauthorized response occurred. Check that your "
                        f"access token is correct and doesn't need to be "
                        f"refreshed. Err: {http_err}"
                    )
                raise requests.HTTPError(
                    f"An unauthorized response occurred. Check that your api "
                    f"key is correct. Err: {http_err}"
                )
            raise http_err
        return response.json()["results"]

    def run(
        self, action_id: str, instructions: str, params: Optional[Dict] = None
    ) -> Dict:
        """Executes an action that is identified by action_id, must be exposed
        (enabled) by the current user (associated with the set api_key). Change
        your exposed actions here: https://nla.zapier.com/demo/start/

        The return JSON is guaranteed to be less than ~500 words (350
        tokens) making it safe to inject into the prompt of another LLM
        call.
        """
        session = self._get_session()
        request = self._create_action_request(action_id, instructions, params)
        response = session.send(session.prepare_request(request))
        response.raise_for_status()
        return response.json()["result"]

    async def arun(
        self, action_id: str, instructions: str, params: Optional[Dict] = None
    ) -> Dict:
        """Executes an action that is identified by action_id, must be exposed
        (enabled) by the current user (associated with the set api_key). Change
        your exposed actions here: https://nla.zapier.com/demo/start/

        The return JSON is guaranteed to be less than ~500 words (350
        tokens) making it safe to inject into the prompt of another LLM
        call.
        """
        response = await self._arequest(
            "POST",
            self._create_action_url(action_id),
            json=self._create_action_payload(instructions, params),
        )
        return response["result"]

    def preview(
        self, action_id: str, instructions: str, params: Optional[Dict] = None
    ) -> Dict:
        """Same as run, but instead of actually executing the action, will
        instead return a preview of params that have been guessed by the AI in
        case you need to explicitly review before executing."""
        session = self._get_session()
        params = params if params else {}
        params.update({"preview_only": True})
        request = self._create_action_request(action_id, instructions, params, True)
        response = session.send(session.prepare_request(request))
        response.raise_for_status()
        return response.json()["input_params"]

    async def apreview(
        self, action_id: str, instructions: str, params: Optional[Dict] = None
    ) -> Dict:
        """Same as run, but instead of actually executing the action, will
        instead return a preview of params that have been guessed by the AI in
        case you need to explicitly review before executing."""
        response = await self._arequest(
            "POST",
            self._create_action_url(action_id),
            json=self._create_action_payload(instructions, params, preview_only=True),
        )
        # NOTE(review): the sync ``preview`` returns ``input_params`` while
        # this returns ``result`` — confirm this asymmetry is intentional.
        return response["result"]

    def run_as_str(self, *args, **kwargs) -> str:  # type: ignore[no-untyped-def]
        """Same as run, but returns a stringified version of the JSON for
        inserting back into an LLM."""
        data = self.run(*args, **kwargs)
        return json.dumps(data)

    async def arun_as_str(self, *args, **kwargs) -> str:  # type: ignore[no-untyped-def]
        """Same as run, but returns a stringified version of the JSON for
        inserting back into an LLM."""
        data = await self.arun(*args, **kwargs)
        return json.dumps(data)

    def preview_as_str(self, *args, **kwargs) -> str:  # type: ignore[no-untyped-def]
        """Same as preview, but returns a stringified version of the JSON for
        inserting back into an LLM."""
        data = self.preview(*args, **kwargs)
        return json.dumps(data)

    async def apreview_as_str(  # type: ignore[no-untyped-def]
        self, *args, **kwargs
    ) -> str:
        """Same as preview, but returns a stringified version of the JSON for
        inserting back into an LLM."""
        data = await self.apreview(*args, **kwargs)
        return json.dumps(data)

    def list_as_str(self) -> str:  # type: ignore[no-untyped-def]
        """Same as list, but returns a stringified version of the JSON for
        inserting back into an LLM."""
        actions = self.list()
        return json.dumps(actions)

    async def alist_as_str(self) -> str:  # type: ignore[no-untyped-def]
        """Same as list, but returns a stringified version of the JSON for
        inserting back into an LLM."""
        actions = await self.alist()
        return json.dumps(actions)
7852bd69f34f-0 | Source code for langchain.utilities.powerbi
"""Wrapper around a Power BI endpoint."""
from __future__ import annotations
import asyncio
import logging
import os
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union
import aiohttp
import requests
from aiohttp import ServerTimeoutError
from pydantic import BaseModel, Field, root_validator, validator
from requests.exceptions import Timeout
_LOGGER = logging.getLogger(__name__)
BASE_URL = os.getenv("POWERBI_BASE_URL", "https://api.powerbi.com/v1.0/myorg")
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
class PowerBIDataset(BaseModel):
    """Create PowerBI engine from dataset ID and credential or token.

    Use either the credential or a supplied token to authenticate.
    If both are supplied the credential is used to generate a token.
    The impersonated_user_name is the UPN of a user to be impersonated.
    If the model is not RLS enabled, this will be ignored.
    """

    dataset_id: str
    table_names: List[str]
    group_id: Optional[str] = None
    credential: Optional[TokenCredential] = None
    token: Optional[str] = None
    impersonated_user_name: Optional[str] = None
    # Number of sample rows fetched per table when building schema info.
    sample_rows_in_table_info: int = Field(default=1, gt=0, le=10)
    # Cache of table name -> markdown schema, filled lazily by _get_schema.
    schemas: Dict[str, str] = Field(default_factory=dict)
    aiosession: Optional[aiohttp.ClientSession] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @validator("table_names", allow_reuse=True)
    def fix_table_names(cls, table_names: List[str]) -> List[str]:
        """Fix the table names."""
        return [fix_table_name(table) for table in table_names]

    @root_validator(pre=True, allow_reuse=True)
    def token_or_credential_present(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate that at least one of token and credentials is present."""
        # NOTE(review): this checks key *presence*, not non-None values —
        # an explicit ``token=None`` would pass validation. Confirm intended.
        if "token" in values or "credential" in values:
            return values
        raise ValueError("Please provide either a credential or a token.")

    @property
    def request_url(self) -> str:
        """Get the request url."""
        if self.group_id:
            return f"{BASE_URL}/groups/{self.group_id}/datasets/{self.dataset_id}/executeQueries"  # noqa: E501 # pylint: disable=C0301
        return f"{BASE_URL}/datasets/{self.dataset_id}/executeQueries"  # noqa: E501 # pylint: disable=C0301

    @property
    def headers(self) -> Dict[str, str]:
        """Get the token."""
        # A directly supplied token takes precedence over the credential.
        if self.token:
            return {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + self.token,
            }
        from azure.core.exceptions import (
            ClientAuthenticationError,  # pylint: disable=import-outside-toplevel
        )

        if self.credential:
            try:
                token = self.credential.get_token(
                    "https://analysis.windows.net/powerbi/api/.default"
                ).token
                return {
                    "Content-Type": "application/json",
                    "Authorization": "Bearer " + token,
                }
            except Exception as exc:  # pylint: disable=broad-exception-caught
                raise ClientAuthenticationError(
                    "Could not get a token from the supplied credentials."
                ) from exc
        raise ClientAuthenticationError("No credential or token supplied.")

    def get_table_names(self) -> Iterable[str]:
        """Get names of tables available."""
        return self.table_names

    def get_schemas(self) -> str:
        """Get the available schema's."""
        if self.schemas:
            return ", ".join([f"{key}: {value}" for key, value in self.schemas.items()])
        return "No known schema's yet. Use the schema_powerbi tool first."

    @property
    def table_info(self) -> str:
        """Information about all tables in the database."""
        return self.get_table_info()

    def _get_tables_to_query(
        self, table_names: Optional[Union[List[str], str]] = None
    ) -> Optional[List[str]]:
        """Get the tables names that need to be queried, after checking they exist."""
        if table_names is not None:
            if (
                isinstance(table_names, list)
                and len(table_names) > 0
                and table_names[0] != ""
            ):
                fixed_tables = [fix_table_name(table) for table in table_names]
                non_existing_tables = [
                    table for table in fixed_tables if table not in self.table_names
                ]
                if non_existing_tables:
                    _LOGGER.warning(
                        "Table(s) %s not found in dataset.",
                        ", ".join(non_existing_tables),
                    )
                tables = [
                    table for table in fixed_tables if table not in non_existing_tables
                ]
                return tables if tables else None
            if isinstance(table_names, str) and table_names != "":
                if table_names not in self.table_names:
                    _LOGGER.warning("Table %s not found in dataset.", table_names)
                    return None
                return [fix_table_name(table_names)]
        # No usable filter supplied: fall back to all known tables.
        return self.table_names

    def _get_tables_todo(self, tables_todo: List[str]) -> List[str]:
        """Get the tables that still need to be queried."""
        return [table for table in tables_todo if table not in self.schemas]

    def _get_schema_for_tables(self, table_names: List[str]) -> str:
        """Create a string of the table schemas for the supplied tables."""
        schemas = [
            schema for table, schema in self.schemas.items() if table in table_names
        ]
        return ", ".join(schemas)

    def get_table_info(
        self, table_names: Optional[Union[List[str], str]] = None
    ) -> str:
        """Get information about specified tables."""
        tables_requested = self._get_tables_to_query(table_names)
        if tables_requested is None:
            return "No (valid) tables requested."
        tables_todo = self._get_tables_todo(tables_requested)
        for table in tables_todo:
            self._get_schema(table)
        return self._get_schema_for_tables(tables_requested)

    async def aget_table_info(
        self, table_names: Optional[Union[List[str], str]] = None
    ) -> str:
        """Get information about specified tables."""
        tables_requested = self._get_tables_to_query(table_names)
        if tables_requested is None:
            return "No (valid) tables requested."
        tables_todo = self._get_tables_todo(tables_requested)
        # Fetch all outstanding schemas concurrently.
        await asyncio.gather(*[self._aget_schema(table) for table in tables_todo])
        return self._get_schema_for_tables(tables_requested)

    def _get_schema(self, table: str) -> None:
        """Get the schema for a table."""
        try:
            result = self.run(
                f"EVALUATE TOPN({self.sample_rows_in_table_info}, {table})"
            )
            self.schemas[table] = json_to_md(result["results"][0]["tables"][0]["rows"])
        except Timeout:
            _LOGGER.warning("Timeout while getting table info for %s", table)
            self.schemas[table] = "unknown"
        except Exception as exc:  # pylint: disable=broad-exception-caught
            _LOGGER.warning("Error while getting table info for %s: %s", table, exc)
            self.schemas[table] = "unknown"

    async def _aget_schema(self, table: str) -> None:
        """Get the schema for a table."""
        try:
            result = await self.arun(
                f"EVALUATE TOPN({self.sample_rows_in_table_info}, {table})"
            )
            self.schemas[table] = json_to_md(result["results"][0]["tables"][0]["rows"])
        except ServerTimeoutError:
            _LOGGER.warning("Timeout while getting table info for %s", table)
            self.schemas[table] = "unknown"
        except Exception as exc:  # pylint: disable=broad-exception-caught
            _LOGGER.warning("Error while getting table info for %s: %s", table, exc)
            self.schemas[table] = "unknown"

    def _create_json_content(self, command: str) -> dict[str, Any]:
        """Create the json content for the request."""
        return {
            "queries": [{"query": rf"{command}"}],
            "impersonatedUserName": self.impersonated_user_name,
            "serializerSettings": {"includeNulls": True},
        }

    def run(self, command: str) -> Any:
        """Execute a DAX command and return a json representing the results."""
        _LOGGER.debug("Running command: %s", command)
        # NOTE(review): HTTP errors are not raised here; ``result.json()``
        # may contain an error payload — confirm callers handle that.
        result = requests.post(
            self.request_url,
            json=self._create_json_content(command),
            headers=self.headers,
            timeout=10,
        )
        return result.json()

    async def arun(self, command: str) -> Any:
        """Execute a DAX command and return the result asynchronously."""
        _LOGGER.debug("Running command: %s", command)
        if self.aiosession:
            async with self.aiosession.post(
                self.request_url,
                headers=self.headers,
                json=self._create_json_content(command),
                timeout=10,
            ) as response:
                response_json = await response.json(content_type=response.content_type)
                return response_json
        async with aiohttp.ClientSession() as session:
            async with session.post(
                self.request_url,
                headers=self.headers,
                json=self._create_json_content(command),
                timeout=10,
            ) as response:
                response_json = await response.json(content_type=response.content_type)
                return response_json
def json_to_md(
    json_contents: List[Dict[str, Union[str, int, float]]],
    table_name: Optional[str] = None,
) -> str:
    """Converts a JSON object to a markdown table.

    Args:
        json_contents: rows as a list of flat dicts; all rows are assumed
            to share the keys of the first row.
        table_name: if given, the ``"<table_name>."`` prefix is removed
            from each column header.

    Returns:
        A markdown table: one header row followed by one row per dict.
        Empty input yields an empty string.
    """
    # Robustness: the original raised IndexError on an empty result set.
    if not json_contents:
        return ""
    output_md = ""
    headers = json_contents[0].keys()
    for header in headers:
        # BUG FIX: ``str.replace`` returns a new string; the original code
        # discarded the cleaned value, so DAX-style headers like
        # "Table[Column]" were emitted verbatim instead of as "Column".
        clean_header = header.replace("[", ".").replace("]", "")
        if table_name:
            clean_header = clean_header.replace(f"{table_name}.", "")
        output_md += f"| {clean_header} "
    output_md += "|\n"
    for row in json_contents:
        for value in row.values():
            output_md += f"| {value} "
        output_md += "|\n"
    return output_md
def fix_table_name(table: str) -> str:
    """Add single quotes around table names that contain spaces."""
    needs_quoting = (
        " " in table and not table.startswith("'") and not table.endswith("'")
    )
    return f"'{table}'" if needs_quoting else table
1d8265495db5-0 | Source code for langchain.utilities.bibtex
"""Util that calls bibtexparser."""
import logging
from typing import Any, Dict, List, Mapping
from pydantic import BaseModel, Extra, root_validator
logger = logging.getLogger(__name__)
OPTIONAL_FIELDS = [
"annotate",
"booktitle",
"editor",
"howpublished",
"journal",
"keywords",
"note",
"organization",
"publisher",
"school",
"series",
"type",
"doi",
"issn",
"isbn",
]
class BibtexparserWrapper(BaseModel):
    """Wrapper around bibtexparser.

    To use, you should have the ``bibtexparser`` python package installed.
    https://bibtexparser.readthedocs.io/en/master/

    This wrapper will use bibtexparser to load a collection of references from
    a bibtex file and fetch document summaries.
    """

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the python package exists in environment."""
        try:
            import bibtexparser  # noqa
        except ImportError:
            raise ImportError(
                "Could not import bibtexparser python package. "
                "Please install it with `pip install bibtexparser`."
            )
        return values

    def load_bibtex_entries(self, path: str) -> List[Dict[str, Any]]:
        """Load bibtex entries from the bibtex file at the given path."""
        import bibtexparser

        with open(path) as bibtex_file:
            return bibtexparser.load(bibtex_file).entries

    def get_metadata(
        self, entry: Mapping[str, Any], load_extra: bool = False
    ) -> Dict[str, Any]:
        """Get metadata for the given entry.

        Args:
            entry: a single parsed bibtex entry mapping.
            load_extra: when True, also copy the OPTIONAL_FIELDS keys.

        Returns:
            The entry's metadata with ``None``-valued keys dropped.
        """
        # Prefer the journal name; fall back to the book title.
        publication = entry.get("journal") or entry.get("booktitle")
        if "url" in entry:
            url = entry["url"]
        elif "doi" in entry:
            url = f'https://doi.org/{entry["doi"]}'
        else:
            url = None
        meta = {
            "id": entry.get("ID"),
            "published_year": entry.get("year"),
            "title": entry.get("title"),
            "publication": publication,
            "authors": entry.get("author"),
            "abstract": entry.get("abstract"),
            "url": url,
        }
        if load_extra:
            meta.update({field: entry.get(field) for field in OPTIONAL_FIELDS})
        return {key: value for key, value in meta.items() if value is not None}
a994b8cfffcc-0 | Source code for langchain.utilities.python
import sys
from io import StringIO
from typing import Dict, Optional
from pydantic import BaseModel, Field
class PythonREPL(BaseModel):
    """Simulates a standalone Python REPL."""

    # Namespaces handed to ``exec``; aliased so callers can pass
    # ``_globals`` / ``_locals`` without shadowing the builtin names.
    globals: Optional[Dict] = Field(default_factory=dict, alias="_globals")
    locals: Optional[Dict] = Field(default_factory=dict, alias="_locals")

    def run(self, command: str) -> str:
        """Run command with own globals/locals and returns anything printed.

        Returns the captured stdout on success, or ``repr`` of the raised
        exception on failure.

        SECURITY NOTE: ``exec`` runs arbitrary code with this process's
        privileges — never feed untrusted input to this method.
        """
        old_stdout = sys.stdout
        sys.stdout = captured = StringIO()
        try:
            exec(command, self.globals, self.locals)
            output = captured.getvalue()
        except Exception as e:
            output = repr(e)
        finally:
            # BUG FIX: the original restored sys.stdout only on success or
            # on ``Exception``; a ``KeyboardInterrupt``/``SystemExit`` raised
            # by the executed code left stdout permanently redirected.
            sys.stdout = old_stdout
        return output
c0b54b49294e-0 | Source code for langchain.utilities.openweathermap
"""Util that calls OpenWeatherMap using PyOWM."""
from typing import Any, Dict, Optional
from pydantic import Extra, root_validator
from langchain.tools.base import BaseModel
from langchain.utils import get_from_dict_or_env
class OpenWeatherMapAPIWrapper(BaseModel):
    """Wrapper for OpenWeatherMap API using PyOWM.

    Docs for using:

    1. Go to OpenWeatherMap and sign up for an API key
    2. Save your API KEY into OPENWEATHERMAP_API_KEY env variable
    3. pip install pyowm
    """

    owm: Any  # pyowm.OWM client, built by the validator below.
    openweathermap_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        openweathermap_api_key = get_from_dict_or_env(
            values, "openweathermap_api_key", "OPENWEATHERMAP_API_KEY"
        )
        try:
            import pyowm
        except ImportError:
            raise ImportError(
                "pyowm is not installed. Please install it with `pip install pyowm`"
            )
        values["owm"] = pyowm.OWM(openweathermap_api_key)
        return values

    def _format_weather_info(self, location: str, w: Any) -> str:
        """Render a pyowm weather object as a human-readable report."""
        wind = w.wind()
        temp = w.temperature("celsius")
        return (
            f"In {location}, the current weather is as follows:\n"
            f"Detailed status: {w.detailed_status}\n"
            f"Wind speed: {wind['speed']} m/s, direction: {wind['deg']}°\n"
            f"Humidity: {w.humidity}%\n"
            f"Temperature: \n"
            f"  - Current: {temp['temp']}°C\n"
            f"  - High: {temp['temp_max']}°C\n"
            f"  - Low: {temp['temp_min']}°C\n"
            f"  - Feels like: {temp['feels_like']}°C\n"
            f"Rain: {w.rain}\n"
            f"Heat index: {w.heat_index}\n"
            f"Cloud cover: {w.clouds}%"
        )

    def run(self, location: str) -> str:
        """Get the current weather information for a specified location."""
        manager = self.owm.weather_manager()
        observation = manager.weather_at_place(location)
        return self._format_weather_info(location, observation.weather)
a9555fcff753-0 | Source code for langchain.utilities.google_serper
"""Util that calls Google Search using the Serper.dev API."""
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from pydantic.class_validators import root_validator
from pydantic.main import BaseModel
from typing_extensions import Literal
from langchain.utils import get_from_dict_or_env
class GoogleSerperAPIWrapper(BaseModel):
    """Wrapper around the Serper.dev Google Search API.

    You can create a free API key at https://serper.dev.

    To use, you should have the environment variable ``SERPER_API_KEY``
    set with your API key, or pass `serper_api_key` as a named parameter
    to the constructor.

    Example:
        .. code-block:: python

            from langchain import GoogleSerperAPIWrapper
            google_serper = GoogleSerperAPIWrapper()
    """

    # Number of results to request and snippets to keep.
    k: int = 10
    # Country (gl) and language (hl) codes forwarded to Serper.
    gl: str = "us"
    hl: str = "en"
    # "places" and "images" is available from Serper but not implemented in the
    # parser of run(). They can be used in results()
    type: Literal["news", "search", "places", "images"] = "search"
    # Maps the search type to the key under which Serper returns its hits.
    result_key_for_type = {
        "news": "news",
        "places": "places",
        "images": "images",
        "search": "organic",
    }
    # Optional time-based search filter (Google "tbs" parameter).
    tbs: Optional[str] = None
    serper_api_key: Optional[str] = None
    aiosession: Optional[aiohttp.ClientSession] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        serper_api_key = get_from_dict_or_env(
            values, "serper_api_key", "SERPER_API_KEY"
        )
        values["serper_api_key"] = serper_api_key
        return values

    def results(self, query: str, **kwargs: Any) -> Dict:
        """Run query through GoogleSearch and return the raw response dict."""
        return self._google_serper_api_results(
            query,
            gl=self.gl,
            hl=self.hl,
            num=self.k,
            tbs=self.tbs,
            search_type=self.type,
            **kwargs,
        )

    def run(self, query: str, **kwargs: Any) -> str:
        """Run query through GoogleSearch and parse result into a snippet string."""
        results = self._google_serper_api_results(
            query,
            gl=self.gl,
            hl=self.hl,
            num=self.k,
            tbs=self.tbs,
            search_type=self.type,
            **kwargs,
        )
        return self._parse_results(results)

    async def aresults(self, query: str, **kwargs: Any) -> Dict:
        """Run query through GoogleSearch asynchronously; return the raw dict."""
        results = await self._async_google_serper_search_results(
            query,
            gl=self.gl,
            hl=self.hl,
            num=self.k,
            search_type=self.type,
            tbs=self.tbs,
            **kwargs,
        )
        return results

    async def arun(self, query: str, **kwargs: Any) -> str:
        """Run query through GoogleSearch and parse result async."""
        results = await self._async_google_serper_search_results(
            query,
            gl=self.gl,
            hl=self.hl,
            num=self.k,
            search_type=self.type,
            tbs=self.tbs,
            **kwargs,
        )
        return self._parse_results(results)

    def _parse_snippets(self, results: dict) -> List[str]:
        """Extract human-readable snippets from a Serper response.

        Priority: answer box (short-circuits), then knowledge graph,
        then the per-type result list. Returns a fallback message when
        nothing usable is found.
        """
        snippets = []
        if results.get("answerBox"):
            answer_box = results.get("answerBox", {})
            if answer_box.get("answer"):
                return [answer_box.get("answer")]
            elif answer_box.get("snippet"):
                return [answer_box.get("snippet").replace("\n", " ")]
            elif answer_box.get("snippetHighlighted"):
                return answer_box.get("snippetHighlighted")
        if results.get("knowledgeGraph"):
            kg = results.get("knowledgeGraph", {})
            title = kg.get("title")
            entity_type = kg.get("type")
            if entity_type:
                snippets.append(f"{title}: {entity_type}.")
            description = kg.get("description")
            if description:
                snippets.append(description)
            for attribute, value in kg.get("attributes", {}).items():
                snippets.append(f"{title} {attribute}: {value}.")
        # NOTE(review): assumes the response always carries the key for
        # self.type ("organic", "news", ...); a missing key raises KeyError.
        for result in results[self.result_key_for_type[self.type]][: self.k]:
            if "snippet" in result:
                snippets.append(result["snippet"])
            for attribute, value in result.get("attributes", {}).items():
                snippets.append(f"{attribute}: {value}.")
        if len(snippets) == 0:
            return ["No good Google Search Result was found"]
        return snippets

    def _parse_results(self, results: dict) -> str:
        """Join parsed snippets into a single space-separated string."""
        return " ".join(self._parse_snippets(results))

    def _google_serper_api_results(
        self, search_term: str, search_type: str = "search", **kwargs: Any
    ) -> dict:
        """Synchronous Serper request; raises on HTTP error statuses."""
        headers = {
            "X-API-KEY": self.serper_api_key or "",
            "Content-Type": "application/json",
        }
        params = {
            "q": search_term,
            # Drop unset optional parameters (e.g. tbs=None).
            **{key: value for key, value in kwargs.items() if value is not None},
        }
        response = requests.post(
            f"https://google.serper.dev/{search_type}", headers=headers, params=params
        )
        response.raise_for_status()
        search_results = response.json()
        return search_results

    async def _async_google_serper_search_results(
        self, search_term: str, search_type: str = "search", **kwargs: Any
    ) -> dict:
        """Async Serper request; raises on HTTP error statuses.

        Fix: both branches now use ``raise_for_status=True``. Previously
        the temporary-session branch used ``False`` while the
        user-supplied-session branch used ``True``, so error handling
        depended on whether ``aiosession`` was set — and the ``False``
        branch silently diverged from the sync path, which calls
        ``response.raise_for_status()``.
        """
        headers = {
            "X-API-KEY": self.serper_api_key or "",
            "Content-Type": "application/json",
        }
        url = f"https://google.serper.dev/{search_type}"
        params = {
            "q": search_term,
            **{key: value for key, value in kwargs.items() if value is not None},
        }
        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    url, params=params, headers=headers, raise_for_status=True
                ) as response:
                    search_results = await response.json()
        else:
            async with self.aiosession.post(
                url, params=params, headers=headers, raise_for_status=True
            ) as response:
                search_results = await response.json()
        return search_results
a3a7544cbaf9-0 | Source code for langchain.utilities.max_compute
from __future__ import annotations
from typing import TYPE_CHECKING, Iterator, List, Optional
from langchain.utils import get_from_env
if TYPE_CHECKING:
from odps import ODPS
class MaxComputeAPIWrapper:
    """Interface for querying Alibaba Cloud MaxCompute tables."""

    def __init__(self, client: ODPS):
        """Initialize MaxCompute document loader.

        Args:
            client: odps.ODPS MaxCompute client object.
        """
        self.client = client

    @classmethod
    def from_params(
        cls,
        endpoint: str,
        project: str,
        *,
        access_id: Optional[str] = None,
        secret_access_key: Optional[str] = None,
    ) -> MaxComputeAPIWrapper:
        """Convenience constructor that builds the odsp.ODPS MaxCompute client from
        given parameters.

        Args:
            endpoint: MaxCompute endpoint.
            project: A project is a basic organizational unit of MaxCompute, which is
                similar to a database.
            access_id: MaxCompute access ID. Should be passed in directly or set as the
                environment variable `MAX_COMPUTE_ACCESS_ID`.
            secret_access_key: MaxCompute secret access key. Should be passed in
                directly or set as the environment variable
                `MAX_COMPUTE_SECRET_ACCESS_KEY`.
        """
        try:
            from odps import ODPS
        except ImportError as ex:
            raise ImportError(
                "Could not import pyodps python package. "
                "Please install it with `pip install pyodps` or refer to "
                "https://pyodps.readthedocs.io/."
            ) from ex
        # Fall back to environment variables for missing credentials.
        access_id = access_id or get_from_env("access_id", "MAX_COMPUTE_ACCESS_ID")
        secret_access_key = secret_access_key or get_from_env(
            "secret_access_key", "MAX_COMPUTE_SECRET_ACCESS_KEY"
        )
        odps_client = ODPS(
            access_id=access_id,
            secret_access_key=secret_access_key,
            project=project,
            endpoint=endpoint,
        )
        if not odps_client.exist_project(project):
            raise ValueError(f'The project "{project}" does not exist.')
        return cls(odps_client)

    def lazy_query(self, query: str) -> Iterator[dict]:
        """Execute the SQL query and lazily yield one dict per result row."""
        with self.client.execute_sql(query).open_reader() as reader:
            if reader.count == 0:
                raise ValueError("Table contains no data.")
            for row in reader:
                # Each row iterates as (column, value) pairs.
                yield dict(row)

    def query(self, query: str) -> List[dict]:
        """Execute the SQL query and return all result rows as a list."""
        return list(self.lazy_query(query))
c99101d93200-0 | Source code for langchain.utilities.google_search
"""Util that calls Google Search."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class GoogleSearchAPIWrapper(BaseModel):
    """Wrapper for Google Search API.

    Adapted from: Instructions adapted from https://stackoverflow.com/questions/
    37083058/
    programmatically-searching-google-in-python-using-custom-search

    TODO: DOCS for using it
    1. Install google-api-python-client
    - If you don't already have a Google account, sign up.
    - If you have never created a Google APIs Console project,
    read the Managing Projects page and create a project in the Google API Console.
    - Install the library using pip install google-api-python-client
    The current version of the library is 2.70.0 at this time

    2. To create an API key:
    - Navigate to the APIs & Services→Credentials panel in Cloud Console.
    - Select Create credentials, then select API key from the drop-down menu.
    - The API key created dialog box displays your newly created key.
    - You now have an API_KEY

    3. Setup Custom Search Engine so you can search the entire web
    - Create a custom search engine in this link.
    - In Sites to search, add any valid URL (i.e. www.stackoverflow.com).
    - That’s all you have to fill up, the rest doesn’t matter.
    In the left-side menu, click Edit search engine → {your search engine name}
    → Setup Set Search the entire web to ON. Remove the URL you added from
    the list of Sites to search.
    - Under Search engine ID you’ll find the search-engine-ID.

    4. Enable the Custom Search API
    - Navigate to the APIs & Services→Dashboard panel in Cloud Console.
    - Click Enable APIs and Services.
    - Search for Custom Search API and click on it.
    - Click Enable.
    URL for it: https://console.cloud.google.com/apis/library/customsearch.googleapis
    .com
    """

    search_engine: Any  #: :meta private:
    google_api_key: Optional[str] = None
    google_cse_id: Optional[str] = None
    k: int = 10
    siterestrict: bool = False

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["google_api_key"] = get_from_dict_or_env(
            values, "google_api_key", "GOOGLE_API_KEY"
        )
        values["google_cse_id"] = get_from_dict_or_env(
            values, "google_cse_id", "GOOGLE_CSE_ID"
        )
        try:
            from googleapiclient.discovery import build
        except ImportError:
            raise ImportError(
                "google-api-python-client is not installed. "
                "Please install it with `pip install google-api-python-client`"
            )
        values["search_engine"] = build(
            "customsearch", "v1", developerKey=values["google_api_key"]
        )
        return values

    def _google_search_results(self, search_term: str, **kwargs: Any) -> List[dict]:
        """Issue one Custom Search request and return the raw result items."""
        cse = self.search_engine.cse()
        if self.siterestrict:
            # Restrict results to the sites configured on the search engine.
            cse = cse.siterestrict()
        response = cse.list(q=search_term, cx=self.google_cse_id, **kwargs).execute()
        return response.get("items", [])

    def run(self, query: str) -> str:
        """Run query through GoogleSearch and parse result."""
        raw_results = self._google_search_results(query, num=self.k)
        if not raw_results:
            return "No good Google Search Result was found"
        return " ".join(
            item["snippet"] for item in raw_results if "snippet" in item
        )

    def results(self, query: str, num_results: int) -> List[Dict]:
        """Run query through GoogleSearch and return metadata.

        Args:
            query: The query to search for.
            num_results: The number of results to return.

        Returns:
            A list of dictionaries with the following keys:
                snippet - The description of the result.
                title - The title of the result.
                link - The link to the result.
        """
        raw_results = self._google_search_results(query, num=num_results)
        if not raw_results:
            return [{"Result": "No good Google Search Result was found"}]
        metadata_results = []
        for item in raw_results:
            record = {"title": item["title"], "link": item["link"]}
            if "snippet" in item:
                record["snippet"] = item["snippet"]
            metadata_results.append(record)
        return metadata_results
aad4504d1062-0 | Source code for langchain.utilities.arxiv
"""Util that calls Arxiv."""
import logging
import os
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
class ArxivAPIWrapper(BaseModel):
    """Wrapper around ArxivAPI.

    To use, you should have the ``arxiv`` python package installed.
    https://lukasschwab.me/arxiv.py/index.html

    This wrapper will use the Arxiv API to conduct searches and
    fetch document summaries. By default, it will return the document summaries
    of the top-k results.
    It limits the Document content by doc_content_chars_max.
    Set doc_content_chars_max=None if you don't want to limit the content size.

    Parameters:
        top_k_results: number of the top-scored document used for the arxiv tool
        ARXIV_MAX_QUERY_LENGTH: the cut limit on the query used for the arxiv tool.
        load_max_docs: a limit to the number of loaded documents
        load_all_available_meta:
            if True: the `metadata` of the loaded Documents gets all available meta info
            (see https://lukasschwab.me/arxiv.py/index.html#Result),
            if False: the `metadata` gets only the most informative fields.
    """

    arxiv_search: Any  #: :meta private:
    arxiv_exceptions: Any  # :meta private:
    top_k_results: int = 3
    ARXIV_MAX_QUERY_LENGTH = 300
    load_max_docs: int = 100
    load_all_available_meta: bool = False
    doc_content_chars_max: Optional[int] = 4000

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the python package exists in environment."""
        try:
            import arxiv
        except ImportError:
            raise ImportError(
                "Could not import arxiv python package. "
                "Please install it with `pip install arxiv`."
            )
        values["arxiv_search"] = arxiv.Search
        values["arxiv_exceptions"] = (
            arxiv.ArxivError,
            arxiv.UnexpectedEmptyPageError,
            arxiv.HTTPError,
        )
        values["arxiv_result"] = arxiv.Result
        return values

    def run(self, query: str) -> str:
        """
        Run Arxiv search and get the article meta information.
        See https://lukasschwab.me/arxiv.py/index.html#Search
        See https://lukasschwab.me/arxiv.py/index.html#Result
        It uses only the most informative fields of article meta information.
        """
        try:
            results = self.arxiv_search(  # type: ignore
                query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.top_k_results
            ).results()
        except self.arxiv_exceptions as ex:
            return f"Arxiv exception: {ex}"
        summaries = []
        for result in results:
            summaries.append(
                f"Published: {result.updated.date()}\nTitle: {result.title}\n"
                f"Authors: {', '.join(a.name for a in result.authors)}\n"
                f"Summary: {result.summary}"
            )
        if not summaries:
            return "No good Arxiv Result was found"
        # Truncate the combined text to the configured character budget.
        return "\n\n".join(summaries)[: self.doc_content_chars_max]

    def load(self, query: str) -> List[Document]:
        """
        Run Arxiv search and get the article texts plus the article meta information.
        See https://lukasschwab.me/arxiv.py/index.html#Search

        Returns: a list of documents with the document.page_content in text format
        """
        try:
            import fitz
        except ImportError:
            raise ImportError(
                "PyMuPDF package not found, please install it with "
                "`pip install pymupdf`"
            )
        try:
            results = self.arxiv_search(  # type: ignore
                query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.load_max_docs
            ).results()
        except self.arxiv_exceptions as ex:
            logger.debug("Error on arxiv: %s", ex)
            return []
        docs: List[Document] = []
        for result in results:
            try:
                # Download the PDF and extract its full text page by page.
                doc_file_name: str = result.download_pdf()
                with fitz.open(doc_file_name) as doc_file:
                    text: str = "".join(page.get_text() for page in doc_file)
            except FileNotFoundError as f_ex:
                logger.debug(f_ex)
                continue
            if self.load_all_available_meta:
                extra_metadata = {
                    "entry_id": result.entry_id,
                    "published_first_time": str(result.published.date()),
                    "comment": result.comment,
                    "journal_ref": result.journal_ref,
                    "doi": result.doi,
                    "primary_category": result.primary_category,
                    "categories": result.categories,
                    "links": [link.href for link in result.links],
                }
            else:
                extra_metadata = {}
            metadata = {
                "Published": str(result.updated.date()),
                "Title": result.title,
                "Authors": ", ".join(a.name for a in result.authors),
                "Summary": result.summary,
                **extra_metadata,
            }
            docs.append(
                Document(
                    page_content=text[: self.doc_content_chars_max], metadata=metadata
                )
            )
            # Clean up the downloaded PDF once its text has been captured.
            os.remove(doc_file_name)
        return docs
94eb0546b494-0 | Source code for langchain.utilities.wolfram_alpha
"""Util that calls WolframAlpha."""
from typing import Any, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class WolframAlphaAPIWrapper(BaseModel):
    """Wrapper for Wolfram Alpha.

    Docs for using:

    1. Go to wolfram alpha and sign up for a developer account
    2. Create an app and get your APP ID
    3. Save your APP ID into WOLFRAM_ALPHA_APPID env variable
    4. pip install wolframalpha
    """

    wolfram_client: Any  #: :meta private:
    wolfram_alpha_appid: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        wolfram_alpha_appid = get_from_dict_or_env(
            values, "wolfram_alpha_appid", "WOLFRAM_ALPHA_APPID"
        )
        values["wolfram_alpha_appid"] = wolfram_alpha_appid
        try:
            import wolframalpha
        except ImportError:
            raise ImportError(
                "wolframalpha is not installed. "
                "Please install it with `pip install wolframalpha`"
            )
        values["wolfram_client"] = wolframalpha.Client(wolfram_alpha_appid)
        return values

    def run(self, query: str) -> str:
        """Run query through WolframAlpha and parse result."""
        res = self.wolfram_client.query(query)
        try:
            # First pod carries the interpreted assumption; first result the answer.
            assumption = next(res.pods).text
            answer = next(res.results).text
        except StopIteration:
            return "Wolfram Alpha wasn't able to answer it"
        if answer is None or answer == "":
            # We don't want to return the assumption alone if answer is empty
            return "No good Wolfram Alpha Result was found"
        return f"Assumption: {assumption} \nAnswer: {answer}"
acfe0d4cb1a7-0 | Source code for langchain.utilities.graphql
import json
from typing import Any, Callable, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
class GraphQLAPIWrapper(BaseModel):
    """Wrapper around GraphQL API.

    To use, you should have the ``gql`` python package installed.
    This wrapper will use the GraphQL API to conduct queries.
    """

    custom_headers: Optional[Dict[str, str]] = None
    graphql_endpoint: str
    gql_client: Any  #: :meta private:
    gql_function: Callable[[str], Any]  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the python package exists in the environment."""
        try:
            from gql import Client, gql
            from gql.transport.requests import RequestsHTTPTransport
        except ImportError as e:
            raise ImportError(
                "Could not import gql python package. "
                f"Try installing it with `pip install gql`. Received error: {e}"
            )
        transport = RequestsHTTPTransport(
            url=values["graphql_endpoint"],
            headers=values.get("custom_headers"),
        )
        # fetch_schema_from_transport lets the client validate queries
        # against the live schema.
        values["gql_client"] = Client(
            transport=transport, fetch_schema_from_transport=True
        )
        values["gql_function"] = gql
        return values

    def run(self, query: str) -> str:
        """Run a GraphQL query and get the results."""
        return json.dumps(self._execute_query(query), indent=2)

    def _execute_query(self, query: str) -> Dict[str, Any]:
        """Execute a GraphQL query and return the results."""
        document_node = self.gql_function(query)
        return self.gql_client.execute(document_node)
ec13144fcf3c-0 | Source code for langchain.utilities.openapi
"""Utility functions for parsing an OpenAPI spec."""
import copy
import json
import logging
import re
from enum import Enum
from pathlib import Path
from typing import Dict, List, Optional, Union
import requests
import yaml
from openapi_schema_pydantic import (
Components,
OpenAPI,
Operation,
Parameter,
PathItem,
Paths,
Reference,
RequestBody,
Schema,
)
from pydantic import ValidationError
logger = logging.getLogger(__name__)
class HTTPVerb(str, Enum):
    """Enumeration of the HTTP request methods an OpenAPI path may define.

    Member order is significant: consumers iterate the enum to list a
    path's methods in this order.
    """

    GET = "get"
    PUT = "put"
    POST = "post"
    DELETE = "delete"
    OPTIONS = "options"
    HEAD = "head"
    PATCH = "patch"
    TRACE = "trace"

    @classmethod
    def from_str(cls, verb: str) -> "HTTPVerb":
        """Convert a lowercase verb string into the matching member."""
        try:
            return cls(verb)
        except ValueError:
            raise ValueError(f"Invalid HTTP verb. Valid values are {cls.__members__}")
class OpenAPISpec(OpenAPI):
    """OpenAPI Model that removes misformatted parts of the spec."""

    @property
    def _paths_strict(self) -> Paths:
        """Return the spec's paths, raising if there are none."""
        if not self.paths:
            raise ValueError("No paths found in spec")
        return self.paths

    def _get_path_strict(self, path: str) -> PathItem:
        """Return the PathItem for ``path``, raising if it is missing."""
        path_item = self._paths_strict.get(path)
        if not path_item:
            raise ValueError(f"No path found for {path}")
        return path_item

    @property
    def _components_strict(self) -> Components:
        """Get components or err."""
        if self.components is None:
            raise ValueError("No components found in spec. ")
        return self.components

    @property
    def _parameters_strict(self) -> Dict[str, Union[Parameter, Reference]]:
        """Get parameters or err."""
        params = self._components_strict.parameters
        if params is None:
            raise ValueError("No parameters found in spec. ")
        return params

    @property
    def _schemas_strict(self) -> Dict[str, Schema]:
        """Get the dictionary of schemas or err."""
        schema_map = self._components_strict.schemas
        if schema_map is None:
            raise ValueError("No schemas found in spec. ")
        return schema_map

    @property
    def _request_bodies_strict(self) -> Dict[str, Union[RequestBody, Reference]]:
        """Get the request body or err."""
        bodies = self._components_strict.requestBodies
        if bodies is None:
            raise ValueError("No request body found in spec. ")
        return bodies

    def _get_referenced_parameter(self, ref: Reference) -> Union[Parameter, Reference]:
        """Resolve one level of a parameter $ref, or err."""
        name = ref.ref.split("/")[-1]
        registry = self._parameters_strict
        if name not in registry:
            raise ValueError(f"No parameter found for {name}")
        return registry[name]

    def _get_root_referenced_parameter(self, ref: Reference) -> Parameter:
        """Follow a chain of parameter $refs down to the concrete Parameter."""
        resolved = self._get_referenced_parameter(ref)
        while isinstance(resolved, Reference):
            resolved = self._get_referenced_parameter(resolved)
        return resolved

    def get_referenced_schema(self, ref: Reference) -> Schema:
        """Resolve one level of a schema $ref, or err."""
        name = ref.ref.split("/")[-1]
        schema_map = self._schemas_strict
        if name not in schema_map:
            raise ValueError(f"No schema found for {name}")
        return schema_map[name]

    def get_schema(self, schema: Union[Reference, Schema]) -> Schema:
        """Return the schema itself, resolving a Reference if given one."""
        if isinstance(schema, Reference):
            return self.get_referenced_schema(schema)
        return schema

    def _get_root_referenced_schema(self, ref: Reference) -> Schema:
        """Follow a chain of schema $refs down to the concrete Schema."""
        resolved = self.get_referenced_schema(ref)
        while isinstance(resolved, Reference):
            resolved = self.get_referenced_schema(resolved)
        return resolved

    def _get_referenced_request_body(
        self, ref: Reference
    ) -> Optional[Union[Reference, RequestBody]]:
        """Resolve one level of a request-body $ref, or err."""
        name = ref.ref.split("/")[-1]
        bodies = self._request_bodies_strict
        if name not in bodies:
            raise ValueError(f"No request body found for {name}")
        return bodies[name]

    def _get_root_referenced_request_body(
        self, ref: Reference
    ) -> Optional[RequestBody]:
        """Follow a chain of request-body $refs down to the concrete body."""
        body = self._get_referenced_request_body(ref)
        while isinstance(body, Reference):
            body = self._get_referenced_request_body(body)
        return body

    @staticmethod
    def _alert_unsupported_spec(obj: dict) -> None:
        """Warn on 3.0/Swagger specs; raise when the version is unrecognized."""
        warning_message = (
            " This may result in degraded performance."
            + " Convert your OpenAPI spec to 3.1.* spec"
            + " for better support."
        )
        swagger_version = obj.get("swagger")
        openapi_version = obj.get("openapi")
        if isinstance(openapi_version, str):
            if openapi_version != "3.1.0":
                logger.warning(
                    f"Attempting to load an OpenAPI {openapi_version}"
                    f" spec. {warning_message}"
                )
        elif isinstance(swagger_version, str):
            logger.warning(
                f"Attempting to load a Swagger {swagger_version}"
                f" spec. {warning_message}"
            )
        else:
            raise ValueError(
                "Attempting to load an unsupported spec:"
                f"\n\n{obj}\n{warning_message}"
            )

    @classmethod
    def parse_obj(cls, obj: dict) -> "OpenAPISpec":
        """Parse a spec dict, recursively dropping fields that fail validation."""
        try:
            cls._alert_unsupported_spec(obj)
            return super().parse_obj(obj)
        except ValidationError as e:
            # We are handling possibly misconfigured specs and want to do a
            # best-effort job to get a reasonable interface out of it:
            # remove each offending field and retry.
            pruned = copy.deepcopy(obj)
            for error in e.errors():
                location = error["loc"]
                parent = pruned
                for key in location[:-1]:
                    parent = parent[key]
                parent.pop(location[-1], None)
            return cls.parse_obj(pruned)

    @classmethod
    def from_spec_dict(cls, spec_dict: dict) -> "OpenAPISpec":
        """Get an OpenAPI spec from a dict."""
        return cls.parse_obj(spec_dict)

    @classmethod
    def from_text(cls, text: str) -> "OpenAPISpec":
        """Get an OpenAPI spec from a text (JSON first, then YAML)."""
        try:
            spec_dict = json.loads(text)
        except json.JSONDecodeError:
            spec_dict = yaml.safe_load(text)
        return cls.from_spec_dict(spec_dict)

    @classmethod
    def from_file(cls, path: Union[str, Path]) -> "OpenAPISpec":
        """Get an OpenAPI spec from a file path."""
        path_ = path if isinstance(path, Path) else Path(path)
        if not path_.exists():
            raise FileNotFoundError(f"{path} does not exist")
        with path_.open("r") as f:
            return cls.from_text(f.read())

    @classmethod
    def from_url(cls, url: str) -> "OpenAPISpec":
        """Get an OpenAPI spec from a URL."""
        response = requests.get(url)
        return cls.from_text(response.text)

    @property
    def base_url(self) -> str:
        """Get the base url (first server entry)."""
        return self.servers[0].url

    def get_methods_for_path(self, path: str) -> List[str]:
        """Return a list of valid methods for the specified path."""
        path_item = self._get_path_strict(path)
        return [
            verb.value
            for verb in HTTPVerb
            if isinstance(getattr(path_item, verb.value, None), Operation)
        ]

    def get_parameters_for_path(self, path: str) -> List[Parameter]:
        """Return the path-level parameters, with all $refs fully resolved."""
        path_item = self._get_path_strict(path)
        if not path_item.parameters:
            return []
        return [
            self._get_root_referenced_parameter(p) if isinstance(p, Reference) else p
            for p in path_item.parameters
        ]

    def get_operation(self, path: str, method: str) -> Operation:
        """Get the operation object for a given path and HTTP method."""
        path_item = self._get_path_strict(path)
        operation_obj = getattr(path_item, method, None)
        if not isinstance(operation_obj, Operation):
            raise ValueError(f"No {method} method found for {path}")
        return operation_obj

    def get_parameters_for_operation(self, operation: Operation) -> List[Parameter]:
        """Return the operation's parameters, with all $refs fully resolved."""
        if not operation.parameters:
            return []
        return [
            self._get_root_referenced_parameter(p) if isinstance(p, Reference) else p
            for p in operation.parameters
        ]

    def get_request_body_for_operation(
        self, operation: Operation
    ) -> Optional[RequestBody]:
        """Get the request body for a given operation, resolving $refs."""
        request_body = operation.requestBody
        if isinstance(request_body, Reference):
            request_body = self._get_root_referenced_request_body(request_body)
        return request_body

    @staticmethod
    def get_cleaned_operation_id(operation: Operation, path: str, method: str) -> str:
        """Get a cleaned operation id from an operation id."""
        operation_id = operation.operationId
        if operation_id is None:
            # Synthesize an id from the path + method, mapping all
            # punctuation to underscores.
            sanitized_path = re.sub(r"[^a-zA-Z0-9]", "_", path.lstrip("/"))
            operation_id = f"{sanitized_path}_{method}"
        return operation_id.replace("-", "_").replace(".", "_").replace("/", "_")
84d5f9b19804-0 | Source code for langchain.utilities.apify
from typing import Any, Callable, Dict, Optional
from pydantic import BaseModel, root_validator
from langchain.document_loaders import ApifyDatasetLoader
from langchain.document_loaders.base import Document
from langchain.utils import get_from_dict_or_env
class ApifyWrapper(BaseModel):
    """Wrapper around Apify.

    To use, you should have the ``apify-client`` python package installed,
    and the environment variable ``APIFY_API_TOKEN`` set with your API key, or pass
    `apify_api_token` as a named parameter to the constructor.
    """

    # Sync and async Apify SDK clients; populated by ``validate_environment``
    # so the apify-client import stays optional until the wrapper is built.
    apify_client: Any
    apify_client_async: Any

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate environment.

        Validate that an Apify API token is set and the apify-client
        Python package exists in the current environment.
        """
        apify_api_token = get_from_dict_or_env(
            values, "apify_api_token", "APIFY_API_TOKEN"
        )

        try:
            from apify_client import ApifyClient, ApifyClientAsync

            values["apify_client"] = ApifyClient(apify_api_token)
            values["apify_client_async"] = ApifyClientAsync(apify_api_token)
        except ImportError:
            # Raise ImportError (not ValueError) for a missing dependency,
            # matching the convention used by the sibling utility wrappers.
            raise ImportError(
                "Could not import apify-client Python package. "
                "Please install it with `pip install apify-client`."
            )

        return values

    @staticmethod
    def _dataset_loader(
        run_info: Dict, dataset_mapping_function: Callable[[Dict], Document]
    ) -> ApifyDatasetLoader:
        """Build a loader over the default dataset of a finished run.

        Shared tail of all four ``call_*`` methods (deduplication helper).
        """
        return ApifyDatasetLoader(
            dataset_id=run_info["defaultDatasetId"],
            dataset_mapping_function=dataset_mapping_function,
        )

    def call_actor(
        self,
        actor_id: str,
        run_input: Dict,
        dataset_mapping_function: Callable[[Dict], Document],
        *,
        build: Optional[str] = None,
        memory_mbytes: Optional[int] = None,
        timeout_secs: Optional[int] = None,
    ) -> ApifyDatasetLoader:
        """Run an Actor on the Apify platform and wait for results to be ready.

        Args:
            actor_id (str): The ID or name of the Actor on the Apify platform.
            run_input (Dict): The input object of the Actor that you're trying to run.
            dataset_mapping_function (Callable): A function that takes a single
                dictionary (an Apify dataset item) and converts it to an
                instance of the Document class.
            build (str, optional): Optionally specifies the actor build to run.
                It can be either a build tag or build number.
            memory_mbytes (int, optional): Optional memory limit for the run,
                in megabytes.
            timeout_secs (int, optional): Optional timeout for the run, in seconds.

        Returns:
            ApifyDatasetLoader: A loader that will fetch the records from the
                Actor run's default dataset.
        """
        actor_call = self.apify_client.actor(actor_id).call(
            run_input=run_input,
            build=build,
            memory_mbytes=memory_mbytes,
            timeout_secs=timeout_secs,
        )
        return self._dataset_loader(actor_call, dataset_mapping_function)

    async def acall_actor(
        self,
        actor_id: str,
        run_input: Dict,
        dataset_mapping_function: Callable[[Dict], Document],
        *,
        build: Optional[str] = None,
        memory_mbytes: Optional[int] = None,
        timeout_secs: Optional[int] = None,
    ) -> ApifyDatasetLoader:
        """Run an Actor on the Apify platform and wait for results to be ready.

        Args:
            actor_id (str): The ID or name of the Actor on the Apify platform.
            run_input (Dict): The input object of the Actor that you're trying to run.
            dataset_mapping_function (Callable): A function that takes a single
                dictionary (an Apify dataset item) and converts it to
                an instance of the Document class.
            build (str, optional): Optionally specifies the actor build to run.
                It can be either a build tag or build number.
            memory_mbytes (int, optional): Optional memory limit for the run,
                in megabytes.
            timeout_secs (int, optional): Optional timeout for the run, in seconds.

        Returns:
            ApifyDatasetLoader: A loader that will fetch the records from the
                Actor run's default dataset.
        """
        actor_call = await self.apify_client_async.actor(actor_id).call(
            run_input=run_input,
            build=build,
            memory_mbytes=memory_mbytes,
            timeout_secs=timeout_secs,
        )
        return self._dataset_loader(actor_call, dataset_mapping_function)

    def call_actor_task(
        self,
        task_id: str,
        task_input: Dict,
        dataset_mapping_function: Callable[[Dict], Document],
        *,
        build: Optional[str] = None,
        memory_mbytes: Optional[int] = None,
        timeout_secs: Optional[int] = None,
    ) -> ApifyDatasetLoader:
        """Run a saved Actor task on Apify and wait for results to be ready.

        Args:
            task_id (str): The ID or name of the task on the Apify platform.
            task_input (Dict): The input object of the task that you're trying to run.
                Overrides the task's saved input.
            dataset_mapping_function (Callable): A function that takes a single
                dictionary (an Apify dataset item) and converts it to an
                instance of the Document class.
            build (str, optional): Optionally specifies the actor build to run.
                It can be either a build tag or build number.
            memory_mbytes (int, optional): Optional memory limit for the run,
                in megabytes.
            timeout_secs (int, optional): Optional timeout for the run, in seconds.

        Returns:
            ApifyDatasetLoader: A loader that will fetch the records from the
                task run's default dataset.
        """
        task_call = self.apify_client.task(task_id).call(
            task_input=task_input,
            build=build,
            memory_mbytes=memory_mbytes,
            timeout_secs=timeout_secs,
        )
        return self._dataset_loader(task_call, dataset_mapping_function)

    async def acall_actor_task(
        self,
        task_id: str,
        task_input: Dict,
        dataset_mapping_function: Callable[[Dict], Document],
        *,
        build: Optional[str] = None,
        memory_mbytes: Optional[int] = None,
        timeout_secs: Optional[int] = None,
    ) -> ApifyDatasetLoader:
        """Run a saved Actor task on Apify and wait for results to be ready.

        Args:
            task_id (str): The ID or name of the task on the Apify platform.
            task_input (Dict): The input object of the task that you're trying to run.
                Overrides the task's saved input.
            dataset_mapping_function (Callable): A function that takes a single
                dictionary (an Apify dataset item) and converts it to an
                instance of the Document class.
            build (str, optional): Optionally specifies the actor build to run.
                It can be either a build tag or build number.
            memory_mbytes (int, optional): Optional memory limit for the run,
                in megabytes.
            timeout_secs (int, optional): Optional timeout for the run, in seconds.

        Returns:
            ApifyDatasetLoader: A loader that will fetch the records from the
                task run's default dataset.
        """
        task_call = await self.apify_client_async.task(task_id).call(
            task_input=task_input,
            build=build,
            memory_mbytes=memory_mbytes,
            timeout_secs=timeout_secs,
        )
        return self._dataset_loader(task_call, dataset_mapping_function)
10275b7d5f2a-0 | Source code for langchain.utilities.jira
"""Util that calls Jira."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.tools.jira.prompt import (
JIRA_CATCH_ALL_PROMPT,
JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
JIRA_GET_ALL_PROJECTS_PROMPT,
JIRA_ISSUE_CREATE_PROMPT,
JIRA_JQL_PROMPT,
)
from langchain.utils import get_from_dict_or_env
# TODO: think about error handling, more specific api specs, and jql/project limits
class JiraAPIWrapper(BaseModel):
    """Wrapper for Jira API."""

    jira: Any  #: :meta private:
    confluence: Any  #: :meta private:
    jira_username: Optional[str] = None
    jira_api_token: Optional[str] = None
    jira_instance_url: Optional[str] = None

    # Operation modes dispatched by ``run``; descriptions are the prompts
    # handed to the agent when it selects a tool.
    operations: List[Dict] = [
        {
            "mode": "jql",
            "name": "JQL Query",
            "description": JIRA_JQL_PROMPT,
        },
        {
            "mode": "get_projects",
            "name": "Get Projects",
            "description": JIRA_GET_ALL_PROJECTS_PROMPT,
        },
        {
            "mode": "create_issue",
            "name": "Create Issue",
            "description": JIRA_ISSUE_CREATE_PROMPT,
        },
        {
            "mode": "other",
            "name": "Catch all Jira API call",
            "description": JIRA_CATCH_ALL_PROMPT,
        },
        {
            "mode": "create_page",
            "name": "Create confluence page",
            "description": JIRA_CONFLUENCE_PAGE_CREATE_PROMPT,
        },
    ]

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def list(self) -> List[Dict]:
        """Return the list of operations supported by this wrapper."""
        return self.operations

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        jira_username = get_from_dict_or_env(values, "jira_username", "JIRA_USERNAME")
        values["jira_username"] = jira_username

        jira_api_token = get_from_dict_or_env(
            values, "jira_api_token", "JIRA_API_TOKEN"
        )
        values["jira_api_token"] = jira_api_token

        jira_instance_url = get_from_dict_or_env(
            values, "jira_instance_url", "JIRA_INSTANCE_URL"
        )
        values["jira_instance_url"] = jira_instance_url

        try:
            from atlassian import Confluence, Jira
        except ImportError:
            raise ImportError(
                "atlassian-python-api is not installed. "
                "Please install it with `pip install atlassian-python-api`"
            )

        jira = Jira(
            url=jira_instance_url,
            username=jira_username,
            password=jira_api_token,
            cloud=True,
        )
        confluence = Confluence(
            url=jira_instance_url,
            username=jira_username,
            password=jira_api_token,
            cloud=True,
        )
        values["jira"] = jira
        values["confluence"] = confluence

        return values

    def parse_issues(self, issues: Dict) -> List[dict]:
        """Flatten a JQL search response into a list of summary dicts.

        Args:
            issues: Raw JSON response from ``Jira.jql`` (must contain an
                ``"issues"`` list).

        Returns:
            One dict per issue with key, summary, created date, assignee,
            priority, status, and all related issues.
        """
        parsed = []
        for issue in issues["issues"]:
            fields = issue["fields"]
            try:
                assignee = fields["assignee"]["displayName"]
            # "assignee" may be absent (KeyError) or null (TypeError on
            # subscripting None) for unassigned issues.
            except (KeyError, TypeError):
                assignee = "None"
            # BUG FIX: the previous implementation rebound a single dict on
            # every iteration, so only the last issue link survived.
            # "related_issues" is now a list holding every link.
            rel_issues = []
            for related_issue in fields["issuelinks"]:
                if "inwardIssue" in related_issue:
                    rel_issues.append(
                        {
                            "type": related_issue["type"]["inward"],
                            "key": related_issue["inwardIssue"]["key"],
                            "summary": related_issue["inwardIssue"]["fields"][
                                "summary"
                            ],
                        }
                    )
                if "outwardIssue" in related_issue:
                    rel_issues.append(
                        {
                            "type": related_issue["type"]["outward"],
                            "key": related_issue["outwardIssue"]["key"],
                            "summary": related_issue["outwardIssue"]["fields"][
                                "summary"
                            ],
                        }
                    )
            parsed.append(
                {
                    "key": issue["key"],
                    "summary": fields["summary"],
                    # Keep only the date portion of the ISO timestamp.
                    "created": fields["created"][0:10],
                    "assignee": assignee,
                    "priority": fields["priority"]["name"],
                    "status": fields["status"]["name"],
                    "related_issues": rel_issues,
                }
            )
        return parsed

    def parse_projects(self, projects: List[dict]) -> List[dict]:
        """Reduce raw project objects to id/key/name/type/style dicts."""
        return [
            {
                "id": project["id"],
                "key": project["key"],
                "name": project["name"],
                "type": project["projectTypeKey"],
                "style": project["style"],
            }
            for project in projects
        ]

    def search(self, query: str) -> str:
        """Run a JQL query and return a human-readable summary string."""
        issues = self.jira.jql(query)
        parsed_issues = self.parse_issues(issues)
        return "Found " + str(len(parsed_issues)) + " issues:\n" + str(parsed_issues)

    def project(self) -> str:
        """List all projects visible to the authenticated user."""
        projects = self.jira.projects()
        parsed_projects = self.parse_projects(projects)
        return (
            "Found " + str(len(parsed_projects)) + " projects:\n" + str(parsed_projects)
        )

    def issue_create(self, query: str) -> str:
        """Create a Jira issue; ``query`` is a JSON object of issue fields."""
        # ``json`` is part of the standard library; the old try/except
        # ImportError around this import (suggesting `pip install json`)
        # was dead code and has been removed.
        import json

        params = json.loads(query)
        return self.jira.issue_create(fields=dict(params))

    def page_create(self, query: str) -> str:
        """Create a Confluence page; ``query`` is a JSON object of kwargs
        forwarded to ``Confluence.create_page``."""
        import json  # stdlib; see note in issue_create

        params = json.loads(query)
        return self.confluence.create_page(**dict(params))

    def other(self, query: str) -> str:
        """Evaluate an arbitrary Jira API expression given as Python source.

        SECURITY NOTE: this exec()s ``query`` with full access to ``self``;
        it must only ever receive trusted input.
        """
        context = {"self": self}
        exec(f"result = {query}", context)
        result = context["result"]
        return str(result)

    def run(self, mode: str, query: str) -> str:
        """Dispatch ``query`` to the handler selected by ``mode``.

        Raises:
            ValueError: If ``mode`` is not one of the supported operations.
        """
        if mode == "jql":
            return self.search(query)
        elif mode == "get_projects":
            return self.project()
        elif mode == "create_issue":
            return self.issue_create(query)
        elif mode == "other":
            return self.other(query)
        elif mode == "create_page":
            return self.page_create(query)
        else:
            raise ValueError(f"Got unexpected mode {mode}")
b68f7bd371b5-0 | Source code for langchain.utilities.awslambda
"""Util that calls Lambda."""
import json
from typing import Any, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
class LambdaWrapper(BaseModel):
    """Wrapper for AWS Lambda SDK.

    Docs for using:

    1. pip install boto3
    2. Create a lambda function using the AWS Console or CLI
    3. Run `aws configure` and enter your AWS credentials
    """

    lambda_client: Any  #: :meta private:
    # Name of the Lambda function invoked by ``run``.
    function_name: Optional[str] = None
    awslambda_tool_name: Optional[str] = None
    awslambda_tool_description: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that python package exists in environment."""
        try:
            import boto3
        except ImportError:
            raise ImportError(
                "boto3 is not installed. Please install it with `pip install boto3`"
            )
        # Credentials/region come from the standard boto3 configuration
        # chain (env vars, ~/.aws/...).
        values["lambda_client"] = boto3.client("lambda")
        # (Removed the no-op ``values["function_name"] = values["function_name"]``.)
        return values

    def run(self, query: str) -> str:
        """Invoke the configured Lambda function and parse the result.

        Args:
            query: Payload text, sent to the function as ``{"body": query}``.

        Returns:
            "Result: <answer>" on success, or a short failure message.
        """
        res = self.lambda_client.invoke(
            FunctionName=self.function_name,
            InvocationType="RequestResponse",
            Payload=json.dumps({"body": query}),
        )
        try:
            payload_stream = res["Payload"]
            payload_string = payload_stream.read().decode("utf-8")
            answer = json.loads(payload_string)["body"]
        # BUG FIX: the old handler caught StopIteration, which none of
        # these calls raise. Catch what can actually occur: a missing key,
        # invalid JSON (json.JSONDecodeError subclasses ValueError), or a
        # bad UTF-8 payload (UnicodeDecodeError subclasses ValueError).
        except (KeyError, ValueError):
            return "Failed to parse response from Lambda"

        if answer is None or answer == "":
            # We don't want to return the assumption alone if answer is empty
            return "Request failed."
        else:
            return f"Result: {answer}"
c747911cb81d-0 | Source code for langchain.utilities.bing_search
"""Util that calls Bing Search.
In order to set this up, follow instructions at:
https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e
"""
from typing import Dict, List
import requests
from pydantic import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class BingSearchAPIWrapper(BaseModel):
    """Wrapper for Bing Search API.

    In order to set this up, follow instructions at:
    https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e
    """

    bing_subscription_key: str
    bing_search_url: str
    k: int = 10  # number of results fetched by ``run``

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def _bing_search_results(self, search_term: str, count: int) -> List[dict]:
        """Call the Bing Web Search endpoint and return the raw web-page hits."""
        headers = {"Ocp-Apim-Subscription-Key": self.bing_subscription_key}
        params = {
            "q": search_term,
            "count": count,
            "textDecorations": True,
            "textFormat": "HTML",
        }
        response = requests.get(
            self.bing_search_url, headers=headers, params=params  # type: ignore
        )
        response.raise_for_status()
        search_results = response.json()
        # Robustness: when the query yields no hits the response presumably
        # omits the "webPages" key entirely (TODO confirm against the Bing
        # v7 API reference). Return [] instead of raising KeyError so the
        # callers' "no results" branches are actually reachable.
        return search_results.get("webPages", {}).get("value", [])

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and endpoint exists in environment."""
        bing_subscription_key = get_from_dict_or_env(
            values, "bing_subscription_key", "BING_SUBSCRIPTION_KEY"
        )
        values["bing_subscription_key"] = bing_subscription_key

        bing_search_url = get_from_dict_or_env(
            values,
            "bing_search_url",
            "BING_SEARCH_URL",
            # default="https://api.bing.microsoft.com/v7.0/search",
        )
        values["bing_search_url"] = bing_search_url

        return values

    def run(self, query: str) -> str:
        """Run query through BingSearch and return the joined snippets."""
        results = self._bing_search_results(query, count=self.k)
        if not results:
            return "No good Bing Search Result was found"
        snippets = [result["snippet"] for result in results]
        return " ".join(snippets)

    def results(self, query: str, num_results: int) -> List[Dict]:
        """Run query through BingSearch and return metadata.

        Args:
            query: The query to search for.
            num_results: The number of results to return.

        Returns:
            A list of dictionaries with the following keys:
                snippet - The description of the result.
                title - The title of the result.
                link - The link to the result.
        """
        results = self._bing_search_results(query, count=num_results)
        if not results:
            return [{"Result": "No good Bing Search Result was found"}]
        return [
            {
                "snippet": result["snippet"],
                "title": result["name"],
                "link": result["url"],
            }
            for result in results
        ]
d3daa5164eb7-0 | Model I/O
LangChain provides interfaces and integrations for working with language models.
Prompts
Models
Output Parsers | https://api.python.langchain.com/en/stable/model_io.html |
b0729971b7ce-0 | Data connection
LangChain has a number of modules that help you load, structure, store, and retrieve documents.
Document Loaders
Document Transformers
Embeddings
Vector Stores
Retrievers | https://api.python.langchain.com/en/stable/data_connection.html |
8fcc09bdad6d-0 | Models
LangChain provides interfaces and integrations for a number of different types of models.
LLMs
Chat Models | https://api.python.langchain.com/en/stable/models.html |
941f37850735-0 | API Reference
Full documentation on all methods, classes, and APIs in the LangChain Python package.
Abstractions
Base classes
Core
Model I/O
Data connection
Chains
Agents
Memory
Callbacks
Additional
Utilities
Experimental | https://api.python.langchain.com/en/stable/index.html |
2cfb94a2e9a8-0 | Prompts
The reference guides here all relate to objects for working with Prompts.
Prompt Templates
Example Selector | https://api.python.langchain.com/en/stable/prompts.html |
86df0f796b6d-0 | Please activate JavaScript to enable the search functionality. | https://api.python.langchain.com/en/stable/search.html |
a1409c1b5e0f-0 | Index
_
| A
| B
| C
| D
| E
| F
| G
| H
| I
| J
| K
| L
| M
| N
| O
| P
| Q
| R
| S
| T
| U
| V
| W
| Y
| Z
_
__call__() (langchain.llms.AI21 method)
(langchain.llms.AlephAlpha method)
(langchain.llms.AmazonAPIGateway method)
(langchain.llms.Anthropic method)
(langchain.llms.Anyscale method)
(langchain.llms.Aviary method)
(langchain.llms.AzureMLOnlineEndpoint method)
(langchain.llms.AzureOpenAI method)
(langchain.llms.Banana method)
(langchain.llms.Baseten method)
(langchain.llms.Beam method)
(langchain.llms.Bedrock method)
(langchain.llms.CerebriumAI method)
(langchain.llms.Clarifai method)
(langchain.llms.Cohere method)
(langchain.llms.CTransformers method)
(langchain.llms.Databricks method)
(langchain.llms.DeepInfra method)
(langchain.llms.FakeListLLM method)
(langchain.llms.ForefrontAI method)
(langchain.llms.GooglePalm method)
(langchain.llms.GooseAI method)
(langchain.llms.GPT4All method)
(langchain.llms.HuggingFaceEndpoint method)
(langchain.llms.HuggingFaceHub method)
(langchain.llms.HuggingFacePipeline method)
(langchain.llms.HuggingFaceTextGenInference method)
(langchain.llms.HumanInputLLM method)
(langchain.llms.LlamaCpp method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-1 | (langchain.llms.LlamaCpp method)
(langchain.llms.ManifestWrapper method)
(langchain.llms.Modal method)
(langchain.llms.MosaicML method)
(langchain.llms.NLPCloud method)
(langchain.llms.OctoAIEndpoint method)
(langchain.llms.OpenAI method)
(langchain.llms.OpenAIChat method)
(langchain.llms.OpenLLM method)
(langchain.llms.OpenLM method)
(langchain.llms.Petals method)
(langchain.llms.PipelineAI method)
(langchain.llms.PredictionGuard method)
(langchain.llms.PromptLayerOpenAI method)
(langchain.llms.PromptLayerOpenAIChat method)
(langchain.llms.Replicate method)
(langchain.llms.RWKV method)
(langchain.llms.SagemakerEndpoint method)
(langchain.llms.SelfHostedHuggingFaceLLM method)
(langchain.llms.SelfHostedPipeline method)
(langchain.llms.StochasticAI method)
(langchain.llms.TextGen method)
(langchain.llms.VertexAI method)
(langchain.llms.Writer method)
A
aadd_documents() (langchain.retrievers.TimeWeightedVectorStoreRetriever method)
(langchain.vectorstores.VectorStore method)
aadd_texts() (langchain.vectorstores.VectorStore method)
aapply() (langchain.chains.ConversationChain method)
(langchain.chains.LLMChain method)
aapply_and_parse() (langchain.chains.ConversationChain method)
(langchain.chains.LLMChain method)
acall() (langchain.chains.AnalyzeDocumentChain method)
(langchain.chains.APIChain method)
(langchain.chains.ChatVectorDBChain method)
(langchain.chains.ConstitutionalChain method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-2 | (langchain.chains.ConstitutionalChain method)
(langchain.chains.ConversationalRetrievalChain method)
(langchain.chains.ConversationChain method)
(langchain.chains.FlareChain method)
(langchain.chains.GraphCypherQAChain method)
(langchain.chains.GraphQAChain method)
(langchain.chains.HypotheticalDocumentEmbedder method)
(langchain.chains.KuzuQAChain method)
(langchain.chains.LLMBashChain method)
(langchain.chains.LLMChain method)
(langchain.chains.LLMCheckerChain method)
(langchain.chains.LLMMathChain method)
(langchain.chains.LLMRequestsChain method)
(langchain.chains.LLMRouterChain method)
(langchain.chains.LLMSummarizationCheckerChain method)
(langchain.chains.MapReduceChain method)
(langchain.chains.MapReduceDocumentsChain method)
(langchain.chains.MapRerankDocumentsChain method)
(langchain.chains.MultiPromptChain method)
(langchain.chains.MultiRetrievalQAChain method)
(langchain.chains.MultiRouteChain method)
(langchain.chains.NatBotChain method)
(langchain.chains.NebulaGraphQAChain method)
(langchain.chains.OpenAIModerationChain method)
(langchain.chains.OpenAPIEndpointChain method)
(langchain.chains.PALChain method)
(langchain.chains.QAGenerationChain method)
(langchain.chains.QAWithSourcesChain method)
(langchain.chains.RefineDocumentsChain method)
(langchain.chains.RetrievalQA method)
(langchain.chains.RetrievalQAWithSourcesChain method)
(langchain.chains.RouterChain method)
(langchain.chains.SequentialChain method)
(langchain.chains.SimpleSequentialChain method)
(langchain.chains.SQLDatabaseChain method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-3 | (langchain.chains.SimpleSequentialChain method)
(langchain.chains.SQLDatabaseChain method)
(langchain.chains.SQLDatabaseSequentialChain method)
(langchain.chains.StuffDocumentsChain method)
(langchain.chains.TransformChain method)
(langchain.chains.VectorDBQA method)
(langchain.chains.VectorDBQAWithSourcesChain method)
acall_actor() (langchain.utilities.ApifyWrapper method)
acall_actor_task() (langchain.utilities.ApifyWrapper method)
access_token (langchain.document_loaders.DocugamiLoader attribute)
account_sid (langchain.utilities.TwilioAPIWrapper attribute)
acombine_docs() (langchain.chains.MapReduceDocumentsChain method)
(langchain.chains.MapRerankDocumentsChain method)
(langchain.chains.RefineDocumentsChain method)
(langchain.chains.StuffDocumentsChain method)
acompress_documents() (langchain.retrievers.document_compressors.CohereRerank method)
(langchain.retrievers.document_compressors.DocumentCompressorPipeline method)
(langchain.retrievers.document_compressors.EmbeddingsFilter method)
(langchain.retrievers.document_compressors.LLMChainExtractor method)
(langchain.retrievers.document_compressors.LLMChainFilter method)
AcreomLoader (class in langchain.document_loaders)
action_id (langchain.tools.ZapierNLARunAction attribute)
add_ai_message() (langchain.schema.BaseChatMessageHistory method)
add_bos_token (langchain.llms.TextGen attribute)
add_documents() (langchain.retrievers.TimeWeightedVectorStoreRetriever method)
(langchain.retrievers.WeaviateHybridSearchRetriever method)
(langchain.vectorstores.VectorStore method)
add_embeddings() (langchain.vectorstores.FAISS method)
(langchain.vectorstores.Hologres method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-4 | (langchain.vectorstores.Hologres method)
add_example() (langchain.prompts.example_selector.LengthBasedExampleSelector method)
(langchain.prompts.example_selector.NGramOverlapExampleSelector method)
(langchain.prompts.example_selector.SemanticSimilarityExampleSelector method)
(langchain.prompts.LengthBasedExampleSelector method)
(langchain.prompts.NGramOverlapExampleSelector method)
(langchain.prompts.SemanticSimilarityExampleSelector method)
add_memories() (langchain.experimental.GenerativeAgentMemory method)
add_memory() (langchain.experimental.GenerativeAgentMemory method)
add_message() (langchain.memory.CassandraChatMessageHistory method)
(langchain.memory.ChatMessageHistory method)
(langchain.memory.CosmosDBChatMessageHistory method)
(langchain.memory.DynamoDBChatMessageHistory method)
(langchain.memory.FileChatMessageHistory method)
(langchain.memory.MomentoChatMessageHistory method)
(langchain.memory.MongoDBChatMessageHistory method)
(langchain.memory.PostgresChatMessageHistory method)
(langchain.memory.RedisChatMessageHistory method)
(langchain.memory.SQLChatMessageHistory method)
(langchain.memory.ZepChatMessageHistory method)
(langchain.schema.BaseChatMessageHistory method)
add_note() (langchain.schema.OutputParserException method)
add_texts() (langchain.retrievers.ElasticSearchBM25Retriever method)
(langchain.retrievers.MilvusRetriever method)
(langchain.retrievers.PineconeHybridSearchRetriever method)
(langchain.retrievers.ZillizRetriever method)
(langchain.vectorstores.AlibabaCloudOpenSearch method)
(langchain.vectorstores.AnalyticDB method)
(langchain.vectorstores.Annoy method)
(langchain.vectorstores.AtlasDB method)
(langchain.vectorstores.AwaDB method)
(langchain.vectorstores.AzureSearch method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-5 | (langchain.vectorstores.AwaDB method)
(langchain.vectorstores.AzureSearch method)
(langchain.vectorstores.Cassandra method)
(langchain.vectorstores.Chroma method)
(langchain.vectorstores.Clarifai method)
(langchain.vectorstores.Clickhouse method)
(langchain.vectorstores.DeepLake method)
(langchain.vectorstores.ElasticVectorSearch method)
(langchain.vectorstores.FAISS method)
(langchain.vectorstores.Hologres method)
(langchain.vectorstores.LanceDB method)
(langchain.vectorstores.MatchingEngine method)
(langchain.vectorstores.Milvus method)
(langchain.vectorstores.MongoDBAtlasVectorSearch method)
(langchain.vectorstores.MyScale method)
(langchain.vectorstores.OpenSearchVectorSearch method)
(langchain.vectorstores.Pinecone method)
(langchain.vectorstores.Qdrant method)
(langchain.vectorstores.Redis method)
(langchain.vectorstores.Rockset method)
(langchain.vectorstores.SingleStoreDB method)
(langchain.vectorstores.SKLearnVectorStore method)
(langchain.vectorstores.StarRocks method)
(langchain.vectorstores.SupabaseVectorStore method)
(langchain.vectorstores.Tair method)
(langchain.vectorstores.Tigris method)
(langchain.vectorstores.Typesense method)
(langchain.vectorstores.Vectara method)
(langchain.vectorstores.VectorStore method)
(langchain.vectorstores.Weaviate method)
add_user_message() (langchain.schema.BaseChatMessageHistory method)
add_vectors() (langchain.vectorstores.SupabaseVectorStore method)
add_video_info (langchain.document_loaders.GoogleApiYoutubeLoader attribute)
adelete() (langchain.utilities.TextRequestsWrapper method)
aembed_documents() (langchain.embeddings.OpenAIEmbeddings method)
aembed_query() (langchain.embeddings.OpenAIEmbeddings method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-6 | aembed_query() (langchain.embeddings.OpenAIEmbeddings method)
afrom_documents() (langchain.vectorstores.VectorStore class method)
afrom_texts() (langchain.vectorstores.VectorStore class method)
age (langchain.experimental.GenerativeAgent attribute)
agenerate() (langchain.chains.ConversationChain method)
(langchain.chains.LLMChain method)
(langchain.llms.AI21 method)
(langchain.llms.AlephAlpha method)
(langchain.llms.AmazonAPIGateway method)
(langchain.llms.Anthropic method)
(langchain.llms.Anyscale method)
(langchain.llms.Aviary method)
(langchain.llms.AzureMLOnlineEndpoint method)
(langchain.llms.AzureOpenAI method)
(langchain.llms.Banana method)
(langchain.llms.Baseten method)
(langchain.llms.Beam method)
(langchain.llms.Bedrock method)
(langchain.llms.CerebriumAI method)
(langchain.llms.Clarifai method)
(langchain.llms.Cohere method)
(langchain.llms.CTransformers method)
(langchain.llms.Databricks method)
(langchain.llms.DeepInfra method)
(langchain.llms.FakeListLLM method)
(langchain.llms.ForefrontAI method)
(langchain.llms.GooglePalm method)
(langchain.llms.GooseAI method)
(langchain.llms.GPT4All method)
(langchain.llms.HuggingFaceEndpoint method)
(langchain.llms.HuggingFaceHub method)
(langchain.llms.HuggingFacePipeline method)
(langchain.llms.HuggingFaceTextGenInference method)
(langchain.llms.HumanInputLLM method)
(langchain.llms.LlamaCpp method)
(langchain.llms.ManifestWrapper method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-7 | (langchain.llms.LlamaCpp method)
(langchain.llms.ManifestWrapper method)
(langchain.llms.Modal method)
(langchain.llms.MosaicML method)
(langchain.llms.NLPCloud method)
(langchain.llms.OctoAIEndpoint method)
(langchain.llms.OpenAI method)
(langchain.llms.OpenAIChat method)
(langchain.llms.OpenLLM method)
(langchain.llms.OpenLM method)
(langchain.llms.Petals method)
(langchain.llms.PipelineAI method)
(langchain.llms.PredictionGuard method)
(langchain.llms.PromptLayerOpenAI method)
(langchain.llms.PromptLayerOpenAIChat method)
(langchain.llms.Replicate method)
(langchain.llms.RWKV method)
(langchain.llms.SagemakerEndpoint method)
(langchain.llms.SelfHostedHuggingFaceLLM method)
(langchain.llms.SelfHostedPipeline method)
(langchain.llms.StochasticAI method)
(langchain.llms.TextGen method)
(langchain.llms.VertexAI method)
(langchain.llms.Writer method)
agenerate_prompt() (langchain.llms.AI21 method)
(langchain.llms.AlephAlpha method)
(langchain.llms.AmazonAPIGateway method)
(langchain.llms.Anthropic method)
(langchain.llms.Anyscale method)
(langchain.llms.Aviary method)
(langchain.llms.AzureMLOnlineEndpoint method)
(langchain.llms.AzureOpenAI method)
(langchain.llms.Banana method)
(langchain.llms.Baseten method)
(langchain.llms.Beam method)
(langchain.llms.Bedrock method)
(langchain.llms.CerebriumAI method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-8 | (langchain.llms.CerebriumAI method)
(langchain.llms.Clarifai method)
(langchain.llms.Cohere method)
(langchain.llms.CTransformers method)
(langchain.llms.Databricks method)
(langchain.llms.DeepInfra method)
(langchain.llms.FakeListLLM method)
(langchain.llms.ForefrontAI method)
(langchain.llms.GooglePalm method)
(langchain.llms.GooseAI method)
(langchain.llms.GPT4All method)
(langchain.llms.HuggingFaceEndpoint method)
(langchain.llms.HuggingFaceHub method)
(langchain.llms.HuggingFacePipeline method)
(langchain.llms.HuggingFaceTextGenInference method)
(langchain.llms.HumanInputLLM method)
(langchain.llms.LlamaCpp method)
(langchain.llms.ManifestWrapper method)
(langchain.llms.Modal method)
(langchain.llms.MosaicML method)
(langchain.llms.NLPCloud method)
(langchain.llms.OctoAIEndpoint method)
(langchain.llms.OpenAI method)
(langchain.llms.OpenAIChat method)
(langchain.llms.OpenLLM method)
(langchain.llms.OpenLM method)
(langchain.llms.Petals method)
(langchain.llms.PipelineAI method)
(langchain.llms.PredictionGuard method)
(langchain.llms.PromptLayerOpenAI method)
(langchain.llms.PromptLayerOpenAIChat method)
(langchain.llms.Replicate method)
(langchain.llms.RWKV method)
(langchain.llms.SagemakerEndpoint method)
(langchain.llms.SelfHostedHuggingFaceLLM method)
(langchain.llms.SelfHostedPipeline method)
(langchain.llms.StochasticAI method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-9 | (langchain.llms.StochasticAI method)
(langchain.llms.TextGen method)
(langchain.llms.VertexAI method)
(langchain.llms.Writer method)
agent (langchain.agents.AgentExecutor attribute)
AgentAction (class in langchain.schema)
AgentFinish (class in langchain.schema)
AgentType (class in langchain.agents)
aget() (langchain.utilities.TextRequestsWrapper method)
aget_relevant_documents() (langchain.retrievers.AmazonKendraRetriever method)
(langchain.retrievers.ArxivRetriever method)
(langchain.retrievers.AzureCognitiveSearchRetriever method)
(langchain.retrievers.ChatGPTPluginRetriever method)
(langchain.retrievers.ContextualCompressionRetriever method)
(langchain.retrievers.DataberryRetriever method)
(langchain.retrievers.DocArrayRetriever method)
(langchain.retrievers.ElasticSearchBM25Retriever method)
(langchain.retrievers.KNNRetriever method)
(langchain.retrievers.LlamaIndexGraphRetriever method)
(langchain.retrievers.LlamaIndexRetriever method)
(langchain.retrievers.MergerRetriever method)
(langchain.retrievers.MetalRetriever method)
(langchain.retrievers.MilvusRetriever method)
(langchain.retrievers.MultiQueryRetriever method)
(langchain.retrievers.PineconeHybridSearchRetriever method)
(langchain.retrievers.PubMedRetriever method)
(langchain.retrievers.RemoteLangChainRetriever method)
(langchain.retrievers.SelfQueryRetriever method)
(langchain.retrievers.SVMRetriever method)
(langchain.retrievers.TFIDFRetriever method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-10 | (langchain.retrievers.TFIDFRetriever method)
(langchain.retrievers.TimeWeightedVectorStoreRetriever method)
(langchain.retrievers.VespaRetriever method)
(langchain.retrievers.WeaviateHybridSearchRetriever method)
(langchain.retrievers.WikipediaRetriever method)
(langchain.retrievers.ZepRetriever method)
(langchain.retrievers.ZillizRetriever method)
(langchain.schema.BaseRetriever method)
aget_table_info() (langchain.utilities.PowerBIDataset method)
aggregate_importance (langchain.experimental.GenerativeAgentMemory attribute)
aggregate_lines_to_chunks() (langchain.text_splitter.MarkdownHeaderTextSplitter method)
ai_prefix (langchain.agents.ConversationalAgent attribute)
(langchain.memory.ConversationBufferMemory attribute)
(langchain.memory.ConversationBufferWindowMemory attribute)
(langchain.memory.ConversationEntityMemory attribute)
(langchain.memory.ConversationKGMemory attribute)
(langchain.memory.ConversationStringBufferMemory attribute)
(langchain.memory.ConversationTokenBufferMemory attribute)
AimCallbackHandler (class in langchain.callbacks)
aiosession (langchain.retrievers.AzureCognitiveSearchRetriever attribute)
(langchain.retrievers.ChatGPTPluginRetriever attribute)
(langchain.utilities.GoogleSerperAPIWrapper attribute)
(langchain.utilities.PowerBIDataset attribute)
(langchain.utilities.SearxSearchWrapper attribute)
(langchain.utilities.SerpAPIWrapper attribute)
(langchain.utilities.TextRequestsWrapper attribute)
AirbyteJSONLoader (class in langchain.document_loaders)
AirtableLoader (class in langchain.document_loaders)
aiter() (langchain.callbacks.AsyncIteratorCallbackHandler method)
aleph_alpha_api_key (langchain.embeddings.AlephAlphaAsymmetricSemanticEmbedding attribute) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-11 | (langchain.llms.AlephAlpha attribute)
AlibabaCloudOpenSearch (class in langchain.vectorstores)
AlibabaCloudOpenSearchSettings (class in langchain.vectorstores)
alist() (langchain.utilities.ZapierNLAWrapper method)
alist_as_str() (langchain.utilities.ZapierNLAWrapper method)
allow_download (langchain.llms.GPT4All attribute)
allowed_special (langchain.llms.AzureOpenAI attribute)
(langchain.llms.OpenAI attribute)
(langchain.llms.OpenAIChat attribute)
(langchain.llms.OpenLM attribute)
(langchain.llms.PromptLayerOpenAIChat attribute)
allowed_tools (langchain.agents.Agent attribute)
aload() (langchain.document_loaders.WebBaseLoader method)
alpha (langchain.retrievers.PineconeHybridSearchRetriever attribute)
always_verbose (langchain.callbacks.AsyncIteratorCallbackHandler property)
(langchain.callbacks.OpenAICallbackHandler property)
amax_marginal_relevance_search() (langchain.vectorstores.VectorStore method)
amax_marginal_relevance_search_by_vector() (langchain.vectorstores.VectorStore method)
AmazonKendraRetriever (class in langchain.retrievers)
amerge_documents() (langchain.retrievers.MergerRetriever method)
AnalyticDB (class in langchain.vectorstores)
analyze_text() (langchain.callbacks.ClearMLCallbackHandler method)
Annoy (class in langchain.vectorstores)
answer_key (langchain.chains.MapRerankDocumentsChain attribute)
apatch() (langchain.utilities.TextRequestsWrapper method)
api (langchain.document_loaders.DocugamiLoader attribute)
(langchain.output_parsers.GuardrailsOutputParser attribute)
api_answer_chain (langchain.chains.APIChain attribute)
api_docs (langchain.chains.APIChain attribute) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-12 | api_docs (langchain.chains.APIChain attribute)
api_key (langchain.memory.MotorheadMemory attribute)
(langchain.retrievers.AzureCognitiveSearchRetriever attribute)
(langchain.retrievers.DataberryRetriever attribute)
(langchain.utilities.BraveSearchWrapper attribute)
api_operation (langchain.chains.OpenAPIEndpointChain attribute)
api_request_chain (langchain.chains.APIChain attribute)
(langchain.chains.OpenAPIEndpointChain attribute)
api_resource (langchain.agents.agent_toolkits.GmailToolkit attribute)
api_response_chain (langchain.chains.OpenAPIEndpointChain attribute)
api_spec (langchain.tools.AIPluginTool attribute)
api_token (langchain.llms.Databricks attribute)
api_url (langchain.embeddings.EmbaasEmbeddings attribute)
(langchain.llms.AmazonAPIGateway attribute)
(langchain.llms.StochasticAI attribute)
api_version (langchain.retrievers.AzureCognitiveSearchRetriever attribute)
api_wrapper (langchain.tools.ArxivQueryRun attribute)
(langchain.tools.BingSearchResults attribute)
(langchain.tools.BingSearchRun attribute)
(langchain.tools.DuckDuckGoSearchResults attribute)
(langchain.tools.DuckDuckGoSearchRun attribute)
(langchain.tools.GooglePlacesTool attribute)
(langchain.tools.GoogleSearchResults attribute)
(langchain.tools.GoogleSearchRun attribute)
(langchain.tools.GoogleSerperResults attribute)
(langchain.tools.GoogleSerperRun attribute)
(langchain.tools.JiraAction attribute)
(langchain.tools.MetaphorSearchResults attribute)
(langchain.tools.OpenWeatherMapQueryRun attribute)
(langchain.tools.PubmedQueryRun attribute)
(langchain.tools.SceneXplainTool attribute)
(langchain.tools.WikipediaQueryRun attribute)
(langchain.tools.WolframAlphaQueryRun attribute) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-13 | (langchain.tools.WolframAlphaQueryRun attribute)
(langchain.tools.ZapierNLAListActions attribute)
(langchain.tools.ZapierNLARunAction attribute)
apify_client (langchain.document_loaders.ApifyDatasetLoader attribute)
(langchain.utilities.ApifyWrapper attribute)
apify_client_async (langchain.utilities.ApifyWrapper attribute)
aplan() (langchain.agents.Agent method)
(langchain.agents.BaseMultiActionAgent method)
(langchain.agents.BaseSingleActionAgent method)
(langchain.agents.LLMSingleActionAgent method)
(langchain.agents.OpenAIFunctionsAgent method)
apost() (langchain.utilities.TextRequestsWrapper method)
app_creation() (langchain.llms.Beam method)
app_id (langchain.llms.Clarifai attribute)
append_to_last_tokens() (langchain.callbacks.FinalStreamingStdOutCallbackHandler method)
apply() (langchain.chains.AnalyzeDocumentChain method)
(langchain.chains.APIChain method)
(langchain.chains.ChatVectorDBChain method)
(langchain.chains.ConstitutionalChain method)
(langchain.chains.ConversationalRetrievalChain method)
(langchain.chains.ConversationChain method)
(langchain.chains.FlareChain method)
(langchain.chains.GraphCypherQAChain method)
(langchain.chains.GraphQAChain method)
(langchain.chains.HypotheticalDocumentEmbedder method)
(langchain.chains.KuzuQAChain method)
(langchain.chains.LLMBashChain method)
(langchain.chains.LLMChain method)
(langchain.chains.LLMCheckerChain method)
(langchain.chains.LLMMathChain method)
(langchain.chains.LLMRequestsChain method)
(langchain.chains.LLMRouterChain method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-14 | (langchain.chains.LLMRouterChain method)
(langchain.chains.LLMSummarizationCheckerChain method)
(langchain.chains.MapReduceChain method)
(langchain.chains.MapReduceDocumentsChain method)
(langchain.chains.MapRerankDocumentsChain method)
(langchain.chains.MultiPromptChain method)
(langchain.chains.MultiRetrievalQAChain method)
(langchain.chains.MultiRouteChain method)
(langchain.chains.NatBotChain method)
(langchain.chains.NebulaGraphQAChain method)
(langchain.chains.OpenAIModerationChain method)
(langchain.chains.OpenAPIEndpointChain method)
(langchain.chains.PALChain method)
(langchain.chains.QAGenerationChain method)
(langchain.chains.QAWithSourcesChain method)
(langchain.chains.RefineDocumentsChain method)
(langchain.chains.RetrievalQA method)
(langchain.chains.RetrievalQAWithSourcesChain method)
(langchain.chains.RouterChain method)
(langchain.chains.SequentialChain method)
(langchain.chains.SimpleSequentialChain method)
(langchain.chains.SQLDatabaseChain method)
(langchain.chains.SQLDatabaseSequentialChain method)
(langchain.chains.StuffDocumentsChain method)
(langchain.chains.TransformChain method)
(langchain.chains.VectorDBQA method)
(langchain.chains.VectorDBQAWithSourcesChain method)
apply_and_parse() (langchain.chains.ConversationChain method)
(langchain.chains.LLMChain method)
apredict() (langchain.chains.ConversationChain method)
(langchain.chains.LLMChain method)
(langchain.llms.AI21 method)
(langchain.llms.AlephAlpha method)
(langchain.llms.AmazonAPIGateway method)
(langchain.llms.Anthropic method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-15 | (langchain.llms.Anthropic method)
(langchain.llms.Anyscale method)
(langchain.llms.Aviary method)
(langchain.llms.AzureMLOnlineEndpoint method)
(langchain.llms.AzureOpenAI method)
(langchain.llms.Banana method)
(langchain.llms.Baseten method)
(langchain.llms.Beam method)
(langchain.llms.Bedrock method)
(langchain.llms.CerebriumAI method)
(langchain.llms.Clarifai method)
(langchain.llms.Cohere method)
(langchain.llms.CTransformers method)
(langchain.llms.Databricks method)
(langchain.llms.DeepInfra method)
(langchain.llms.FakeListLLM method)
(langchain.llms.ForefrontAI method)
(langchain.llms.GooglePalm method)
(langchain.llms.GooseAI method)
(langchain.llms.GPT4All method)
(langchain.llms.HuggingFaceEndpoint method)
(langchain.llms.HuggingFaceHub method)
(langchain.llms.HuggingFacePipeline method)
(langchain.llms.HuggingFaceTextGenInference method)
(langchain.llms.HumanInputLLM method)
(langchain.llms.LlamaCpp method)
(langchain.llms.ManifestWrapper method)
(langchain.llms.Modal method)
(langchain.llms.MosaicML method)
(langchain.llms.NLPCloud method)
(langchain.llms.OctoAIEndpoint method)
(langchain.llms.OpenAI method)
(langchain.llms.OpenAIChat method)
(langchain.llms.OpenLLM method)
(langchain.llms.OpenLM method)
(langchain.llms.Petals method)
(langchain.llms.PipelineAI method)
(langchain.llms.PredictionGuard method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-16 | (langchain.llms.PredictionGuard method)
(langchain.llms.PromptLayerOpenAI method)
(langchain.llms.PromptLayerOpenAIChat method)
(langchain.llms.Replicate method)
(langchain.llms.RWKV method)
(langchain.llms.SagemakerEndpoint method)
(langchain.llms.SelfHostedHuggingFaceLLM method)
(langchain.llms.SelfHostedPipeline method)
(langchain.llms.StochasticAI method)
(langchain.llms.TextGen method)
(langchain.llms.VertexAI method)
(langchain.llms.Writer method)
apredict_and_parse() (langchain.chains.ConversationChain method)
(langchain.chains.LLMChain method)
apredict_messages() (langchain.llms.AI21 method)
(langchain.llms.AlephAlpha method)
(langchain.llms.AmazonAPIGateway method)
(langchain.llms.Anthropic method)
(langchain.llms.Anyscale method)
(langchain.llms.Aviary method)
(langchain.llms.AzureMLOnlineEndpoint method)
(langchain.llms.AzureOpenAI method)
(langchain.llms.Banana method)
(langchain.llms.Baseten method)
(langchain.llms.Beam method)
(langchain.llms.Bedrock method)
(langchain.llms.CerebriumAI method)
(langchain.llms.Clarifai method)
(langchain.llms.Cohere method)
(langchain.llms.CTransformers method)
(langchain.llms.Databricks method)
(langchain.llms.DeepInfra method)
(langchain.llms.FakeListLLM method)
(langchain.llms.ForefrontAI method)
(langchain.llms.GooglePalm method)
(langchain.llms.GooseAI method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-17 | (langchain.llms.GooglePalm method)
(langchain.llms.GooseAI method)
(langchain.llms.GPT4All method)
(langchain.llms.HuggingFaceEndpoint method)
(langchain.llms.HuggingFaceHub method)
(langchain.llms.HuggingFacePipeline method)
(langchain.llms.HuggingFaceTextGenInference method)
(langchain.llms.HumanInputLLM method)
(langchain.llms.LlamaCpp method)
(langchain.llms.ManifestWrapper method)
(langchain.llms.Modal method)
(langchain.llms.MosaicML method)
(langchain.llms.NLPCloud method)
(langchain.llms.OctoAIEndpoint method)
(langchain.llms.OpenAI method)
(langchain.llms.OpenAIChat method)
(langchain.llms.OpenLLM method)
(langchain.llms.OpenLM method)
(langchain.llms.Petals method)
(langchain.llms.PipelineAI method)
(langchain.llms.PredictionGuard method)
(langchain.llms.PromptLayerOpenAI method)
(langchain.llms.PromptLayerOpenAIChat method)
(langchain.llms.Replicate method)
(langchain.llms.RWKV method)
(langchain.llms.SagemakerEndpoint method)
(langchain.llms.SelfHostedHuggingFaceLLM method)
(langchain.llms.SelfHostedPipeline method)
(langchain.llms.StochasticAI method)
(langchain.llms.TextGen method)
(langchain.llms.VertexAI method)
(langchain.llms.Writer method)
aprep_prompts() (langchain.chains.ConversationChain method)
(langchain.chains.LLMChain method)
apreview() (langchain.utilities.ZapierNLAWrapper method)
apreview_as_str() (langchain.utilities.ZapierNLAWrapper method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-18 | apreview_as_str() (langchain.utilities.ZapierNLAWrapper method)
aput() (langchain.utilities.TextRequestsWrapper method)
arbitrary_types_allowed (langchain.experimental.BabyAGI.Config attribute)
(langchain.experimental.GenerativeAgent.Config attribute)
(langchain.retrievers.WeaviateHybridSearchRetriever.Config attribute)
are_all_true_prompt (langchain.chains.LLMSummarizationCheckerChain attribute)
aresults() (langchain.utilities.GoogleSerperAPIWrapper method)
(langchain.utilities.SearxSearchWrapper method)
(langchain.utilities.SerpAPIWrapper method)
ArgillaCallbackHandler (class in langchain.callbacks)
args (langchain.agents.Tool property)
(langchain.output_parsers.GuardrailsOutputParser attribute)
(langchain.tools.BaseTool property)
(langchain.tools.StructuredTool property)
(langchain.tools.Tool property)
args_schema (langchain.tools.AIPluginTool attribute)
(langchain.tools.BaseTool attribute)
(langchain.tools.ClickTool attribute)
(langchain.tools.CopyFileTool attribute)
(langchain.tools.CurrentWebPageTool attribute)
(langchain.tools.DeleteFileTool attribute)
(langchain.tools.ExtractHyperlinksTool attribute)
(langchain.tools.ExtractTextTool attribute)
(langchain.tools.FileSearchTool attribute)
(langchain.tools.GetElementsTool attribute)
(langchain.tools.GmailCreateDraft attribute)
(langchain.tools.GmailGetMessage attribute)
(langchain.tools.GmailGetThread attribute)
(langchain.tools.GmailSearch attribute)
(langchain.tools.GooglePlacesTool attribute)
(langchain.tools.ListDirectoryTool attribute)
(langchain.tools.MoveFileTool attribute)
(langchain.tools.NavigateBackTool attribute)
(langchain.tools.NavigateTool attribute)
(langchain.tools.ReadFileTool attribute)
(langchain.tools.ShellTool attribute)
(langchain.tools.SleepTool attribute) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-19 | (langchain.tools.ShellTool attribute)
(langchain.tools.SleepTool attribute)
(langchain.tools.StructuredTool attribute)
(langchain.tools.Tool attribute)
(langchain.tools.WriteFileTool attribute)
ArizeCallbackHandler (class in langchain.callbacks)
aroute() (langchain.chains.LLMRouterChain method)
(langchain.chains.RouterChain method)
arun() (langchain.chains.AnalyzeDocumentChain method)
(langchain.chains.APIChain method)
(langchain.chains.ChatVectorDBChain method)
(langchain.chains.ConstitutionalChain method)
(langchain.chains.ConversationalRetrievalChain method)
(langchain.chains.ConversationChain method)
(langchain.chains.FlareChain method)
(langchain.chains.GraphCypherQAChain method)
(langchain.chains.GraphQAChain method)
(langchain.chains.HypotheticalDocumentEmbedder method)
(langchain.chains.KuzuQAChain method)
(langchain.chains.LLMBashChain method)
(langchain.chains.LLMChain method)
(langchain.chains.LLMCheckerChain method)
(langchain.chains.LLMMathChain method)
(langchain.chains.LLMRequestsChain method)
(langchain.chains.LLMRouterChain method)
(langchain.chains.LLMSummarizationCheckerChain method)
(langchain.chains.MapReduceChain method)
(langchain.chains.MapReduceDocumentsChain method)
(langchain.chains.MapRerankDocumentsChain method)
(langchain.chains.MultiPromptChain method)
(langchain.chains.MultiRetrievalQAChain method)
(langchain.chains.MultiRouteChain method)
(langchain.chains.NatBotChain method)
(langchain.chains.NebulaGraphQAChain method)
(langchain.chains.OpenAIModerationChain method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-20 | (langchain.chains.OpenAIModerationChain method)
(langchain.chains.OpenAPIEndpointChain method)
(langchain.chains.PALChain method)
(langchain.chains.QAGenerationChain method)
(langchain.chains.QAWithSourcesChain method)
(langchain.chains.RefineDocumentsChain method)
(langchain.chains.RetrievalQA method)
(langchain.chains.RetrievalQAWithSourcesChain method)
(langchain.chains.RouterChain method)
(langchain.chains.SequentialChain method)
(langchain.chains.SimpleSequentialChain method)
(langchain.chains.SQLDatabaseChain method)
(langchain.chains.SQLDatabaseSequentialChain method)
(langchain.chains.StuffDocumentsChain method)
(langchain.chains.TransformChain method)
(langchain.chains.VectorDBQA method)
(langchain.chains.VectorDBQAWithSourcesChain method)
(langchain.tools.BaseTool method)
(langchain.utilities.GoogleSerperAPIWrapper method)
(langchain.utilities.PowerBIDataset method)
(langchain.utilities.SearxSearchWrapper method)
(langchain.utilities.SerpAPIWrapper method)
(langchain.utilities.ZapierNLAWrapper method)
arun_as_str() (langchain.utilities.ZapierNLAWrapper method)
arxiv_exceptions (langchain.utilities.ArxivAPIWrapper attribute)
ArxivLoader (class in langchain.document_loaders)
as_bytes() (langchain.document_loaders.Blob method)
as_bytes_io() (langchain.document_loaders.Blob method)
as_retriever() (langchain.vectorstores.Redis method)
(langchain.vectorstores.SingleStoreDB method)
(langchain.vectorstores.Vectara method)
(langchain.vectorstores.VectorStore method)
as_string() (langchain.document_loaders.Blob method)
asearch() (langchain.vectorstores.VectorStore method) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-21 | asearch() (langchain.vectorstores.VectorStore method)
asimilarity_search() (langchain.vectorstores.VectorStore method)
asimilarity_search_by_vector() (langchain.vectorstores.VectorStore method)
asimilarity_search_with_relevance_scores() (langchain.vectorstores.VectorStore method)
assignee (langchain.document_loaders.GitHubIssuesLoader attribute)
async_browser (langchain.agents.agent_toolkits.PlayWrightBrowserToolkit attribute)
async_from_zapier_nla_wrapper() (langchain.agents.agent_toolkits.ZapierToolkit class method)
AsyncIteratorCallbackHandler (class in langchain.callbacks)
AtlasDB (class in langchain.vectorstores)
atransform_documents() (langchain.document_transformers.EmbeddingsRedundantFilter method)
(langchain.schema.BaseDocumentTransformer method)
(langchain.text_splitter.TextSplitter method)
auth_token (langchain.utilities.TwilioAPIWrapper attribute)
auth_with_token (langchain.document_loaders.OneDriveLoader attribute)
AutoGPT (class in langchain.experimental)
AwaDB (class in langchain.vectorstores)
awslambda_tool_description (langchain.utilities.LambdaWrapper attribute)
awslambda_tool_name (langchain.utilities.LambdaWrapper attribute)
AZLyricsLoader (class in langchain.document_loaders)
AzureBlobStorageContainerLoader (class in langchain.document_loaders)
AzureBlobStorageFileLoader (class in langchain.document_loaders)
AzureSearch (class in langchain.vectorstores)
B
BabyAGI (class in langchain.experimental)
bad_words (langchain.llms.NLPCloud attribute)
ban_eos_token (langchain.llms.TextGen attribute)
base_compressor (langchain.retrievers.ContextualCompressionRetriever attribute)
base_embeddings (langchain.chains.HypotheticalDocumentEmbedder attribute) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-22 | base_embeddings (langchain.chains.HypotheticalDocumentEmbedder attribute)
base_prompt (langchain.tools.ZapierNLARunAction attribute)
base_retriever (langchain.retrievers.ContextualCompressionRetriever attribute)
base_url (langchain.document_loaders.BlackboardLoader attribute)
(langchain.llms.AI21 attribute)
(langchain.llms.ForefrontAI attribute)
(langchain.llms.Writer attribute)
(langchain.tools.APIOperation attribute)
(langchain.tools.OpenAPISpec property)
BaseChatMessageHistory (class in langchain.schema)
BaseDocumentTransformer (class in langchain.schema)
BaseRetriever (class in langchain.schema)
BashProcess (class in langchain.utilities)
batch_size (langchain.llms.AzureOpenAI attribute)
(langchain.llms.OpenAI attribute)
(langchain.llms.OpenLM attribute)
bearer_token (langchain.retrievers.ChatGPTPluginRetriever attribute)
best_of (langchain.llms.AlephAlpha attribute)
(langchain.llms.AzureOpenAI attribute)
(langchain.llms.OpenAI attribute)
(langchain.llms.OpenLM attribute)
(langchain.llms.Writer attribute)
BibtexLoader (class in langchain.document_loaders)
BigQueryLoader (class in langchain.document_loaders)
BiliBiliLoader (class in langchain.document_loaders)
binary_location (langchain.document_loaders.SeleniumURLLoader attribute)
bing_search_url (langchain.utilities.BingSearchAPIWrapper attribute)
bing_subscription_key (langchain.utilities.BingSearchAPIWrapper attribute)
BlackboardLoader (class in langchain.document_loaders)
blob_loader (langchain.document_loaders.EmbaasLoader attribute)
BlobLoader (class in langchain.document_loaders)
BlockchainDocumentLoader (class in langchain.document_loaders) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-23 | BlockchainDocumentLoader (class in langchain.document_loaders)
body_params (langchain.tools.APIOperation property)
browser (langchain.document_loaders.SeleniumURLLoader attribute)
bs_get_text_kwargs (langchain.document_loaders.WebBaseLoader attribute)
BSHTMLLoader (class in langchain.document_loaders)
buffer (langchain.memory.ConversationBufferMemory property)
(langchain.memory.ConversationBufferWindowMemory property)
(langchain.memory.ConversationEntityMemory property)
(langchain.memory.ConversationStringBufferMemory attribute)
(langchain.memory.ConversationSummaryBufferMemory property)
(langchain.memory.ConversationSummaryMemory attribute)
(langchain.memory.ConversationTokenBufferMemory property)
C
cache_folder (langchain.embeddings.HuggingFaceEmbeddings attribute)
(langchain.embeddings.HuggingFaceInstructEmbeddings attribute)
call_actor() (langchain.utilities.ApifyWrapper method)
call_actor_task() (langchain.utilities.ApifyWrapper method)
callback_manager (langchain.agents.agent_toolkits.PowerBIToolkit attribute)
(langchain.chains.AnalyzeDocumentChain attribute)
(langchain.chains.APIChain attribute)
(langchain.chains.ChatVectorDBChain attribute)
(langchain.chains.ConstitutionalChain attribute)
(langchain.chains.ConversationalRetrievalChain attribute)
(langchain.chains.ConversationChain attribute)
(langchain.chains.FlareChain attribute)
(langchain.chains.GraphCypherQAChain attribute)
(langchain.chains.GraphQAChain attribute)
(langchain.chains.HypotheticalDocumentEmbedder attribute)
(langchain.chains.KuzuQAChain attribute)
(langchain.chains.LLMBashChain attribute)
(langchain.chains.LLMChain attribute)
(langchain.chains.LLMCheckerChain attribute)
(langchain.chains.LLMMathChain attribute)
(langchain.chains.LLMRequestsChain attribute) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-24 | (langchain.chains.LLMRequestsChain attribute)
(langchain.chains.LLMRouterChain attribute)
(langchain.chains.LLMSummarizationCheckerChain attribute)
(langchain.chains.MapReduceChain attribute)
(langchain.chains.MapReduceDocumentsChain attribute)
(langchain.chains.MapRerankDocumentsChain attribute)
(langchain.chains.MultiPromptChain attribute)
(langchain.chains.MultiRetrievalQAChain attribute)
(langchain.chains.MultiRouteChain attribute)
(langchain.chains.NatBotChain attribute)
(langchain.chains.NebulaGraphQAChain attribute)
(langchain.chains.OpenAIModerationChain attribute)
(langchain.chains.OpenAPIEndpointChain attribute)
(langchain.chains.PALChain attribute)
(langchain.chains.QAGenerationChain attribute)
(langchain.chains.QAWithSourcesChain attribute)
(langchain.chains.RefineDocumentsChain attribute)
(langchain.chains.RetrievalQA attribute)
(langchain.chains.RetrievalQAWithSourcesChain attribute)
(langchain.chains.RouterChain attribute)
(langchain.chains.SequentialChain attribute)
(langchain.chains.SimpleSequentialChain attribute)
(langchain.chains.SQLDatabaseChain attribute)
(langchain.chains.SQLDatabaseSequentialChain attribute)
(langchain.chains.StuffDocumentsChain attribute)
(langchain.chains.TransformChain attribute)
(langchain.chains.VectorDBQA attribute)
(langchain.chains.VectorDBQAWithSourcesChain attribute)
(langchain.tools.BaseTool attribute)
(langchain.tools.Tool attribute)
callbacks (langchain.chains.AnalyzeDocumentChain attribute)
(langchain.chains.APIChain attribute)
(langchain.chains.ChatVectorDBChain attribute)
(langchain.chains.ConstitutionalChain attribute)
(langchain.chains.ConversationalRetrievalChain attribute)
(langchain.chains.ConversationChain attribute) | https://api.python.langchain.com/en/stable/genindex.html |
a1409c1b5e0f-25 | (langchain.chains.ConversationChain attribute)
(langchain.chains.FlareChain attribute)
(langchain.chains.GraphCypherQAChain attribute)
(langchain.chains.GraphQAChain attribute)
(langchain.chains.HypotheticalDocumentEmbedder attribute)
(langchain.chains.KuzuQAChain attribute)
(langchain.chains.LLMBashChain attribute)
(langchain.chains.LLMChain attribute)
(langchain.chains.LLMCheckerChain attribute)
(langchain.chains.LLMMathChain attribute)
(langchain.chains.LLMRequestsChain attribute)
(langchain.chains.LLMRouterChain attribute)
(langchain.chains.LLMSummarizationCheckerChain attribute)
(langchain.chains.MapReduceChain attribute)
(langchain.chains.MapReduceDocumentsChain attribute)
(langchain.chains.MapRerankDocumentsChain attribute)
(langchain.chains.MultiPromptChain attribute)
(langchain.chains.MultiRetrievalQAChain attribute)
(langchain.chains.MultiRouteChain attribute)
(langchain.chains.NatBotChain attribute)
(langchain.chains.NebulaGraphQAChain attribute)
(langchain.chains.OpenAIModerationChain attribute)
(langchain.chains.OpenAPIEndpointChain attribute)
(langchain.chains.PALChain attribute)
(langchain.chains.QAGenerationChain attribute)
(langchain.chains.QAWithSourcesChain attribute)
(langchain.chains.RefineDocumentsChain attribute)
(langchain.chains.RetrievalQA attribute)
(langchain.chains.RetrievalQAWithSourcesChain attribute)
(langchain.chains.RouterChain attribute)
(langchain.chains.SequentialChain attribute)
(langchain.chains.SimpleSequentialChain attribute)
(langchain.chains.SQLDatabaseChain attribute)
(langchain.chains.SQLDatabaseSequentialChain attribute)
(langchain.chains.StuffDocumentsChain attribute) | https://api.python.langchain.com/en/stable/genindex.html |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.