Source code for langchain.agents.agent_toolkits.nla.toolkit
            )
http_operation_tools.append(endpoint_tool)
return http_operation_tools
    @classmethod
def from_llm_and_spec(
cls,
llm: BaseLanguageModel,
spec: OpenAPISpec,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
) -> NLAToolkit:
"""Instantiate the toolkit by creating tools for each operation."""
http_operation_tools = cls._get_http_operation_tools(
llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs
)
return cls(nla_tools=http_operation_tools)
    @classmethod
def from_llm_and_url(
cls,
llm: BaseLanguageModel,
open_api_url: str,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
    ) -> NLAToolkit:
        """Instantiate the toolkit from an OpenAPI spec URL."""
spec = OpenAPISpec.from_url(open_api_url)
return cls.from_llm_and_spec(
llm=llm, spec=spec, requests=requests, verbose=verbose, **kwargs
)
    @classmethod
def from_llm_and_ai_plugin(
cls,
llm: BaseLanguageModel,
ai_plugin: AIPlugin,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
    ) -> NLAToolkit:
        """Instantiate the toolkit from an AIPlugin."""
        spec = OpenAPISpec.from_url(ai_plugin.api.url)
# TODO: Merge optional Auth information with the `requests` argument
return cls.from_llm_and_spec(
llm=llm,
spec=spec,
requests=requests,
verbose=verbose,
**kwargs,
)
    @classmethod
def from_llm_and_ai_plugin_url(
cls,
llm: BaseLanguageModel,
ai_plugin_url: str,
requests: Optional[Requests] = None,
verbose: bool = False,
**kwargs: Any,
    ) -> NLAToolkit:
        """Instantiate the toolkit from an AI plugin URL."""
plugin = AIPlugin.from_url(ai_plugin_url)
return cls.from_llm_and_ai_plugin(
llm=llm, ai_plugin=plugin, requests=requests, verbose=verbose, **kwargs
        )
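
A minimal usage sketch for NLAToolkit (illustrative, not part of the module; the spec URL is a placeholder for whichever OpenAPI spec the toolkit should wrap):

from langchain.agents.agent_toolkits import NLAToolkit
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
# Placeholder spec URL; any reachable OpenAPI spec works here.
toolkit = NLAToolkit.from_llm_and_url(llm, "https://api.speak.com/openapi.yaml")
tools = toolkit.get_tools()
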
Source code for langchain.agents.agent_toolkits.powerbi.toolkit
"""Toolkit for interacting with a Power BI dataset."""
from typing import List, Optional
from pydantic import Field
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.tools import BaseTool
from langchain.tools.powerbi.prompt import QUESTION_TO_QUERY
from langchain.tools.powerbi.tool import (
InfoPowerBITool,
ListPowerBITool,
QueryPowerBITool,
)
from langchain.utilities.powerbi import PowerBIDataset
class PowerBIToolkit(BaseToolkit):
    """Toolkit for interacting with a Power BI dataset."""
powerbi: PowerBIDataset = Field(exclude=True)
llm: BaseLanguageModel = Field(exclude=True)
examples: Optional[str] = None
max_iterations: int = 5
callback_manager: Optional[BaseCallbackManager] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
    def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
if self.callback_manager:
chain = LLMChain(
llm=self.llm,
callback_manager=self.callback_manager,
prompt=PromptTemplate(
template=QUESTION_TO_QUERY,
input_variables=["tool_input", "tables", "schemas", "examples"],
),
)
else:
chain = LLMChain(
llm=self.llm,
prompt=PromptTemplate(
                    template=QUESTION_TO_QUERY,
input_variables=["tool_input", "tables", "schemas", "examples"],
),
)
return [
QueryPowerBITool(
llm_chain=chain,
powerbi=self.powerbi,
examples=self.examples,
max_iterations=self.max_iterations,
),
InfoPowerBITool(powerbi=self.powerbi),
ListPowerBITool(powerbi=self.powerbi),
        ]
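
A minimal usage sketch (illustrative, not part of the module; the dataset id and table name are placeholders, and DefaultAzureCredential assumes the azure-identity package):

from azure.identity import DefaultAzureCredential
from langchain.agents.agent_toolkits import PowerBIToolkit
from langchain.chat_models import ChatOpenAI
from langchain.utilities.powerbi import PowerBIDataset

dataset = PowerBIDataset(
    dataset_id="<dataset-id>",            # placeholder
    table_names=["Sales"],                # placeholder table
    credential=DefaultAzureCredential(),
)
toolkit = PowerBIToolkit(powerbi=dataset, llm=ChatOpenAI(temperature=0))
tools = toolkit.get_tools()
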
Source code for langchain.agents.agent_toolkits.powerbi.chat_base
"""Power BI agent."""
from typing import Any, Dict, List, Optional
from langchain.agents import AgentExecutor
from langchain.agents.agent import AgentOutputParser
from langchain.agents.agent_toolkits.powerbi.prompt import (
POWERBI_CHAT_PREFIX,
POWERBI_CHAT_SUFFIX,
)
from langchain.agents.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.callbacks.base import BaseCallbackManager
from langchain.chat_models.base import BaseChatModel
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_memory import BaseChatMemory
from langchain.utilities.powerbi import PowerBIDataset
def create_pbi_chat_agent(
llm: BaseChatModel,
toolkit: Optional[PowerBIToolkit],
powerbi: Optional[PowerBIDataset] = None,
callback_manager: Optional[BaseCallbackManager] = None,
output_parser: Optional[AgentOutputParser] = None,
prefix: str = POWERBI_CHAT_PREFIX,
suffix: str = POWERBI_CHAT_SUFFIX,
examples: Optional[str] = None,
input_variables: Optional[List[str]] = None,
memory: Optional[BaseChatMemory] = None,
top_k: int = 10,
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a Power BI agent from a chat LLM and tools.
    If you supply a Power BI dataset and no toolkit, the same LLM is used for both the toolkit and the agent.
"""
    if toolkit is None:
if powerbi is None:
raise ValueError("Must provide either a toolkit or powerbi dataset")
toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples)
tools = toolkit.get_tools()
agent = ConversationalChatAgent.from_llm_and_tools(
llm=llm,
tools=tools,
system_message=prefix.format(top_k=top_k),
human_message=suffix,
input_variables=input_variables,
callback_manager=callback_manager,
output_parser=output_parser,
verbose=verbose,
**kwargs,
)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
memory=memory
or ConversationBufferMemory(memory_key="chat_history", return_messages=True),
verbose=verbose,
**(agent_executor_kwargs or {}),
    )
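
A usage sketch (illustrative; `dataset` is the PowerBIDataset from the toolkit sketch above, and passing toolkit=None lets the function build a toolkit from the same LLM):

from langchain.agents.agent_toolkits import create_pbi_chat_agent
from langchain.chat_models import ChatOpenAI

agent_executor = create_pbi_chat_agent(
    llm=ChatOpenAI(temperature=0),
    toolkit=None,
    powerbi=dataset,
    verbose=True,
)
agent_executor.run("Describe table Sales")
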
Source code for langchain.agents.agent_toolkits.powerbi.base
"""Power BI agent."""
from typing import Any, Dict, List, Optional
from langchain.agents import AgentExecutor
from langchain.agents.agent_toolkits.powerbi.prompt import (
POWERBI_PREFIX,
POWERBI_SUFFIX,
)
from langchain.agents.agent_toolkits.powerbi.toolkit import PowerBIToolkit
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.utilities.powerbi import PowerBIDataset
def create_pbi_agent(
llm: BaseLanguageModel,
toolkit: Optional[PowerBIToolkit],
powerbi: Optional[PowerBIDataset] = None,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = POWERBI_PREFIX,
suffix: str = POWERBI_SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
examples: Optional[str] = None,
input_variables: Optional[List[str]] = None,
top_k: int = 10,
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a Power BI agent from an LLM and tools."""
if toolkit is None:
if powerbi is None:
raise ValueError("Must provide either a toolkit or powerbi dataset")
toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples)
    tools = toolkit.get_tools()
agent = ZeroShotAgent(
llm_chain=LLMChain(
llm=llm,
prompt=ZeroShotAgent.create_prompt(
tools,
prefix=prefix.format(top_k=top_k),
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
),
callback_manager=callback_manager, # type: ignore
verbose=verbose,
),
allowed_tools=[tool.name for tool in tools],
**kwargs,
)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
**(agent_executor_kwargs or {}),
    )
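
The non-chat variant works the same way (illustrative sketch; `dataset` is the PowerBIDataset from the earlier sketch):

from langchain.agents.agent_toolkits import create_pbi_agent
from langchain.llms import OpenAI

agent_executor = create_pbi_agent(llm=OpenAI(temperature=0), toolkit=None, powerbi=dataset)
agent_executor.run("How many rows are in table Sales?")
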
Source code for langchain.agents.agent_toolkits.spark_sql.toolkit
"""Toolkit for interacting with Spark SQL."""
from typing import List
from pydantic import Field
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.base_language import BaseLanguageModel
from langchain.tools import BaseTool
from langchain.tools.spark_sql.tool import (
InfoSparkSQLTool,
ListSparkSQLTool,
QueryCheckerTool,
QuerySparkSQLTool,
)
from langchain.utilities.spark_sql import SparkSQL
class SparkSQLToolkit(BaseToolkit):
"""Toolkit for interacting with Spark SQL."""
db: SparkSQL = Field(exclude=True)
llm: BaseLanguageModel = Field(exclude=True)
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
    def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
QuerySparkSQLTool(db=self.db),
InfoSparkSQLTool(db=self.db),
ListSparkSQLTool(db=self.db),
QueryCheckerTool(db=self.db, llm=self.llm),
        ]
Source code for langchain.agents.agent_toolkits.spark_sql.base
"""Spark SQL agent."""
from typing import Any, Dict, List, Optional
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.spark_sql.prompt import SQL_PREFIX, SQL_SUFFIX
from langchain.agents.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
def create_spark_sql_agent(
llm: BaseLanguageModel,
toolkit: SparkSQLToolkit,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = SQL_PREFIX,
suffix: str = SQL_SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
top_k: int = 10,
max_iterations: Optional[int] = 15,
max_execution_time: Optional[float] = None,
early_stopping_method: str = "force",
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a Spark SQL agent from an LLM and tools."""
tools = toolkit.get_tools()
prefix = prefix.format(top_k=top_k)
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
        llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
max_iterations=max_iterations,
max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method,
**(agent_executor_kwargs or {}),
    )
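
A combined usage sketch for SparkSQLToolkit and create_spark_sql_agent (illustrative; assumes a running Spark session and an existing schema named langchain_example):

from langchain.agents import create_spark_sql_agent
from langchain.agents.agent_toolkits import SparkSQLToolkit
from langchain.chat_models import ChatOpenAI
from langchain.utilities.spark_sql import SparkSQL

spark_sql = SparkSQL(schema="langchain_example")
llm = ChatOpenAI(temperature=0)
agent_executor = create_spark_sql_agent(
    llm=llm, toolkit=SparkSQLToolkit(db=spark_sql, llm=llm), verbose=True
)
agent_executor.run("Describe the titanic table")
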
Source code for langchain.agents.agent_toolkits.csv.base
"""Agent for working with csvs."""
from typing import Any, List, Optional, Union
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain.base_language import BaseLanguageModel
def create_csv_agent(
llm: BaseLanguageModel,
path: Union[str, List[str]],
pandas_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> AgentExecutor:
    """Create a CSV agent by loading the file(s) into a pandas dataframe and delegating to the pandas agent."""
try:
import pandas as pd
except ImportError:
raise ValueError(
"pandas package not found, please install with `pip install pandas`"
)
_kwargs = pandas_kwargs or {}
if isinstance(path, str):
df = pd.read_csv(path, **_kwargs)
elif isinstance(path, list):
df = []
for item in path:
if not isinstance(item, str):
                raise ValueError(f"Expected str, got {type(item)}")
df.append(pd.read_csv(item, **_kwargs))
else:
raise ValueError(f"Expected str or list, got {type(path)}")
    return create_pandas_dataframe_agent(llm, df, **kwargs)
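
A usage sketch (illustrative; "titanic.csv" is a placeholder path, and a list of paths is also accepted):

from langchain.agents import create_csv_agent
from langchain.llms import OpenAI

agent_executor = create_csv_agent(OpenAI(temperature=0), "titanic.csv", verbose=True)
agent_executor.run("How many rows are there?")
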
Source code for langchain.agents.agent_toolkits.azure_cognitive_services.toolkit
from __future__ import annotations
import sys
from typing import List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools.azure_cognitive_services import (
AzureCogsFormRecognizerTool,
AzureCogsImageAnalysisTool,
AzureCogsSpeech2TextTool,
AzureCogsText2SpeechTool,
)
from langchain.tools.base import BaseTool
class AzureCognitiveServicesToolkit(BaseToolkit):
"""Toolkit for Azure Cognitive Services."""
    def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools = [
AzureCogsFormRecognizerTool(),
AzureCogsSpeech2TextTool(),
AzureCogsText2SpeechTool(),
]
# TODO: Remove check once azure-ai-vision supports MacOS.
if sys.platform.startswith("linux") or sys.platform.startswith("win"):
tools.append(AzureCogsImageAnalysisTool())
        return tools
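
A usage sketch (illustrative; the individual tools read their Azure configuration, e.g. AZURE_COGS_KEY, AZURE_COGS_ENDPOINT and AZURE_COGS_REGION, from the environment):

from langchain.agents.agent_toolkits import AzureCognitiveServicesToolkit

toolkit = AzureCognitiveServicesToolkit()
tools = toolkit.get_tools()
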
Source code for langchain.agents.agent_toolkits.jira.toolkit
"""Jira Toolkit."""
from typing import List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.jira.tool import JiraAction
from langchain.utilities.jira import JiraAPIWrapper
class JiraToolkit(BaseToolkit):
"""Jira Toolkit."""
tools: List[BaseTool] = []
    @classmethod
def from_jira_api_wrapper(cls, jira_api_wrapper: JiraAPIWrapper) -> "JiraToolkit":
actions = jira_api_wrapper.list()
tools = [
JiraAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=jira_api_wrapper,
)
for action in actions
]
return cls(tools=tools)
    def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
        return self.tools
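
A usage sketch (illustrative; JiraAPIWrapper reads JIRA_API_TOKEN, JIRA_USERNAME and JIRA_INSTANCE_URL from the environment):

from langchain.agents.agent_toolkits.jira.toolkit import JiraToolkit
from langchain.utilities.jira import JiraAPIWrapper

jira = JiraAPIWrapper()
toolkit = JiraToolkit.from_jira_api_wrapper(jira)
tools = toolkit.get_tools()
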
Source code for langchain.agents.agent_toolkits.vectorstore.toolkit
"""Toolkit for interacting with a vector store."""
from typing import List
from pydantic import BaseModel, Field
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.base_language import BaseLanguageModel
from langchain.llms.openai import OpenAI
from langchain.tools import BaseTool
from langchain.tools.vectorstore.tool import (
VectorStoreQATool,
VectorStoreQAWithSourcesTool,
)
from langchain.vectorstores.base import VectorStore
class VectorStoreInfo(BaseModel):
"""Information about a vectorstore."""
vectorstore: VectorStore = Field(exclude=True)
name: str
description: str
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
class VectorStoreToolkit(BaseToolkit):
"""Toolkit for interacting with a vector store."""
vectorstore_info: VectorStoreInfo = Field(exclude=True)
llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
    def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
description = VectorStoreQATool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=self.vectorstore_info.name,
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
description = VectorStoreQAWithSourcesTool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
        )
qa_with_sources_tool = VectorStoreQAWithSourcesTool(
name=f"{self.vectorstore_info.name}_with_sources",
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
return [qa_tool, qa_with_sources_tool]
class VectorStoreRouterToolkit(BaseToolkit):
"""Toolkit for routing between vector stores."""
vectorstores: List[VectorStoreInfo] = Field(exclude=True)
llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
    def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools: List[BaseTool] = []
for vectorstore_info in self.vectorstores:
description = VectorStoreQATool.get_description(
vectorstore_info.name, vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=vectorstore_info.name,
description=description,
vectorstore=vectorstore_info.vectorstore,
llm=self.llm,
)
tools.append(qa_tool)
        return tools
Source code for langchain.agents.agent_toolkits.vectorstore.base
"""VectorStore agent."""
from typing import Any, Dict, Optional
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.vectorstore.prompt import PREFIX, ROUTER_PREFIX
from langchain.agents.agent_toolkits.vectorstore.toolkit import (
VectorStoreRouterToolkit,
VectorStoreToolkit,
)
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
def create_vectorstore_agent(
llm: BaseLanguageModel,
toolkit: VectorStoreToolkit,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = PREFIX,
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Dict[str, Any],
) -> AgentExecutor:
"""Construct a vectorstore agent from an LLM and tools."""
tools = toolkit.get_tools()
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
**(agent_executor_kwargs or {}),
)
def create_vectorstore_router_agent(
llm: BaseLanguageModel,
toolkit: VectorStoreRouterToolkit,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = ROUTER_PREFIX,
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Dict[str, Any],
) -> AgentExecutor:
"""Construct a vectorstore router agent from an LLM and tools."""
tools = toolkit.get_tools()
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
**(agent_executor_kwargs or {}),
    )
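
A combined usage sketch for VectorStoreInfo, VectorStoreToolkit and create_vectorstore_agent (illustrative; `vectorstore` stands for any pre-built VectorStore instance):

from langchain.agents.agent_toolkits import (
    VectorStoreInfo,
    VectorStoreToolkit,
    create_vectorstore_agent,
)
from langchain.llms import OpenAI

vectorstore_info = VectorStoreInfo(
    name="state_of_union",
    description="the most recent state of the Union address",
    vectorstore=vectorstore,  # assumed pre-built elsewhere
)
toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info)
agent_executor = create_vectorstore_agent(
    llm=OpenAI(temperature=0), toolkit=toolkit, verbose=True
)
agent_executor.run("What did the president say about Ketanji Brown Jackson?")
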
Source code for langchain.agents.agent_toolkits.sql.toolkit
"""Toolkit for interacting with a SQL database."""
from typing import List
from pydantic import Field
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.base_language import BaseLanguageModel
from langchain.sql_database import SQLDatabase
from langchain.tools import BaseTool
from langchain.tools.sql_database.tool import (
InfoSQLDatabaseTool,
ListSQLDatabaseTool,
QuerySQLCheckerTool,
QuerySQLDataBaseTool,
)
class SQLDatabaseToolkit(BaseToolkit):
"""Toolkit for interacting with SQL databases."""
db: SQLDatabase = Field(exclude=True)
llm: BaseLanguageModel = Field(exclude=True)
@property
def dialect(self) -> str:
"""Return string representation of dialect to use."""
return self.db.dialect
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
    def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
        query_sql_database_tool_description = (
            "Input to this tool is a detailed and correct SQL query, output is a "
            "result from the database. If the query is not correct, an error message "
            "will be returned. If an error is returned, rewrite the query, check the "
            "query, and try again. If you encounter an issue with Unknown column "
            "'xxxx' in 'field list', use schema_sql_db to query the correct table "
            "fields."
)
        info_sql_database_tool_description = (
            "Input to this tool is a comma-separated list of tables, output is the "
            "schema and sample rows for those tables. "
"Be sure that the tables actually exist by calling list_tables_sql_db "
"first! Example Input: 'table1, table2, table3'"
)
return [
QuerySQLDataBaseTool(
db=self.db, description=query_sql_database_tool_description
),
InfoSQLDatabaseTool(
db=self.db, description=info_sql_database_tool_description
),
ListSQLDatabaseTool(db=self.db),
QuerySQLCheckerTool(db=self.db, llm=self.llm),
        ]
Source code for langchain.agents.agent_toolkits.sql.base
"""SQL agent."""
from typing import Any, Dict, List, Optional
from langchain.agents.agent import AgentExecutor, BaseSingleActionAgent
from langchain.agents.agent_toolkits.sql.prompt import (
SQL_FUNCTIONS_SUFFIX,
SQL_PREFIX,
SQL_SUFFIX,
)
from langchain.agents.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain.agents.agent_types import AgentType
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain.schema import AIMessage, SystemMessage
def create_sql_agent(
llm: BaseLanguageModel,
toolkit: SQLDatabaseToolkit,
agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = SQL_PREFIX,
suffix: Optional[str] = None,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
top_k: int = 10,
max_iterations: Optional[int] = 15,
max_execution_time: Optional[float] = None,
early_stopping_method: str = "force",
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a SQL agent from an LLM and tools."""
tools = toolkit.get_tools()
prefix = prefix.format(dialect=toolkit.dialect, top_k=top_k)
agent: BaseSingleActionAgent
if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION:
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix or SQL_SUFFIX,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
elif agent_type == AgentType.OPENAI_FUNCTIONS:
messages = [
SystemMessage(content=prefix),
HumanMessagePromptTemplate.from_template("{input}"),
AIMessage(content=suffix or SQL_FUNCTIONS_SUFFIX),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
input_variables = ["input", "agent_scratchpad"]
_prompt = ChatPromptTemplate(input_variables=input_variables, messages=messages)
agent = OpenAIFunctionsAgent(
llm=llm,
prompt=_prompt,
tools=tools,
callback_manager=callback_manager,
**kwargs,
)
else:
raise ValueError(f"Agent type {agent_type} not supported at the moment.")
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
        verbose=verbose,
max_iterations=max_iterations,
max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method,
**(agent_executor_kwargs or {}),
    )
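
A combined usage sketch for SQLDatabaseToolkit and create_sql_agent (illustrative; the Chinook SQLite file is a placeholder database):

from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.llms import OpenAI
from langchain.sql_database import SQLDatabase

db = SQLDatabase.from_uri("sqlite:///Chinook.db")
llm = OpenAI(temperature=0)
agent_executor = create_sql_agent(
    llm=llm, toolkit=SQLDatabaseToolkit(db=db, llm=llm), verbose=True
)
agent_executor.run("List the total sales per country.")
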
Source code for langchain.agents.agent_toolkits.gmail.toolkit
from __future__ import annotations
from typing import TYPE_CHECKING, List
from pydantic import Field
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.gmail.create_draft import GmailCreateDraft
from langchain.tools.gmail.get_message import GmailGetMessage
from langchain.tools.gmail.get_thread import GmailGetThread
from langchain.tools.gmail.search import GmailSearch
from langchain.tools.gmail.send_message import GmailSendMessage
from langchain.tools.gmail.utils import build_resource_service
if TYPE_CHECKING:
# This is for linting and IDE typehints
from googleapiclient.discovery import Resource
else:
try:
# We do this so pydantic can resolve the types when instantiating
from googleapiclient.discovery import Resource
except ImportError:
pass
SCOPES = ["https://mail.google.com/"]
class GmailToolkit(BaseToolkit):
"""Toolkit for interacting with Gmail."""
api_resource: Resource = Field(default_factory=build_resource_service)
class Config:
"""Pydantic config."""
arbitrary_types_allowed = True
    def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
GmailCreateDraft(api_resource=self.api_resource),
GmailSendMessage(api_resource=self.api_resource),
GmailSearch(api_resource=self.api_resource),
GmailGetMessage(api_resource=self.api_resource),
GmailGetThread(api_resource=self.api_resource),
        ]
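
A usage sketch (illustrative; assumes Gmail OAuth credentials that build_resource_service can load, by default a local credentials.json/token.json pair):

from langchain.agents.agent_toolkits import GmailToolkit

toolkit = GmailToolkit()
tools = toolkit.get_tools()
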
Source code for langchain.agents.agent_toolkits.file_management.toolkit
"""Toolkit for interacting with the local filesystem."""
from __future__ import annotations
from typing import List, Optional
from pydantic import root_validator
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.file_management.copy import CopyFileTool
from langchain.tools.file_management.delete import DeleteFileTool
from langchain.tools.file_management.file_search import FileSearchTool
from langchain.tools.file_management.list_dir import ListDirectoryTool
from langchain.tools.file_management.move import MoveFileTool
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool
_FILE_TOOLS = {
tool_cls.__fields__["name"].default: tool_cls
for tool_cls in [
CopyFileTool,
DeleteFileTool,
FileSearchTool,
MoveFileTool,
ReadFileTool,
WriteFileTool,
ListDirectoryTool,
]
}
class FileManagementToolkit(BaseToolkit):
    """Toolkit for interacting with local files."""
root_dir: Optional[str] = None
"""If specified, all file operations are made relative to root_dir."""
    selected_tools: Optional[List[str]] = None
    """If provided, only the selected tools are exposed. Defaults to all."""
@root_validator
def validate_tools(cls, values: dict) -> dict:
selected_tools = values.get("selected_tools") or []
for tool_name in selected_tools:
if tool_name not in _FILE_TOOLS:
raise ValueError(
f"File Tool of name {tool_name} not supported."
f" Permitted tools: {list(_FILE_TOOLS)}"
)
        return values
    def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
allowed_tools = self.selected_tools or _FILE_TOOLS.keys()
tools: List[BaseTool] = []
for tool in allowed_tools:
tool_cls = _FILE_TOOLS[tool]
tools.append(tool_cls(root_dir=self.root_dir)) # type: ignore
return tools
__all__ = ["FileManagementToolkit"]
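
A usage sketch (illustrative; the root directory is a placeholder, and the selected names are the defaults of the corresponding tool classes):

from langchain.agents.agent_toolkits import FileManagementToolkit

toolkit = FileManagementToolkit(
    root_dir="/tmp/agent_workspace",
    selected_tools=["read_file", "write_file", "list_directory"],
)
tools = toolkit.get_tools()
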
Source code for langchain.agents.agent_toolkits.zapier.toolkit
"""Zapier Toolkit."""
from typing import List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.zapier.tool import ZapierNLARunAction
from langchain.utilities.zapier import ZapierNLAWrapper
class ZapierToolkit(BaseToolkit):
"""Zapier Toolkit."""
tools: List[BaseTool] = []
    @classmethod
def from_zapier_nla_wrapper(
cls, zapier_nla_wrapper: ZapierNLAWrapper
) -> "ZapierToolkit":
"""Create a toolkit from a ZapierNLAWrapper."""
actions = zapier_nla_wrapper.list()
tools = [
ZapierNLARunAction(
action_id=action["id"],
zapier_description=action["description"],
params_schema=action["params"],
api_wrapper=zapier_nla_wrapper,
)
for action in actions
]
return cls(tools=tools)
    @classmethod
async def async_from_zapier_nla_wrapper(
cls, zapier_nla_wrapper: ZapierNLAWrapper
) -> "ZapierToolkit":
"""Create a toolkit from a ZapierNLAWrapper."""
actions = await zapier_nla_wrapper.alist()
tools = [
ZapierNLARunAction(
action_id=action["id"],
zapier_description=action["description"],
params_schema=action["params"],
api_wrapper=zapier_nla_wrapper,
)
for action in actions
]
        return cls(tools=tools)
    def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
        return self.tools
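
A usage sketch (illustrative; ZapierNLAWrapper reads ZAPIER_NLA_API_KEY from the environment):

from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.utilities.zapier import ZapierNLAWrapper

toolkit = ZapierToolkit.from_zapier_nla_wrapper(ZapierNLAWrapper())
tools = toolkit.get_tools()
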
Source code for langchain.agents.agent_toolkits.pandas.base
"""Agent for working with pandas objects."""
from typing import Any, Dict, List, Optional, Tuple
from langchain.agents.agent import AgentExecutor, BaseSingleActionAgent
from langchain.agents.agent_toolkits.pandas.prompt import (
FUNCTIONS_WITH_DF,
FUNCTIONS_WITH_MULTI_DF,
MULTI_DF_PREFIX,
MULTI_DF_PREFIX_FUNCTIONS,
PREFIX,
PREFIX_FUNCTIONS,
SUFFIX_NO_DF,
SUFFIX_WITH_DF,
SUFFIX_WITH_MULTI_DF,
)
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.agents.types import AgentType
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import SystemMessage
from langchain.tools.python.tool import PythonAstREPLTool
def _get_multi_prompt(
dfs: List[Any],
prefix: Optional[str] = None,
suffix: Optional[str] = None,
input_variables: Optional[List[str]] = None,
include_df_in_prompt: Optional[bool] = True,
) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]:
num_dfs = len(dfs)
if suffix is not None:
suffix_to_use = suffix
include_dfs_head = True
elif include_df_in_prompt:
suffix_to_use = SUFFIX_WITH_MULTI_DF
include_dfs_head = True
else:
suffix_to_use = SUFFIX_NO_DF
include_dfs_head = False
    if input_variables is None:
input_variables = ["input", "agent_scratchpad", "num_dfs"]
if include_dfs_head:
input_variables += ["dfs_head"]
if prefix is None:
prefix = MULTI_DF_PREFIX
df_locals = {}
for i, dataframe in enumerate(dfs):
df_locals[f"df{i + 1}"] = dataframe
tools = [PythonAstREPLTool(locals=df_locals)]
prompt = ZeroShotAgent.create_prompt(
tools, prefix=prefix, suffix=suffix_to_use, input_variables=input_variables
)
partial_prompt = prompt.partial()
if "dfs_head" in input_variables:
dfs_head = "\n\n".join([d.head().to_markdown() for d in dfs])
partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs), dfs_head=dfs_head)
if "num_dfs" in input_variables:
partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs))
return partial_prompt, tools
def _get_single_prompt(
df: Any,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
input_variables: Optional[List[str]] = None,
include_df_in_prompt: Optional[bool] = True,
) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]:
if suffix is not None:
suffix_to_use = suffix
include_df_head = True
elif include_df_in_prompt:
suffix_to_use = SUFFIX_WITH_DF
include_df_head = True
else:
suffix_to_use = SUFFIX_NO_DF
include_df_head = False
    if input_variables is None:
input_variables = ["input", "agent_scratchpad"]
if include_df_head:
input_variables += ["df_head"]
if prefix is None:
prefix = PREFIX
tools = [PythonAstREPLTool(locals={"df": df})]
prompt = ZeroShotAgent.create_prompt(
tools, prefix=prefix, suffix=suffix_to_use, input_variables=input_variables
)
partial_prompt = prompt.partial()
if "df_head" in input_variables:
partial_prompt = partial_prompt.partial(df_head=str(df.head().to_markdown()))
return partial_prompt, tools
def _get_prompt_and_tools(
df: Any,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
input_variables: Optional[List[str]] = None,
include_df_in_prompt: Optional[bool] = True,
) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]:
try:
import pandas as pd
except ImportError:
raise ValueError(
"pandas package not found, please install with `pip install pandas`"
)
if include_df_in_prompt is not None and suffix is not None:
raise ValueError("If suffix is specified, include_df_in_prompt should not be.")
if isinstance(df, list):
for item in df:
if not isinstance(item, pd.DataFrame):
                raise ValueError(f"Expected pandas object, got {type(item)}")
return _get_multi_prompt(
df,
prefix=prefix,
suffix=suffix,
input_variables=input_variables,
include_df_in_prompt=include_df_in_prompt,
)
    else:
if not isinstance(df, pd.DataFrame):
raise ValueError(f"Expected pandas object, got {type(df)}")
return _get_single_prompt(
df,
prefix=prefix,
suffix=suffix,
input_variables=input_variables,
include_df_in_prompt=include_df_in_prompt,
)
def _get_functions_single_prompt(
df: Any,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
include_df_in_prompt: Optional[bool] = True,
) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]:
if suffix is not None:
suffix_to_use = suffix
if include_df_in_prompt:
suffix_to_use = suffix_to_use.format(df_head=str(df.head().to_markdown()))
elif include_df_in_prompt:
suffix_to_use = FUNCTIONS_WITH_DF.format(df_head=str(df.head().to_markdown()))
else:
suffix_to_use = ""
if prefix is None:
prefix = PREFIX_FUNCTIONS
tools = [PythonAstREPLTool(locals={"df": df})]
system_message = SystemMessage(content=prefix + suffix_to_use)
prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)
return prompt, tools
def _get_functions_multi_prompt(
dfs: Any,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
include_df_in_prompt: Optional[bool] = True,
) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]:
if suffix is not None:
suffix_to_use = suffix
        if include_df_in_prompt:
dfs_head = "\n\n".join([d.head().to_markdown() for d in dfs])
suffix_to_use = suffix_to_use.format(
dfs_head=dfs_head,
)
elif include_df_in_prompt:
dfs_head = "\n\n".join([d.head().to_markdown() for d in dfs])
suffix_to_use = FUNCTIONS_WITH_MULTI_DF.format(
dfs_head=dfs_head,
)
else:
suffix_to_use = ""
if prefix is None:
prefix = MULTI_DF_PREFIX_FUNCTIONS
prefix = prefix.format(num_dfs=str(len(dfs)))
df_locals = {}
for i, dataframe in enumerate(dfs):
df_locals[f"df{i + 1}"] = dataframe
tools = [PythonAstREPLTool(locals=df_locals)]
system_message = SystemMessage(content=prefix + suffix_to_use)
prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)
return prompt, tools
def _get_functions_prompt_and_tools(
df: Any,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
input_variables: Optional[List[str]] = None,
include_df_in_prompt: Optional[bool] = True,
) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]:
try:
import pandas as pd
except ImportError:
raise ValueError(
"pandas package not found, please install with `pip install pandas`"
)
if input_variables is not None:
raise ValueError("`input_variables` is not supported at the moment.")
    if include_df_in_prompt is not None and suffix is not None:
raise ValueError("If suffix is specified, include_df_in_prompt should not be.")
if isinstance(df, list):
for item in df:
if not isinstance(item, pd.DataFrame):
                raise ValueError(f"Expected pandas object, got {type(item)}")
return _get_functions_multi_prompt(
df,
prefix=prefix,
suffix=suffix,
include_df_in_prompt=include_df_in_prompt,
)
else:
if not isinstance(df, pd.DataFrame):
raise ValueError(f"Expected pandas object, got {type(df)}")
return _get_functions_single_prompt(
df,
prefix=prefix,
suffix=suffix,
include_df_in_prompt=include_df_in_prompt,
)
def create_pandas_dataframe_agent(
llm: BaseLanguageModel,
df: Any,
agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
input_variables: Optional[List[str]] = None,
verbose: bool = False,
return_intermediate_steps: bool = False,
max_iterations: Optional[int] = 15,
max_execution_time: Optional[float] = None,
early_stopping_method: str = "force",
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
include_df_in_prompt: Optional[bool] = True,
**kwargs: Dict[str, Any],
) -> AgentExecutor:
"""Construct a pandas agent from an LLM and dataframe."""
    agent: BaseSingleActionAgent
if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION:
prompt, tools = _get_prompt_and_tools(
df,
prefix=prefix,
suffix=suffix,
input_variables=input_variables,
include_df_in_prompt=include_df_in_prompt,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(
llm_chain=llm_chain,
allowed_tools=tool_names,
callback_manager=callback_manager,
**kwargs,
)
elif agent_type == AgentType.OPENAI_FUNCTIONS:
_prompt, tools = _get_functions_prompt_and_tools(
df,
prefix=prefix,
suffix=suffix,
input_variables=input_variables,
include_df_in_prompt=include_df_in_prompt,
)
agent = OpenAIFunctionsAgent(
llm=llm,
prompt=_prompt,
tools=tools,
callback_manager=callback_manager,
**kwargs,
)
else:
raise ValueError(f"Agent type {agent_type} not supported at the moment.")
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
return_intermediate_steps=return_intermediate_steps,
max_iterations=max_iterations,
max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method,
**(agent_executor_kwargs or {}),
    )
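
A usage sketch (illustrative; "titanic.csv" is a placeholder file, and a list of dataframes is also accepted):

import pandas as pd
from langchain.agents import create_pandas_dataframe_agent
from langchain.llms import OpenAI

df = pd.read_csv("titanic.csv")
agent_executor = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)
agent_executor.run("How many rows are there?")
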
Source code for langchain.agents.agent_toolkits.json.toolkit
"""Toolkit for interacting with a JSON spec."""
from __future__ import annotations
from typing import List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.json.tool import JsonGetValueTool, JsonListKeysTool, JsonSpec
class JsonToolkit(BaseToolkit):
"""Toolkit for interacting with a JSON spec."""
spec: JsonSpec
    def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
JsonListKeysTool(spec=self.spec),
JsonGetValueTool(spec=self.spec),
        ]
Source code for langchain.agents.agent_toolkits.json.base
"""Json agent."""
from typing import Any, Dict, List, Optional
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
def create_json_agent(
llm: BaseLanguageModel,
toolkit: JsonToolkit,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: str = JSON_PREFIX,
suffix: str = JSON_SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: Optional[List[str]] = None,
verbose: bool = False,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a JSON agent from an LLM and tools."""
tools = toolkit.get_tools()
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
    return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
**(agent_executor_kwargs or {}),
    )
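
A combined usage sketch for JsonToolkit and create_json_agent (illustrative; the spec filename is a placeholder):

import yaml
from langchain.agents import create_json_agent
from langchain.agents.agent_toolkits import JsonToolkit
from langchain.llms import OpenAI
from langchain.tools.json.tool import JsonSpec

with open("openai_openapi.yml") as f:
    data = yaml.safe_load(f)
json_spec = JsonSpec(dict_=data, max_value_length=4000)
agent_executor = create_json_agent(
    llm=OpenAI(temperature=0), toolkit=JsonToolkit(spec=json_spec), verbose=True
)
agent_executor.run("What are the required parameters in the request body to the /completions endpoint?")
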
Source code for langchain.agents.self_ask_with_search.base
"""Chain that does self ask with search."""
from typing import Any, Sequence, Union
from pydantic import Field
from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser
from langchain.agents.agent_types import AgentType
from langchain.agents.self_ask_with_search.output_parser import SelfAskOutputParser
from langchain.agents.self_ask_with_search.prompt import PROMPT
from langchain.agents.tools import Tool
from langchain.agents.utils import validate_tools_single_input
from langchain.base_language import BaseLanguageModel
from langchain.prompts.base import BasePromptTemplate
from langchain.tools.base import BaseTool
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.serpapi import SerpAPIWrapper
class SelfAskWithSearchAgent(Agent):
"""Agent for the self-ask-with-search paper."""
output_parser: AgentOutputParser = Field(default_factory=SelfAskOutputParser)
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
return SelfAskOutputParser()
@property
def _agent_type(self) -> str:
"""Return Identifier of agent type."""
return AgentType.SELF_ASK_WITH_SEARCH
@classmethod
def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
"""Prompt does not depend on tools."""
return PROMPT
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
validate_tools_single_input(cls.__name__, tools)
super()._validate_tools(tools)
if len(tools) != 1:
            raise ValueError(f"Exactly one tool must be specified, but got {tools}")
tool_names = {tool.name for tool in tools}
if tool_names != {"Intermediate Answer"}:
raise ValueError(
f"Tool name should be Intermediate Answer, got {tool_names}"
)
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Intermediate answer: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the LLM call with."""
return ""
class SelfAskWithSearchChain(AgentExecutor):
"""Chain that does self ask with search.
Example:
.. code-block:: python
from langchain import SelfAskWithSearchChain, OpenAI, GoogleSerperAPIWrapper
search_chain = GoogleSerperAPIWrapper()
self_ask = SelfAskWithSearchChain(llm=OpenAI(), search_chain=search_chain)
"""
def __init__(
self,
llm: BaseLanguageModel,
search_chain: Union[GoogleSerperAPIWrapper, SerpAPIWrapper],
**kwargs: Any,
):
"""Initialize with just an LLM and a search chain."""
search_tool = Tool(
name="Intermediate Answer",
func=search_chain.run,
coroutine=search_chain.arun,
description="Search",
)
agent = SelfAskWithSearchAgent.from_llm_and_tools(llm, [search_tool])
        super().__init__(agent=agent, tools=[search_tool], **kwargs)
Source code for langchain.experimental.autonomous_agents.baby_agi.baby_agi
"""BabyAGI agent."""
from collections import deque
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.experimental.autonomous_agents.baby_agi.task_creation import (
TaskCreationChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_execution import (
TaskExecutionChain,
)
from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import (
TaskPrioritizationChain,
)
from langchain.vectorstores.base import VectorStore
class BabyAGI(Chain, BaseModel):
"""Controller model for the BabyAGI agent."""
task_list: deque = Field(default_factory=deque)
task_creation_chain: Chain = Field(...)
task_prioritization_chain: Chain = Field(...)
execution_chain: Chain = Field(...)
task_id_counter: int = Field(1)
vectorstore: VectorStore = Field(init=False)
max_iterations: Optional[int] = None
    class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def add_task(self, task: Dict) -> None:
self.task_list.append(task)
def print_task_list(self) -> None:
print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
for t in self.task_list:
            print(str(t["task_id"]) + ": " + t["task_name"])
def print_next_task(self, task: Dict) -> None:
print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
print(str(task["task_id"]) + ": " + task["task_name"])
def print_task_result(self, result: str) -> None:
print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
print(result)
@property
def input_keys(self) -> List[str]:
return ["objective"]
@property
def output_keys(self) -> List[str]:
return []
    def get_next_task(
self, result: str, task_description: str, objective: str
) -> List[Dict]:
"""Get the next task."""
task_names = [t["task_name"] for t in self.task_list]
incomplete_tasks = ", ".join(task_names)
response = self.task_creation_chain.run(
result=result,
task_description=task_description,
incomplete_tasks=incomplete_tasks,
objective=objective,
)
new_tasks = response.split("\n")
return [
{"task_name": task_name} for task_name in new_tasks if task_name.strip()
]
    def prioritize_tasks(self, this_task_id: int, objective: str) -> List[Dict]:
"""Prioritize tasks."""
task_names = [t["task_name"] for t in list(self.task_list)]
        next_task_id = int(this_task_id) + 1
response = self.task_prioritization_chain.run(
task_names=", ".join(task_names),
next_task_id=str(next_task_id),
objective=objective,
)
new_tasks = response.split("\n")
prioritized_task_list = []
for task_string in new_tasks:
if not task_string.strip():
continue
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = task_parts[0].strip()
task_name = task_parts[1].strip()
prioritized_task_list.append(
{"task_id": task_id, "task_name": task_name}
)
return prioritized_task_list
def _get_top_tasks(self, query: str, k: int) -> List[str]:
"""Get the top k tasks based on the query."""
results = self.vectorstore.similarity_search(query, k=k)
if not results:
return []
return [str(item.metadata["task"]) for item in results]
    def execute_task(self, objective: str, task: str, k: int = 5) -> str:
"""Execute a task."""
context = self._get_top_tasks(query=objective, k=k)
return self.execution_chain.run(
objective=objective, context="\n".join(context), task=task
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the agent."""
        objective = inputs["objective"]
first_task = inputs.get("first_task", "Make a todo list")
self.add_task({"task_id": 1, "task_name": first_task})
num_iters = 0
while True:
if self.task_list:
self.print_task_list()
# Step 1: Pull the first task
task = self.task_list.popleft()
self.print_next_task(task)
# Step 2: Execute the task
result = self.execute_task(objective, task["task_name"])
this_task_id = int(task["task_id"])
self.print_task_result(result)
                # Step 3: Store the result in the vectorstore
result_id = f"result_{task['task_id']}"
self.vectorstore.add_texts(
texts=[result],
metadatas=[{"task": task["task_name"]}],
ids=[result_id],
)
# Step 4: Create new tasks and reprioritize task list
new_tasks = self.get_next_task(result, task["task_name"], objective)
for new_task in new_tasks:
self.task_id_counter += 1
new_task.update({"task_id": self.task_id_counter})
self.add_task(new_task)
self.task_list = deque(self.prioritize_tasks(this_task_id, objective))
num_iters += 1
if self.max_iterations is not None and num_iters == self.max_iterations:
print(
"\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
)
break
return {}
    @classmethod
    def from_llm(
cls,
llm: BaseLanguageModel,
vectorstore: VectorStore,
verbose: bool = False,
task_execution_chain: Optional[Chain] = None,
**kwargs: Dict[str, Any],
) -> "BabyAGI":
"""Initialize the BabyAGI Controller."""
task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
task_prioritization_chain = TaskPrioritizationChain.from_llm(
llm, verbose=verbose
)
if task_execution_chain is None:
execution_chain: Chain = TaskExecutionChain.from_llm(llm, verbose=verbose)
else:
execution_chain = task_execution_chain
return cls(
task_creation_chain=task_creation_chain,
task_prioritization_chain=task_prioritization_chain,
execution_chain=execution_chain,
vectorstore=vectorstore,
**kwargs,
        )
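
A usage sketch (illustrative; FAISS is one choice of vectorstore and assumes the faiss package, and the seed text's "task" metadata keeps _get_top_tasks from raising on early queries):

from langchain import OpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental import BabyAGI
from langchain.vectorstores import FAISS

vectorstore = FAISS.from_texts(["_"], OpenAIEmbeddings(), metadatas=[{"task": "init"}])
baby_agi = BabyAGI.from_llm(
    llm=OpenAI(temperature=0), vectorstore=vectorstore, max_iterations=3
)
baby_agi({"objective": "Write a weather report for SF today"})
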
Source code for langchain.experimental.autonomous_agents.autogpt.agent
from __future__ import annotations
from typing import List, Optional
from pydantic import ValidationError
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
AutoGPTOutputParser,
BaseAutoGPTOutputParser,
)
from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import (
FINISH_NAME,
)
from langchain.memory import ChatMessageHistory
from langchain.schema import (
AIMessage,
BaseChatMessageHistory,
Document,
HumanMessage,
SystemMessage,
)
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever
class AutoGPT:
"""Agent class for interacting with Auto-GPT."""
def __init__(
self,
ai_name: str,
memory: VectorStoreRetriever,
chain: LLMChain,
output_parser: BaseAutoGPTOutputParser,
tools: List[BaseTool],
feedback_tool: Optional[HumanInputRun] = None,
chat_history_memory: Optional[BaseChatMessageHistory] = None,
):
self.ai_name = ai_name
self.memory = memory
self.next_action_count = 0
self.chain = chain
self.output_parser = output_parser
self.tools = tools
self.feedback_tool = feedback_tool
self.chat_history_memory = chat_history_memory or ChatMessageHistory()
@classmethod
def from_llm_and_tools( | https://api.python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/autogpt/agent.html |
6e6dc68fe7f9-1 | @classmethod
def from_llm_and_tools(
cls,
ai_name: str,
ai_role: str,
memory: VectorStoreRetriever,
tools: List[BaseTool],
llm: BaseChatModel,
human_in_the_loop: bool = False,
output_parser: Optional[BaseAutoGPTOutputParser] = None,
chat_history_memory: Optional[BaseChatMessageHistory] = None,
) -> AutoGPT:
prompt = AutoGPTPrompt(
ai_name=ai_name,
ai_role=ai_role,
tools=tools,
input_variables=["memory", "messages", "goals", "user_input"],
token_counter=llm.get_num_tokens,
)
human_feedback_tool = HumanInputRun() if human_in_the_loop else None
chain = LLMChain(llm=llm, prompt=prompt)
return cls(
ai_name,
memory,
chain,
output_parser or AutoGPTOutputParser(),
tools,
feedback_tool=human_feedback_tool,
chat_history_memory=chat_history_memory,
)
def run(self, goals: List[str]) -> str:
user_input = (
"Determine which next command to use, "
"and respond using the format specified above:"
)
# Interaction Loop
loop_count = 0
while True:
# Discontinue if continuous limit is reached
loop_count += 1
# Send message to AI, get response
assistant_reply = self.chain.run(
goals=goals,
messages=self.chat_history_memory.messages,
memory=self.memory,
user_input=user_input,
)
# Print Assistant thoughts | https://api.python.langchain.com/en/latest/_modules/langchain/experimental/autonomous_agents/autogpt/agent.html |
6e6dc68fe7f9-2 | user_input=user_input,
)
# Print Assistant thoughts
print(assistant_reply)
self.chat_history_memory.add_message(HumanMessage(content=user_input))
self.chat_history_memory.add_message(AIMessage(content=assistant_reply))
# Get command name and arguments
action = self.output_parser.parse(assistant_reply)
tools = {t.name: t for t in self.tools}
if action.name == FINISH_NAME:
return action.args["response"]
if action.name in tools:
tool = tools[action.name]
try:
observation = tool.run(action.args)
except ValidationError as e:
observation = (
f"Validation Error in args: {str(e)}, args: {action.args}"
)
except Exception as e:
observation = (
f"Error: {str(e)}, {type(e).__name__}, args: {action.args}"
)
result = f"Command {tool.name} returned: {observation}"
elif action.name == "ERROR":
result = f"Error: {action.args}. "
else:
result = (
f"Unknown command '{action.name}'. "
f"Please refer to the 'COMMANDS' list for available "
f"commands and only respond in the specified JSON format."
)
memory_to_add = (
f"Assistant Reply: {assistant_reply} " f"\nResult: {result} "
)
if self.feedback_tool is not None:
feedback = f"\n{self.feedback_tool.run('Input: ')}"
if feedback in {"q", "stop"}:
print("EXITING")
return "EXITING"
memory_to_add += feedback
self.memory.add_documents([Document(page_content=memory_to_add)])
self.chat_history_memory.add_message(SystemMessage(content=result))
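A minimal wiring sketch for the agent above; `tools`, `chat_llm`, and `retriever` are assumptions standing in for any tool list, chat model, and vector-store retriever already in scope:
agent = AutoGPT.from_llm_and_tools(
    ai_name="Tom",
    ai_role="Assistant",
    tools=tools,            # assumed List[BaseTool]
    llm=chat_llm,           # assumed BaseChatModel, e.g. ChatOpenAI
    memory=retriever,       # assumed VectorStoreRetriever
    human_in_the_loop=False,
)
# run() loops until the model issues the finish command, then returns its response:
agent.run(["write a weather report for SF today"])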
Source code for langchain.experimental.generative_agents.memory
import logging
import re
from datetime import datetime
from typing import Any, Dict, List, Optional
from langchain import LLMChain
from langchain.base_language import BaseLanguageModel
from langchain.prompts import PromptTemplate
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import BaseMemory, Document
from langchain.utils import mock_now
logger = logging.getLogger(__name__)
[docs]class GenerativeAgentMemory(BaseMemory):
llm: BaseLanguageModel
"""The core language model."""
memory_retriever: TimeWeightedVectorStoreRetriever
"""The retriever to fetch related memories."""
verbose: bool = False
reflection_threshold: Optional[float] = None
"""When aggregate_importance exceeds reflection_threshold, stop to reflect."""
current_plan: List[str] = []
"""The current plan of the agent."""
# A weight of 0.15 makes this less important than it
# would be otherwise, relative to salience and time
importance_weight: float = 0.15
"""How much weight to assign the memory importance."""
aggregate_importance: float = 0.0 # : :meta private:
"""Track the sum of the 'importance' of recent memories.
Triggers reflection when it reaches reflection_threshold."""
max_tokens_limit: int = 1200 # : :meta private:
# input keys
queries_key: str = "queries"
most_recent_memories_token_key: str = "recent_memories_token"
add_memory_key: str = "add_memory"
# output keys
relevant_memories_key: str = "relevant_memories"
relevant_memories_simple_key: str = "relevant_memories_simple"
most_recent_memories_key: str = "most_recent_memories"
now_key: str = "now"
reflecting: bool = False
def chain(self, prompt: PromptTemplate) -> LLMChain:
return LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)
@staticmethod
def _parse_list(text: str) -> List[str]:
"""Parse a newline-separated string into a list of strings."""
lines = re.split(r"\n", text.strip())
lines = [line for line in lines if line.strip()] # remove empty lines
return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]
def _get_topics_of_reflection(self, last_k: int = 50) -> List[str]:
"""Return the 3 most salient high-level questions about recent observations."""
prompt = PromptTemplate.from_template(
"{observations}\n\n"
"Given only the information above, what are the 3 most salient "
"high-level questions we can answer about the subjects in the statements?\n"
"Provide each question on a new line."
)
observations = self.memory_retriever.memory_stream[-last_k:]
observation_str = "\n".join(
[self._format_memory_detail(o) for o in observations]
)
result = self.chain(prompt).run(observations=observation_str)
return self._parse_list(result)
def _get_insights_on_topic(
self, topic: str, now: Optional[datetime] = None
) -> List[str]:
"""Generate 'insights' on a topic of reflection, based on pertinent memories."""
prompt = PromptTemplate.from_template(
"Statements relevant to: '{topic}'\n"
"---\n"
"{related_statements}\n"
"---\n"
"What 5 high-level novel insights can you infer from the above statements "
"that are relevant for answering the following question?\n"
"Do not include any insights that are not relevant to the question.\n"
"Do not repeat any insights that have already been made.\n\n"
"Question: {topic}\n\n"
"(example format: insight (because of 1, 5, 3))\n"
)
related_memories = self.fetch_memories(topic, now=now)
related_statements = "\n".join(
[
self._format_memory_detail(memory, prefix=f"{i+1}. ")
for i, memory in enumerate(related_memories)
]
)
result = self.chain(prompt).run(
topic=topic, related_statements=related_statements
)
# TODO: Parse the connections between memories and insights
return self._parse_list(result)
[docs] def pause_to_reflect(self, now: Optional[datetime] = None) -> List[str]:
"""Reflect on recent observations and generate 'insights'."""
if self.verbose:
logger.info("Character is reflecting")
new_insights = []
topics = self._get_topics_of_reflection()
for topic in topics:
insights = self._get_insights_on_topic(topic, now=now)
for insight in insights:
self.add_memory(insight, now=now)
new_insights.extend(insights)
return new_insights
def _score_memory_importance(self, memory_content: str) -> float:
"""Score the absolute importance of the given memory."""
prompt = PromptTemplate.from_template(
"On the scale of 1 to 10, where 1 is purely mundane"
+ " (e.g., brushing teeth, making bed) and 10 is"
+ " extremely poignant (e.g., a break up, college"
+ " acceptance), rate the likely poignancy of the"
+ " following piece of memory. Respond with a single integer."
+ "\nMemory: {memory_content}"
+ "\nRating: "
)
score = self.chain(prompt).run(memory_content=memory_content).strip()
if self.verbose:
logger.info(f"Importance score: {score}")
match = re.search(r"^\D*(\d+)", score)
if match:
return (float(match.group(1)) / 10) * self.importance_weight
else:
return 0.0
def _score_memories_importance(self, memory_content: str) -> List[float]:
"""Score the absolute importance of the given memory."""
prompt = PromptTemplate.from_template(
"On the scale of 1 to 10, where 1 is purely mundane"
+ " (e.g., brushing teeth, making bed) and 10 is"
+ " extremely poignant (e.g., a break up, college"
+ " acceptance), rate the likely poignancy of the" | https://api.python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/memory.html |
9a7aab7b9e07-4 | + " acceptance), rate the likely poignancy of the"
+ " following piece of memory. Always answer with only a list of numbers."
+ " If just given one memory still respond in a list."
+ " Memories are separated by semi colans (;)"
+ "\Memories: {memory_content}"
+ "\nRating: "
)
scores = self.chain(prompt).run(memory_content=memory_content).strip()
if self.verbose:
logger.info(f"Importance scores: {scores}")
# Split into list of strings and convert to floats
scores_list = [float(x) for x in scores.split(";")]
return scores_list
[docs] def add_memories(
self, memory_content: str, now: Optional[datetime] = None
) -> List[str]:
"""Add an observations or memories to the agent's memory."""
importance_scores = self._score_memories_importance(memory_content)
self.aggregate_importance += max(importance_scores)
memory_list = memory_content.split(";")
documents = []
for i in range(len(memory_list)):
documents.append(
Document(
page_content=memory_list[i],
metadata={"importance": importance_scores[i]},
)
)
result = self.memory_retriever.add_documents(documents, current_time=now)
# After an agent has processed a certain amount of memories (as measured by
# aggregate importance), it is time to reflect on recent events to add
# more synthesized memories to the agent's memory stream.
if (
self.reflection_threshold is not None
and self.aggregate_importance > self.reflection_threshold
and not self.reflecting
):
self.reflecting = True
self.pause_to_reflect(now=now)
# Hack to clear the importance from reflection
self.aggregate_importance = 0.0
self.reflecting = False
return result
[docs] def add_memory(
self, memory_content: str, now: Optional[datetime] = None
) -> List[str]:
"""Add an observation or memory to the agent's memory."""
importance_score = self._score_memory_importance(memory_content)
self.aggregate_importance += importance_score
document = Document(
page_content=memory_content, metadata={"importance": importance_score}
)
result = self.memory_retriever.add_documents([document], current_time=now)
# After an agent has processed a certain amount of memories (as measured by
# aggregate importance), it is time to reflect on recent events to add
# more synthesized memories to the agent's memory stream.
if (
self.reflection_threshold is not None
and self.aggregate_importance > self.reflection_threshold
and not self.reflecting
):
self.reflecting = True
self.pause_to_reflect(now=now)
# Hack to clear the importance from reflection
self.aggregate_importance = 0.0
self.reflecting = False
return result
[docs] def fetch_memories(
self, observation: str, now: Optional[datetime] = None
) -> List[Document]:
"""Fetch related memories."""
if now is not None:
with mock_now(now):
return self.memory_retriever.get_relevant_documents(observation)
else:
return self.memory_retriever.get_relevant_documents(observation)
def format_memories_detail(self, relevant_memories: List[Document]) -> str:
content = []
for mem in relevant_memories:
content.append(self._format_memory_detail(mem, prefix="- "))
return "\n".join([f"{mem}" for mem in content])
def _format_memory_detail(self, memory: Document, prefix: str = "") -> str:
created_time = memory.metadata["created_at"].strftime("%B %d, %Y, %I:%M %p")
return f"{prefix}[{created_time}] {memory.page_content.strip()}"
def format_memories_simple(self, relevant_memories: List[Document]) -> str:
return "; ".join([f"{mem.page_content}" for mem in relevant_memories])
def _get_memories_until_limit(self, consumed_tokens: int) -> str:
"""Reduce the number of tokens in the documents."""
result = []
for doc in self.memory_retriever.memory_stream[::-1]:
if consumed_tokens >= self.max_tokens_limit:
break
consumed_tokens += self.llm.get_num_tokens(doc.page_content)
if consumed_tokens < self.max_tokens_limit:
result.append(doc)
return self.format_memories_simple(result)
@property
def memory_variables(self) -> List[str]:
"""Input keys this memory class will load dynamically."""
return []
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
queries = inputs.get(self.queries_key)
now = inputs.get(self.now_key)
if queries is not None:
relevant_memories = [
mem for query in queries for mem in self.fetch_memories(query, now=now)
]
return {
self.relevant_memories_key: self.format_memories_detail(
relevant_memories
),
self.relevant_memories_simple_key: self.format_memories_simple(
relevant_memories
),
}
most_recent_memories_token = inputs.get(self.most_recent_memories_token_key)
if most_recent_memories_token is not None:
return {
self.most_recent_memories_key: self._get_memories_until_limit(
most_recent_memories_token
)
}
return {}
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None:
"""Save the context of this model run to memory."""
# TODO: fix the save memory key
mem = outputs.get(self.add_memory_key)
now = outputs.get(self.now_key)
if mem:
self.add_memory(mem, now=now)
[docs] def clear(self) -> None:
"""Clear memory contents."""
# TODO
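A hedged sketch of using the memory class above on its own; `llm` and `retriever` (a TimeWeightedVectorStoreRetriever) are assumed to be built already:
memory = GenerativeAgentMemory(
    llm=llm,
    memory_retriever=retriever,
    reflection_threshold=8.0,  # reflect once aggregate importance crosses this
)
memory.add_memory("Tommie remembers his dog, Bruno, from when he was a kid")
# Query memories through the BaseMemory interface (queries_key is "queries"):
memory.load_memory_variables({"queries": ["What does Tommie remember?"]})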
Source code for langchain.experimental.generative_agents.generative_agent
import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
from pydantic import BaseModel, Field
from langchain import LLMChain
from langchain.base_language import BaseLanguageModel
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.prompts import PromptTemplate
[docs]class GenerativeAgent(BaseModel):
"""A character with memory and innate characteristics."""
name: str
"""The character's name."""
age: Optional[int] = None
"""The optional age of the character."""
traits: str = "N/A"
"""Permanent traits to ascribe to the character."""
status: str
"""The traits of the character you wish not to change."""
memory: GenerativeAgentMemory
"""The memory object that combines relevance, recency, and 'importance'."""
llm: BaseLanguageModel
"""The underlying language model."""
verbose: bool = False
summary: str = "" #: :meta private:
"""Stateful self-summary generated via reflection on the character's memory."""
summary_refresh_seconds: int = 3600 #: :meta private:
"""How frequently to re-generate the summary."""
last_refreshed: datetime = Field(default_factory=datetime.now) # : :meta private:
"""The last time the character's summary was regenerated."""
daily_summaries: List[str] = Field(default_factory=list) # : :meta private:
"""Summary of the events in the plan that the agent took."""
[docs] class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
# LLM-related methods
@staticmethod
def _parse_list(text: str) -> List[str]:
"""Parse a newline-separated string into a list of strings."""
lines = re.split(r"\n", text.strip())
return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]
def chain(self, prompt: PromptTemplate) -> LLMChain:
return LLMChain(
llm=self.llm, prompt=prompt, verbose=self.verbose, memory=self.memory
)
def _get_entity_from_observation(self, observation: str) -> str:
prompt = PromptTemplate.from_template(
"What is the observed entity in the following observation? {observation}"
+ "\nEntity="
)
return self.chain(prompt).run(observation=observation).strip()
def _get_entity_action(self, observation: str, entity_name: str) -> str:
prompt = PromptTemplate.from_template(
"What is the {entity} doing in the following observation? {observation}"
+ "\nThe {entity} is"
)
return (
self.chain(prompt).run(entity=entity_name, observation=observation).strip()
)
[docs] def summarize_related_memories(self, observation: str) -> str:
"""Summarize memories that are most relevant to an observation."""
prompt = PromptTemplate.from_template(
"""
{q1}?
Context from memory:
{relevant_memories}
Relevant context:
"""
)
entity_name = self._get_entity_from_observation(observation)
entity_action = self._get_entity_action(observation, entity_name)
q1 = f"What is the relationship between {self.name} and {entity_name}"
q2 = f"{entity_name} is {entity_action}"
return self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip()
def _generate_reaction(
self, observation: str, suffix: str, now: Optional[datetime] = None
) -> str:
"""React to a given observation or dialogue act."""
prompt = PromptTemplate.from_template(
"{agent_summary_description}"
+ "\nIt is {current_time}."
+ "\n{agent_name}'s status: {agent_status}"
+ "\nSummary of relevant context from {agent_name}'s memory:"
+ "\n{relevant_memories}"
+ "\nMost recent observations: {most_recent_memories}"
+ "\nObservation: {observation}"
+ "\n\n"
+ suffix
)
agent_summary_description = self.get_summary(now=now)
relevant_memories_str = self.summarize_related_memories(observation)
current_time_str = (
datetime.now().strftime("%B %d, %Y, %I:%M %p")
if now is None
else now.strftime("%B %d, %Y, %I:%M %p")
)
kwargs: Dict[str, Any] = dict(
agent_summary_description=agent_summary_description,
current_time=current_time_str,
relevant_memories=relevant_memories_str,
agent_name=self.name,
observation=observation,
agent_status=self.status,
)
consumed_tokens = self.llm.get_num_tokens(
prompt.format(most_recent_memories="", **kwargs)
)
kwargs[self.memory.most_recent_memories_token_key] = consumed_tokens
return self.chain(prompt=prompt).run(**kwargs).strip()
def _clean_response(self, text: str) -> str:
return re.sub(f"^{self.name} ", "", text.strip()).strip()
[docs] def generate_reaction(
self, observation: str, now: Optional[datetime] = None
) -> Tuple[bool, str]:
"""React to a given observation."""
call_to_action_template = (
"Should {agent_name} react to the observation, and if so,"
+ " what would be an appropriate reaction? Respond in one line."
+ ' If the action is to engage in dialogue, write:\nSAY: "what to say"'
+ "\notherwise, write:\nREACT: {agent_name}'s reaction (if anything)."
+ "\nEither do nothing, react, or say something but not both.\n\n"
)
full_result = self._generate_reaction(
observation, call_to_action_template, now=now
)
result = full_result.strip().split("\n")[0]
# AAA
self.memory.save_context(
{},
{
self.memory.add_memory_key: f"{self.name} observed "
f"{observation} and reacted by {result}",
self.memory.now_key: now,
},
)
if "REACT:" in result:
reaction = self._clean_response(result.split("REACT:")[-1])
return False, f"{self.name} {reaction}"
if "SAY:" in result: | https://api.python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/generative_agent.html |
60662630844e-4 | if "SAY:" in result:
said_value = self._clean_response(result.split("SAY:")[-1])
return True, f"{self.name} said {said_value}"
else:
return False, result
[docs] def generate_dialogue_response(
self, observation: str, now: Optional[datetime] = None
) -> Tuple[bool, str]:
"""React to a given observation."""
call_to_action_template = (
"What would {agent_name} say? To end the conversation, write:"
' GOODBYE: "what to say". Otherwise to continue the conversation,'
' write: SAY: "what to say next"\n\n'
)
full_result = self._generate_reaction(
observation, call_to_action_template, now=now
)
result = full_result.strip().split("\n")[0]
if "GOODBYE:" in result:
farewell = self._clean_response(result.split("GOODBYE:")[-1])
self.memory.save_context(
{},
{
self.memory.add_memory_key: f"{self.name} observed "
f"{observation} and said {farewell}",
self.memory.now_key: now,
},
)
return False, f"{self.name} said {farewell}"
if "SAY:" in result:
response_text = self._clean_response(result.split("SAY:")[-1])
self.memory.save_context(
{},
{
self.memory.add_memory_key: f"{self.name} observed "
f"{observation} and said {response_text}",
self.memory.now_key: now,
},
)
return True, f"{self.name} said {response_text}" | https://api.python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/generative_agent.html |
60662630844e-5 | )
return True, f"{self.name} said {response_text}"
else:
return False, result
######################################################
# Agent's stateful summary methods. #
# Each dialog or response prompt includes a header #
# summarizing the agent's self-description. This is #
# updated periodically through probing its memories #
######################################################
def _compute_agent_summary(self) -> str:
""""""
prompt = PromptTemplate.from_template(
"How would you summarize {name}'s core characteristics given the"
+ " following statements:\n"
+ "{relevant_memories}"
+ "Do not embellish."
+ "\n\nSummary: "
)
# The agent seeks to think about their core characteristics.
return (
self.chain(prompt)
.run(name=self.name, queries=[f"{self.name}'s core characteristics"])
.strip()
)
[docs] def get_summary(
self, force_refresh: bool = False, now: Optional[datetime] = None
) -> str:
"""Return a descriptive summary of the agent."""
current_time = datetime.now() if now is None else now
since_refresh = (current_time - self.last_refreshed).seconds
if (
not self.summary
or since_refresh >= self.summary_refresh_seconds
or force_refresh
):
self.summary = self._compute_agent_summary()
self.last_refreshed = current_time
age = self.age if self.age is not None else "N/A"
return (
f"Name: {self.name} (age: {age})"
+ f"\nInnate traits: {self.traits}" | https://api.python.langchain.com/en/latest/_modules/langchain/experimental/generative_agents/generative_agent.html |
60662630844e-6 | + f"\nInnate traits: {self.traits}"
+ f"\n{self.summary}"
)
[docs] def get_full_header(
self, force_refresh: bool = False, now: Optional[datetime] = None
) -> str:
"""Return a full header of the agent's status, summary, and current time."""
now = datetime.now() if now is None else now
summary = self.get_summary(force_refresh=force_refresh, now=now)
current_time_str = now.strftime("%B %d, %Y, %I:%M %p")
return (
f"{summary}\nIt is {current_time_str}.\n{self.name}'s status: {self.status}"
)
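Combining the agent with the memory module above, a hedged sketch (all names are illustrative):
tommie = GenerativeAgent(
    name="Tommie",
    age=25,
    traits="anxious, likes design, talkative",
    status="looking for a job",
    llm=llm,        # assumed BaseLanguageModel
    memory=memory,  # a GenerativeAgentMemory as sketched earlier
)
print(tommie.get_summary(force_refresh=True))
# generate_reaction returns (should_continue_dialogue, rendered_reaction):
_, reaction = tommie.generate_reaction("Tommie sees a new coffee shop open up")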
Source code for langchain.vectorstores.typesense
"""Wrapper around Typesense vector search"""
from __future__ import annotations
import uuid
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_env
from langchain.vectorstores.base import VectorStore
if TYPE_CHECKING:
from typesense.client import Client
from typesense.collection import Collection
[docs]class Typesense(VectorStore):
"""Wrapper around Typesense vector search.
To use, you should have the ``typesense`` python package installed.
Example:
.. code-block:: python
from langchain.embedding.openai import OpenAIEmbeddings
from langchain.vectorstores import Typesense
import typesense
node = {
"host": "localhost", # For Typesense Cloud use xxx.a1.typesense.net
"port": "8108", # For Typesense Cloud use 443
"protocol": "http" # For Typesense Cloud use https
}
typesense_client = typesense.Client(
{
"nodes": [node],
"api_key": "<API_KEY>",
"connection_timeout_seconds": 2
}
)
typesense_collection_name = "langchain-memory"
embedding = OpenAIEmbeddings()
vectorstore = Typesense(
typesense_client=typesense_client,
embedding=embedding,
typesense_collection_name=typesense_collection_name,
text_key="text",
)
"""
def __init__(
self,
typesense_client: Client,
embedding: Embeddings,
*,
typesense_collection_name: Optional[str] = None,
text_key: str = "text",
):
"""Initialize with Typesense client."""
try:
from typesense import Client
except ImportError:
raise ValueError(
"Could not import typesense python package. "
"Please install it with `pip install typesense`."
)
if not isinstance(typesense_client, Client):
raise ValueError(
f"typesense_client should be an instance of typesense.Client, "
f"got {type(typesense_client)}"
)
self._typesense_client = typesense_client
self._embedding = embedding
self._typesense_collection_name = (
typesense_collection_name or f"langchain-{str(uuid.uuid4())}"
)
self._text_key = text_key
@property
def _collection(self) -> Collection:
return self._typesense_client.collections[self._typesense_collection_name]
def _prep_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]],
ids: Optional[List[str]],
) -> List[dict]:
"""Embed and create the documents"""
_ids = ids or (str(uuid.uuid4()) for _ in texts)
_metadatas: Iterable[dict] = metadatas or ({} for _ in texts)
embedded_texts = self._embedding.embed_documents(list(texts))
return [
{"id": _id, "vec": vec, f"{self._text_key}": text, "metadata": metadata} | https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/typesense.html |
89ed0ff49ed6-2 | for _id, vec, text, metadata in zip(_ids, embedded_texts, texts, _metadatas)
]
def _create_collection(self, num_dim: int) -> None:
fields = [
{"name": "vec", "type": "float[]", "num_dim": num_dim},
{"name": f"{self._text_key}", "type": "string"},
{"name": ".*", "type": "auto"},
]
self._typesense_client.collections.create(
{"name": self._typesense_collection_name, "fields": fields}
)
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embedding and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
from typesense.exceptions import ObjectNotFound
docs = self._prep_texts(texts, metadatas, ids)
try:
self._collection.documents.import_(docs, {"action": "upsert"})
except ObjectNotFound:
# Create the collection if it doesn't already exist
self._create_collection(len(docs[0]["vec"]))
self._collection.documents.import_(docs, {"action": "upsert"})
return [doc["id"] for doc in docs] | https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/typesense.html |
89ed0ff49ed6-3 | return [doc["id"] for doc in docs]
[docs] def similarity_search_with_score(
self,
query: str,
k: int = 10,
filter: Optional[str] = "",
) -> List[Tuple[Document, float]]:
"""Return typesense documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 10.
A minimum of 10 results will be returned.
filter: typesense filter_by expression to filter documents on
Returns:
List of Documents most similar to the query and score for each
"""
embedded_query = [str(x) for x in self._embedding.embed_query(query)]
query_obj = {
"q": "*",
"vector_query": f'vec:([{",".join(embedded_query)}], k:{k})',
"filter_by": filter,
"collection": self._typesense_collection_name,
}
docs = []
response = self._typesense_client.multi_search.perform(
{"searches": [query_obj]}, {}
)
for hit in response["results"][0]["hits"]:
document = hit["document"]
metadata = document["metadata"]
text = document[self._text_key]
score = hit["vector_distance"]
docs.append((Document(page_content=text, metadata=metadata), score))
return docs
[docs] def similarity_search(
self,
query: str,
k: int = 10,
filter: Optional[str] = "",
**kwargs: Any,
) -> List[Document]:
"""Return typesense documents most similar to query. | https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/typesense.html |
89ed0ff49ed6-4 | ) -> List[Document]:
"""Return typesense documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 10.
A minimum of 10 results will be returned.
filter: typesense filter_by expression to filter documents on
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_score = self.similarity_search_with_score(query, k=k, filter=filter)
return [doc for doc, _ in docs_and_score]
[docs] @classmethod
def from_client_params(
cls,
embedding: Embeddings,
*,
host: str = "localhost",
port: Union[str, int] = "8108",
protocol: str = "http",
typesense_api_key: Optional[str] = None,
connection_timeout_seconds: int = 2,
**kwargs: Any,
) -> Typesense:
"""Initialize Typesense directly from client parameters.
Example:
.. code-block:: python
from langchain.embedding.openai import OpenAIEmbeddings
from langchain.vectorstores import Typesense
# Pass in typesense_api_key as kwarg or set env var "TYPESENSE_API_KEY".
vectorstore = Typesense.from_client_params(
OpenAIEmbeddings(),
host="localhost",
port="8108",
protocol="http",
typesense_collection_name="langchain-memory",
)
"""
try:
from typesense import Client
except ImportError:
raise ValueError(
"Could not import typesense python package. "
"Please install it with `pip install typesense`."
)
node = {
"host": host,
"port": str(port),
"protocol": protocol,
}
typesense_api_key = typesense_api_key or get_from_env(
"typesense_api_key", "TYPESENSE_API_KEY"
)
client_config = {
"nodes": [node],
"api_key": typesense_api_key,
"connection_timeout_seconds": connection_timeout_seconds,
}
return cls(Client(client_config), embedding, **kwargs)
[docs] @classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
typesense_client: Optional[Client] = None,
typesense_client_params: Optional[dict] = None,
typesense_collection_name: Optional[str] = None,
text_key: str = "text",
**kwargs: Any,
) -> Typesense:
"""Construct Typesense wrapper from raw text."""
if typesense_client:
vectorstore = cls(typesense_client, embedding, **kwargs)
elif typesense_client_params:
vectorstore = cls.from_client_params(
embedding, **typesense_client_params, **kwargs
)
else:
raise ValueError(
"Must specify one of typesense_client or typesense_client_params."
)
vectorstore.add_texts(texts, metadatas=metadatas, ids=ids)
return vectorstore
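An end-to-end sketch of the classmethods above; it assumes a Typesense server is reachable on localhost and that an OpenAI API key is configured:
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Typesense

docsearch = Typesense.from_texts(
    ["apple", "banana", "cherry"],
    OpenAIEmbeddings(),
    typesense_client_params={
        "host": "localhost",
        "port": "8108",
        "protocol": "http",
        "typesense_api_key": "<API_KEY>",
    },
)
docsearch.similarity_search("fruit", k=10)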
Source code for langchain.vectorstores.supabase
from __future__ import annotations
import uuid
from itertools import repeat
from typing import (
TYPE_CHECKING,
Any,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
import supabase
[docs]class SupabaseVectorStore(VectorStore):
"""VectorStore for a Supabase postgres database. Assumes you have the `pgvector`
extension installed and a `match_documents` (or similar) function. For more details:
https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase
You can implement your own `match_documents` function in order to limit the search
space to a subset of documents based on your own authorization or business logic.
Note that the Supabase Python client does not yet support async operations.
If you'd like to use `max_marginal_relevance_search`, please review the instructions
below on modifying the `match_documents` function to return matched embeddings.
"""
_client: supabase.client.Client
# This is the embedding function. Don't confuse with the embedding vectors.
# We should perhaps rename the underlying Embedding base class to EmbeddingFunction
# or something
_embedding: Embeddings
table_name: str
query_name: str
def __init__(
self,
client: supabase.client.Client,
embedding: Embeddings,
table_name: str,
query_name: Union[str, None] = None,
) -> None:
"""Initialize with supabase client."""
try:
import supabase # noqa: F401
except ImportError:
raise ValueError(
"Could not import supabase python package. "
"Please install it with `pip install supabase`."
)
self._client = client
self._embedding: Embeddings = embedding
self.table_name = table_name or "documents"
self.query_name = query_name or "match_documents"
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict[Any, Any]]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
ids = ids or [str(uuid.uuid4()) for _ in texts]
docs = self._texts_to_documents(texts, metadatas)
vectors = self._embedding.embed_documents(list(texts))
return self.add_vectors(vectors, docs, ids)
[docs] @classmethod
def from_texts(
cls: Type["SupabaseVectorStore"],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
client: Optional[supabase.client.Client] = None,
table_name: Optional[str] = "documents",
query_name: Union[str, None] = "match_documents",
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> "SupabaseVectorStore":
"""Return VectorStore initialized from texts and embeddings.""" | https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
a69c40c6a54c-2 | """Return VectorStore initialized from texts and embeddings."""
if not client:
raise ValueError("Supabase client is required.")
if not table_name:
raise ValueError("Supabase document table_name is required.")
embeddings = embedding.embed_documents(texts)
ids = [str(uuid.uuid4()) for _ in texts]
docs = cls._texts_to_documents(texts, metadatas)
_ids = cls._add_vectors(client, table_name, embeddings, docs, ids)
return cls(
client=client,
embedding=embedding,
table_name=table_name,
query_name=query_name,
)
[docs] def add_vectors(
self,
vectors: List[List[float]],
documents: List[Document],
ids: List[str],
) -> List[str]:
return self._add_vectors(self._client, self.table_name, vectors, documents, ids)
[docs] def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
vectors = self._embedding.embed_documents([query])
return self.similarity_search_by_vector(vectors[0], k)
[docs] def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
result = self.similarity_search_by_vector_with_relevance_scores(embedding, k)
documents = [doc for doc, _ in result]
return documents
[docs] def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
vectors = self._embedding.embed_documents([query])
return self.similarity_search_by_vector_with_relevance_scores(vectors[0], k)
[docs] def similarity_search_by_vector_with_relevance_scores(
self, query: List[float], k: int
) -> List[Tuple[Document, float]]:
match_documents_params = dict(query_embedding=query, match_count=k)
res = self._client.rpc(self.query_name, match_documents_params).execute()
match_result = [
(
Document(
metadata=search.get("metadata", {}), # type: ignore
page_content=search.get("content", ""),
),
search.get("similarity", 0.0),
)
for search in res.data
if search.get("content")
]
return match_result
[docs] def similarity_search_by_vector_returning_embeddings(
self, query: List[float], k: int
) -> List[Tuple[Document, float, np.ndarray[np.float32, Any]]]:
match_documents_params = dict(query_embedding=query, match_count=k)
res = self._client.rpc(self.query_name, match_documents_params).execute()
match_result = [
(
Document(
metadata=search.get("metadata", {}), # type: ignore
page_content=search.get("content", ""),
),
search.get("similarity", 0.0),
# Supabase returns a vector type as its string representation (!).
# This is a hack to convert the string to numpy array.
np.fromstring(
search.get("embedding", "").strip("[]"), np.float32, sep=","
),
)
for search in res.data
if search.get("content")
]
return match_result
@staticmethod
def _texts_to_documents(
texts: Iterable[str],
metadatas: Optional[Iterable[dict[Any, Any]]] = None,
) -> List[Document]:
"""Return list of Documents from list of texts and metadatas."""
if metadatas is None:
metadatas = repeat({})
docs = [
Document(page_content=text, metadata=metadata)
for text, metadata in zip(texts, metadatas)
]
return docs
@staticmethod
def _add_vectors(
client: supabase.client.Client,
table_name: str,
vectors: List[List[float]],
documents: List[Document],
ids: List[str],
) -> List[str]:
"""Add vectors to Supabase table."""
rows: List[dict[str, Any]] = [
{
"id": ids[idx],
"content": documents[idx].page_content,
"embedding": embedding,
"metadata": documents[idx].metadata, # type: ignore
}
for idx, embedding in enumerate(vectors)
]
# According to the SupabaseVectorStore JS implementation, the best chunk size
# is 500
chunk_size = 500
id_list: List[str] = []
for i in range(0, len(rows), chunk_size):
chunk = rows[i : i + chunk_size]
result = client.from_(table_name).upsert(chunk).execute()  # type: ignore
if len(result.data) == 0:
raise Exception("Error inserting: No rows added")
# VectorStore.add_vectors returns ids as strings
ids = [str(i.get("id")) for i in result.data if i.get("id")]
id_list.extend(ids)
return id_list
[docs] def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
result = self.similarity_search_by_vector_returning_embeddings(
embedding, fetch_k
)
matched_documents = [doc_tuple[0] for doc_tuple in result]
matched_embeddings = [doc_tuple[2] for doc_tuple in result]
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
matched_embeddings,
k=k,
lambda_mult=lambda_mult,
)
filtered_documents = [matched_documents[i] for i in mmr_selected]
return filtered_documents
[docs] def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
`max_marginal_relevance_search` requires that `query_name` returns matched
embeddings alongside the match documents. The following function
demonstrates how to do this:
```sql
CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536),
match_count int)
RETURNS TABLE(
id bigint,
content text,
metadata jsonb,
embedding vector(1536),
similarity float)
LANGUAGE plpgsql
AS $$
# variable_conflict use_column
BEGIN
RETURN query
SELECT
id,
content,
metadata,
embedding,
1 -(docstore.embedding <=> query_embedding) AS similarity
FROM
docstore
ORDER BY
docstore.embedding <=> query_embedding
LIMIT match_count;
END;
$$;
```
"""
embedding = self._embedding.embed_documents([query])
docs = self.max_marginal_relevance_search_by_vector(
embedding[0], k, fetch_k, lambda_mult=lambda_mult
)
return docs
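Given a SQL function like the `match_documents_embeddings` shown in the docstring above, a hedged call sketch; `supabase_client` is an assumed, already-authenticated supabase.client.Client:
from langchain.embeddings.openai import OpenAIEmbeddings

mmr_store = SupabaseVectorStore(
    client=supabase_client,
    embedding=OpenAIEmbeddings(),
    table_name="documents",
    query_name="match_documents_embeddings",  # must return embeddings, per above
)
mmr_store.max_marginal_relevance_search("hello", k=4, fetch_k=20)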
[docs] def delete(self, ids: List[str]) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
rows: List[dict[str, Any]] = [
{
"id": id,
}
for id in ids
]
# TODO: Check if this can be done in bulk
for row in rows:
self._client.from_(self.table_name).delete().eq("id", row["id"]).execute()
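A hedged end-to-end sketch; it assumes a Supabase project with `pgvector` enabled plus the `documents` table and `match_documents` function described above (the environment variable names are illustrative):
import os
from supabase import create_client
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import SupabaseVectorStore

supabase_client = create_client(
    os.environ["SUPABASE_URL"], os.environ["SUPABASE_SERVICE_KEY"]
)
vector_store = SupabaseVectorStore.from_texts(
    ["hello world"],
    OpenAIEmbeddings(),
    client=supabase_client,
    table_name="documents",
    query_name="match_documents",
)
vector_store.similarity_search("hello", k=1)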
Source code for langchain.vectorstores.cassandra
"""Wrapper around Cassandra vector-store capabilities, based on cassIO."""
from __future__ import annotations
import hashlib
import typing
from typing import Any, Iterable, List, Optional, Tuple, Type, TypeVar
import numpy as np
if typing.TYPE_CHECKING:
from cassandra.cluster import Session
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
CVST = TypeVar("CVST", bound="Cassandra")
# a positive number of seconds to expire entries, or None for no expiration.
CASSANDRA_VECTORSTORE_DEFAULT_TTL_SECONDS = None
def _hash(_input: str) -> str:
"""Use a deterministic hashing approach."""
return hashlib.md5(_input.encode()).hexdigest()
[docs]class Cassandra(VectorStore):
"""Wrapper around Cassandra embeddings platform.
There is no notion of a default table name, since each embedding
function implies its own vector dimension, which is part of the schema.
Example:
.. code-block:: python
from langchain.vectorstores import Cassandra
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
session = ...
keyspace = 'my_keyspace'
vectorstore = Cassandra(embeddings, session, keyspace, 'my_doc_archive')
"""
_embedding_dimension: int | None
def _getEmbeddingDimension(self) -> int:
if self._embedding_dimension is None:
self._embedding_dimension = len(
self.embedding.embed_query("This is a sample sentence.")
)
return self._embedding_dimension
def __init__(
self,
embedding: Embeddings,
session: Session,
keyspace: str,
table_name: str,
ttl_seconds: int | None = CASSANDRA_VECTORSTORE_DEFAULT_TTL_SECONDS,
) -> None:
"""Create a vector table."""
try:
from cassio.vector import VectorTable
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
"""Create a vector table."""
self.embedding = embedding
self.session = session
self.keyspace = keyspace
self.table_name = table_name
self.ttl_seconds = ttl_seconds
#
self._embedding_dimension = None
#
self.table = VectorTable(
session=session,
keyspace=keyspace,
table=table_name,
embedding_dimension=self._getEmbeddingDimension(),
auto_id=False, # the `add_texts` contract admits user-provided ids
)
[docs] def delete_collection(self) -> None:
"""
Just an alias for `clear`
(to better align with other VectorStore implementations).
"""
self.clear()
[docs] def clear(self) -> None:
"""Empty the collection."""
self.table.clear()
[docs] def delete_by_document_id(self, document_id: str) -> None:
return self.table.delete(document_id)
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
Returns:
List[str]: List of IDs of the added texts.
"""
_texts = list(texts) # lest it be a generator or something
if ids is None:
# unless otherwise specified, we have deterministic IDs:
# re-inserting an existing document will not create a duplicate.
# (and effectively update the metadata)
ids = [_hash(text) for text in _texts]
if metadatas is None:
metadatas = [{} for _ in _texts]
#
ttl_seconds = kwargs.get("ttl_seconds", self.ttl_seconds)
#
embedding_vectors = self.embedding.embed_documents(_texts)
for text, embedding_vector, text_id, metadata in zip(
_texts, embedding_vectors, ids, metadatas
):
self.table.put(
document=text,
embedding_vector=embedding_vector,
document_id=text_id,
metadata=metadata,
ttl_seconds=ttl_seconds,
)
#
return ids
# id-returning search facilities
[docs] def similarity_search_with_score_id_by_vector(
self,
embedding: List[float],
k: int = 4,
) -> List[Tuple[Document, float, str]]:
"""Return docs most similar to embedding vector. | https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/cassandra.html |
daaaaf8c8dbe-3 | """Return docs most similar to embedding vector.
No support for `filter` query (on metadata) along with vector search.
Args:
embedding (str): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
Returns:
List of (Document, score, id), the most similar to the query vector.
"""
hits = self.table.search(
embedding_vector=embedding,
top_k=k,
metric="cos",
metric_threshold=None,
)
# We stick to 'cos' distance as it can be normalized on a 0-1 axis
# (1=most relevant), as required by this class' contract.
return [
(
Document(
page_content=hit["document"],
metadata=hit["metadata"],
),
0.5 + 0.5 * hit["distance"],
hit["document_id"],
)
for hit in hits
]
[docs] def similarity_search_with_score_id(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float, str]]:
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_with_score_id_by_vector(
embedding=embedding_vector,
k=k,
)
# id-unaware search facilities
[docs] def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector. | https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/cassandra.html |
daaaaf8c8dbe-4 | """Return docs most similar to embedding vector.
No support for `filter` query (on metadata) along with vector search.
Args:
embedding (str): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
Returns:
List of (Document, score), the most similar to the query vector.
"""
return [
(doc, score)
for (doc, score, docId) in self.similarity_search_with_score_id_by_vector(
embedding=embedding,
k=k,
)
]
[docs] def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
#
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_by_vector(
embedding_vector,
k,
**kwargs,
)
[docs] def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
**kwargs: Any,
) -> List[Document]:
return [
doc
for doc, _ in self.similarity_search_with_score_by_vector(
embedding,
k,
)
]
[docs] def similarity_search_with_score(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_with_score_by_vector(
embedding_vector,
k,
)
# Even though this is a `_`-method,
# it is apparently used by the VectorStore parent class
# in an exposed method (`similarity_search_with_relevance_scores`).
# So we implement it (hmm).
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
return self.similarity_search_with_score(
query,
k,
**kwargs,
)
[docs] def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Returns:
List of Documents selected by maximal marginal relevance.
"""
prefetchHits = self.table.search(
embedding_vector=embedding,
top_k=fetch_k,
metric="cos",
metric_threshold=None,
)
# let the mmr utility pick the *indices* in the above array
mmrChosenIndices = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
[pfHit["embedding_vector"] for pfHit in prefetchHits],
k=k,
lambda_mult=lambda_mult,
)
mmrHits = [
pfHit
for pfIndex, pfHit in enumerate(prefetchHits)
if pfIndex in mmrChosenIndices
]
return [
Document(
page_content=hit["document"],
metadata=hit["metadata"],
)
for hit in mmrHits
]
[docs] def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Optional.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding_vector = self.embedding.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding_vector,
k,
fetch_k,
lambda_mult=lambda_mult,
)
[docs] @classmethod
def from_texts(
cls: Type[CVST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> CVST:
"""Create a Cassandra vectorstore from raw texts.
No support for specifying text IDs
Returns:
a Cassandra vectorstore.
"""
session: Session = kwargs["session"]
keyspace: str = kwargs["keyspace"]
table_name: str = kwargs["table_name"]
cassandraStore = cls(
embedding=embedding,
session=session,
keyspace=keyspace,
table_name=table_name,
)
cassandraStore.add_texts(texts=texts, metadatas=metadatas)
return cassandraStore
[docs] @classmethod
def from_documents(
cls: Type[CVST],
documents: List[Document],
embedding: Embeddings,
**kwargs: Any,
) -> CVST:
"""Create a Cassandra vectorstore from a document list.
No support for specifying text IDs
Returns:
a Cassandra vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
session: Session = kwargs["session"]
keyspace: str = kwargs["keyspace"]
table_name: str = kwargs["table_name"]
return cls.from_texts(
texts=texts, | https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/cassandra.html |
daaaaf8c8dbe-8 | return cls.from_texts(
texts=texts,
metadatas=metadatas,
embedding=embedding,
session=session,
keyspace=keyspace,
table_name=table_name,
)
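A hedged sketch of `from_texts` above; an open Cassandra `session` (a cassandra.cluster.Session) and an existing keyspace are assumed:
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Cassandra

vectorstore = Cassandra.from_texts(
    ["hello world"],
    OpenAIEmbeddings(),
    session=session,          # assumed cassandra.cluster.Session
    keyspace="my_keyspace",
    table_name="my_docs",
)
# IDs default to deterministic hashes of the text, so re-adding a text upserts it.
vectorstore.similarity_search("hello", k=4)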
Source code for langchain.vectorstores.alibabacloud_opensearch
import json
import logging
import numbers
from hashlib import sha1
from typing import Any, Dict, Iterable, List, Optional, Tuple
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore
logger = logging.getLogger()
[docs]class AlibabaCloudOpenSearchSettings:
"""Opensearch Client Configuration
Attribute:
endpoint (str) : The endpoint of opensearch instance, You can find it
from the console of Alibaba Cloud OpenSearch.
instance_id (str) : The identify of opensearch instance, You can find
it from the console of Alibaba Cloud OpenSearch.
datasource_name (str): The name of the data source specified when creating it.
username (str) : The username specified when purchasing the instance.
password (str) : The password specified when purchasing the instance.
embedding_index_name (str) : The name of the vector attribute specified
when configuring the instance attributes.
field_name_mapping (Dict) : Using field name mapping between opensearch
vector store and opensearch instance configuration table field names:
{
'id': 'The id field name map of index document.',
'document': 'The text field name map of index document.',
'embedding': 'In the embedding field of the opensearch instance,
the values must be in float16 multivalue type and separated by commas.',
'metadata_field_x': 'The metadata field mapping; the mapping value
contains the mapped field name and the operator, separated by a comma.',
}
"""
endpoint: str
instance_id: str
username: str
password: str
datasource_name: str
embedding_index_name: str
field_name_mapping: Dict[str, str] = {
"id": "id",
"document": "document",
"embedding": "embedding",
"metadata_field_x": "metadata_field_x,operator",
}
def __init__(
self,
endpoint: str,
instance_id: str,
username: str,
password: str,
datasource_name: str,
embedding_index_name: str,
field_name_mapping: Dict[str, str],
) -> None:
self.endpoint = endpoint
self.instance_id = instance_id
self.username = username
self.password = password
self.datasource_name = datasource_name
self.embedding_index_name = embedding_index_name
self.field_name_mapping = field_name_mapping
def __getitem__(self, item: str) -> Any:
return getattr(self, item)
def create_metadata(fields: Dict[str, Any]) -> Dict[str, Any]:
"""Create metadata from fields.
Args:
fields: The fields of the document. The fields must be a dict.
Returns:
metadata: The metadata of the document. The metadata must be a dict.
"""
metadata: Dict[str, Any] = {}
for key, value in fields.items():
if key == "id" or key == "document" or key == "embedding":
continue
metadata[key] = value
return metadata
[docs]class AlibabaCloudOpenSearch(VectorStore):
"""Alibaba Cloud OpenSearch Vector Store"""
def __init__(
self,
embedding: Embeddings,
config: AlibabaCloudOpenSearchSettings,
**kwargs: Any,
) -> None:
try:
from alibabacloud_ha3engine import client, models
from alibabacloud_tea_util import models as util_models
except ImportError:
raise ValueError(
"Could not import alibaba cloud opensearch python package. "
"Please install it with `pip install alibabacloud-ha3engine`."
)
self.config = config
self.embedding = embedding
self.runtime = util_models.RuntimeOptions(
connect_timeout=5000,
read_timeout=10000,
autoretry=False,
ignore_ssl=False,
max_idle_conns=50,
)
self.ha3EngineClient = client.Client(
models.Config(
endpoint=config.endpoint,
instance_id=config.instance_id,
protocol="http",
access_user_name=config.username,
access_pass_word=config.password,
)
)
self.options_headers: Dict[str, str] = {}
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
def _upsert(push_doc_list: List[Dict]) -> List[str]:
if push_doc_list is None or len(push_doc_list) == 0:
return []
try:
push_request = models.PushDocumentsRequestModel(
self.options_headers, push_doc_list
)
push_response = self.ha3EngineClient.push_documents(
                    self.config.datasource_name, field_name_map["id"], push_request
)
json_response = json.loads(push_response.body)
if json_response["status"] == "OK":
return [
push_doc["fields"][field_name_map["id"]]
for push_doc in push_doc_list
]
return []
except Exception as e:
                logger.error(
                    f"add doc to endpoint:{self.config.endpoint} "
                    f"instance_id:{self.config.instance_id} failed: {e}"
                )
raise e
from alibabacloud_ha3engine import models
ids = [sha1(t.encode("utf-8")).hexdigest() for t in texts]
embeddings = self.embedding.embed_documents(list(texts))
metadatas = metadatas or [{} for _ in texts]
field_name_map = self.config.field_name_mapping
add_doc_list = []
text_list = list(texts)
for idx, doc_id in enumerate(ids):
embedding = embeddings[idx] if idx < len(embeddings) else None
metadata = metadatas[idx] if idx < len(metadatas) else None
text = text_list[idx] if idx < len(text_list) else None
            add_doc: Dict[str, Any] = {}
            add_doc_fields: Dict[str, Any] = {}
            add_doc_fields[field_name_map["id"]] = doc_id
            add_doc_fields[field_name_map["document"]] = text
            if embedding is not None:
                add_doc_fields[field_name_map["embedding"]] = ",".join(
                    str(unit) for unit in embedding
                )
            if metadata is not None:
                for md_key, md_value in metadata.items():
                    # Mapping values look like "mapped_field,operator"; only the
                    # mapped field name is used when indexing.
                    add_doc_fields[field_name_map[md_key].split(",")[0]] = md_value
            add_doc["fields"] = add_doc_fields
            add_doc["cmd"] = "add"
            add_doc_list.append(add_doc)
return _upsert(add_doc_list)
[docs] def similarity_search(
self,
query: str,
k: int = 4,
search_filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
embedding = self.embedding.embed_query(query)
return self.create_results(
self.inner_embedding_query(
embedding=embedding, search_filter=search_filter, k=k
)
)
[docs] def similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
search_filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
embedding: List[float] = self.embedding.embed_query(query)
return self.create_results_with_score(
self.inner_embedding_query(
embedding=embedding, search_filter=search_filter, k=k
)
)
[docs] def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
search_filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
return self.create_results(
self.inner_embedding_query(
embedding=embedding, search_filter=search_filter, k=k
            )
)
[docs] def inner_embedding_query(
self,
embedding: List[float],
search_filter: Optional[Dict[str, Any]] = None,
k: int = 4,
) -> Dict[str, Any]:
def generate_embedding_query() -> str:
tmp_search_config_str = (
f"config=start:0,hit:{k},format:json&&cluster=general&&kvpairs="
f"first_formula:proxima_score({self.config.embedding_index_name})&&sort=+RANK"
)
tmp_query_str = (
f"&&query={self.config.embedding_index_name}:"
+ "'"
+ ",".join(str(x) for x in embedding)
+ "'"
)
if search_filter is not None:
filter_clause = "&&filter=" + " AND ".join(
[
create_filter(md_key, md_value)
for md_key, md_value in search_filter.items()
]
)
tmp_query_str += filter_clause
return tmp_search_config_str + tmp_query_str
def create_filter(md_key: str, md_value: Any) -> str:
md_filter_expr = self.config.field_name_mapping[md_key]
if md_filter_expr is None:
return ""
expr = md_filter_expr.split(",")
if len(expr) != 2:
logger.error(
f"filter {md_filter_expr} express is not correct, "
f"must contain mapping field and operator."
)
return ""
md_filter_key = expr[0].strip()
            md_filter_operator = expr[1].strip()
if isinstance(md_value, numbers.Number):
return f"{md_filter_key} {md_filter_operator} {md_value}"
return f'{md_filter_key}{md_filter_operator}"{md_value}"'
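        # For example (hypothetical mapping): with field_name_mapping containing
        # "color": "color,=" and search_filter={"color": "red"}, create_filter
        # yields the clause color="red"; a numeric filter such as {"year": 2023}
        # with mapping "year,>=" yields year >= 2023.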
def search_data(single_query_str: str) -> Dict[str, Any]:
search_query = models.SearchQuery(query=single_query_str)
search_request = models.SearchRequestModel(
self.options_headers, search_query
)
return json.loads(self.ha3EngineClient.search(search_request).body)
from alibabacloud_ha3engine import models
try:
query_str = generate_embedding_query()
json_response = search_data(query_str)
if len(json_response["errors"]) != 0:
logger.error(
f"query {self.config.endpoint} {self.config.instance_id} "
f"errors:{json_response['errors']} failed."
)
else:
return json_response
except Exception as e:
            logger.error(
                f"query instance endpoint:{self.config.endpoint} "
                f"instance_id:{self.config.instance_id} failed: {e}"
            )
return {}
[docs] def create_results(self, json_result: Dict[str, Any]) -> List[Document]:
items = json_result["result"]["items"]
query_result_list: List[Document] = []
for item in items:
fields = item["fields"]
query_result_list.append(
Document(
page_content=fields[self.config.field_name_mapping["document"]],
metadata=create_metadata(fields),
)
)
return query_result_list
[docs] def create_results_with_score(
        self, json_result: Dict[str, Any]
) -> List[Tuple[Document, float]]:
items = json_result["result"]["items"]
query_result_list: List[Tuple[Document, float]] = []
for item in items:
fields = item["fields"]
query_result_list.append(
(
Document(
page_content=fields[self.config.field_name_mapping["document"]],
metadata=create_metadata(fields),
),
float(item["sortExprValues"][0]),
)
)
return query_result_list
[docs] @classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
config: Optional[AlibabaCloudOpenSearchSettings] = None,
**kwargs: Any,
) -> "AlibabaCloudOpenSearch":
if config is None:
raise Exception("config can't be none")
ctx = cls(embedding, config, **kwargs)
ctx.add_texts(texts=texts, metadatas=metadatas)
return ctx
[docs] @classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Embeddings,
ids: Optional[List[str]] = None,
config: Optional[AlibabaCloudOpenSearchSettings] = None,
**kwargs: Any,
) -> "AlibabaCloudOpenSearch":
if config is None:
raise Exception("config can't be none")
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(
            texts=texts,
embedding=embedding,
metadatas=metadatas,
config=config,
**kwargs,
        )
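# End-to-end usage sketch (hypothetical, not part of the module; assumes a
# `settings` object configured as in the example above, and FakeEmbeddings is
# used only to keep the sketch self-contained):
#
#     from langchain.embeddings import FakeEmbeddings
#
#     store = AlibabaCloudOpenSearch.from_texts(
#         texts=["foo", "bar"],
#         embedding=FakeEmbeddings(size=4),
#         metadatas=[{"color": "red"}, {"color": "blue"}],
#         config=settings,
#     )
#     docs = store.similarity_search("foo", k=1, search_filter={"color": "red"})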
Source code for langchain.vectorstores.starrocks
"""Wrapper around open source StarRocks VectorSearch capability."""
from __future__ import annotations
import json
import logging
from hashlib import sha1
from threading import Thread
from typing import Any, Dict, Iterable, List, Optional, Tuple
from pydantic import BaseSettings
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
logger = logging.getLogger()
DEBUG = False
def has_mul_sub_str(s: str, *args: Any) -> bool:
"""
Check if a string has multiple substrings.
Args:
s: The string to check
*args: The substrings to check for in the string
Returns:
bool: True if all substrings are present in the string, False otherwise
"""
for a in args:
if a not in s:
return False
return True
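# For example:
#
#     has_mul_sub_str("hello world", "hello", "world")  # -> True
#     has_mul_sub_str("hello world", "hello", "mars")   # -> False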
def debug_output(s: Any) -> None:
"""
Print a debug message if DEBUG is True.
Args:
s: The message to print
"""
if DEBUG:
print(s)
def get_named_result(connection: Any, query: str) -> List[dict[str, Any]]:
"""
Get a named result from a query.
Args:
connection: The connection to the database
query: The query to execute
Returns:
List[dict[str, Any]]: The result of the query
"""
cursor = connection.cursor()
cursor.execute(query)
columns = cursor.description
result = []
for value in cursor.fetchall():
r = {}
for idx, datum in enumerate(value):
            k = columns[idx][0]
r[k] = datum
result.append(r)
debug_output(result)
cursor.close()
return result
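# For example, a query like "SELECT 1 AS a, 'x' AS b" comes back as a list of
# per-row dicts keyed by column name: [{"a": 1, "b": "x"}].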
class StarRocksSettings(BaseSettings):
"""StarRocks Client Configuration
Attribute:
StarRocks_host (str) : An URL to connect to MyScale backend.
Defaults to 'localhost'.
StarRocks_port (int) : URL port to connect with HTTP. Defaults to 8443.
username (str) : Username to login. Defaults to None.
password (str) : Password to login. Defaults to None.
database (str) : Database name to find the table. Defaults to 'default'.
table (str) : Table name to operate on.
Defaults to 'vector_table'.
column_map (Dict) : Column type map to project column name onto langchain
semantics. Must have keys: `text`, `id`, `vector`,
must be same size to number of columns. For example:
.. code-block:: python
{
'id': 'text_id',
'embedding': 'text_embedding',
'document': 'text_plain',
'metadata': 'metadata_dictionary_in_json',
}
Defaults to identity map.
"""
host: str = "localhost"
port: int = 9030
username: str = "root"
password: str = ""
column_map: Dict[str, str] = {
"id": "id",
"document": "document",
"embedding": "embedding",
"metadata": "metadata",
}
database: str = "default"
    table: str = "langchain"
def __getitem__(self, item: str) -> Any:
return getattr(self, item)
class Config:
env_file = ".env"
env_prefix = "starrocks_"
env_file_encoding = "utf-8"
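# Besides direct construction, pydantic's BaseSettings machinery also reads
# these fields from "starrocks_"-prefixed environment variables or a .env
# file. A hypothetical sketch:
#
#     import os
#
#     os.environ["starrocks_host"] = "starrocks.example.com"
#     os.environ["starrocks_port"] = "9030"
#     settings = StarRocksSettings()  # picks up the starrocks_* variables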
[docs]class StarRocks(VectorStore):
"""Wrapper around StarRocks vector database
You need a `pymysql` python package, and a valid account
to connect to StarRocks.
Right now StarRocks has only implemented `cosine_similarity` function to
compute distance between two vectors. And there is no vector inside right now,
so we have to iterate all vectors and compute spatial distance.
For more information, please visit
[StarRocks official site](https://www.starrocks.io/)
[StarRocks github](https://github.com/StarRocks/starrocks)
"""
def __init__(
self,
embedding: Embeddings,
config: Optional[StarRocksSettings] = None,
**kwargs: Any,
) -> None:
"""StarRocks Wrapper to LangChain
embedding_function (Embeddings):
config (StarRocksSettings): Configuration to StarRocks Client
"""
try:
import pymysql # type: ignore[import]
except ImportError:
raise ImportError(
"Could not import pymysql python package. "
"Please install it with `pip install pymysql`."
)
try:
from tqdm import tqdm
self.pgbar = tqdm
except ImportError:
            # Fall back to a no-op progress bar if tqdm is not installed
            self.pgbar = lambda x, **kwargs: x
super().__init__()
if config is not None:
self.config = config
else:
self.config = StarRocksSettings()
assert self.config
assert self.config.host and self.config.port
assert self.config.column_map and self.config.database and self.config.table
for k in ["id", "embedding", "document", "metadata"]:
assert k in self.config.column_map
# initialize the schema
dim = len(embedding.embed_query("test"))
self.schema = f"""\
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
{self.config.column_map['id']} string,
{self.config.column_map['document']} string,
{self.config.column_map['embedding']} array<float>,
{self.config.column_map['metadata']} string
) ENGINE = OLAP PRIMARY KEY({self.config.column_map['id']}) \
DISTRIBUTED BY HASH({self.config.column_map['id']}) \
PROPERTIES ("replication_num" = "1")\
"""
self.dim = dim
self.BS = "\\"
self.must_escape = ("\\", "'")
self.embedding_function = embedding
self.dist_order = "DESC"
debug_output(self.config)
# Create a connection to StarRocks
self.connection = pymysql.connect(
host=self.config.host,
port=self.config.port,
user=self.config.username,
password=self.config.password,
database=self.config.database,
**kwargs,
)
debug_output(self.schema)
get_named_result(self.connection, self.schema)
[docs] def escape_str(self, value: str) -> str:
return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value) | https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/starrocks.html |