Source code for langchain.tools.brave_search.tool

from __future__ import annotations

from typing import Any, Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.brave_search import BraveSearchWrapper


class BraveSearch(BaseTool):
    name = "brave_search"
    description = (
        "a search engine. "
        "useful for when you need to answer questions about current events."
        " input should be a search query."
    )
    search_wrapper: BraveSearchWrapper

    @classmethod
    def from_api_key(
        cls, api_key: str, search_kwargs: Optional[dict] = None, **kwargs: Any
    ) -> BraveSearch:
        wrapper = BraveSearchWrapper(api_key=api_key, search_kwargs=search_kwargs or {})
        return cls(search_wrapper=wrapper, **kwargs)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.search_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("BraveSearch does not support async")

https://api.python.langchain.com/en/stable/_modules/langchain/tools/brave_search/tool.html
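A minimal usage sketch (the API key is a placeholder; network access and a Brave Search subscription are assumed):

from langchain.tools.brave_search.tool import BraveSearch

# from_api_key builds the BraveSearchWrapper internally
tool = BraveSearch.from_api_key(api_key="YOUR-BRAVE-API-KEY")  # placeholder key
print(tool.run("latest news on open-source LLMs"))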
Source code for langchain.tools.powerbi.tool

"""Tools for interacting with a Power BI dataset."""
import logging
from time import perf_counter
from typing import Any, Dict, Optional, Tuple

from pydantic import Field, validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.chains.llm import LLMChain
from langchain.tools.base import BaseTool
from langchain.tools.powerbi.prompt import (
    BAD_REQUEST_RESPONSE,
    DEFAULT_FEWSHOT_EXAMPLES,
    QUESTION_TO_QUERY,
    RETRY_RESPONSE,
)
from langchain.utilities.powerbi import PowerBIDataset, json_to_md

logger = logging.getLogger(__name__)


class QueryPowerBITool(BaseTool):
    """Tool for querying a Power BI dataset."""

    name = "query_powerbi"
    description = """
    Input to this tool is a detailed question about the dataset, output is a result from the dataset. It will try to answer the question using the dataset, and if it cannot, it will ask for clarification.

    Example Input: "How many rows are in table1?"
    """  # noqa: E501
    llm_chain: LLMChain
    powerbi: PowerBIDataset = Field(exclude=True)
    template: Optional[str] = QUESTION_TO_QUERY
    examples: Optional[str] = DEFAULT_FEWSHOT_EXAMPLES
    session_cache: Dict[str, Any] = Field(default_factory=dict, exclude=True)
    max_iterations: int = 5

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @validator("llm_chain")
    def validate_llm_chain_input_variables(  # pylint: disable=E0213
        cls, llm_chain: LLMChain
    ) -> LLMChain:
        """Make sure the LLM chain has the correct input variables."""
        if llm_chain.prompt.input_variables != [
            "tool_input",
            "tables",
            "schemas",
            "examples",
        ]:
            raise ValueError(
                "LLM chain for QueryPowerBITool must have input variables ['tool_input', 'tables', 'schemas', 'examples'], found %s",  # noqa: E501 # pylint: disable=C0301
                llm_chain.prompt.input_variables,
            )
        return llm_chain

    def _check_cache(self, tool_input: str) -> Optional[str]:
        """Check if the input is present in the cache.

        If the value is a bad request, overwrite with the escalated version;
        if not present, return None."""
        if tool_input not in self.session_cache:
            return None
        return self.session_cache[tool_input]

    def _run(
        self,
        tool_input: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
        **kwargs: Any,
    ) -> str:
        """Execute the query, return the results or an error message."""
        if cache := self._check_cache(tool_input):
            logger.debug("Found cached result for %s: %s", tool_input, cache)
            return cache
        try:
            logger.info("Running PBI Query Tool with input: %s", tool_input)
            query = self.llm_chain.predict(
                tool_input=tool_input,
                tables=self.powerbi.get_table_names(),
                schemas=self.powerbi.get_schemas(),
                examples=self.examples,
            )
        except Exception as exc:  # pylint: disable=broad-except
            self.session_cache[tool_input] = f"Error on call to LLM: {exc}"
            return self.session_cache[tool_input]
        if query == "I cannot answer this":
            self.session_cache[tool_input] = query
            return self.session_cache[tool_input]
        logger.info("PBI Query: %s", query)
        start_time = perf_counter()
        pbi_result = self.powerbi.run(command=query)
        end_time = perf_counter()
        logger.debug("PBI Result: %s", pbi_result)
        logger.debug(f"PBI Query duration: {end_time - start_time:0.6f}")
        result, error = self._parse_output(pbi_result)
        if error is not None and "TokenExpired" in error:
            self.session_cache[
                tool_input
            ] = "Authentication token expired or invalid, please try to reauthenticate."
            return self.session_cache[tool_input]

        iterations = kwargs.get("iterations", 0)
        if error and iterations < self.max_iterations:
            return self._run(
                tool_input=RETRY_RESPONSE.format(
                    tool_input=tool_input, query=query, error=error
                ),
                run_manager=run_manager,
                iterations=iterations + 1,
            )

        self.session_cache[tool_input] = (
            result if result else BAD_REQUEST_RESPONSE.format(error=error)
        )
        return self.session_cache[tool_input]

    async def _arun(
        self,
        tool_input: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
        **kwargs: Any,
    ) -> str:
        """Execute the query, return the results or an error message."""
        if cache := self._check_cache(tool_input):
            logger.debug("Found cached result for %s: %s", tool_input, cache)
            return cache
        try:
            logger.info("Running PBI Query Tool with input: %s", tool_input)
            query = await self.llm_chain.apredict(
                tool_input=tool_input,
                tables=self.powerbi.get_table_names(),
                schemas=self.powerbi.get_schemas(),
                examples=self.examples,
            )
        except Exception as exc:  # pylint: disable=broad-except
            self.session_cache[tool_input] = f"Error on call to LLM: {exc}"
            return self.session_cache[tool_input]
        if query == "I cannot answer this":
            self.session_cache[tool_input] = query
            return self.session_cache[tool_input]
        logger.info("PBI Query: %s", query)
        start_time = perf_counter()
        pbi_result = await self.powerbi.arun(command=query)
        end_time = perf_counter()
        logger.debug("PBI Result: %s", pbi_result)
        logger.debug(f"PBI Query duration: {end_time - start_time:0.6f}")
        result, error = self._parse_output(pbi_result)
        if error is not None and "TokenExpired" in error:
            self.session_cache[
                tool_input
            ] = "Authentication token expired or invalid, please try to reauthenticate."
            return self.session_cache[tool_input]

        iterations = kwargs.get("iterations", 0)
        if error and iterations < self.max_iterations:
            return await self._arun(
                tool_input=RETRY_RESPONSE.format(
                    tool_input=tool_input, query=query, error=error
                ),
                run_manager=run_manager,
                iterations=iterations + 1,
            )

        self.session_cache[tool_input] = (
            result if result else BAD_REQUEST_RESPONSE.format(error=error)
        )
        return self.session_cache[tool_input]

    def _parse_output(
        self, pbi_result: Dict[str, Any]
    ) -> Tuple[Optional[str], Optional[str]]:
        """Parse the output of the query to a markdown table."""
        if "results" in pbi_result:
            return json_to_md(pbi_result["results"][0]["tables"][0]["rows"]), None

        if "error" in pbi_result:
            if (
                "pbi.error" in pbi_result["error"]
                and "details" in pbi_result["error"]["pbi.error"]
            ):
                return None, pbi_result["error"]["pbi.error"]["details"][0]["detail"]
            return None, pbi_result["error"]

        return None, "Unknown error"


class InfoPowerBITool(BaseTool):
    """Tool for getting metadata about a Power BI dataset."""

    name = "schema_powerbi"
    description = """
    Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables.
    Be sure that the tables actually exist by calling list_tables_powerbi first!

    Example Input: "table1, table2, table3"
    """  # noqa: E501
    powerbi: PowerBIDataset = Field(exclude=True)

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def _run(
        self,
        tool_input: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Get the schema for tables in a comma-separated list."""
        return self.powerbi.get_table_info(tool_input.split(", "))

    async def _arun(
        self,
        tool_input: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        return await self.powerbi.aget_table_info(tool_input.split(", "))


class ListPowerBITool(BaseTool):
    """Tool for getting table names."""

    name = "list_tables_powerbi"
    description = "Input is an empty string, output is a comma separated list of tables in the database."  # noqa: E501
    powerbi: PowerBIDataset = Field(exclude=True)

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def _run(
        self,
        tool_input: Optional[str] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Get the names of the tables."""
        return ", ".join(self.powerbi.get_table_names())

    async def _arun(
        self,
        tool_input: Optional[str] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Get the names of the tables."""
        return ", ".join(self.powerbi.get_table_names())

https://api.python.langchain.com/en/stable/_modules/langchain/tools/powerbi/tool.html
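A hedged wiring sketch for the simplest of the three tools (assumes `powerbi` is an already-authenticated PowerBIDataset instance; constructing one requires dataset IDs and Azure credentials not shown here):

from langchain.tools.powerbi.tool import ListPowerBITool

list_tool = ListPowerBITool(powerbi=powerbi)  # `powerbi`: pre-built PowerBIDataset
print(list_tool.run(""))  # input is ignored; returns a comma-separated table list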
Source code for langchain.tools.spark_sql.tool

# flake8: noqa
"""Tools for interacting with Spark SQL."""
from typing import Any, Dict, Optional

from pydantic import BaseModel, Extra, Field, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.utilities.spark_sql import SparkSQL
from langchain.tools.base import BaseTool
from langchain.tools.spark_sql.prompt import QUERY_CHECKER


class BaseSparkSQLTool(BaseModel):
    """Base tool for interacting with Spark SQL."""

    db: SparkSQL = Field(exclude=True)

    # Override BaseTool.Config to appease mypy
    # See https://github.com/pydantic/pydantic/issues/4173
    class Config(BaseTool.Config):
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True
        extra = Extra.forbid


class QuerySparkSQLTool(BaseSparkSQLTool, BaseTool):
    """Tool for querying Spark SQL."""

    name = "query_sql_db"
    description = """
    Input to this tool is a detailed and correct SQL query, output is a result from the Spark SQL.
    If the query is not correct, an error message will be returned.
    If an error is returned, rewrite the query, check the query, and try again.
    """

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Execute the query, return the results or an error message."""
        return self.db.run_no_throw(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        raise NotImplementedError("QuerySqlDbTool does not support async")


class InfoSparkSQLTool(BaseSparkSQLTool, BaseTool):
    """Tool for getting metadata about Spark SQL."""

    name = "schema_sql_db"
    description = """
    Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables.
    Be sure that the tables actually exist by calling list_tables_sql_db first!

    Example Input: "table1, table2, table3"
    """

    def _run(
        self,
        table_names: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Get the schema for tables in a comma-separated list."""
        return self.db.get_table_info_no_throw(table_names.split(", "))

    async def _arun(
        self,
        table_names: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        raise NotImplementedError("SchemaSqlDbTool does not support async")


class ListSparkSQLTool(BaseSparkSQLTool, BaseTool):
    """Tool for getting table names."""

    name = "list_tables_sql_db"
    description = "Input is an empty string, output is a comma separated list of tables in the Spark SQL."

    def _run(
        self,
        tool_input: str = "",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Get a comma-separated list of usable table names."""
        return ", ".join(self.db.get_usable_table_names())

    async def _arun(
        self,
        tool_input: str = "",
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        raise NotImplementedError("ListTablesSqlDbTool does not support async")


class QueryCheckerTool(BaseSparkSQLTool, BaseTool):
    """Use an LLM to check if a query is correct.

    Adapted from https://www.patterns.app/blog/2023/01/18/crunchbot-sql-analyst-gpt/"""

    template: str = QUERY_CHECKER
    llm: BaseLanguageModel
    llm_chain: LLMChain = Field(init=False)
    name = "query_checker_sql_db"
    description = """
    Use this tool to double check if your query is correct before executing it.
    Always use this tool before executing a query with query_sql_db!
    """

    @root_validator(pre=True)
    def initialize_llm_chain(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        if "llm_chain" not in values:
            values["llm_chain"] = LLMChain(
                llm=values.get("llm"),
                prompt=PromptTemplate(
                    template=QUERY_CHECKER, input_variables=["query"]
                ),
            )

        if values["llm_chain"].prompt.input_variables != ["query"]:
            raise ValueError(
                "LLM chain for QueryCheckerTool must use ['query'] as input_variables "
                "for the embedded prompt"
            )

        return values

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the LLM to check the query."""
        return self.llm_chain.predict(query=query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        return await self.llm_chain.apredict(query=query)

https://api.python.langchain.com/en/stable/_modules/langchain/tools/spark_sql/tool.html
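A minimal sketch of the query tool (assumes `db` is a configured SparkSQL utility pointing at an active Spark session; the table name is a placeholder):

from langchain.tools.spark_sql.tool import QuerySparkSQLTool

query_tool = QuerySparkSQLTool(db=db)  # `db`: pre-built SparkSQL instance
# run_no_throw returns the error text instead of raising on a bad query,
# which is what lets an agent retry with a corrected statement
print(query_tool.run("SELECT COUNT(*) AS n FROM table1"))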
Source code for langchain.tools.google_places.tool

"""Tool for the Google Places API."""
from typing import Optional, Type

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.google_places_api import GooglePlacesAPIWrapper


class GooglePlacesSchema(BaseModel):
    query: str = Field(..., description="Query for Google Maps")


class GooglePlacesTool(BaseTool):
    """Tool that adds the capability to query the Google Places API."""

    name = "google_places"
    description = (
        "A wrapper around Google Places. "
        "Useful for when you need to validate or "
        "discover addresses from ambiguous text. "
        "Input should be a search query."
    )
    api_wrapper: GooglePlacesAPIWrapper = Field(default_factory=GooglePlacesAPIWrapper)
    args_schema: Type[BaseModel] = GooglePlacesSchema

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("GooglePlacesRun does not support async")

https://api.python.langchain.com/en/stable/_modules/langchain/tools/google_places/tool.html
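A usage sketch, assuming the wrapper picks up its key from the GPLACES_API_KEY environment variable (the key shown is a placeholder):

import os

os.environ["GPLACES_API_KEY"] = "YOUR-GPLACES-API-KEY"  # placeholder

from langchain.tools.google_places.tool import GooglePlacesTool

places = GooglePlacesTool()  # api_wrapper is built by the default_factory
print(places.run("coffee near Union Square, San Francisco"))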
Source code for langchain.tools.azure_cognitive_services.image_analysis

from __future__ import annotations

import logging
from typing import Any, Dict, Optional

from pydantic import root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.azure_cognitive_services.utils import detect_file_src_type
from langchain.tools.base import BaseTool
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class AzureCogsImageAnalysisTool(BaseTool):
    """Tool that queries the Azure Cognitive Services Image Analysis API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/cognitive-services/computer-vision/quickstarts-sdk/image-analysis-client-library-40
    """

    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_endpoint: str = ""  #: :meta private:
    vision_service: Any  #: :meta private:
    analysis_options: Any  #: :meta private:

    name = "azure_cognitive_services_image_analysis"
    description = (
        "A wrapper around Azure Cognitive Services Image Analysis. "
        "Useful for when you need to analyze images. "
        "Input should be a url to an image."
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )
        azure_cogs_endpoint = get_from_dict_or_env(
            values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
        )

        try:
            import azure.ai.vision as sdk

            values["vision_service"] = sdk.VisionServiceOptions(
                endpoint=azure_cogs_endpoint, key=azure_cogs_key
            )

            values["analysis_options"] = sdk.ImageAnalysisOptions()
            values["analysis_options"].features = (
                sdk.ImageAnalysisFeature.CAPTION
                | sdk.ImageAnalysisFeature.OBJECTS
                | sdk.ImageAnalysisFeature.TAGS
                | sdk.ImageAnalysisFeature.TEXT
            )
        except ImportError:
            raise ImportError(
                "azure-ai-vision is not installed. "
                "Run `pip install azure-ai-vision` to install."
            )

        return values

    def _image_analysis(self, image_path: str) -> Dict:
        try:
            import azure.ai.vision as sdk
        except ImportError:
            pass

        image_src_type = detect_file_src_type(image_path)
        if image_src_type == "local":
            vision_source = sdk.VisionSource(filename=image_path)
        elif image_src_type == "remote":
            vision_source = sdk.VisionSource(url=image_path)
        else:
            raise ValueError(f"Invalid image path: {image_path}")

        image_analyzer = sdk.ImageAnalyzer(
            self.vision_service, vision_source, self.analysis_options
        )

        result = image_analyzer.analyze()

        res_dict = {}
        if result.reason == sdk.ImageAnalysisResultReason.ANALYZED:
            if result.caption is not None:
                res_dict["caption"] = result.caption.content

            if result.objects is not None:
                res_dict["objects"] = [obj.name for obj in result.objects]

            if result.tags is not None:
                res_dict["tags"] = [tag.name for tag in result.tags]

            if result.text is not None:
                res_dict["text"] = [line.content for line in result.text.lines]

        else:
            error_details = sdk.ImageAnalysisErrorDetails.from_result(result)
            raise RuntimeError(
                f"Image analysis failed.\n"
                f"Reason: {error_details.reason}\n"
                f"Details: {error_details.message}"
            )

        return res_dict

    def _format_image_analysis_result(self, image_analysis_result: Dict) -> str:
        formatted_result = []
        if "caption" in image_analysis_result:
            formatted_result.append("Caption: " + image_analysis_result["caption"])

        if (
            "objects" in image_analysis_result
            and len(image_analysis_result["objects"]) > 0
        ):
            formatted_result.append(
                "Objects: " + ", ".join(image_analysis_result["objects"])
            )

        if "tags" in image_analysis_result and len(image_analysis_result["tags"]) > 0:
            formatted_result.append("Tags: " + ", ".join(image_analysis_result["tags"]))

        if "text" in image_analysis_result and len(image_analysis_result["text"]) > 0:
            formatted_result.append("Text: " + ", ".join(image_analysis_result["text"]))

        return "\n".join(formatted_result)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            image_analysis_result = self._image_analysis(query)
            if not image_analysis_result:
                return "No good image analysis result was found"

            return self._format_image_analysis_result(image_analysis_result)
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsImageAnalysisTool: {e}")

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("AzureCogsImageAnalysisTool does not support async")

https://api.python.langchain.com/en/stable/_modules/langchain/tools/azure_cognitive_services/image_analysis.html
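A usage sketch (assumes the azure-ai-vision package is installed; the key, endpoint, and image URL are placeholders exported before the tool is constructed, since the root validator reads them at init time):

import os

os.environ["AZURE_COGS_KEY"] = "YOUR-COGS-KEY"  # placeholder
os.environ["AZURE_COGS_ENDPOINT"] = "https://YOUR-RESOURCE.cognitiveservices.azure.com/"  # placeholder

from langchain.tools.azure_cognitive_services.image_analysis import (
    AzureCogsImageAnalysisTool,
)

tool = AzureCogsImageAnalysisTool()
# returns a newline-joined summary of caption, objects, tags, and OCR text
print(tool.run("https://example.com/photo.jpg"))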
Source code for langchain.tools.azure_cognitive_services.text2speech

from __future__ import annotations

import logging
import tempfile
from typing import Any, Dict, Optional

from pydantic import root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class AzureCogsText2SpeechTool(BaseTool):
    """Tool that queries the Azure Cognitive Services Text2Speech API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-text-to-speech?pivots=programming-language-python
    """

    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_region: str = ""  #: :meta private:
    speech_language: str = "en-US"  #: :meta private:
    speech_config: Any  #: :meta private:

    name = "azure_cognitive_services_text2speech"
    description = (
        "A wrapper around Azure Cognitive Services Text2Speech. "
        "Useful for when you need to convert text to speech. "
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )
        azure_cogs_region = get_from_dict_or_env(
            values, "azure_cogs_region", "AZURE_COGS_REGION"
        )

        try:
            import azure.cognitiveservices.speech as speechsdk

            values["speech_config"] = speechsdk.SpeechConfig(
                subscription=azure_cogs_key, region=azure_cogs_region
            )
        except ImportError:
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )

        return values

    def _text2speech(self, text: str, speech_language: str) -> str:
        try:
            import azure.cognitiveservices.speech as speechsdk
        except ImportError:
            pass

        self.speech_config.speech_synthesis_language = speech_language
        speech_synthesizer = speechsdk.SpeechSynthesizer(
            speech_config=self.speech_config, audio_config=None
        )
        result = speech_synthesizer.speak_text(text)

        if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
            stream = speechsdk.AudioDataStream(result)
            with tempfile.NamedTemporaryFile(
                mode="wb", suffix=".wav", delete=False
            ) as f:
                stream.save_to_wav_file(f.name)

            return f.name

        elif result.reason == speechsdk.ResultReason.Canceled:
            cancellation_details = result.cancellation_details
            logger.debug(f"Speech synthesis canceled: {cancellation_details.reason}")
            if cancellation_details.reason == speechsdk.CancellationReason.Error:
                raise RuntimeError(
                    f"Speech synthesis error: {cancellation_details.error_details}"
                )

            return "Speech synthesis canceled."

        else:
            return f"Speech synthesis failed: {result.reason}"

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            speech_file = self._text2speech(query, self.speech_language)
            return speech_file
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsText2SpeechTool: {e}")

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("AzureCogsText2SpeechTool does not support async")

https://api.python.langchain.com/en/stable/_modules/langchain/tools/azure_cognitive_services/text2speech.html
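A sketch of the happy path (key and region are placeholders; note the tool returns a path to a temporary .wav file rather than audio bytes):

import os

os.environ["AZURE_COGS_KEY"] = "YOUR-COGS-KEY"  # placeholder
os.environ["AZURE_COGS_REGION"] = "westus2"     # placeholder region

from langchain.tools.azure_cognitive_services.text2speech import (
    AzureCogsText2SpeechTool,
)

tts = AzureCogsText2SpeechTool()
wav_path = tts.run("Hello from LangChain")
print(wav_path)  # e.g. a tempfile path ending in .wav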
Source code for langchain.tools.azure_cognitive_services.form_recognizer

from __future__ import annotations

import logging
from typing import Any, Dict, List, Optional

from pydantic import root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.azure_cognitive_services.utils import detect_file_src_type
from langchain.tools.base import BaseTool
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class AzureCogsFormRecognizerTool(BaseTool):
    """Tool that queries the Azure Cognitive Services Form Recognizer API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/applied-ai-services/form-recognizer/quickstarts/get-started-sdks-rest-api?view=form-recog-3.0.0&pivots=programming-language-python
    """

    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_endpoint: str = ""  #: :meta private:
    doc_analysis_client: Any  #: :meta private:

    name = "azure_cognitive_services_form_recognizer"
    description = (
        "A wrapper around Azure Cognitive Services Form Recognizer. "
        "Useful for when you need to "
        "extract text, tables, and key-value pairs from documents. "
        "Input should be a url to a document."
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )
        azure_cogs_endpoint = get_from_dict_or_env(
            values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
        )

        try:
            from azure.ai.formrecognizer import DocumentAnalysisClient
            from azure.core.credentials import AzureKeyCredential

            values["doc_analysis_client"] = DocumentAnalysisClient(
                endpoint=azure_cogs_endpoint,
                credential=AzureKeyCredential(azure_cogs_key),
            )
        except ImportError:
            raise ImportError(
                "azure-ai-formrecognizer is not installed. "
                "Run `pip install azure-ai-formrecognizer` to install."
            )

        return values

    def _parse_tables(self, tables: List[Any]) -> List[Any]:
        result = []
        for table in tables:
            rc, cc = table.row_count, table.column_count
            _table = [["" for _ in range(cc)] for _ in range(rc)]
            for cell in table.cells:
                _table[cell.row_index][cell.column_index] = cell.content
            result.append(_table)
        return result

    def _parse_kv_pairs(self, kv_pairs: List[Any]) -> List[Any]:
        result = []
        for kv_pair in kv_pairs:
            key = kv_pair.key.content if kv_pair.key else ""
            value = kv_pair.value.content if kv_pair.value else ""
            result.append((key, value))
        return result

    def _document_analysis(self, document_path: str) -> Dict:
        document_src_type = detect_file_src_type(document_path)
        if document_src_type == "local":
            with open(document_path, "rb") as document:
                poller = self.doc_analysis_client.begin_analyze_document(
                    "prebuilt-document", document
                )
        elif document_src_type == "remote":
            poller = self.doc_analysis_client.begin_analyze_document_from_url(
                "prebuilt-document", document_path
            )
        else:
            raise ValueError(f"Invalid document path: {document_path}")

        result = poller.result()
        res_dict = {}

        if result.content is not None:
            res_dict["content"] = result.content

        if result.tables is not None:
            res_dict["tables"] = self._parse_tables(result.tables)

        if result.key_value_pairs is not None:
            res_dict["key_value_pairs"] = self._parse_kv_pairs(result.key_value_pairs)

        return res_dict

    def _format_document_analysis_result(self, document_analysis_result: Dict) -> str:
        formatted_result = []
        if "content" in document_analysis_result:
            formatted_result.append(
                f"Content: {document_analysis_result['content']}".replace("\n", " ")
            )

        if "tables" in document_analysis_result:
            for i, table in enumerate(document_analysis_result["tables"]):
                formatted_result.append(f"Table {i}: {table}".replace("\n", " "))

        if "key_value_pairs" in document_analysis_result:
            for kv_pair in document_analysis_result["key_value_pairs"]:
                formatted_result.append(
                    f"{kv_pair[0]}: {kv_pair[1]}".replace("\n", " ")
                )

        return "\n".join(formatted_result)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            document_analysis_result = self._document_analysis(query)
            if not document_analysis_result:
                return "No good document analysis result was found"

            return self._format_document_analysis_result(document_analysis_result)
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsFormRecognizerTool: {e}")

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("AzureCogsFormRecognizerTool does not support async")

https://api.python.langchain.com/en/stable/_modules/langchain/tools/azure_cognitive_services/form_recognizer.html
Source code for langchain.tools.azure_cognitive_services.speech2text

from __future__ import annotations

import logging
import time
from typing import Any, Dict, Optional

from pydantic import root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.azure_cognitive_services.utils import (
    detect_file_src_type,
    download_audio_from_url,
)
from langchain.tools.base import BaseTool
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class AzureCogsSpeech2TextTool(BaseTool):
    """Tool that queries the Azure Cognitive Services Speech2Text API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-speech-to-text?pivots=programming-language-python
    """

    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_region: str = ""  #: :meta private:
    speech_language: str = "en-US"  #: :meta private:
    speech_config: Any  #: :meta private:

    name = "azure_cognitive_services_speech2text"
    description = (
        "A wrapper around Azure Cognitive Services Speech2Text. "
        "Useful for when you need to transcribe audio to text. "
        "Input should be a url to an audio file."
    )

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )
        azure_cogs_region = get_from_dict_or_env(
            values, "azure_cogs_region", "AZURE_COGS_REGION"
        )

        try:
            import azure.cognitiveservices.speech as speechsdk

            values["speech_config"] = speechsdk.SpeechConfig(
                subscription=azure_cogs_key, region=azure_cogs_region
            )
        except ImportError:
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )

        return values

    def _continuous_recognize(self, speech_recognizer: Any) -> str:
        done = False
        text = ""

        def stop_cb(evt: Any) -> None:
            """Callback that stops continuous recognition."""
            speech_recognizer.stop_continuous_recognition_async()
            nonlocal done
            done = True

        def retrieve_cb(evt: Any) -> None:
            """Callback that retrieves the intermediate recognition results."""
            nonlocal text
            text += evt.result.text

        # retrieve text on recognized events
        speech_recognizer.recognized.connect(retrieve_cb)
        # stop continuous recognition on either session stopped or canceled events
        speech_recognizer.session_stopped.connect(stop_cb)
        speech_recognizer.canceled.connect(stop_cb)

        # Start continuous speech recognition
        speech_recognizer.start_continuous_recognition_async()
        while not done:
            time.sleep(0.5)
        return text

    def _speech2text(self, audio_path: str, speech_language: str) -> str:
        try:
            import azure.cognitiveservices.speech as speechsdk
        except ImportError:
            pass

        audio_src_type = detect_file_src_type(audio_path)
        if audio_src_type == "local":
            audio_config = speechsdk.AudioConfig(filename=audio_path)
        elif audio_src_type == "remote":
            tmp_audio_path = download_audio_from_url(audio_path)
            audio_config = speechsdk.AudioConfig(filename=tmp_audio_path)
        else:
            raise ValueError(f"Invalid audio path: {audio_path}")

        self.speech_config.speech_recognition_language = speech_language
        speech_recognizer = speechsdk.SpeechRecognizer(self.speech_config, audio_config)
        return self._continuous_recognize(speech_recognizer)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            text = self._speech2text(query, self.speech_language)
            return text
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsSpeech2TextTool: {e}")

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("AzureCogsSpeech2TextTool does not support async")

https://api.python.langchain.com/en/stable/_modules/langchain/tools/azure_cognitive_services/speech2text.html
Source code for langchain.tools.sleep.tool

"""Tool for agent to sleep."""
from asyncio import sleep as asleep
from time import sleep
from typing import Optional, Type

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool


class SleepInput(BaseModel):
    """Input for SleepTool."""

    sleep_time: int = Field(..., description="Time to sleep in seconds")


class SleepTool(BaseTool):
    """Tool that adds the capability to sleep."""

    name = "sleep"
    args_schema: Type[BaseModel] = SleepInput
    description = "Make agent sleep for a specified number of seconds."

    def _run(
        self,
        sleep_time: int,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Sleep tool."""
        sleep(sleep_time)
        return f"Agent slept for {sleep_time} seconds."

    async def _arun(
        self,
        sleep_time: int,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the sleep tool asynchronously."""
        await asleep(sleep_time)
        return f"Agent slept for {sleep_time} seconds."

https://api.python.langchain.com/en/stable/_modules/langchain/tools/sleep/tool.html
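A quick check of the synchronous path (a dict matching SleepInput is the safest calling convention for a tool with an args_schema):

from langchain.tools.sleep.tool import SleepTool

tool = SleepTool()
print(tool.run({"sleep_time": 2}))  # blocks ~2s, then "Agent slept for 2 seconds."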
Source code for langchain.tools.jira.tool

"""
This tool allows agents to interact with the atlassian-python-api library
and operate on a Jira instance. For more information on the
atlassian-python-api library, see https://atlassian-python-api.readthedocs.io/jira.html

To use this tool, you must first set as environment variables:
    JIRA_API_TOKEN
    JIRA_USERNAME
    JIRA_INSTANCE_URL

Below is a sample script that uses the Jira tool:

```python
from langchain.agents import AgentType
from langchain.agents import initialize_agent
from langchain.agents.agent_toolkits.jira.toolkit import JiraToolkit
from langchain.llms import OpenAI
from langchain.utilities.jira import JiraAPIWrapper

llm = OpenAI(temperature=0)
jira = JiraAPIWrapper()
toolkit = JiraToolkit.from_jira_api_wrapper(jira)
agent = initialize_agent(
    toolkit.get_tools(),
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True
)
```
"""
from typing import Optional

from pydantic import Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.jira import JiraAPIWrapper


class JiraAction(BaseTool):
    api_wrapper: JiraAPIWrapper = Field(default_factory=JiraAPIWrapper)
    mode: str
    name = ""
    description = ""

    def _run(
        self,
        instructions: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Atlassian Jira API to run an operation."""
        return self.api_wrapper.run(self.mode, instructions)

    async def _arun(
        self,
        _: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Atlassian Jira API to run an operation."""
        raise NotImplementedError("JiraAction does not support async")

https://api.python.langchain.com/en/stable/_modules/langchain/tools/jira/tool.html
Source code for langchain.tools.youtube.search

"""
Adapted from https://github.com/venuv/langchain_yt_tools

CustomYTSearchTool searches YouTube videos related to a person
and returns a specified number of video URLs.
Input to this tool should be a comma separated list,
 - the first part contains a person name
 - and the second (optional) a number that is the maximum number of video results to return
"""
import json
from typing import Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools import BaseTool


class YouTubeSearchTool(BaseTool):
    name = "youtube_search"
    description = (
        "search for youtube videos associated with a person. "
        "the input to this tool should be a comma separated list, "
        "the first part contains a person name and the second a "
        "number that is the maximum number of video results "
        "to return aka num_results. the second part is optional"
    )

    def _search(self, person: str, num_results: int) -> str:
        from youtube_search import YoutubeSearch

        results = YoutubeSearch(person, num_results).to_json()
        data = json.loads(results)
        url_suffix_list = [video["url_suffix"] for video in data["videos"]]
        return str(url_suffix_list)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        values = query.split(",")
        person = values[0]
        if len(values) > 1:
            num_results = int(values[1])
        else:
            num_results = 2
        return self._search(person, num_results)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("YouTubeSearchTool does not yet support async")

https://api.python.langchain.com/en/stable/_modules/langchain/tools/youtube/search.html
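A usage sketch (requires the youtube_search package; the name follows the comma-separated convention described above, and the count defaults to 2 when omitted):

from langchain.tools.youtube.search import YouTubeSearchTool

tool = YouTubeSearchTool()
# "person,num_results" -> a stringified list of /watch?v=... URL suffixes
print(tool.run("lex fridman,3"))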
Source code for langchain.tools.vectorstore.tool

"""Tools for interacting with vectorstores."""

import json
from typing import Any, Dict, Optional

from pydantic import BaseModel, Field

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.chains import RetrievalQA, RetrievalQAWithSourcesChain
from langchain.llms.openai import OpenAI
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStore


class BaseVectorStoreTool(BaseModel):
    """Base class for tools that use a VectorStore."""

    vectorstore: VectorStore = Field(exclude=True)
    llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))

    class Config(BaseTool.Config):
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True


def _create_description_from_template(values: Dict[str, Any]) -> Dict[str, Any]:
    values["description"] = values["template"].format(name=values["name"])
    return values


class VectorStoreQATool(BaseVectorStoreTool, BaseTool):
    """Tool for the VectorDBQA chain. To be initialized with name and chain."""

    @staticmethod
    def get_description(name: str, description: str) -> str:
        template: str = (
            "Useful for when you need to answer questions about {name}. "
            "Whenever you need information about {description} "
            "you should ALWAYS use this. "
            "Input should be a fully formed question."
        )
        return template.format(name=name, description=description)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        chain = RetrievalQA.from_chain_type(
            self.llm, retriever=self.vectorstore.as_retriever()
        )
        return chain.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("VectorStoreQATool does not support async")


class VectorStoreQAWithSourcesTool(BaseVectorStoreTool, BaseTool):
    """Tool for the VectorDBQAWithSources chain."""

    @staticmethod
    def get_description(name: str, description: str) -> str:
        template: str = (
            "Useful for when you need to answer questions about {name} and the sources "
            "used to construct the answer. "
            "Whenever you need information about {description} "
            "you should ALWAYS use this. "
            " Input should be a fully formed question. "
            "Output is a json serialized dictionary with keys `answer` and `sources`. "
            "Only use this tool if the user explicitly asks for sources."
        )
        return template.format(name=name, description=description)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        chain = RetrievalQAWithSourcesChain.from_chain_type(
            self.llm, retriever=self.vectorstore.as_retriever()
        )
        return json.dumps(chain({chain.question_key: query}, return_only_outputs=True))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("VectorStoreQAWithSourcesTool does not support async")

https://api.python.langchain.com/en/stable/_modules/langchain/tools/vectorstore/tool.html
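A hedged wiring sketch (assumes `store` is any populated VectorStore, e.g. a FAISS index built elsewhere, and that an OpenAI API key is configured for the default LLM created at init time):

from langchain.tools.vectorstore.tool import VectorStoreQATool

qa_tool = VectorStoreQATool(
    name="company handbook",
    description=VectorStoreQATool.get_description(
        "company handbook", "policies and procedures"
    ),
    vectorstore=store,  # `store`: pre-built VectorStore
)
print(qa_tool.run("What is the vacation policy?"))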
Source code for langchain.tools.google_search.tool

"""Tool for the Google search API."""
from typing import Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.google_search import GoogleSearchAPIWrapper


class GoogleSearchRun(BaseTool):
    """Tool that adds the capability to query the Google search API."""

    name = "google_search"
    description = (
        "A wrapper around Google Search. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query."
    )
    api_wrapper: GoogleSearchAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("GoogleSearchRun does not support async")


class GoogleSearchResults(BaseTool):
    """Tool that has capability to query the Google Search API and get back json."""

    name = "Google Search Results JSON"
    description = (
        "A wrapper around Google Search. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query. Output is a JSON array of the query results"
    )
    num_results: int = 4
    api_wrapper: GoogleSearchAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return str(self.api_wrapper.results(query, self.num_results))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("GoogleSearchResults does not support async")

https://api.python.langchain.com/en/stable/_modules/langchain/tools/google_search/tool.html
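A sketch of the JSON-results variant (assumes GOOGLE_API_KEY and GOOGLE_CSE_ID are set in the environment so the wrapper can authenticate):

from langchain.tools.google_search.tool import GoogleSearchResults
from langchain.utilities.google_search import GoogleSearchAPIWrapper

search = GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(), num_results=2)
print(search.run("langchain tools"))  # stringified list of result dicts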
Source code for langchain.tools.wolfram_alpha.tool

"""Tool for the Wolfram Alpha API."""
from typing import Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper


class WolframAlphaQueryRun(BaseTool):
    """Tool that adds the capability to query using the Wolfram Alpha SDK."""

    name = "wolfram_alpha"
    description = (
        "A wrapper around Wolfram Alpha. "
        "Useful for when you need to answer questions about Math, "
        "Science, Technology, Culture, Society and Everyday Life. "
        "Input should be a search query."
    )
    api_wrapper: WolframAlphaAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the WolframAlpha tool."""
        return self.api_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the WolframAlpha tool asynchronously."""
        raise NotImplementedError("WolframAlphaQueryRun does not support async")

https://api.python.langchain.com/en/stable/_modules/langchain/tools/wolfram_alpha/tool.html
Source code for langchain.tools.human.tool

"""Tool for asking human input."""
from typing import Callable, Optional

from pydantic import Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool


def _print_func(text: str) -> None:
    print("\n")
    print(text)


class HumanInputRun(BaseTool):
    """Tool that adds the capability to ask user for input."""

    name = "human"
    description = (
        "You can ask a human for guidance when you think you "
        "got stuck or you are not sure what to do next. "
        "The input should be a question for the human."
    )
    prompt_func: Callable[[str], None] = Field(default_factory=lambda: _print_func)
    input_func: Callable = Field(default_factory=lambda: input)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Human input tool."""
        self.prompt_func(query)
        return self.input_func()

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Human tool asynchronously."""
        raise NotImplementedError("Human tool does not support async")

https://api.python.langchain.com/en/stable/_modules/langchain/tools/human/tool.html
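Both callables are injectable, so the tool can be redirected away from stdin/stdout; a sketch with a canned reply for testing:

from langchain.tools.human.tool import HumanInputRun

tool = HumanInputRun(
    prompt_func=lambda q: print(f"[agent asks] {q}"),
    input_func=lambda: "try restarting the server",  # canned reply for tests
)
print(tool.run("What should I do next?"))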
Source code for langchain.tools.shell.tool

import asyncio
import platform
import warnings
from typing import List, Optional, Type, Union

from pydantic import BaseModel, Field, root_validator

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.bash import BashProcess


class ShellInput(BaseModel):
    """Commands for the Bash Shell tool."""

    commands: Union[str, List[str]] = Field(
        ...,
        description="List of shell commands to run. Deserialized using json.loads",
    )
    """List of shell commands to run."""

    @root_validator
    def _validate_commands(cls, values: dict) -> dict:
        """Validate commands."""
        # TODO: Add real validators
        commands = values.get("commands")
        if not isinstance(commands, list):
            values["commands"] = [commands]
        # Warn that the bash tool is not safe
        warnings.warn(
            "The shell tool has no safeguards by default. Use at your own risk."
        )
        return values


def _get_default_bash_process() -> BashProcess:
    """Get a default BashProcess that returns error output."""
    return BashProcess(return_err_output=True)


def _get_platform() -> str:
    """Get platform."""
    system = platform.system()
    if system == "Darwin":
        return "MacOS"
    return system


class ShellTool(BaseTool):
    """Tool to run shell commands."""

    process: BashProcess = Field(default_factory=_get_default_bash_process)
    """Bash process to run commands."""

    name: str = "terminal"
    """Name of tool."""

    description: str = f"Run shell commands on this {_get_platform()} machine."
    """Description of tool."""

    args_schema: Type[BaseModel] = ShellInput
    """Schema for input arguments."""

    def _run(
        self,
        commands: Union[str, List[str]],
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Run commands and return final output."""
        return self.process.run(commands)

    async def _arun(
        self,
        commands: Union[str, List[str]],
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Run commands asynchronously and return final output."""
        return await asyncio.get_event_loop().run_in_executor(
            None, self.process.run, commands
        )

https://api.python.langchain.com/en/stable/_modules/langchain/tools/shell/tool.html
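A quick demonstration (output is platform-dependent; the safety warning fires during input validation):

from langchain.tools.shell.tool import ShellTool

shell = ShellTool()
# a bare string would be wrapped into a one-element list by the validator
print(shell.run({"commands": ["echo Hello", "pwd"]}))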
Source code for langchain.tools.gmail.send_message

"""Send Gmail messages."""
import base64
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Any, Dict, List, Optional, Union

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.gmail.base import GmailBaseTool


class SendMessageSchema(BaseModel):
    message: str = Field(
        ...,
        description="The message to send.",
    )
    to: Union[str, List[str]] = Field(
        ...,
        description="The list of recipients.",
    )
    subject: str = Field(
        ...,
        description="The subject of the message.",
    )
    cc: Optional[Union[str, List[str]]] = Field(
        None,
        description="The list of CC recipients.",
    )
    bcc: Optional[Union[str, List[str]]] = Field(
        None,
        description="The list of BCC recipients.",
    )


class GmailSendMessage(GmailBaseTool):
    name: str = "send_gmail_message"
    description: str = (
        "Use this tool to send email messages."
        " The input is the message and recipients."
    )

    def _prepare_message(
        self,
        message: str,
        to: Union[str, List[str]],
        subject: str,
        cc: Optional[Union[str, List[str]]] = None,
        bcc: Optional[Union[str, List[str]]] = None,
    ) -> Dict[str, Any]:
        """Create a message for an email."""
        mime_message = MIMEMultipart()
        mime_message.attach(MIMEText(message, "html"))

        mime_message["To"] = ", ".join(to if isinstance(to, list) else [to])
        mime_message["Subject"] = subject
        if cc is not None:
            mime_message["Cc"] = ", ".join(cc if isinstance(cc, list) else [cc])

        if bcc is not None:
            mime_message["Bcc"] = ", ".join(bcc if isinstance(bcc, list) else [bcc])

        encoded_message = base64.urlsafe_b64encode(mime_message.as_bytes()).decode()
        return {"raw": encoded_message}

    def _run(
        self,
        message: str,
        to: Union[str, List[str]],
        subject: str,
        cc: Optional[Union[str, List[str]]] = None,
        bcc: Optional[Union[str, List[str]]] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Run the tool."""
        try:
            create_message = self._prepare_message(message, to, subject, cc=cc, bcc=bcc)
            send_message = (
                self.api_resource.users()
                .messages()
                .send(userId="me", body=create_message)
            )
            sent_message = send_message.execute()
            return f'Message sent. Message Id: {sent_message["id"]}'
        except Exception as error:
            raise Exception(f"An error occurred: {error}")

    async def _arun(
        self,
        message: str,
        to: Union[str, List[str]],
        subject: str,
        cc: Optional[Union[str, List[str]]] = None,
        bcc: Optional[Union[str, List[str]]] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Run the tool asynchronously."""
        raise NotImplementedError(f"The tool {self.name} does not support async yet.")

https://api.python.langchain.com/en/stable/_modules/langchain/tools/gmail/send_message.html
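A hedged sketch (assumes `api_resource` is an already-authenticated googleapiclient Gmail resource; building one requires OAuth credentials not shown here, and the addresses are placeholders):

from langchain.tools.gmail.send_message import GmailSendMessage

send_tool = GmailSendMessage(api_resource=api_resource)  # pre-authenticated resource
result = send_tool.run(
    {
        "message": "<p>Quarterly report attached below.</p>",  # sent as HTML
        "to": ["alice@example.com"],
        "subject": "Quarterly report",
    }
)
print(result)  # "Message sent. Message Id: ..."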
Source code for langchain.tools.gmail.get_message

import base64
import email
from typing import Dict, Optional, Type

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.gmail.base import GmailBaseTool
from langchain.tools.gmail.utils import clean_email_body


class SearchArgsSchema(BaseModel):
    message_id: str = Field(
        ...,
        description="The unique ID of the email message, retrieved from a search.",
    )


class GmailGetMessage(GmailBaseTool):
    name: str = "get_gmail_message"
    description: str = (
        "Use this tool to fetch an email by message ID."
        " Returns the thread ID, snippet, body, subject, and sender."
    )
    args_schema: Type[SearchArgsSchema] = SearchArgsSchema

    def _run(
        self,
        message_id: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Dict:
        """Run the tool."""
        query = (
            self.api_resource.users()
            .messages()
            .get(userId="me", format="raw", id=message_id)
        )
        message_data = query.execute()
        raw_message = base64.urlsafe_b64decode(message_data["raw"])

        email_msg = email.message_from_bytes(raw_message)

        subject = email_msg["Subject"]
        sender = email_msg["From"]

        message_body = email_msg.get_payload()

        body = clean_email_body(message_body)

        return {
            "id": message_id,
            "threadId": message_data["threadId"],
            "snippet": message_data["snippet"],
            "body": body,
            "subject": subject,
            "sender": sender,
        }

    async def _arun(
        self,
        message_id: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> Dict:
        """Run the tool asynchronously."""
        raise NotImplementedError

https://api.python.langchain.com/en/stable/_modules/langchain/tools/gmail/get_message.html
73ebb8fc7245-0
Source code for langchain.tools.gmail.search import base64 import email from enum import Enum from typing import Any, Dict, List, Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.gmail.base import GmailBaseTool from langchain.tools.gmail.utils import clean_email_body class Resource(str, Enum): """Enumerator of Resources to search.""" THREADS = "threads" MESSAGES = "messages" class SearchArgsSchema(BaseModel): # From https://support.google.com/mail/answer/7190?hl=en query: str = Field( ..., description="The Gmail query. Example filters include from:sender," " to:recipient, subject:subject, -filtered_term," " in:folder, is:important|read|starred, after:year/mo/date, " "before:year/mo/date, label:label_name" ' "exact phrase".' " Search newer/older than using d (day), m (month), and y (year): " "newer_than:2d, older_than:1y." " Attachments with extension example: filename:pdf. Multiple term" " matching example: from:amy OR from:david.", ) resource: Resource = Field( default=Resource.MESSAGES, description="Whether to search for threads or messages.", ) max_results: int = Field( default=10, description="The maximum number of results to return.", ) [docs]class GmailSearch(GmailBaseTool): name: str = "search_gmail" description: str = (
https://api.python.langchain.com/en/stable/_modules/langchain/tools/gmail/search.html
73ebb8fc7245-1
name: str = "search_gmail" description: str = ( "Use this tool to search for email messages or threads." " The input must be a valid Gmail query." " The output is a JSON list of the requested resource." ) args_schema: Type[SearchArgsSchema] = SearchArgsSchema def _parse_threads(self, threads: List[Dict[str, Any]]) -> List[Dict[str, Any]]: # Add the thread message snippets to the thread results results = [] for thread in threads: thread_id = thread["id"] thread_data = ( self.api_resource.users() .threads() .get(userId="me", id=thread_id) .execute() ) messages = thread_data["messages"] thread["messages"] = [] for message in messages: snippet = message["snippet"] thread["messages"].append({"snippet": snippet, "id": message["id"]}) results.append(thread) return results def _parse_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: results = [] for message in messages: message_id = message["id"] message_data = ( self.api_resource.users() .messages() .get(userId="me", format="raw", id=message_id) .execute() ) raw_message = base64.urlsafe_b64decode(message_data["raw"]) email_msg = email.message_from_bytes(raw_message) subject = email_msg["Subject"] sender = email_msg["From"] message_body = email_msg.get_payload() body = clean_email_body(message_body) results.append( {
https://api.python.langchain.com/en/stable/_modules/langchain/tools/gmail/search.html
73ebb8fc7245-2
body = clean_email_body(message_body) results.append( { "id": message["id"], "threadId": message_data["threadId"], "snippet": message_data["snippet"], "body": body, "subject": subject, "sender": sender, } ) return results def _run( self, query: str, resource: Resource = Resource.MESSAGES, max_results: int = 10, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> List[Dict[str, Any]]: """Run the tool.""" # Threads and messages are listed via different API endpoints. if resource == Resource.THREADS: results = ( self.api_resource.users() .threads() .list(userId="me", q=query, maxResults=max_results) .execute() .get("threads", []) ) return self._parse_threads(results) elif resource == Resource.MESSAGES: results = ( self.api_resource.users() .messages() .list(userId="me", q=query, maxResults=max_results) .execute() .get("messages", []) ) return self._parse_messages(results) else: raise NotImplementedError(f"Resource of type {resource} not implemented.") async def _arun( self, query: str, resource: Resource = Resource.MESSAGES, max_results: int = 10, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> List[Dict[str, Any]]: """Run the tool.""" raise NotImplementedError
https://api.python.langchain.com/en/stable/_modules/langchain/tools/gmail/search.html
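A short sketch of calling GmailSearch directly, again assuming `service` is an authenticated Gmail API resource; the query string merely illustrates the filter syntax documented above.

```python
from langchain.tools.gmail.search import GmailSearch

search = GmailSearch(api_resource=service)  # `service` assumed as above

# Fetch up to five messages from the last week; the sender address is a placeholder.
hits = search.run({
    "query": "from:[email protected] newer_than:7d",
    "resource": "messages",
    "max_results": 5,
})
for hit in hits:
    print(hit["id"], hit["subject"])
```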
bbb3fe78de05-0
Source code for langchain.tools.gmail.create_draft import base64 from email.message import EmailMessage from typing import List, Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.gmail.base import GmailBaseTool class CreateDraftSchema(BaseModel): message: str = Field( ..., description="The message to include in the draft.", ) to: List[str] = Field( ..., description="The list of recipients.", ) subject: str = Field( ..., description="The subject of the message.", ) cc: Optional[List[str]] = Field( None, description="The list of CC recipients.", ) bcc: Optional[List[str]] = Field( None, description="The list of BCC recipients.", ) [docs]class GmailCreateDraft(GmailBaseTool): name: str = "create_gmail_draft" description: str = ( "Use this tool to create a draft email with the provided message fields." ) args_schema: Type[CreateDraftSchema] = CreateDraftSchema def _prepare_draft_message( self, message: str, to: List[str], subject: str, cc: Optional[List[str]] = None, bcc: Optional[List[str]] = None, ) -> dict: draft_message = EmailMessage() draft_message.set_content(message) draft_message["To"] = ", ".join(to) draft_message["Subject"] = subject if cc is not None:
https://api.python.langchain.com/en/stable/_modules/langchain/tools/gmail/create_draft.html
bbb3fe78de05-1
draft_message["Subject"] = subject if cc is not None: draft_message["Cc"] = ", ".join(cc) if bcc is not None: draft_message["Bcc"] = ", ".join(bcc) encoded_message = base64.urlsafe_b64encode(draft_message.as_bytes()).decode() return {"message": {"raw": encoded_message}} def _run( self, message: str, to: List[str], subject: str, cc: Optional[List[str]] = None, bcc: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: create_message = self._prepare_draft_message(message, to, subject, cc, bcc) draft = ( self.api_resource.users() .drafts() .create(userId="me", body=create_message) .execute() ) output = f'Draft created. Draft Id: {draft["id"]}' return output except Exception as e: raise Exception(f"An error occurred: {e}") async def _arun( self, message: str, to: List[str], subject: str, cc: Optional[List[str]] = None, bcc: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: raise NotImplementedError(f"The tool {self.name} does not support async yet.")
https://api.python.langchain.com/en/stable/_modules/langchain/tools/gmail/create_draft.html
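A sketch of drafting mail with GmailCreateDraft under the same assumption of an authenticated `service` resource; the addresses are placeholders.

```python
from langchain.tools.gmail.create_draft import GmailCreateDraft

draft_tool = GmailCreateDraft(api_resource=service)  # `service` assumed as above

result = draft_tool.run({
    "message": "Hi Alice,\n\nHere are the notes from today's call.",
    "to": ["[email protected]"],   # placeholder recipient
    "subject": "Call notes",
    "cc": ["[email protected]"],      # cc/bcc are optional
})
print(result)  # e.g. 'Draft created. Draft Id: ...'
```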
f1df414ffb5d-0
Source code for langchain.tools.gmail.get_thread from typing import Dict, Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.gmail.base import GmailBaseTool class GetThreadSchema(BaseModel): # From https://support.google.com/mail/answer/7190?hl=en thread_id: str = Field( ..., description="The thread ID.", ) [docs]class GmailGetThread(GmailBaseTool): name: str = "get_gmail_thread" description: str = ( "Use this tool to fetch an entire email thread by its thread ID." " The input must be a valid thread ID." " The output is a JSON list of the thread's messages." ) args_schema: Type[GetThreadSchema] = GetThreadSchema def _run( self, thread_id: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> Dict: """Run the tool.""" query = self.api_resource.users().threads().get(userId="me", id=thread_id) thread_data = query.execute() if not isinstance(thread_data, dict): raise ValueError("The output of the query must be a dict.") messages = thread_data["messages"] thread_data["messages"] = [] keys_to_keep = ["id", "snippet"] # TODO: Parse body. for message in messages: thread_data["messages"].append( {k: message[k] for k in keys_to_keep if k in message} ) return thread_data async def _arun( self,
https://api.python.langchain.com/en/stable/_modules/langchain/tools/gmail/get_thread.html
f1df414ffb5d-1
) return thread_data async def _arun( self, thread_id: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> Dict: """Run the tool.""" raise NotImplementedError
https://api.python.langchain.com/en/stable/_modules/langchain/tools/gmail/get_thread.html
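A sketch of GmailGetThread with the same assumed `service`; the thread ID placeholder would come from GmailSearch with resource="threads".

```python
from langchain.tools.gmail.get_thread import GmailGetThread

thread_tool = GmailGetThread(api_resource=service)  # `service` assumed as above

# "THREAD_ID" is a placeholder; GmailSearch with resource="threads" returns real ones.
thread = thread_tool.run({"thread_id": "THREAD_ID"})
for message in thread["messages"]:
    print(message["snippet"])
```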
d7149ef3b4e0-0
Source code for langchain.tools.file_management.move import shutil from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileMoveInput(BaseModel): """Input for MoveFileTool.""" source_path: str = Field(..., description="Path of the file to move") destination_path: str = Field(..., description="New path for the moved file") [docs]class MoveFileTool(BaseFileToolMixin, BaseTool): name: str = "move_file" args_schema: Type[BaseModel] = FileMoveInput description: str = "Move or rename a file from one location to another" def _run( self, source_path: str, destination_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: source_path_ = self.get_relative_path(source_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format( arg_name="source_path", value=source_path ) try: destination_path_ = self.get_relative_path(destination_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format( arg_name="destination_path", value=destination_path ) if not source_path_.exists(): return f"Error: no such file or directory {source_path}" try: # shutil.move expects str args in 3.8 shutil.move(str(source_path_), destination_path_)
https://api.python.langchain.com/en/stable/_modules/langchain/tools/file_management/move.html
d7149ef3b4e0-1
shutil.move(str(source_path_), destination_path_) return f"File moved successfully from {source_path} to {destination_path}." except Exception as e: return "Error: " + str(e) async def _arun( self, source_path: str, destination_path: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://api.python.langchain.com/en/stable/_modules/langchain/tools/file_management/move.html
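A self-contained sketch of MoveFileTool. The temporary `workspace` directory is created just for the example; passing it as root_dir sandboxes every path the tool accepts.

```python
import pathlib
import tempfile

from langchain.tools.file_management.move import MoveFileTool

workspace = tempfile.mkdtemp()
pathlib.Path(workspace, "draft.txt").write_text("hello")

# root_dir confines all source and destination paths to the sandbox directory.
mover = MoveFileTool(root_dir=workspace)
print(mover.run({"source_path": "draft.txt", "destination_path": "final.txt"}))
# -> File moved successfully from draft.txt to final.txt.
```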
120793147a7f-0
Source code for langchain.tools.file_management.file_search import fnmatch import os from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileSearchInput(BaseModel): """Input for FileSearchTool.""" dir_path: str = Field( default=".", description="Subdirectory to search in.", ) pattern: str = Field( ..., description="Unix shell glob pattern, where * matches everything.", ) [docs]class FileSearchTool(BaseFileToolMixin, BaseTool): name: str = "file_search" args_schema: Type[BaseModel] = FileSearchInput description: str = ( "Recursively search for files in a subdirectory that match the glob pattern" ) def _run( self, pattern: str, dir_path: str = ".", run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: dir_path_ = self.get_relative_path(dir_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path) matches = [] try: for root, _, filenames in os.walk(dir_path_): for filename in fnmatch.filter(filenames, pattern): absolute_path = os.path.join(root, filename) relative_path = os.path.relpath(absolute_path, dir_path_) matches.append(relative_path) if matches:
https://api.python.langchain.com/en/stable/_modules/langchain/tools/file_management/file_search.html
120793147a7f-1
matches.append(relative_path) if matches: return "\n".join(matches) else: return f"No files found for pattern {pattern} in directory {dir_path}" except Exception as e: return "Error: " + str(e) async def _arun( self, dir_path: str, pattern: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://api.python.langchain.com/en/stable/_modules/langchain/tools/file_management/file_search.html
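A sketch of FileSearchTool, reusing the sandboxed `workspace` from the MoveFileTool example above; note the pattern is a shell glob handled by fnmatch, not a regular expression.

```python
from langchain.tools.file_management.file_search import FileSearchTool

finder = FileSearchTool(root_dir=workspace)  # `workspace` from the move example

# Matches are reported relative to dir_path.
print(finder.run({"dir_path": ".", "pattern": "*.txt"}))
# -> final.txt
```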
478e58e1ebd6-0
Source code for langchain.tools.file_management.copy import shutil from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileCopyInput(BaseModel): """Input for CopyFileTool.""" source_path: str = Field(..., description="Path of the file to copy") destination_path: str = Field(..., description="Path to save the copied file") [docs]class CopyFileTool(BaseFileToolMixin, BaseTool): name: str = "copy_file" args_schema: Type[BaseModel] = FileCopyInput description: str = "Create a copy of a file in a specified location" def _run( self, source_path: str, destination_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: source_path_ = self.get_relative_path(source_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format( arg_name="source_path", value=source_path ) try: destination_path_ = self.get_relative_path(destination_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format( arg_name="destination_path", value=destination_path ) try: shutil.copy2(source_path_, destination_path_, follow_symlinks=False) return f"File copied successfully from {source_path} to {destination_path}." except Exception as e:
https://api.python.langchain.com/en/stable/_modules/langchain/tools/file_management/copy.html
478e58e1ebd6-1
except Exception as e: return "Error: " + str(e) async def _arun( self, source_path: str, destination_path: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://api.python.langchain.com/en/stable/_modules/langchain/tools/file_management/copy.html
dc594cbfc59e-0
Source code for langchain.tools.file_management.list_dir import os from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class DirectoryListingInput(BaseModel): """Input for ListDirectoryTool.""" dir_path: str = Field(default=".", description="Subdirectory to list.") [docs]class ListDirectoryTool(BaseFileToolMixin, BaseTool): name: str = "list_directory" args_schema: Type[BaseModel] = DirectoryListingInput description: str = "List files and directories in a specified folder" def _run( self, dir_path: str = ".", run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: dir_path_ = self.get_relative_path(dir_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path) try: entries = os.listdir(dir_path_) if entries: return "\n".join(entries) else: return f"No files found in directory {dir_path}" except Exception as e: return "Error: " + str(e) async def _arun( self, dir_path: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://api.python.langchain.com/en/stable/_modules/langchain/tools/file_management/list_dir.html
0e7b7d3dd8a2-0
Source code for langchain.tools.file_management.delete import os from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileDeleteInput(BaseModel): """Input for DeleteFileTool.""" file_path: str = Field(..., description="Path of the file to delete") [docs]class DeleteFileTool(BaseFileToolMixin, BaseTool): name: str = "file_delete" args_schema: Type[BaseModel] = FileDeleteInput description: str = "Delete a file" def _run( self, file_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: file_path_ = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path) if not file_path_.exists(): return f"Error: no such file or directory: {file_path}" try: os.remove(file_path_) return f"File deleted successfully: {file_path}." except Exception as e: return "Error: " + str(e) async def _arun( self, file_path: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://api.python.langchain.com/en/stable/_modules/langchain/tools/file_management/delete.html
b680e4aa1ebb-0
Source code for langchain.tools.file_management.read from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class ReadFileInput(BaseModel): """Input for ReadFileTool.""" file_path: str = Field(..., description="name of file") [docs]class ReadFileTool(BaseFileToolMixin, BaseTool): name: str = "read_file" args_schema: Type[BaseModel] = ReadFileInput description: str = "Read file from disk" def _run( self, file_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: read_path = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path) if not read_path.exists(): return f"Error: no such file or directory: {file_path}" try: with read_path.open("r", encoding="utf-8") as f: content = f.read() return content except Exception as e: return "Error: " + str(e) async def _arun( self, file_path: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://api.python.langchain.com/en/stable/_modules/langchain/tools/file_management/read.html
02b48bca2b68-0
Source code for langchain.tools.file_management.write from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class WriteFileInput(BaseModel): """Input for WriteFileTool.""" file_path: str = Field(..., description="name of file") text: str = Field(..., description="text to write to file") append: bool = Field( default=False, description="Whether to append to an existing file." ) [docs]class WriteFileTool(BaseFileToolMixin, BaseTool): name: str = "write_file" args_schema: Type[BaseModel] = WriteFileInput description: str = "Write file to disk" def _run( self, file_path: str, text: str, append: bool = False, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: write_path = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path) try: write_path.parent.mkdir(exist_ok=True, parents=False) mode = "a" if append else "w" with write_path.open(mode, encoding="utf-8") as f: f.write(text) return f"File written successfully to {file_path}." except Exception as e: return "Error: " + str(e)
https://api.python.langchain.com/en/stable/_modules/langchain/tools/file_management/write.html
02b48bca2b68-1
except Exception as e: return "Error: " + str(e) async def _arun( self, file_path: str, text: str, append: bool = False, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError
https://api.python.langchain.com/en/stable/_modules/langchain/tools/file_management/write.html
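The write and read tools pair naturally, and the copy, delete, and list_directory tools above follow the same run-with-a-dict pattern. A small round trip, again reusing the sandboxed `workspace` from the earlier file-tool examples:

```python
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool

writer = WriteFileTool(root_dir=workspace)
reader = ReadFileTool(root_dir=workspace)

writer.run({"file_path": "notes.txt", "text": "first line\n"})
writer.run({"file_path": "notes.txt", "text": "second line\n", "append": True})
print(reader.run({"file_path": "notes.txt"}))
# first line
# second line
```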
a47e723ed941-0
Source code for langchain.tools.zapier.tool """## Zapier Natural Language Actions API \ Full docs here: https://nla.zapier.com/start/ **Zapier Natural Language Actions** gives you access to the 5k+ apps, 20k+ actions on Zapier's platform through a natural language API interface. NLA supports apps like Gmail, Salesforce, Trello, Slack, Asana, HubSpot, Google Sheets, Microsoft Teams, and thousands more apps: https://zapier.com/apps Zapier NLA handles ALL the underlying API auth and translation from natural language --> underlying API call --> return simplified output for LLMs The key idea is you, or your users, expose a set of actions via an oauth-like setup window, which you can then query and execute via a REST API. NLA offers both API Key and OAuth for signing NLA API requests. 1. Server-side (API Key): for quickly getting started, testing, and production scenarios where LangChain will only use actions exposed in the developer's Zapier account (and will use the developer's connected accounts on Zapier.com) 2. User-facing (Oauth): for production scenarios where you are deploying an end-user facing application and LangChain needs access to end-user's exposed actions and connected accounts on Zapier.com This quick start will focus on the server-side use case for brevity. Review [full docs](https://nla.zapier.com/start/) for user-facing oauth developer support. Typically, you'd use SequentialChain, here's a basic example: 1. Use NLA to find an email in Gmail 2. Use LLMChain to generate a draft reply to (1)
https://api.python.langchain.com/en/stable/_modules/langchain/tools/zapier/tool.html
a47e723ed941-1
2. Use LLMChain to generate a draft reply to (1) 3. Use NLA to send the draft reply (2) to someone in Slack via direct message In code, below: ```python import os # get from https://platform.openai.com/ os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "") # get from https://nla.zapier.com/docs/authentication/ os.environ["ZAPIER_NLA_API_KEY"] = os.environ.get("ZAPIER_NLA_API_KEY", "") from langchain.llms import OpenAI from langchain.agents import AgentType, initialize_agent from langchain.agents.agent_toolkits import ZapierToolkit from langchain.utilities.zapier import ZapierNLAWrapper ## step 0. expose gmail 'find email' and slack 'send channel message' actions # first go here, log in, expose (enable) the two actions: # https://nla.zapier.com/demo/start # -- for this example, can leave all fields "Have AI guess" # in an oauth scenario, you'd get your own <provider> id (instead of 'demo') # which you route your users through first llm = OpenAI(temperature=0) zapier = ZapierNLAWrapper() ## To leverage OAuth you may pass the value `nla_oauth_access_token` to ## the ZapierNLAWrapper. If you do this there is no need to initialize ## the ZAPIER_NLA_API_KEY env variable # zapier = ZapierNLAWrapper(zapier_nla_oauth_access_token="TOKEN_HERE") toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier) agent = initialize_agent( toolkit.get_tools(), llm,
https://api.python.langchain.com/en/stable/_modules/langchain/tools/zapier/tool.html
a47e723ed941-2
agent = initialize_agent( toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True ) agent.run(("Summarize the last email I received regarding Silicon Valley Bank. " "Send the summary to the #test-zapier channel in slack.")) ``` """ from typing import Any, Dict, Optional from pydantic import Field, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.zapier.prompt import BASE_ZAPIER_TOOL_PROMPT from langchain.utilities.zapier import ZapierNLAWrapper [docs]class ZapierNLARunAction(BaseTool): """ Args: action_id: a specific action ID (from list actions) of the action to execute (the set api_key must be associated with the action owner) instructions: a natural language instruction string for using the action (eg. "get the latest email from Mike Knoop" for "Gmail: find email" action) params: a dict, optional. Any params provided will *override* AI guesses from `instructions` (see "understanding the AI guessing flow" here: https://nla.zapier.com/docs/using-the-api#ai-guessing) """ api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper) action_id: str params: Optional[dict] = None base_prompt: str = BASE_ZAPIER_TOOL_PROMPT zapier_description: str params_schema: Dict[str, str] = Field(default_factory=dict) name = "" description = ""
https://api.python.langchain.com/en/stable/_modules/langchain/tools/zapier/tool.html
a47e723ed941-3
name = "" description = "" @root_validator def set_name_description(cls, values: Dict[str, Any]) -> Dict[str, Any]: zapier_description = values["zapier_description"] params_schema = values["params_schema"] if "instructions" in params_schema: del params_schema["instructions"] # Ensure base prompt (if overrided) contains necessary input fields necessary_fields = {"{zapier_description}", "{params}"} if not all(field in values["base_prompt"] for field in necessary_fields): raise ValueError( "Your custom base Zapier prompt must contain input fields for " "{zapier_description} and {params}." ) values["name"] = zapier_description values["description"] = values["base_prompt"].format( zapier_description=zapier_description, params=str(list(params_schema.keys())), ) return values def _run( self, instructions: str, run_manager: Optional[CallbackManagerForToolRun] = None ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" return self.api_wrapper.run_as_str(self.action_id, instructions, self.params) async def _arun( self, instructions: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" return await self.api_wrapper.arun_as_str( self.action_id, instructions, self.params, ) ZapierNLARunAction.__doc__ = (
https://api.python.langchain.com/en/stable/_modules/langchain/tools/zapier/tool.html
a47e723ed941-4
) ZapierNLARunAction.__doc__ = ( ZapierNLAWrapper.run.__doc__ + ZapierNLARunAction.__doc__ # type: ignore ) # other useful actions [docs]class ZapierNLAListActions(BaseTool): """ Args: None """ name = "ZapierNLA_list_actions" description = BASE_ZAPIER_TOOL_PROMPT + ( "This tool returns a list of the user's exposed actions." ) api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper) def _run( self, _: str = "", run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" return self.api_wrapper.list_as_str() async def _arun( self, _: str = "", run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" return await self.api_wrapper.alist_as_str() ZapierNLAListActions.__doc__ = ( ZapierNLAWrapper.list.__doc__ + ZapierNLAListActions.__doc__ # type: ignore )
https://api.python.langchain.com/en/stable/_modules/langchain/tools/zapier/tool.html
2ccd0b9b1e8c-0
Source code for langchain.tools.json.tool # flake8: noqa """Tools for working with JSON specs.""" from __future__ import annotations import json import re from pathlib import Path from typing import Dict, List, Optional, Union from pydantic import BaseModel from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool def _parse_input(text: str) -> List[Union[str, int]]: """Parse input of the form data["key1"][0]["key2"] into a list of keys.""" _res = re.findall(r"\[.*?]", text) # strip the brackets and quotes, convert to int if possible res = [i[1:-1].replace('"', "") for i in _res] res = [int(i) if i.isdigit() else i for i in res] return res class JsonSpec(BaseModel): """Base class for JSON spec.""" dict_: Dict max_value_length: int = 200 @classmethod def from_file(cls, path: Path) -> JsonSpec: """Create a JsonSpec from a file.""" if not path.exists(): raise FileNotFoundError(f"File not found: {path}") dict_ = json.loads(path.read_text()) return cls(dict_=dict_) def keys(self, text: str) -> str: """Return the keys of the dict at the given path. Args: text: Python representation of the path to the dict (e.g. data["key1"][0]["key2"]). """ try: items = _parse_input(text) val = self.dict_ for i in items:
https://api.python.langchain.com/en/stable/_modules/langchain/tools/json/tool.html
2ccd0b9b1e8c-1
val = self.dict_ for i in items: val = val[i] if not isinstance(val, dict): raise ValueError( f"Value at path `{text}` is not a dict, get the value directly." ) return str(list(val.keys())) except Exception as e: return repr(e) def value(self, text: str) -> str: """Return the value of the dict at the given path. Args: text: Python representation of the path to the dict (e.g. data["key1"][0]["key2"]). """ try: items = _parse_input(text) val = self.dict_ for i in items: val = val[i] if isinstance(val, dict) and len(str(val)) > self.max_value_length: return "Value is a large dictionary, should explore its keys directly" str_val = str(val) if len(str_val) > self.max_value_length: str_val = str_val[: self.max_value_length] + "..." return str_val except Exception as e: return repr(e) [docs]class JsonListKeysTool(BaseTool): """Tool for listing keys in a JSON spec.""" name = "json_spec_list_keys" description = """ Can be used to list all keys at a given path. Before calling this you should be SURE that the path to this exists. The input is a text representation of the path to the dict in Python syntax (e.g. data["key1"][0]["key2"]). """ spec: JsonSpec def _run( self, tool_input: str,
https://api.python.langchain.com/en/stable/_modules/langchain/tools/json/tool.html
2ccd0b9b1e8c-2
def _run( self, tool_input: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: return self.spec.keys(tool_input) async def _arun( self, tool_input: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: return self._run(tool_input) [docs]class JsonGetValueTool(BaseTool): """Tool for getting a value in a JSON spec.""" name = "json_spec_get_value" description = """ Can be used to see value in string format at a given path. Before calling this you should be SURE that the path to this exists. The input is a text representation of the path to the dict in Python syntax (e.g. data["key1"][0]["key2"]). """ spec: JsonSpec def _run( self, tool_input: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: return self.spec.value(tool_input) async def _arun( self, tool_input: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: return self._run(tool_input)
https://api.python.langchain.com/en/stable/_modules/langchain/tools/json/tool.html
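A self-contained sketch of the two JSON tools over a toy spec; the dictionary contents are invented for illustration.

```python
from langchain.tools.json.tool import JsonGetValueTool, JsonListKeysTool, JsonSpec

spec = JsonSpec(dict_={"server": {"host": "db01", "port": 5432}}, max_value_length=200)

list_keys = JsonListKeysTool(spec=spec)
get_value = JsonGetValueTool(spec=spec)

print(list_keys.run('data["server"]'))          # ['host', 'port']
print(get_value.run('data["server"]["port"]'))  # 5432
```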
3d1ff054da00-0
Source code for langchain.tools.google_serper.tool """Tool for the Serper.dev Google Search API.""" from typing import Optional from pydantic.fields import Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.utilities.google_serper import GoogleSerperAPIWrapper [docs]class GoogleSerperRun(BaseTool): """Tool that adds the capability to query the Serper.dev Google search API.""" name = "google_serper" description = ( "A low-cost Google Search API. " "Useful for when you need to answer questions about current events. " "Input should be a search query." ) api_wrapper: GoogleSerperAPIWrapper def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" return str(self.api_wrapper.run(query)) async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" return str(await self.api_wrapper.arun(query)) [docs]class GoogleSerperResults(BaseTool): """Tool that queries the Serper.dev Google Search API and returns the results as JSON.""" name = "Google Serper Results JSON" description = ( "A low-cost Google Search API. " "Useful for when you need to answer questions about current events. " "Input should be a search query. Output is a JSON object of the query results" )
https://api.python.langchain.com/en/stable/_modules/langchain/tools/google_serper/tool.html
3d1ff054da00-1
) api_wrapper: GoogleSerperAPIWrapper = Field(default_factory=GoogleSerperAPIWrapper) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" return str(self.api_wrapper.results(query)) async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" return str(await self.api_wrapper.aresults(query))
https://api.python.langchain.com/en/stable/_modules/langchain/tools/google_serper/tool.html
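A sketch of GoogleSerperResults; the API key is a placeholder, and `k=5` assumes the wrapper's result-count parameter keeps its documented name.

```python
import os

from langchain.tools.google_serper.tool import GoogleSerperResults
from langchain.utilities.google_serper import GoogleSerperAPIWrapper

# Placeholder key; the wrapper reads SERPER_API_KEY from the environment.
os.environ["SERPER_API_KEY"] = "YOUR_SERPER_API_KEY"

search = GoogleSerperResults(api_wrapper=GoogleSerperAPIWrapper(k=5))
print(search.run("latest Python release"))  # the results dict, rendered as a string
```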
9801e4f15094-0
Source code for langchain.prompts.few_shot """Prompt template that contains few shot examples.""" from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.prompts.base import ( DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, check_valid_template, ) from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate [docs]class FewShotPromptTemplate(StringPromptTemplate): """Prompt template that contains few shot examples.""" @property def lc_serializable(self) -> bool: return False examples: Optional[List[dict]] = None """Examples to format into the prompt. Either this or example_selector should be provided.""" example_selector: Optional[BaseExampleSelector] = None """ExampleSelector to choose the examples to format into the prompt. Either this or examples should be provided.""" example_prompt: PromptTemplate """PromptTemplate used to format an individual example.""" suffix: str """A prompt template string to put after the examples.""" input_variables: List[str] """A list of the names of the variables the prompt template expects.""" example_separator: str = "\n\n" """String separator used to join the prefix, the examples, and suffix.""" prefix: str = "" """A prompt template string to put before the examples.""" template_format: str = "f-string" """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" validate_template: bool = True """Whether or not to try validating the template.""" @root_validator(pre=True) def check_examples_and_selector(cls, values: Dict) -> Dict:
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/few_shot.html
9801e4f15094-1
def check_examples_and_selector(cls, values: Dict) -> Dict: """Check that one and only one of examples/example_selector are provided.""" examples = values.get("examples", None) example_selector = values.get("example_selector", None) if examples and example_selector: raise ValueError( "Only one of 'examples' and 'example_selector' should be provided" ) if examples is None and example_selector is None: raise ValueError( "One of 'examples' and 'example_selector' should be provided" ) return values @root_validator() def template_is_valid(cls, values: Dict) -> Dict: """Check that prefix, suffix and input variables are consistent.""" if values["validate_template"]: check_valid_template( values["prefix"] + values["suffix"], values["template_format"], values["input_variables"] + list(values["partial_variables"]), ) return values class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True def _get_examples(self, **kwargs: Any) -> List[dict]: if self.examples is not None: return self.examples elif self.example_selector is not None: return self.example_selector.select_examples(kwargs) else: raise ValueError( "One of 'examples' and 'example_selector' must be set." ) [docs] def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/few_shot.html
9801e4f15094-2
.. code-block:: python prompt.format(variable1="foo") """ kwargs = self._merge_partial_and_user_variables(**kwargs) # Get the examples to use. examples = self._get_examples(**kwargs) examples = [ {k: e[k] for k in self.example_prompt.input_variables} for e in examples ] # Format the examples. example_strings = [ self.example_prompt.format(**example) for example in examples ] # Create the overall template. pieces = [self.prefix, *example_strings, self.suffix] template = self.example_separator.join([piece for piece in pieces if piece]) # Format the template with the input variables. return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs) @property def _prompt_type(self) -> str: """Return the prompt type key.""" return "few_shot" [docs] def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the prompt.""" if self.example_selector: raise ValueError("Saving an example selector is not currently supported") return super().dict(**kwargs)
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/few_shot.html
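A minimal FewShotPromptTemplate example showing how the prefix, formatted examples, and suffix are joined by example_separator.

```python
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate

example_prompt = PromptTemplate(
    input_variables=["word", "antonym"],
    template="Word: {word}\nAntonym: {antonym}",
)

prompt = FewShotPromptTemplate(
    examples=[
        {"word": "happy", "antonym": "sad"},
        {"word": "tall", "antonym": "short"},
    ],
    example_prompt=example_prompt,
    prefix="Give the antonym of every input.",
    suffix="Word: {input}\nAntonym:",
    input_variables=["input"],
)

print(prompt.format(input="big"))
```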
52b6b813e75a-0
Source code for langchain.prompts.loading """Load prompts from disk.""" import importlib import json import logging from pathlib import Path from typing import Union import yaml from langchain.output_parsers.regex import RegexParser from langchain.prompts.base import BasePromptTemplate from langchain.prompts.few_shot import FewShotPromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import BaseLLMOutputParser, NoOpOutputParser from langchain.utilities.loading import try_load_from_hub URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/" logger = logging.getLogger(__name__) def load_prompt_from_config(config: dict) -> BasePromptTemplate: """Load prompt from Config Dict.""" if "_type" not in config: logger.warning("No `_type` key found, defaulting to `prompt`.") config_type = config.pop("_type", "prompt") if config_type not in type_to_loader_dict: raise ValueError(f"Loading {config_type} prompt not supported") prompt_loader = type_to_loader_dict[config_type] return prompt_loader(config) def _load_template(var_name: str, config: dict) -> dict: """Load template from disk if applicable.""" # Check if template_path exists in config. if f"{var_name}_path" in config: # If it does, make sure template variable doesn't also exist. if var_name in config: raise ValueError( f"Both `{var_name}_path` and `{var_name}` cannot be provided." ) # Pop the template path from the config. template_path = Path(config.pop(f"{var_name}_path")) # Load the template.
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/loading.html
52b6b813e75a-1
# Load the template. if template_path.suffix == ".txt": with open(template_path) as f: template = f.read() else: raise ValueError( f"Unsupported template file extension {template_path.suffix}; only .txt is supported." ) # Set the template variable to the extracted variable. config[var_name] = template return config def _load_examples(config: dict) -> dict: """Load examples if necessary.""" if isinstance(config["examples"], list): pass elif isinstance(config["examples"], str): with open(config["examples"]) as f: if config["examples"].endswith(".json"): examples = json.load(f) elif config["examples"].endswith((".yaml", ".yml")): examples = yaml.safe_load(f) else: raise ValueError( "Invalid file format. Only json or yaml formats are supported." ) config["examples"] = examples else: raise ValueError("Invalid examples format. Only list or string are supported.") return config def _load_output_parser(config: dict) -> dict: """Load output parser.""" if "output_parser" in config and config["output_parser"]: _config = config.pop("output_parser") output_parser_type = _config.pop("_type") if output_parser_type == "regex_parser": output_parser: BaseLLMOutputParser = RegexParser(**_config) elif output_parser_type == "default": output_parser = NoOpOutputParser(**_config) else: raise ValueError(f"Unsupported output parser {output_parser_type}") config["output_parser"] = output_parser return config def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate: """Load the few shot prompt from the config."""
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/loading.html
52b6b813e75a-2
"""Load the few shot prompt from the config.""" # Load the suffix and prefix templates. config = _load_template("suffix", config) config = _load_template("prefix", config) # Load the example prompt. if "example_prompt_path" in config: if "example_prompt" in config: raise ValueError( "Only one of example_prompt and example_prompt_path should " "be specified." ) config["example_prompt"] = load_prompt(config.pop("example_prompt_path")) else: config["example_prompt"] = load_prompt_from_config(config["example_prompt"]) # Load the examples. config = _load_examples(config) config = _load_output_parser(config) return FewShotPromptTemplate(**config) def _load_prompt(config: dict) -> PromptTemplate: """Load the prompt template from config.""" # Load the template from disk if necessary. config = _load_template("template", config) config = _load_output_parser(config) return PromptTemplate(**config) [docs]def load_prompt(path: Union[str, Path]) -> BasePromptTemplate: """Unified method for loading a prompt from LangChainHub or local fs.""" if hub_result := try_load_from_hub( path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"} ): return hub_result else: return _load_prompt_from_file(path) def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate: """Load prompt from file.""" # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/loading.html
52b6b813e75a-3
file_path = Path(file) else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) elif file_path.suffix == ".py": spec = importlib.util.spec_from_loader( "prompt", loader=None, origin=str(file_path) ) if spec is None: raise ValueError("could not load spec") helper = importlib.util.module_from_spec(spec) with open(file_path, "rb") as f: exec(f.read(), helper.__dict__) if not isinstance(helper.PROMPT, BasePromptTemplate): raise ValueError("Did not get object of type BasePromptTemplate.") return helper.PROMPT else: raise ValueError(f"Got unsupported file type {file_path.suffix}") # Load the prompt from the config now. return load_prompt_from_config(config) type_to_loader_dict = { "prompt": _load_prompt, "few_shot": _load_few_shot_prompt, # "few_shot_with_templates": _load_few_shot_with_templates_prompt, }
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/loading.html
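A round-trip sketch of load_prompt with a JSON config written to a temporary file; the file name and template text are invented for the example.

```python
import json
import pathlib
import tempfile

from langchain.prompts.loading import load_prompt

config = {
    "_type": "prompt",
    "input_variables": ["topic"],
    "template": "Tell me a joke about {topic}.",
}
path = pathlib.Path(tempfile.mkdtemp()) / "joke.json"
path.write_text(json.dumps(config))

prompt = load_prompt(path)
print(prompt.format(topic="cats"))  # Tell me a joke about cats.
```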
3598e739834c-0
Source code for langchain.prompts.few_shot_with_templates """Prompt template that contains few shot examples.""" from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.prompts.base import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate [docs]class FewShotPromptWithTemplates(StringPromptTemplate): """Prompt template that contains few shot examples.""" examples: Optional[List[dict]] = None """Examples to format into the prompt. Either this or example_selector should be provided.""" example_selector: Optional[BaseExampleSelector] = None """ExampleSelector to choose the examples to format into the prompt. Either this or examples should be provided.""" example_prompt: PromptTemplate """PromptTemplate used to format an individual example.""" suffix: StringPromptTemplate """A PromptTemplate to put after the examples.""" input_variables: List[str] """A list of the names of the variables the prompt template expects.""" example_separator: str = "\n\n" """String separator used to join the prefix, the examples, and suffix.""" prefix: Optional[StringPromptTemplate] = None """A PromptTemplate to put before the examples.""" template_format: str = "f-string" """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" validate_template: bool = True """Whether or not to try validating the template.""" @root_validator(pre=True) def check_examples_and_selector(cls, values: Dict) -> Dict: """Check that one and only one of examples/example_selector are provided.""" examples = values.get("examples", None)
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/few_shot_with_templates.html
3598e739834c-1
examples = values.get("examples", None) example_selector = values.get("example_selector", None) if examples and example_selector: raise ValueError( "Only one of 'examples' and 'example_selector' should be provided" ) if examples is None and example_selector is None: raise ValueError( "One of 'examples' and 'example_selector' should be provided" ) return values @root_validator() def template_is_valid(cls, values: Dict) -> Dict: """Check that prefix, suffix and input variables are consistent.""" if values["validate_template"]: input_variables = values["input_variables"] expected_input_variables = set(values["suffix"].input_variables) expected_input_variables |= set(values["partial_variables"]) if values["prefix"] is not None: expected_input_variables |= set(values["prefix"].input_variables) missing_vars = expected_input_variables.difference(input_variables) if missing_vars: raise ValueError( f"Got input_variables={input_variables}, but based on " f"prefix/suffix expected {expected_input_variables}" ) return values class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True def _get_examples(self, **kwargs: Any) -> List[dict]: if self.examples is not None: return self.examples elif self.example_selector is not None: return self.example_selector.select_examples(kwargs) else: raise ValueError [docs] def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template.
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/few_shot_with_templates.html
3598e739834c-2
Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ kwargs = self._merge_partial_and_user_variables(**kwargs) # Get the examples to use. examples = self._get_examples(**kwargs) # Format the examples. example_strings = [ self.example_prompt.format(**example) for example in examples ] # Create the overall prefix. if self.prefix is None: prefix = "" else: prefix_kwargs = { k: v for k, v in kwargs.items() if k in self.prefix.input_variables } for k in prefix_kwargs.keys(): kwargs.pop(k) prefix = self.prefix.format(**prefix_kwargs) # Create the overall suffix suffix_kwargs = { k: v for k, v in kwargs.items() if k in self.suffix.input_variables } for k in suffix_kwargs.keys(): kwargs.pop(k) suffix = self.suffix.format( **suffix_kwargs, ) pieces = [prefix, *example_strings, suffix] template = self.example_separator.join([piece for piece in pieces if piece]) # Format the template with the input variables. return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs) @property def _prompt_type(self) -> str: """Return the prompt type key.""" return "few_shot_with_templates" [docs] def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the prompt.""" if self.example_selector:
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/few_shot_with_templates.html
3598e739834c-3
"""Return a dictionary of the prompt.""" if self.example_selector: raise ValueError("Saving an example selector is not currently supported") return super().dict(**kwargs)
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/few_shot_with_templates.html
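A short FewShotPromptWithTemplates example; unlike FewShotPromptTemplate, the prefix and suffix are themselves templates with their own variables.

```python
from langchain.prompts.few_shot_with_templates import FewShotPromptWithTemplates
from langchain.prompts.prompt import PromptTemplate

example_prompt = PromptTemplate.from_template("Word: {word}\nAntonym: {antonym}")

prompt = FewShotPromptWithTemplates(
    examples=[{"word": "happy", "antonym": "sad"}],
    example_prompt=example_prompt,
    prefix=PromptTemplate.from_template("Give the antonym of every {category}."),
    suffix=PromptTemplate.from_template("Word: {input}\nAntonym:"),
    input_variables=["category", "input"],
)

print(prompt.format(category="word", input="big"))
```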
a3868e6ba3b0-0
Source code for langchain.prompts.prompt """Prompt schema definition.""" from __future__ import annotations from pathlib import Path from string import Formatter from typing import Any, Dict, List, Union from pydantic import root_validator from langchain.prompts.base import ( DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, _get_jinja2_variables_from_template, check_valid_template, ) [docs]class PromptTemplate(StringPromptTemplate): """Schema to represent a prompt for an LLM. Example: .. code-block:: python from langchain import PromptTemplate prompt = PromptTemplate(input_variables=["foo"], template="Say {foo}") """ @property def lc_attributes(self) -> Dict[str, Any]: return { "template_format": self.template_format, } input_variables: List[str] """A list of the names of the variables the prompt template expects.""" template: str """The prompt template.""" template_format: str = "f-string" """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" validate_template: bool = True """Whether or not to try validating the template.""" @property def _prompt_type(self) -> str: """Return the prompt type key.""" return "prompt" [docs] def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ kwargs = self._merge_partial_and_user_variables(**kwargs)
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/prompt.html
a3868e6ba3b0-1
""" kwargs = self._merge_partial_and_user_variables(**kwargs) return DEFAULT_FORMATTER_MAPPING[self.template_format](self.template, **kwargs) @root_validator() def template_is_valid(cls, values: Dict) -> Dict: """Check that template and input variables are consistent.""" if values["validate_template"]: all_inputs = values["input_variables"] + list(values["partial_variables"]) check_valid_template( values["template"], values["template_format"], all_inputs ) return values [docs] @classmethod def from_examples( cls, examples: List[str], suffix: str, input_variables: List[str], example_separator: str = "\n\n", prefix: str = "", **kwargs: Any, ) -> PromptTemplate: """Take examples in list format with prefix and suffix to create a prompt. Intended to be used as a way to dynamically create a prompt from examples. Args: examples: List of examples to use in the prompt. suffix: String to go after the list of examples. Should generally set up the user's input. input_variables: A list of variable names the final prompt template will expect. example_separator: The separator to use in between examples. Defaults to two new line characters. prefix: String that should go before any examples. Generally includes examples. Default to an empty string. Returns: The final prompt generated. """ template = example_separator.join([prefix, *examples, suffix]) return cls(input_variables=input_variables, template=template, **kwargs) [docs] @classmethod def from_file(
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/prompt.html
a3868e6ba3b0-2
[docs] @classmethod def from_file( cls, template_file: Union[str, Path], input_variables: List[str], **kwargs: Any ) -> PromptTemplate: """Load a prompt from a file. Args: template_file: The path to the file containing the prompt template. input_variables: A list of variable names the final prompt template will expect. Returns: The prompt loaded from the file. """ with open(str(template_file), "r") as f: template = f.read() return cls(input_variables=input_variables, template=template, **kwargs) [docs] @classmethod def from_template(cls, template: str, **kwargs: Any) -> PromptTemplate: """Load a prompt template from a template.""" if "template_format" in kwargs and kwargs["template_format"] == "jinja2": # Get the variables for the template input_variables = _get_jinja2_variables_from_template(template) else: input_variables = { v for _, v, _, _ in Formatter().parse(template) if v is not None } if "partial_variables" in kwargs: partial_variables = kwargs["partial_variables"] input_variables = { var for var in input_variables if var not in partial_variables } return cls( input_variables=list(sorted(input_variables)), template=template, **kwargs ) # For backwards compatibility. Prompt = PromptTemplate
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/prompt.html
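A quick PromptTemplate example; note that from_template infers (and sorts) the input variables from the template string.

```python
from langchain.prompts.prompt import PromptTemplate

prompt = PromptTemplate.from_template("Summarize {text} in {style} style.")
print(prompt.input_variables)  # ['style', 'text']
print(prompt.format(text="the meeting notes", style="bullet-point"))
```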
5df8c2d18d3e-0
Source code for langchain.prompts.pipeline from typing import Any, Dict, List, Tuple from pydantic import root_validator from langchain.prompts.base import BasePromptTemplate from langchain.prompts.chat import BaseChatPromptTemplate from langchain.schema import PromptValue def _get_inputs(inputs: dict, input_variables: List[str]) -> dict: return {k: inputs[k] for k in input_variables} [docs]class PipelinePromptTemplate(BasePromptTemplate): """A prompt template for composing multiple prompts together. This can be useful when you want to reuse parts of prompts. A PipelinePrompt consists of two main parts: - final_prompt: This is the final prompt that is returned - pipeline_prompts: This is a list of tuples, consisting of a string (`name`) and a Prompt Template. Each PromptTemplate will be formatted and then passed to future prompt templates as a variable with the same name as `name` """ final_prompt: BasePromptTemplate pipeline_prompts: List[Tuple[str, BasePromptTemplate]] @root_validator(pre=True) def get_input_variables(cls, values: Dict) -> Dict: """Get input variables.""" created_variables = set() all_variables = set() for k, prompt in values["pipeline_prompts"]: created_variables.add(k) all_variables.update(prompt.input_variables) values["input_variables"] = list(all_variables.difference(created_variables)) return values [docs] def format_prompt(self, **kwargs: Any) -> PromptValue: for k, prompt in self.pipeline_prompts: _inputs = _get_inputs(kwargs, prompt.input_variables) if isinstance(prompt, BaseChatPromptTemplate):
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/pipeline.html
5df8c2d18d3e-1
if isinstance(prompt, BaseChatPromptTemplate): kwargs[k] = prompt.format_messages(**_inputs) else: kwargs[k] = prompt.format(**_inputs) _inputs = _get_inputs(kwargs, self.final_prompt.input_variables) return self.final_prompt.format_prompt(**_inputs) [docs] def format(self, **kwargs: Any) -> str: return self.format_prompt(**kwargs).to_string() @property def _prompt_type(self) -> str: raise ValueError
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/pipeline.html
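A minimal PipelinePromptTemplate example: each named sub-prompt is formatted first, then substituted into the final prompt under its name.

```python
from langchain.prompts.pipeline import PipelinePromptTemplate
from langchain.prompts.prompt import PromptTemplate

final = PromptTemplate.from_template("{introduction}\n\n{start}")
introduction = PromptTemplate.from_template("You are impersonating {person}.")
start = PromptTemplate.from_template("Q: {question}\nA:")

pipeline = PipelinePromptTemplate(
    final_prompt=final,
    pipeline_prompts=[("introduction", introduction), ("start", start)],
)

print(pipeline.input_variables)  # ['person', 'question'] (order may vary)
print(pipeline.format(person="Ada Lovelace", question="What is an algorithm?"))
```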
3c3578e9c2ac-0
Source code for langchain.prompts.chat """Chat prompt template.""" from __future__ import annotations from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Callable, List, Sequence, Tuple, Type, TypeVar, Union from pydantic import Field, root_validator from langchain.load.serializable import Serializable from langchain.memory.buffer import get_buffer_string from langchain.prompts.base import BasePromptTemplate, StringPromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import ( AIMessage, BaseMessage, ChatMessage, HumanMessage, PromptValue, SystemMessage, ) class BaseMessagePromptTemplate(Serializable, ABC): @property def lc_serializable(self) -> bool: return True @abstractmethod def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """To messages.""" @property @abstractmethod def input_variables(self) -> List[str]: """Input variables for this prompt template.""" [docs]class MessagesPlaceholder(BaseMessagePromptTemplate): """Prompt template that assumes variable is already list of messages.""" variable_name: str [docs] def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """To a BaseMessage.""" value = kwargs[self.variable_name] if not isinstance(value, list): raise ValueError( f"variable {self.variable_name} should be a list of base messages, " f"got {value}" ) for v in value: if not isinstance(v, BaseMessage): raise ValueError( f"variable {self.variable_name} should be a list of base messages," f" got {value}" )
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/chat.html
3c3578e9c2ac-1
f" got {value}" ) return value @property def input_variables(self) -> List[str]: """Input variables for this prompt template.""" return [self.variable_name] MessagePromptTemplateT = TypeVar( "MessagePromptTemplateT", bound="BaseStringMessagePromptTemplate" ) class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC): prompt: StringPromptTemplate additional_kwargs: dict = Field(default_factory=dict) @classmethod def from_template( cls: Type[MessagePromptTemplateT], template: str, template_format: str = "f-string", **kwargs: Any, ) -> MessagePromptTemplateT: prompt = PromptTemplate.from_template(template, template_format=template_format) return cls(prompt=prompt, **kwargs) @classmethod def from_template_file( cls: Type[MessagePromptTemplateT], template_file: Union[str, Path], input_variables: List[str], **kwargs: Any, ) -> MessagePromptTemplateT: prompt = PromptTemplate.from_file(template_file, input_variables) return cls(prompt=prompt, **kwargs) @abstractmethod def format(self, **kwargs: Any) -> BaseMessage: """To a BaseMessage.""" def format_messages(self, **kwargs: Any) -> List[BaseMessage]: return [self.format(**kwargs)] @property def input_variables(self) -> List[str]: return self.prompt.input_variables [docs]class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate): role: str [docs] def format(self, **kwargs: Any) -> BaseMessage: text = self.prompt.format(**kwargs) return ChatMessage(
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/chat.html
3c3578e9c2ac-2
class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate):
    role: str

    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return ChatMessage(
            content=text, role=self.role, additional_kwargs=self.additional_kwargs
        )


class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return HumanMessage(content=text, additional_kwargs=self.additional_kwargs)


class AIMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return AIMessage(content=text, additional_kwargs=self.additional_kwargs)


class SystemMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return SystemMessage(content=text, additional_kwargs=self.additional_kwargs)


class ChatPromptValue(PromptValue):
    messages: List[BaseMessage]

    def to_string(self) -> str:
        """Return prompt as string."""
        return get_buffer_string(self.messages)

    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as messages."""
        return self.messages


class BaseChatPromptTemplate(BasePromptTemplate, ABC):
    def format(self, **kwargs: Any) -> str:
        return self.format_prompt(**kwargs).to_string()

    def format_prompt(self, **kwargs: Any) -> PromptValue:
        messages = self.format_messages(**kwargs)
        return ChatPromptValue(messages=messages)

    @abstractmethod
    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        """Format kwargs into a list of messages."""
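The role-specific templates above differ only in the message class they emit. A
short sketch (with illustrative message contents) showing how ChatPromptValue
bridges the string and message views of the same prompt:

from langchain.prompts.chat import ChatPromptValue
from langchain.schema import HumanMessage, SystemMessage

value = ChatPromptValue(
    messages=[
        SystemMessage(content="You are terse."),
        HumanMessage(content="Define prompt."),
    ]
)
print(value.to_string())    # buffer-style transcript: "System: ...\nHuman: ..."
print(value.to_messages())  # the original message list, unchanged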
"""Format kwargs into a list of messages.""" [docs]class ChatPromptTemplate(BaseChatPromptTemplate, ABC): input_variables: List[str] messages: List[Union[BaseMessagePromptTemplate, BaseMessage]] @root_validator(pre=True) def validate_input_variables(cls, values: dict) -> dict: messages = values["messages"] input_vars = set() for message in messages: if isinstance(message, BaseMessagePromptTemplate): input_vars.update(message.input_variables) if "partial_variables" in values: input_vars = input_vars - set(values["partial_variables"]) if "input_variables" in values: if input_vars != set(values["input_variables"]): raise ValueError( "Got mismatched input_variables. " f"Expected: {input_vars}. " f"Got: {values['input_variables']}" ) else: values["input_variables"] = list(input_vars) return values [docs] @classmethod def from_template(cls, template: str, **kwargs: Any) -> ChatPromptTemplate: prompt_template = PromptTemplate.from_template(template, **kwargs) message = HumanMessagePromptTemplate(prompt=prompt_template) return cls.from_messages([message]) [docs] @classmethod def from_role_strings( cls, string_messages: List[Tuple[str, str]] ) -> ChatPromptTemplate: messages = [ ChatMessagePromptTemplate( prompt=PromptTemplate.from_template(template), role=role ) for role, template in string_messages ] return cls.from_messages(messages) [docs] @classmethod def from_strings(
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/chat.html
3c3578e9c2ac-4
    @classmethod
    def from_strings(
        cls, string_messages: List[Tuple[Type[BaseMessagePromptTemplate], str]]
    ) -> ChatPromptTemplate:
        messages = [
            role(prompt=PromptTemplate.from_template(template))
            for role, template in string_messages
        ]
        return cls.from_messages(messages)

    @classmethod
    def from_messages(
        cls, messages: Sequence[Union[BaseMessagePromptTemplate, BaseMessage]]
    ) -> ChatPromptTemplate:
        input_vars = set()
        for message in messages:
            if isinstance(message, BaseMessagePromptTemplate):
                input_vars.update(message.input_variables)
        return cls(input_variables=list(input_vars), messages=messages)

    def format(self, **kwargs: Any) -> str:
        return self.format_prompt(**kwargs).to_string()

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        result = []
        for message_template in self.messages:
            if isinstance(message_template, BaseMessage):
                result.extend([message_template])
            elif isinstance(message_template, BaseMessagePromptTemplate):
                rel_params = {
                    k: v
                    for k, v in kwargs.items()
                    if k in message_template.input_variables
                }
                message = message_template.format_messages(**rel_params)
                result.extend(message)
            else:
                raise ValueError(f"Unexpected input: {message_template}")
        return result

    def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate:
        raise NotImplementedError
    @property
    def _prompt_type(self) -> str:
        return "chat"

    def save(self, file_path: Union[Path, str]) -> None:
        raise NotImplementedError
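To tie this module together, a usage sketch for the `from_messages` /
`format_messages` path, including a MessagesPlaceholder (template strings and
variable values are illustrative):

from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
)

chat_prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessagePromptTemplate.from_template("You answer about {topic}."),
        MessagesPlaceholder(variable_name="history"),
        HumanMessagePromptTemplate.from_template("{question}"),
    ]
)
# from_messages infers input_variables: {"topic", "history", "question"}.
messages = chat_prompt.format_messages(
    topic="prompts", history=[], question="What is a ChatPromptValue?"
)
# format() returns the same content flattened to a single buffer string.
text = chat_prompt.format(
    topic="prompts", history=[], question="What is a ChatPromptValue?"
)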
Source code for langchain.prompts.base
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/base.html

"""BasePrompt schema definition."""
from __future__ import annotations

import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, Optional, Set, Union

import yaml
from pydantic import Field, root_validator

from langchain.formatting import formatter
from langchain.load.serializable import Serializable
from langchain.schema import BaseMessage, BaseOutputParser, HumanMessage, PromptValue


def jinja2_formatter(template: str, **kwargs: Any) -> str:
    """Format a template using jinja2."""
    try:
        from jinja2 import Template
    except ImportError:
        raise ImportError(
            "jinja2 not installed, which is needed to use the jinja2_formatter. "
            "Please install it with `pip install jinja2`."
        )
    return Template(template).render(**kwargs)


def validate_jinja2(template: str, input_variables: List[str]) -> None:
    """Validate that the input variables are valid for the template.

    Raise an exception if missing or extra variables are found.

    Args:
        template: The template string.
        input_variables: The input variables.
    """
    input_variables_set = set(input_variables)
    valid_variables = _get_jinja2_variables_from_template(template)
    missing_variables = valid_variables - input_variables_set
    extra_variables = input_variables_set - valid_variables

    error_message = ""
    if missing_variables:
        error_message += f"Missing variables: {missing_variables} "
    if extra_variables:
        error_message += f"Extra variables: {extra_variables}"
    if error_message:
        raise KeyError(error_message.strip())
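A usage sketch for the two jinja2 helpers above (requires `pip install jinja2`;
the template text is illustrative):

from langchain.prompts.base import jinja2_formatter, validate_jinja2

rendered = jinja2_formatter("Hello {{ name }}!", name="world")
assert rendered == "Hello world!"

# validate_jinja2 raises KeyError listing missing and/or extra variables.
try:
    validate_jinja2("Hello {{ name }}!", input_variables=["name", "unused"])
except KeyError as e:
    print(e)  # mentions the extra variable "unused"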
def _get_jinja2_variables_from_template(template: str) -> Set[str]:
    try:
        from jinja2 import Environment, meta
    except ImportError:
        raise ImportError(
            "jinja2 not installed, which is needed to use the jinja2_formatter. "
            "Please install it with `pip install jinja2`."
        )
    env = Environment()
    ast = env.parse(template)
    variables = meta.find_undeclared_variables(ast)
    return variables


DEFAULT_FORMATTER_MAPPING: Dict[str, Callable] = {
    "f-string": formatter.format,
    "jinja2": jinja2_formatter,
}

DEFAULT_VALIDATOR_MAPPING: Dict[str, Callable] = {
    "f-string": formatter.validate_input_variables,
    "jinja2": validate_jinja2,
}


def check_valid_template(
    template: str, template_format: str, input_variables: List[str]
) -> None:
    """Check that template string is valid."""
    if template_format not in DEFAULT_FORMATTER_MAPPING:
        valid_formats = list(DEFAULT_FORMATTER_MAPPING)
        raise ValueError(
            f"Invalid template format. Got `{template_format}`;"
            f" should be one of {valid_formats}"
        )
    try:
        validator_func = DEFAULT_VALIDATOR_MAPPING[template_format]
        validator_func(template, input_variables)
    except KeyError as e:
        raise ValueError(
            "Invalid prompt schema; check for mismatched or missing input parameters. "
            + str(e)
        )


class StringPromptValue(PromptValue):
    text: str

    def to_string(self) -> str:
        """Return prompt as string."""
        return self.text
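A short sketch of `check_valid_template` above rejecting a template whose
declared variables don't match (illustrative input):

from langchain.prompts.base import check_valid_template

# A well-formed template passes silently.
check_valid_template("Tell me about {topic}.", "f-string", ["topic"])

# A mismatch between template and declared variables raises ValueError.
try:
    check_valid_template("Tell me about {topic}.", "f-string", ["subject"])
except ValueError as e:
    print(e)  # "Invalid prompt schema; ..."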
"""Return prompt as string.""" return self.text def to_messages(self) -> List[BaseMessage]: """Return prompt as messages.""" return [HumanMessage(content=self.text)] [docs]class BasePromptTemplate(Serializable, ABC): """Base class for all prompt templates, returning a prompt.""" input_variables: List[str] """A list of the names of the variables the prompt template expects.""" output_parser: Optional[BaseOutputParser] = None """How to parse the output of calling an LLM on this formatted prompt.""" partial_variables: Mapping[str, Union[str, Callable[[], str]]] = Field( default_factory=dict ) @property def lc_serializable(self) -> bool: return True class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] @abstractmethod def format_prompt(self, **kwargs: Any) -> PromptValue: """Create Chat Messages.""" @root_validator() def validate_variable_names(cls, values: Dict) -> Dict: """Validate variable names do not include restricted names.""" if "stop" in values["input_variables"]: raise ValueError( "Cannot have an input variable named 'stop', as it is used internally," " please rename." ) if "stop" in values["partial_variables"]: raise ValueError( "Cannot have an partial variable named 'stop', as it is used " "internally, please rename." ) overall = set(values["input_variables"]).intersection( values["partial_variables"] ) if overall: raise ValueError( f"Found overlapping input and partial variables: {overall}" )
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/base.html
a60ce9173993-3
f"Found overlapping input and partial variables: {overall}" ) return values [docs] def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate: """Return a partial of the prompt template.""" prompt_dict = self.__dict__.copy() prompt_dict["input_variables"] = list( set(self.input_variables).difference(kwargs) ) prompt_dict["partial_variables"] = {**self.partial_variables, **kwargs} return type(self)(**prompt_dict) def _merge_partial_and_user_variables(self, **kwargs: Any) -> Dict[str, Any]: # Get partial params: partial_kwargs = { k: v if isinstance(v, str) else v() for k, v in self.partial_variables.items() } return {**partial_kwargs, **kwargs} [docs] @abstractmethod def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ @property def _prompt_type(self) -> str: """Return the prompt type key.""" raise NotImplementedError [docs] def dict(self, **kwargs: Any) -> Dict: """Return dictionary representation of prompt.""" prompt_dict = super().dict(**kwargs) prompt_dict["_type"] = self._prompt_type return prompt_dict [docs] def save(self, file_path: Union[Path, str]) -> None: """Save the prompt. Args:
https://api.python.langchain.com/en/stable/_modules/langchain/prompts/base.html