d8867cdfc156-4
llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) # llm attribute is deprecated in favor of llm_chain, here to support old configs elif "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) # llm_path attribute is deprecated in favor of llm_chain_path, # its to support old configs elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) if llm_chain: return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config) else: return LLMBashChain(llm=llm, prompt=prompt, **config) def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.")
https://api.python.langchain.com/en/latest/_modules/langchain/chains/loading.html
d8867cdfc156-5
if "create_draft_answer_prompt" in config: create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt") create_draft_answer_prompt = load_prompt_from_config( create_draft_answer_prompt_config ) elif "create_draft_answer_prompt_path" in config: create_draft_answer_prompt = load_prompt( config.pop("create_draft_answer_prompt_path") ) if "list_assertions_prompt" in config: list_assertions_prompt_config = config.pop("list_assertions_prompt") list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config) elif "list_assertions_prompt_path" in config: list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path")) if "check_assertions_prompt" in config: check_assertions_prompt_config = config.pop("check_assertions_prompt") check_assertions_prompt = load_prompt_from_config( check_assertions_prompt_config ) elif "check_assertions_prompt_path" in config: check_assertions_prompt = load_prompt( config.pop("check_assertions_prompt_path") ) if "revised_answer_prompt" in config: revised_answer_prompt_config = config.pop("revised_answer_prompt") revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config) elif "revised_answer_prompt_path" in config: revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path")) return LLMCheckerChain( llm=llm, create_draft_answer_prompt=create_draft_answer_prompt, list_assertions_prompt=list_assertions_prompt, check_assertions_prompt=check_assertions_prompt, revised_answer_prompt=revised_answer_prompt, **config, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/loading.html
d8867cdfc156-6
revised_answer_prompt=revised_answer_prompt, **config, ) def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain: llm_chain = None if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) # llm attribute is deprecated in favor of llm_chain, here to support old configs elif "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) # llm_path attribute is deprecated in favor of llm_chain_path, # its to support old configs elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) if llm_chain: return LLMMathChain(llm_chain=llm_chain, prompt=prompt, **config) else: return LLMMathChain(llm=llm, prompt=prompt, **config) def _load_map_rerank_documents_chain( config: dict, **kwargs: Any ) -> MapRerankDocumentsChain: if "llm_chain" in config:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/loading.html
d8867cdfc156-7
if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.") return MapRerankDocumentsChain(llm_chain=llm_chain, **config) def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain: llm_chain = None if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) # llm attribute is deprecated in favor of llm_chain, here to support old configs elif "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) # llm_path attribute is deprecated in favor of llm_chain_path, # its to support old configs elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/loading.html
d8867cdfc156-8
prompt = load_prompt(config.pop("prompt_path")) else: raise ValueError("One of `prompt` or `prompt_path` must be present.") if llm_chain: return PALChain(llm_chain=llm_chain, prompt=prompt, **config) else: return PALChain(llm=llm, prompt=prompt, **config) def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain: if "initial_llm_chain" in config: initial_llm_chain_config = config.pop("initial_llm_chain") initial_llm_chain = load_chain_from_config(initial_llm_chain_config) elif "initial_llm_chain_path" in config: initial_llm_chain = load_chain(config.pop("initial_llm_chain_path")) else: raise ValueError( "One of `initial_llm_chain` or `initial_llm_chain_config` must be present." ) if "refine_llm_chain" in config: refine_llm_chain_config = config.pop("refine_llm_chain") refine_llm_chain = load_chain_from_config(refine_llm_chain_config) elif "refine_llm_chain_path" in config: refine_llm_chain = load_chain(config.pop("refine_llm_chain_path")) else: raise ValueError( "One of `refine_llm_chain` or `refine_llm_chain_config` must be present." ) if "document_prompt" in config: prompt_config = config.pop("document_prompt") document_prompt = load_prompt_from_config(prompt_config) elif "document_prompt_path" in config: document_prompt = load_prompt(config.pop("document_prompt_path"))
https://api.python.langchain.com/en/latest/_modules/langchain/chains/loading.html
d8867cdfc156-9
document_prompt = load_prompt(config.pop("document_prompt_path")) return RefineDocumentsChain( initial_llm_chain=initial_llm_chain, refine_llm_chain=refine_llm_chain, document_prompt=document_prompt, **config, ) def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain: if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config) def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain: if "database" in kwargs: database = kwargs.pop("database") else: raise ValueError("`database` must be present.") if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) else: prompt = None
https://api.python.langchain.com/en/latest/_modules/langchain/chains/loading.html
d8867cdfc156-10
prompt = load_prompt_from_config(prompt_config) else: prompt = None return SQLDatabaseChain.from_llm(llm, database, prompt=prompt, **config) def _load_vector_db_qa_with_sources_chain( config: dict, **kwargs: Any ) -> VectorDBQAWithSourcesChain: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQAWithSourcesChain( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_retrieval_qa(config: dict, **kwargs: Any) -> RetrievalQA: if "retriever" in kwargs: retriever = kwargs.pop("retriever") else: raise ValueError("`retriever` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/loading.html
d8867cdfc156-11
else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return RetrievalQA( combine_documents_chain=combine_documents_chain, retriever=retriever, **config, ) def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQA( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_graph_cypher_chain(config: dict, **kwargs: Any) -> GraphCypherQAChain: if "graph" in kwargs: graph = kwargs.pop("graph") else: raise ValueError("`graph` must be present.") if "cypher_generation_chain" in config: cypher_generation_chain_config = config.pop("cypher_generation_chain") cypher_generation_chain = load_chain_from_config(cypher_generation_chain_config) else: raise ValueError("`cypher_generation_chain` must be present.") if "qa_chain" in config:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/loading.html
d8867cdfc156-12
if "qa_chain" in config: qa_chain_config = config.pop("qa_chain") qa_chain = load_chain_from_config(qa_chain_config) else: raise ValueError("`qa_chain` must be present.") return GraphCypherQAChain( graph=graph, cypher_generation_chain=cypher_generation_chain, qa_chain=qa_chain, **config, ) def _load_api_chain(config: dict, **kwargs: Any) -> APIChain: if "api_request_chain" in config: api_request_chain_config = config.pop("api_request_chain") api_request_chain = load_chain_from_config(api_request_chain_config) elif "api_request_chain_path" in config: api_request_chain = load_chain(config.pop("api_request_chain_path")) else: raise ValueError( "One of `api_request_chain` or `api_request_chain_path` must be present." ) if "api_answer_chain" in config: api_answer_chain_config = config.pop("api_answer_chain") api_answer_chain = load_chain_from_config(api_answer_chain_config) elif "api_answer_chain_path" in config: api_answer_chain = load_chain(config.pop("api_answer_chain_path")) else: raise ValueError( "One of `api_answer_chain` or `api_answer_chain_path` must be present." ) if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") else: raise ValueError("`requests_wrapper` must be present.") return APIChain( api_request_chain=api_request_chain, api_answer_chain=api_answer_chain, requests_wrapper=requests_wrapper, **config, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/loading.html
d8867cdfc156-13
requests_wrapper=requests_wrapper, **config, ) def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain: if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") return LLMRequestsChain( llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config ) else: return LLMRequestsChain(llm_chain=llm_chain, **config) type_to_loader_dict = { "api_chain": _load_api_chain, "hyde_chain": _load_hyde_chain, "llm_chain": _load_llm_chain, "llm_bash_chain": _load_llm_bash_chain, "llm_checker_chain": _load_llm_checker_chain, "llm_math_chain": _load_llm_math_chain, "llm_requests_chain": _load_llm_requests_chain, "pal_chain": _load_pal_chain, "qa_with_sources_chain": _load_qa_with_sources_chain, "stuff_documents_chain": _load_stuff_documents_chain, "map_reduce_documents_chain": _load_map_reduce_documents_chain, "map_rerank_documents_chain": _load_map_rerank_documents_chain,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/loading.html
d8867cdfc156-14
"map_rerank_documents_chain": _load_map_rerank_documents_chain, "refine_documents_chain": _load_refine_documents_chain, "sql_database_chain": _load_sql_database_chain, "vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain, "vector_db_qa": _load_vector_db_qa, "retrieval_qa": _load_retrieval_qa, "graph_cypher_chain": _load_graph_cypher_chain, } def load_chain_from_config(config: dict, **kwargs: Any) -> Chain: """Load chain from Config Dict.""" if "_type" not in config: raise ValueError("Must specify a chain Type in config") config_type = config.pop("_type") if config_type not in type_to_loader_dict: raise ValueError(f"Loading {config_type} chain not supported") chain_loader = type_to_loader_dict[config_type] return chain_loader(config, **kwargs) [docs]def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain: """Unified method for loading a chain from LangChainHub or local fs.""" if hub_result := try_load_from_hub( path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs ): return hub_result else: return _load_chain_from_file(path, **kwargs) def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain: """Load chain from file.""" # Convert file to Path object. if isinstance(file, str): file_path = Path(file) else: file_path = file # Load from either json or yaml.
https://api.python.langchain.com/en/latest/_modules/langchain/chains/loading.html
d8867cdfc156-15
else: file_path = file # Load from either json or yaml. if file_path.suffix == ".json": with open(file_path) as f: config = json.load(f) elif file_path.suffix == ".yaml": with open(file_path, "r") as f: config = yaml.safe_load(f) else: raise ValueError("File type must be json or yaml") # Override default 'verbose' and 'memory' for the chain if "verbose" in kwargs: config["verbose"] = kwargs.pop("verbose") if "memory" in kwargs: config["memory"] = kwargs.pop("memory") # Load the chain from the config now. return load_chain_from_config(config, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/loading.html
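The loaders above are driven entirely by the "_type" key of a saved config. Below is a minimal round-trip sketch, assuming a locally saved LLMChain config; the file name, prompt text, and model settings are invented for illustration, and an OPENAI_API_KEY is assumed to be set in the environment.

from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.chains.loading import load_chain

prompt = PromptTemplate(input_variables=["question"], template="Answer briefly: {question}")
chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
chain.save("my_llm_chain.json")  # writes a config whose "_type" is "llm_chain"

# load_chain reads the json/yaml file, pops "_type", and dispatches to the
# matching loader in type_to_loader_dict (_load_llm_chain in this case).
# Extra kwargs such as verbose override the saved config.
reloaded = load_chain("my_llm_chain.json", verbose=True)
print(reloaded.run(question="What does load_chain do?"))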
188384bef808-0
Source code for langchain.chains.moderation """Pass input through a moderation endpoint.""" from typing import Any, Dict, List, Optional from pydantic import root_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.utils import get_from_dict_or_env [docs]class OpenAIModerationChain(Chain): """Pass input through a moderation endpoint. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.chains import OpenAIModerationChain moderation = OpenAIModerationChain() """ client: Any #: :meta private: model_name: Optional[str] = None """Moderation model name to use.""" error: bool = False """Whether or not to error if bad content was found.""" input_key: str = "input" #: :meta private: output_key: str = "output" #: :meta private: openai_api_key: Optional[str] = None openai_organization: Optional[str] = None @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) openai_organization = get_from_dict_or_env( values, "openai_organization",
https://api.python.langchain.com/en/latest/_modules/langchain/chains/moderation.html
188384bef808-1
values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai openai.api_key = openai_api_key if openai_organization: openai.organization = openai_organization values["client"] = openai.Moderation except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] def _moderate(self, text: str, results: dict) -> str: if results["flagged"]: error_str = "Text was found that violates OpenAI's content policy." if self.error: raise ValueError(error_str) else: return error_str return text def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: text = inputs[self.input_key] results = self.client.create(text) output = self._moderate(text, results["results"][0]) return {self.output_key: output}
https://api.python.langchain.com/en/latest/_modules/langchain/chains/moderation.html
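A minimal usage sketch for the moderation chain above, assuming the openai package is installed and OPENAI_API_KEY is set; the sample strings are invented.

from langchain.chains import OpenAIModerationChain

moderation = OpenAIModerationChain()  # default error=False: flagged text is replaced with a warning string
print(moderation.run("This is an innocuous sentence."))

strict_moderation = OpenAIModerationChain(error=True)  # raises ValueError when content is flagged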
7ec7edb853a5-0
Source code for langchain.chains.mapreduce """Map-reduce chain. Splits up a document, sends the smaller parts to the LLM with one prompt, then combines the results with another one. """ from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.docstore.document import Document from langchain.prompts.base import BasePromptTemplate from langchain.text_splitter import TextSplitter [docs]class MapReduceChain(Chain): """Map-reduce chain.""" combine_documents_chain: BaseCombineDocumentsChain """Chain to use to combine documents.""" text_splitter: TextSplitter """Text splitter to use.""" input_key: str = "input_text" #: :meta private: output_key: str = "output_text" #: :meta private: [docs] @classmethod def from_params( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, text_splitter: TextSplitter, callbacks: Callbacks = None, combine_chain_kwargs: Optional[Mapping[str, Any]] = None, reduce_chain_kwargs: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> MapReduceChain:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/mapreduce.html
7ec7edb853a5-1
**kwargs: Any, ) -> MapReduceChain: """Construct a map-reduce chain that uses the chain for map and reduce.""" llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks) reduce_chain = StuffDocumentsChain( llm_chain=llm_chain, callbacks=callbacks, **(reduce_chain_kwargs if reduce_chain_kwargs else {}), ) combine_documents_chain = MapReduceDocumentsChain( llm_chain=llm_chain, combine_document_chain=reduce_chain, callbacks=callbacks, **(combine_chain_kwargs if combine_chain_kwargs else {}), ) return cls( combine_documents_chain=combine_documents_chain, text_splitter=text_splitter, callbacks=callbacks, **kwargs, ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() # Split the larger text into smaller chunks. doc_text = inputs.pop(self.input_key) texts = self.text_splitter.split_text(doc_text)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/mapreduce.html
7ec7edb853a5-2
texts = self.text_splitter.split_text(doc_text) docs = [Document(page_content=text) for text in texts] _inputs: Dict[str, Any] = { **inputs, self.combine_documents_chain.input_key: docs, } outputs = self.combine_documents_chain.run( _inputs, callbacks=_run_manager.get_child() ) return {self.output_key: outputs}
https://api.python.langchain.com/en/latest/_modules/langchain/chains/mapreduce.html
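A sketch of building a map-reduce summarizer with from_params. The prompt wording, splitter settings, and sample text are illustrative, and an OpenAI key is assumed; the prompt should take exactly one input variable so the document variable name can be inferred by the combine chains.

from langchain import OpenAI, PromptTemplate
from langchain.chains.mapreduce import MapReduceChain
from langchain.text_splitter import CharacterTextSplitter

prompt = PromptTemplate(input_variables=["text"], template="Summarize the following text:\n\n{text}")
chain = MapReduceChain.from_params(
    llm=OpenAI(temperature=0),
    prompt=prompt,
    text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0),
)

# "input_text" is the chain's input_key; the splitter chunks it into Documents first.
long_document_text = "LangChain chains compose calls to language models. " * 200
summary = chain.run(input_text=long_document_text)
print(summary)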
069e7364b372-0
Source code for langchain.chains.transform """Chain that runs an arbitrary python function.""" from typing import Callable, Dict, List, Optional from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain [docs]class TransformChain(Chain): """Chain that transforms chain output. Example: .. code-block:: python from langchain import TransformChain transform_chain = TransformChain(input_variables=["text"], output_variables=["entities"], transform=func) """ input_variables: List[str] output_variables: List[str] transform: Callable[[Dict[str, str]], Dict[str, str]] @property def input_keys(self) -> List[str]: """Expect input keys. :meta private: """ return self.input_variables @property def output_keys(self) -> List[str]: """Return output keys. :meta private: """ return self.output_variables def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: return self.transform(inputs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/transform.html
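The docstring example above in concrete form: the transform is any callable mapping the input dict to the output dict. The function and key names below are hypothetical.

from langchain.chains.transform import TransformChain

def extract_first_line(inputs: dict) -> dict:
    # Keys must match input_variables / output_variables declared on the chain.
    return {"first_line": inputs["text"].split("\n")[0]}

transform_chain = TransformChain(
    input_variables=["text"],
    output_variables=["first_line"],
    transform=extract_first_line,
)
print(transform_chain.run(text="first line\nsecond line"))  # -> "first line"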
993680167778-0
Source code for langchain.chains.llm """Chain that just formats a prompt and calls an LLM.""" from __future__ import annotations import warnings from typing import Any, Dict, List, Optional, Sequence, Tuple, Union from pydantic import Extra, Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks, ) from langchain.chains.base import Chain from langchain.input import get_colored_text from langchain.load.dump import dumpd from langchain.prompts.base import BasePromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import ( BaseLLMOutputParser, LLMResult, NoOpOutputParser, PromptValue, ) [docs]class LLMChain(Chain): """Chain to run queries against LLMs. Example: .. code-block:: python from langchain import LLMChain, OpenAI, PromptTemplate prompt_template = "Tell me a {adjective} joke" prompt = PromptTemplate( input_variables=["adjective"], template=prompt_template ) llm = LLMChain(llm=OpenAI(), prompt=prompt) """ @property def lc_serializable(self) -> bool: return True prompt: BasePromptTemplate """Prompt object to use.""" llm: BaseLanguageModel """Language model to call.""" output_key: str = "text" #: :meta private: output_parser: BaseLLMOutputParser = Field(default_factory=NoOpOutputParser) """Output parser to use.
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm.html
993680167778-1
"""Output parser to use. Defaults to one that takes the most likely string but does not change it otherwise.""" return_final_only: bool = True """Whether to return only the final parsed result. Defaults to True. If false, will return a bunch of extra information about the generation.""" llm_kwargs: dict = Field(default_factory=dict) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Will be whatever keys the prompt expects. :meta private: """ return self.prompt.input_variables @property def output_keys(self) -> List[str]: """Will always return text key. :meta private: """ if self.return_final_only: return [self.output_key] else: return [self.output_key, "full_generation"] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: response = self.generate([inputs], run_manager=run_manager) return self.create_outputs(response)[0] [docs] def generate( self, input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> LLMResult: """Generate LLM result from inputs.""" prompts, stop = self.prep_prompts(input_list, run_manager=run_manager) return self.llm.generate_prompt( prompts, stop, callbacks=run_manager.get_child() if run_manager else None,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm.html
993680167778-2
stop, callbacks=run_manager.get_child() if run_manager else None, **self.llm_kwargs, ) [docs] async def agenerate( self, input_list: List[Dict[str, Any]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> LLMResult: """Generate LLM result from inputs.""" prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager) return await self.llm.agenerate_prompt( prompts, stop, callbacks=run_manager.get_child() if run_manager else None, **self.llm_kwargs, ) [docs] def prep_prompts( self, input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Tuple[List[PromptValue], Optional[List[str]]]: """Prepare prompts from inputs.""" stop = None if "stop" in input_list[0]: stop = input_list[0]["stop"] prompts = [] for inputs in input_list: selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} prompt = self.prompt.format_prompt(**selected_inputs) _colored_text = get_colored_text(prompt.to_string(), "green") _text = "Prompt after formatting:\n" + _colored_text if run_manager: run_manager.on_text(_text, end="\n", verbose=self.verbose) if "stop" in inputs and inputs["stop"] != stop: raise ValueError( "If `stop` is present in any inputs, should be present in all." )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm.html
993680167778-3
) prompts.append(prompt) return prompts, stop [docs] async def aprep_prompts( self, input_list: List[Dict[str, Any]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Tuple[List[PromptValue], Optional[List[str]]]: """Prepare prompts from inputs.""" stop = None if "stop" in input_list[0]: stop = input_list[0]["stop"] prompts = [] for inputs in input_list: selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} prompt = self.prompt.format_prompt(**selected_inputs) _colored_text = get_colored_text(prompt.to_string(), "green") _text = "Prompt after formatting:\n" + _colored_text if run_manager: await run_manager.on_text(_text, end="\n", verbose=self.verbose) if "stop" in inputs and inputs["stop"] != stop: raise ValueError( "If `stop` is present in any inputs, should be present in all." ) prompts.append(prompt) return prompts, stop [docs] def apply( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> List[Dict[str, str]]: """Utilize the LLM generate method for speed gains.""" callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose ) run_manager = callback_manager.on_chain_start( dumpd(self), {"input_list": input_list}, ) try: response = self.generate(input_list, run_manager=run_manager)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm.html
993680167778-4
try: response = self.generate(input_list, run_manager=run_manager) except (KeyboardInterrupt, Exception) as e: run_manager.on_chain_error(e) raise e outputs = self.create_outputs(response) run_manager.on_chain_end({"outputs": outputs}) return outputs [docs] async def aapply( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> List[Dict[str, str]]: """Utilize the LLM generate method for speed gains.""" callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose ) run_manager = await callback_manager.on_chain_start( dumpd(self), {"input_list": input_list}, ) try: response = await self.agenerate(input_list, run_manager=run_manager) except (KeyboardInterrupt, Exception) as e: await run_manager.on_chain_error(e) raise e outputs = self.create_outputs(response) await run_manager.on_chain_end({"outputs": outputs}) return outputs @property def _run_output_key(self) -> str: return self.output_key [docs] def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]: """Create outputs from response.""" result = [ # Get the text of the top generated string. { self.output_key: self.output_parser.parse_result(generation), "full_generation": generation, } for generation in llm_result.generations ] if self.return_final_only: result = [{self.output_key: r[self.output_key]} for r in result] return result
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm.html
993680167778-5
return result async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, str]: response = await self.agenerate([inputs], run_manager=run_manager) return self.create_outputs(response)[0] [docs] def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str: """Format prompt with kwargs and pass to LLM. Args: callbacks: Callbacks to pass to LLMChain **kwargs: Keys to pass to prompt template. Returns: Completion from LLM. Example: .. code-block:: python completion = llm.predict(adjective="funny") """ return self(kwargs, callbacks=callbacks)[self.output_key] [docs] async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str: """Format prompt with kwargs and pass to LLM. Args: callbacks: Callbacks to pass to LLMChain **kwargs: Keys to pass to prompt template. Returns: Completion from LLM. Example: .. code-block:: python completion = llm.predict(adjective="funny") """ return (await self.acall(kwargs, callbacks=callbacks))[self.output_key] [docs] def predict_and_parse( self, callbacks: Callbacks = None, **kwargs: Any ) -> Union[str, List[str], Dict[str, Any]]: """Call predict and then parse the results.""" warnings.warn( "The predict_and_parse method is deprecated, "
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm.html
993680167778-6
warnings.warn( "The predict_and_parse method is deprecated, " "instead pass an output parser directly to LLMChain." ) result = self.predict(callbacks=callbacks, **kwargs) if self.prompt.output_parser is not None: return self.prompt.output_parser.parse(result) else: return result [docs] async def apredict_and_parse( self, callbacks: Callbacks = None, **kwargs: Any ) -> Union[str, List[str], Dict[str, str]]: """Call apredict and then parse the results.""" warnings.warn( "The apredict_and_parse method is deprecated, " "instead pass an output parser directly to LLMChain." ) result = await self.apredict(callbacks=callbacks, **kwargs) if self.prompt.output_parser is not None: return self.prompt.output_parser.parse(result) else: return result [docs] def apply_and_parse( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> Sequence[Union[str, List[str], Dict[str, str]]]: """Call apply and then parse the results.""" warnings.warn( "The apply_and_parse method is deprecated, " "instead pass an output parser directly to LLMChain." ) result = self.apply(input_list, callbacks=callbacks) return self._parse_generation(result) def _parse_generation( self, generation: List[Dict[str, str]] ) -> Sequence[Union[str, List[str], Dict[str, str]]]: if self.prompt.output_parser is not None: return [ self.prompt.output_parser.parse(res[self.output_key]) for res in generation
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm.html
993680167778-7
self.prompt.output_parser.parse(res[self.output_key]) for res in generation ] else: return generation [docs] async def aapply_and_parse( self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None ) -> Sequence[Union[str, List[str], Dict[str, str]]]: """Call apply and then parse the results.""" warnings.warn( "The aapply_and_parse method is deprecated, " "instead pass an output parser directly to LLMChain." ) result = await self.aapply(input_list, callbacks=callbacks) return self._parse_generation(result) @property def _chain_type(self) -> str: return "llm_chain" [docs] @classmethod def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain: """Create LLMChain from LLM and template.""" prompt_template = PromptTemplate.from_template(template) return cls(llm=llm, prompt=prompt_template)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm.html
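A short usage sketch for LLMChain following its own docstring; the joke prompt is illustrative and an OpenAI key is assumed.

from langchain import LLMChain, OpenAI, PromptTemplate

prompt = PromptTemplate(input_variables=["adjective"], template="Tell me a {adjective} joke")
chain = LLMChain(llm=OpenAI(temperature=0.9), prompt=prompt)

print(chain.predict(adjective="funny"))  # format the prompt and return the completion
batch = chain.apply([{"adjective": "dry"}, {"adjective": "silly"}])  # batched generate call
# Each item in batch is a dict keyed by output_key ("text" by default).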
cda0c0d7f041-0
Source code for langchain.chains.llm_requests """Chain that hits a URL and then uses an LLM to parse results.""" from __future__ import annotations from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains import LLMChain from langchain.chains.base import Chain from langchain.requests import TextRequestsWrapper DEFAULT_HEADERS = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" # noqa: E501 } [docs]class LLMRequestsChain(Chain): """Chain that hits a URL and then uses an LLM to parse results.""" llm_chain: LLMChain requests_wrapper: TextRequestsWrapper = Field( default_factory=lambda: TextRequestsWrapper(headers=DEFAULT_HEADERS), exclude=True, ) text_length: int = 8000 requests_key: str = "requests_result" #: :meta private: input_key: str = "url" #: :meta private: output_key: str = "output" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Will be whatever keys the prompt expects. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Will always return text key. :meta private: """
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_requests.html
cda0c0d7f041-1
"""Will always return text key. :meta private: """ return [self.output_key] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" try: from bs4 import BeautifulSoup # noqa: F401 except ImportError: raise ValueError( "Could not import bs4 python package. " "Please install it with `pip install bs4`." ) return values def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: from bs4 import BeautifulSoup _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() # Other keys are assumed to be needed for LLM prediction other_keys = {k: v for k, v in inputs.items() if k != self.input_key} url = inputs[self.input_key] res = self.requests_wrapper.get(url) # extract the text from the html soup = BeautifulSoup(res, "html.parser") other_keys[self.requests_key] = soup.get_text()[: self.text_length] result = self.llm_chain.predict( callbacks=_run_manager.get_child(), **other_keys ) return {self.output_key: result} @property def _chain_type(self) -> str: return "llm_requests_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_requests.html
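A sketch of wiring LLMRequestsChain to an LLMChain whose prompt reads the fetched page under the "requests_result" key (the chain's requests_key). The URL and prompt wording are illustrative; bs4 and an OpenAI key are assumed.

from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.chains.llm_requests import LLMRequestsChain

template = (
    "Between >>> and <<< is the raw text of a web page.\n"
    ">>> {requests_result} <<<\n"
    "Use it to answer: {query}"
)
prompt = PromptTemplate(input_variables=["query", "requests_result"], template=template)
chain = LLMRequestsChain(llm_chain=LLMChain(llm=OpenAI(temperature=0), prompt=prompt))

# "url" is the chain's input_key; every other input ("query") is passed through to the prompt.
result = chain({"query": "What is this page about?", "url": "https://example.com"})
print(result["output"])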
52b09dafcfb5-0
Source code for langchain.chains.sequential """Chain pipeline where the outputs of one step feed directly into next.""" from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains.base import Chain from langchain.input import get_color_mapping [docs]class SequentialChain(Chain): """Chain where the outputs of one chain feed directly into next.""" chains: List[Chain] input_variables: List[str] output_variables: List[str] #: :meta private: return_all: bool = False class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Return expected input keys to the chain. :meta private: """ return self.input_variables @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return self.output_variables @root_validator(pre=True) def validate_chains(cls, values: Dict) -> Dict: """Validate that the correct inputs exist for all chains.""" chains = values["chains"] input_variables = values["input_variables"] memory_keys = list() if "memory" in values and values["memory"] is not None: """Validate that prompt input variables are consistent.""" memory_keys = values["memory"].memory_variables if set(input_variables).intersection(set(memory_keys)): overlapping_keys = set(input_variables) & set(memory_keys) raise ValueError(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sequential.html
52b09dafcfb5-1
overlapping_keys = set(input_variables) & set(memory_keys) raise ValueError( f"The input key(s) {', '.join(overlapping_keys)} are found " f"in the Memory keys ({memory_keys}) - please use input and " f"memory keys that don't overlap." ) known_variables = set(input_variables + memory_keys) for chain in chains: missing_vars = set(chain.input_keys).difference(known_variables) if missing_vars: raise ValueError( f"Missing required input keys: {missing_vars}, " f"only had {known_variables}" ) overlapping_keys = known_variables.intersection(chain.output_keys) if overlapping_keys: raise ValueError( f"Chain returned keys that already exist: {overlapping_keys}" ) known_variables |= set(chain.output_keys) if "output_variables" not in values: if values.get("return_all", False): output_keys = known_variables.difference(input_variables) else: output_keys = chains[-1].output_keys values["output_variables"] = output_keys else: missing_vars = set(values["output_variables"]).difference(known_variables) if missing_vars: raise ValueError( f"Expected output variables that were not found: {missing_vars}." ) return values def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: known_values = inputs.copy() _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() for i, chain in enumerate(self.chains):
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sequential.html
52b09dafcfb5-2
for i, chain in enumerate(self.chains): callbacks = _run_manager.get_child() outputs = chain(known_values, return_only_outputs=True, callbacks=callbacks) known_values.update(outputs) return {k: known_values[k] for k in self.output_variables} async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: known_values = inputs.copy() _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() for i, chain in enumerate(self.chains): outputs = await chain.acall( known_values, return_only_outputs=True, callbacks=callbacks ) known_values.update(outputs) return {k: known_values[k] for k in self.output_variables} [docs]class SimpleSequentialChain(Chain): """Simple chain where the outputs of one step feed directly into next.""" chains: List[Chain] strip_outputs: bool = False input_key: str = "input" #: :meta private: output_key: str = "output" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] @root_validator()
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sequential.html
52b09dafcfb5-3
""" return [self.output_key] @root_validator() def validate_chains(cls, values: Dict) -> Dict: """Validate that chains are all single input/output.""" for chain in values["chains"]: if len(chain.input_keys) != 1: raise ValueError( "Chains used in SimplePipeline should all have one input, got " f"{chain} with {len(chain.input_keys)} inputs." ) if len(chain.output_keys) != 1: raise ValueError( "Chains used in SimplePipeline should all have one output, got " f"{chain} with {len(chain.output_keys)} outputs." ) return values def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _input = inputs[self.input_key] color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))]) for i, chain in enumerate(self.chains): _input = chain.run(_input, callbacks=_run_manager.get_child(f"step_{i+1}")) if self.strip_outputs: _input = _input.strip() _run_manager.on_text( _input, color=color_mapping[str(i)], end="\n", verbose=self.verbose ) return {self.output_key: _input} async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sequential.html
52b09dafcfb5-4
) -> Dict[str, Any]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() _input = inputs[self.input_key] color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))]) for i, chain in enumerate(self.chains): _input = await chain.arun(_input, callbacks=callbacks) if self.strip_outputs: _input = _input.strip() await _run_manager.on_text( _input, color=color_mapping[str(i)], end="\n", verbose=self.verbose ) return {self.output_key: _input}
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sequential.html
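A sketch of composing two single-input, single-output chains with SimpleSequentialChain; the prompts are illustrative and an OpenAI key is assumed.

from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.chains.sequential import SimpleSequentialChain

llm = OpenAI(temperature=0.7)
name_chain = LLMChain(llm=llm, prompt=PromptTemplate(
    input_variables=["product"],
    template="Suggest one company name for a maker of {product}.",
))
slogan_chain = LLMChain(llm=llm, prompt=PromptTemplate(
    input_variables=["company"],
    template="Write a short slogan for the company {company}.",
))

# Each chain's single output string becomes the next chain's single input.
overall = SimpleSequentialChain(chains=[name_chain, slogan_chain], verbose=True)
print(overall.run("colorful socks"))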
b7ec985f7d8b-0
Source code for langchain.chains.qa_with_sources.vector_db """Question-answering with sources over a vector database.""" import warnings from typing import Any, Dict, List from pydantic import Field, root_validator from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain from langchain.docstore.document import Document from langchain.vectorstores.base import VectorStore [docs]class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain): """Question-answering with sources over a vector database.""" vectorstore: VectorStore = Field(exclude=True) """Vector Database to connect to.""" k: int = 4 """Number of results to return from store""" reduce_k_below_max_tokens: bool = False """Reduce the number of results to return from store based on tokens limit""" max_tokens_limit: int = 3375 """Restrict the docs to return from store based on tokens, enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true""" search_kwargs: Dict[str, Any] = Field(default_factory=dict) """Extra search args.""" def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]: num_docs = len(docs) if self.reduce_k_below_max_tokens and isinstance( self.combine_documents_chain, StuffDocumentsChain ): tokens = [ self.combine_documents_chain.llm_chain.llm.get_num_tokens( doc.page_content ) for doc in docs ] token_count = sum(tokens[:num_docs]) while token_count > self.max_tokens_limit: num_docs -= 1 token_count -= tokens[num_docs]
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/vector_db.html
b7ec985f7d8b-1
num_docs -= 1 token_count -= tokens[num_docs] return docs[:num_docs] def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]: question = inputs[self.question_key] docs = self.vectorstore.similarity_search( question, k=self.k, **self.search_kwargs ) return self._reduce_tokens_below_limit(docs) async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]: raise NotImplementedError("VectorDBQAWithSourcesChain does not support async") @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: warnings.warn( "`VectorDBQAWithSourcesChain` is deprecated - " "please use `from langchain.chains import RetrievalQAWithSourcesChain`" ) return values @property def _chain_type(self) -> str: return "vector_db_qa_with_sources_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/vector_db.html
925b1a23e923-0
Source code for langchain.chains.qa_with_sources.retrieval """Question-answering with sources over an index.""" from typing import Any, Dict, List from pydantic import Field from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain from langchain.docstore.document import Document from langchain.schema import BaseRetriever [docs]class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain): """Question-answering with sources over an index.""" retriever: BaseRetriever = Field(exclude=True) """Index to connect to.""" reduce_k_below_max_tokens: bool = False """Reduce the number of results to return from store based on tokens limit""" max_tokens_limit: int = 3375 """Restrict the docs to return from store based on tokens, enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true""" def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]: num_docs = len(docs) if self.reduce_k_below_max_tokens and isinstance( self.combine_documents_chain, StuffDocumentsChain ): tokens = [ self.combine_documents_chain.llm_chain.llm.get_num_tokens( doc.page_content ) for doc in docs ] token_count = sum(tokens[:num_docs]) while token_count > self.max_tokens_limit: num_docs -= 1 token_count -= tokens[num_docs] return docs[:num_docs] def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]: question = inputs[self.question_key] docs = self.retriever.get_relevant_documents(question)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/retrieval.html
925b1a23e923-1
docs = self.retriever.get_relevant_documents(question) return self._reduce_tokens_below_limit(docs) async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]: question = inputs[self.question_key] docs = await self.retriever.aget_relevant_documents(question) return self._reduce_tokens_below_limit(docs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/retrieval.html
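A sketch of question answering with source attribution over a retriever. The documents and embeddings are invented, and FAISS plus an OpenAI key are assumed to be available.

from langchain import OpenAI
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

texts = ["Chains compose calls to language models.", "Retrievers fetch relevant documents."]
store = FAISS.from_texts(
    texts,
    OpenAIEmbeddings(),
    metadatas=[{"source": f"doc-{i}"} for i in range(len(texts))],  # the "stuff" prompt expects a source per doc
)

chain = RetrievalQAWithSourcesChain.from_chain_type(
    llm=OpenAI(temperature=0),
    chain_type="stuff",
    retriever=store.as_retriever(),
)
result = chain({"question": "What do retrievers do?"})
print(result["answer"], result["sources"])  # the output carries "answer" and "sources" keys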
9280b413b11f-0
Source code for langchain.chains.qa_with_sources.base """Question answering with sources over documents.""" from __future__ import annotations import re from abc import ABC, abstractmethod from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain from langchain.chains.qa_with_sources.map_reduce_prompt import ( COMBINE_PROMPT, EXAMPLE_PROMPT, QUESTION_PROMPT, ) from langchain.docstore.document import Document from langchain.prompts.base import BasePromptTemplate class BaseQAWithSourcesChain(Chain, ABC): """Question answering with sources over documents.""" combine_documents_chain: BaseCombineDocumentsChain """Chain to use to combine documents.""" question_key: str = "question" #: :meta private: input_docs_key: str = "docs" #: :meta private: answer_key: str = "answer" #: :meta private: sources_answer_key: str = "sources" #: :meta private: return_source_documents: bool = False """Return the source documents.""" @classmethod def from_llm( cls, llm: BaseLanguageModel, document_prompt: BasePromptTemplate = EXAMPLE_PROMPT,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html
9280b413b11f-1
document_prompt: BasePromptTemplate = EXAMPLE_PROMPT, question_prompt: BasePromptTemplate = QUESTION_PROMPT, combine_prompt: BasePromptTemplate = COMBINE_PROMPT, **kwargs: Any, ) -> BaseQAWithSourcesChain: """Construct the chain from an LLM.""" llm_question_chain = LLMChain(llm=llm, prompt=question_prompt) llm_combine_chain = LLMChain(llm=llm, prompt=combine_prompt) combine_results_chain = StuffDocumentsChain( llm_chain=llm_combine_chain, document_prompt=document_prompt, document_variable_name="summaries", ) combine_document_chain = MapReduceDocumentsChain( llm_chain=llm_question_chain, combine_document_chain=combine_results_chain, document_variable_name="context", ) return cls( combine_documents_chain=combine_document_chain, **kwargs, ) @classmethod def from_chain_type( cls, llm: BaseLanguageModel, chain_type: str = "stuff", chain_type_kwargs: Optional[dict] = None, **kwargs: Any, ) -> BaseQAWithSourcesChain: """Load chain from chain type.""" _chain_kwargs = chain_type_kwargs or {} combine_document_chain = load_qa_with_sources_chain( llm, chain_type=chain_type, **_chain_kwargs ) return cls(combine_documents_chain=combine_document_chain, **kwargs) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key.
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html
9280b413b11f-2
def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.question_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ _output_keys = [self.answer_key, self.sources_answer_key] if self.return_source_documents: _output_keys = _output_keys + ["source_documents"] return _output_keys @root_validator(pre=True) def validate_naming(cls, values: Dict) -> Dict: """Fix backwards compatibility in naming.""" if "combine_document_chain" in values: values["combine_documents_chain"] = values.pop("combine_document_chain") return values @abstractmethod def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]: """Get docs to run questioning over.""" def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() docs = self._get_docs(inputs) answer = self.combine_documents_chain.run( input_documents=docs, callbacks=_run_manager.get_child(), **inputs ) if re.search(r"SOURCES:\s", answer): answer, sources = re.split(r"SOURCES:\s", answer) else: sources = "" result: Dict[str, Any] = { self.answer_key: answer, self.sources_answer_key: sources, } if self.return_source_documents: result["source_documents"] = docs
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html
9280b413b11f-3
} if self.return_source_documents: result["source_documents"] = docs return result @abstractmethod async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]: """Get docs to run questioning over.""" async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() docs = await self._aget_docs(inputs) answer = await self.combine_documents_chain.arun( input_documents=docs, callbacks=_run_manager.get_child(), **inputs ) if re.search(r"SOURCES:\s", answer): answer, sources = re.split(r"SOURCES:\s", answer) else: sources = "" result: Dict[str, Any] = { self.answer_key: answer, self.sources_answer_key: sources, } if self.return_source_documents: result["source_documents"] = docs return result [docs]class QAWithSourcesChain(BaseQAWithSourcesChain): """Question answering with sources over documents.""" input_docs_key: str = "docs" #: :meta private: @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_docs_key, self.question_key] def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]: return inputs.pop(self.input_docs_key) async def _aget_docs(self, inputs: Dict[str, Any]) -> List[Document]:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html
9280b413b11f-4
return inputs.pop(self.input_docs_key) @property def _chain_type(self) -> str: return "qa_with_sources_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_with_sources/base.html
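QAWithSourcesChain takes documents directly under the "docs" input key instead of fetching them through a retriever. A brief sketch with invented documents, assuming an OpenAI key.

from langchain import OpenAI
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.docstore.document import Document

docs = [
    Document(page_content="LLMChain formats a prompt and calls an LLM.", metadata={"source": "llm.py"}),
    Document(page_content="TransformChain runs an arbitrary python function.", metadata={"source": "transform.py"}),
]
chain = QAWithSourcesChain.from_chain_type(OpenAI(temperature=0), chain_type="stuff")
result = chain({"docs": docs, "question": "What does LLMChain do?"})
print(result["answer"], result["sources"])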
dad61f3c8bfb-0
Source code for langchain.chains.flare.base from __future__ import annotations import re from abc import abstractmethod from typing import Any, Dict, List, Optional, Sequence, Tuple import numpy as np from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( CallbackManagerForChainRun, ) from langchain.chains.base import Chain from langchain.chains.flare.prompts import ( PROMPT, QUESTION_GENERATOR_PROMPT, FinishedOutputParser, ) from langchain.chains.llm import LLMChain from langchain.llms import OpenAI from langchain.prompts import BasePromptTemplate from langchain.schema import BaseRetriever, Generation class _ResponseChain(LLMChain): prompt: BasePromptTemplate = PROMPT @property def input_keys(self) -> List[str]: return self.prompt.input_variables def generate_tokens_and_log_probs( self, _input: Dict[str, Any], *, run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Tuple[Sequence[str], Sequence[float]]: llm_result = self.generate([_input], run_manager=run_manager) return self._extract_tokens_and_log_probs(llm_result.generations[0]) @abstractmethod def _extract_tokens_and_log_probs( self, generations: List[Generation] ) -> Tuple[Sequence[str], Sequence[float]]: """Extract tokens and log probs from response.""" class _OpenAIResponseChain(_ResponseChain): llm: OpenAI = Field( default_factory=lambda: OpenAI( max_tokens=32, model_kwargs={"logprobs": 1}, temperature=0 )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
dad61f3c8bfb-1
) ) def _extract_tokens_and_log_probs( self, generations: List[Generation] ) -> Tuple[Sequence[str], Sequence[float]]: tokens = [] log_probs = [] for gen in generations: if gen.generation_info is None: raise ValueError tokens.extend(gen.generation_info["logprobs"]["tokens"]) log_probs.extend(gen.generation_info["logprobs"]["token_logprobs"]) return tokens, log_probs class QuestionGeneratorChain(LLMChain): prompt: BasePromptTemplate = QUESTION_GENERATOR_PROMPT @property def input_keys(self) -> List[str]: return ["user_input", "context", "response"] def _low_confidence_spans( tokens: Sequence[str], log_probs: Sequence[float], min_prob: float, min_token_gap: int, num_pad_tokens: int, ) -> List[str]: _low_idx = np.where(np.exp(log_probs) < min_prob)[0] low_idx = [i for i in _low_idx if re.search(r"\w", tokens[i])] if len(low_idx) == 0: return [] spans = [[low_idx[0], low_idx[0] + num_pad_tokens + 1]] for i, idx in enumerate(low_idx[1:]): end = idx + num_pad_tokens + 1 if idx - low_idx[i] < min_token_gap: spans[-1][1] = end else: spans.append([idx, end]) return ["".join(tokens[start:end]) for start, end in spans] [docs]class FlareChain(Chain): question_generator_chain: QuestionGeneratorChain
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
dad61f3c8bfb-2
[docs]class FlareChain(Chain): question_generator_chain: QuestionGeneratorChain response_chain: _ResponseChain = Field(default_factory=_OpenAIResponseChain) output_parser: FinishedOutputParser = Field(default_factory=FinishedOutputParser) retriever: BaseRetriever min_prob: float = 0.2 min_token_gap: int = 5 num_pad_tokens: int = 2 max_iter: int = 10 start_with_retrieval: bool = True @property def input_keys(self) -> List[str]: return ["user_input"] @property def output_keys(self) -> List[str]: return ["response"] def _do_generation( self, questions: List[str], user_input: str, response: str, _run_manager: CallbackManagerForChainRun, ) -> Tuple[str, bool]: callbacks = _run_manager.get_child() docs = [] for question in questions: docs.extend(self.retriever.get_relevant_documents(question)) context = "\n\n".join(d.page_content for d in docs) result = self.response_chain.predict( user_input=user_input, context=context, response=response, callbacks=callbacks, ) marginal, finished = self.output_parser.parse(result) return marginal, finished def _do_retrieval( self, low_confidence_spans: List[str], _run_manager: CallbackManagerForChainRun, user_input: str, response: str, initial_response: str, ) -> Tuple[str, bool]: question_gen_inputs = [ { "user_input": user_input,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
dad61f3c8bfb-3
question_gen_inputs = [ { "user_input": user_input, "current_response": initial_response, "uncertain_span": span, } for span in low_confidence_spans ] callbacks = _run_manager.get_child() question_gen_outputs = self.question_generator_chain.apply( question_gen_inputs, callbacks=callbacks ) questions = [ output[self.question_generator_chain.output_keys[0]] for output in question_gen_outputs ] _run_manager.on_text( f"Generated Questions: {questions}", color="yellow", end="\n" ) return self._do_generation(questions, user_input, response, _run_manager) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() user_input = inputs[self.input_keys[0]] response = "" for i in range(self.max_iter): _run_manager.on_text( f"Current Response: {response}", color="blue", end="\n" ) _input = {"user_input": user_input, "context": "", "response": response} tokens, log_probs = self.response_chain.generate_tokens_and_log_probs( _input, run_manager=_run_manager ) low_confidence_spans = _low_confidence_spans( tokens, log_probs, self.min_prob, self.min_token_gap, self.num_pad_tokens, ) initial_response = response.strip() + " " + "".join(tokens)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
dad61f3c8bfb-4
) initial_response = response.strip() + " " + "".join(tokens) if not low_confidence_spans: response = initial_response final_response, finished = self.output_parser.parse(response) if finished: return {self.output_keys[0]: final_response} continue marginal, finished = self._do_retrieval( low_confidence_spans, _run_manager, user_input, response, initial_response, ) response = response.strip() + " " + marginal if finished: break return {self.output_keys[0]: response} [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, max_generation_len: int = 32, **kwargs: Any ) -> FlareChain: question_gen_chain = QuestionGeneratorChain(llm=llm) response_llm = OpenAI( max_tokens=max_generation_len, model_kwargs={"logprobs": 1}, temperature=0 ) response_chain = _OpenAIResponseChain(llm=response_llm) return cls( question_generator_chain=question_gen_chain, response_chain=response_chain, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/flare/base.html
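A minimal sketch of FlareChain.from_llm as defined above; the FAISS index and OpenAIEmbeddings are stand-ins for whatever BaseRetriever you already have, and the corpus text is fabricated:

from langchain.chains.flare.base import FlareChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

# Any BaseRetriever works; a one-document FAISS index is used here only as a placeholder corpus.
retriever = FAISS.from_texts(
    ["FLARE regenerates low-confidence spans after retrieving extra context for them."],
    OpenAIEmbeddings(),
).as_retriever()

flare = FlareChain.from_llm(
    OpenAI(temperature=0),   # also drives the internal QuestionGeneratorChain
    retriever=retriever,
    max_generation_len=64,   # forwarded to the logprob-emitting response LLM
    min_prob=0.3,            # tokens below this probability mark a span as uncertain
)
print(flare.run("How does FLARE decide when to retrieve?"))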
aa3b194ad889-0
Source code for langchain.chains.natbot.base """Implement an LLM driven browser.""" from __future__ import annotations import warnings from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.natbot.prompt import PROMPT from langchain.llms.openai import OpenAI [docs]class NatBotChain(Chain): """Implement an LLM driven browser. Example: .. code-block:: python from langchain import NatBotChain natbot = NatBotChain.from_default("Buy me a new hat.") """ llm_chain: LLMChain objective: str """Objective that NatBot is tasked with completing.""" llm: Optional[BaseLanguageModel] = None """[Deprecated] LLM wrapper to use.""" input_url_key: str = "url" #: :meta private: input_browser_content_key: str = "browser_content" #: :meta private: previous_command: str = "" #: :meta private: output_key: str = "command" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( "Directly instantiating a NatBotChain with an llm is deprecated. " "Please instantiate with llm_chain argument or using the from_llm "
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/base.html
aa3b194ad889-1
"Please instantiate with llm_chain argument or using the from_llm " "class method." ) if "llm_chain" not in values and values["llm"] is not None: values["llm_chain"] = LLMChain(llm=values["llm"], prompt=PROMPT) return values [docs] @classmethod def from_default(cls, objective: str, **kwargs: Any) -> NatBotChain: """Load with default LLMChain.""" llm = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50) return cls.from_llm(llm, objective, **kwargs) [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, objective: str, **kwargs: Any ) -> NatBotChain: """Load from LLM.""" llm_chain = LLMChain(llm=llm, prompt=PROMPT) return cls(llm_chain=llm_chain, objective=objective, **kwargs) @property def input_keys(self) -> List[str]: """Expect url and browser content. :meta private: """ return [self.input_url_key, self.input_browser_content_key] @property def output_keys(self) -> List[str]: """Return command. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/base.html
aa3b194ad889-2
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() url = inputs[self.input_url_key] browser_content = inputs[self.input_browser_content_key] llm_cmd = self.llm_chain.predict( objective=self.objective, url=url[:100], previous_command=self.previous_command, browser_content=browser_content[:4500], callbacks=_run_manager.get_child(), ) llm_cmd = llm_cmd.strip() self.previous_command = llm_cmd return {self.output_key: llm_cmd} [docs] def execute(self, url: str, browser_content: str) -> str: """Figure out next browser command to run. Args: url: URL of the site currently on. browser_content: Content of the page as currently displayed by the browser. Returns: Next browser command to run. Example: .. code-block:: python browser_content = "...." llm_command = natbot.run("www.google.com", browser_content) """ _inputs = { self.input_url_key: url, self.input_browser_content_key: browser_content, } return self(_inputs)[self.output_key] @property def _chain_type(self) -> str: return "nat_bot_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/natbot/base.html
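A short sketch of the loop NatBotChain is meant to sit in, using only the from_default and execute methods shown above; the URL and browser content are placeholders for whatever your browser automation layer supplies:

from langchain.chains.natbot.base import NatBotChain

natbot = NatBotChain.from_default("Find a pair of hiking boots under $100.")

# In a real loop the simplified DOM would come from your own scraping/automation layer.
browser_content = "<input id=1 placeholder='search'/> <button id=2>Search</button>"
command = natbot.execute("https://www.example-shop.com", browser_content)
print(command)  # the chain also remembers this as previous_command for the next call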
5b1ec2318380-0
Source code for langchain.chains.hyde.base """Hypothetical Document Embeddings. https://arxiv.org/abs/2212.10496 """ from __future__ import annotations from typing import Any, Dict, List, Optional import numpy as np from pydantic import Extra from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.hyde.prompts import PROMPT_MAP from langchain.chains.llm import LLMChain from langchain.embeddings.base import Embeddings [docs]class HypotheticalDocumentEmbedder(Chain, Embeddings): """Generate hypothetical document for query, and then embed that. Based on https://arxiv.org/abs/2212.10496 """ base_embeddings: Embeddings llm_chain: LLMChain class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Input keys for Hyde's LLM chain.""" return self.llm_chain.input_keys @property def output_keys(self) -> List[str]: """Output keys for Hyde's LLM chain.""" return self.llm_chain.output_keys [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call the base embeddings.""" return self.base_embeddings.embed_documents(texts) [docs] def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]: """Combine embeddings into final embeddings.""" return list(np.array(embeddings).mean(axis=0))
https://api.python.langchain.com/en/latest/_modules/langchain/chains/hyde/base.html
5b1ec2318380-1
return list(np.array(embeddings).mean(axis=0)) [docs] def embed_query(self, text: str) -> List[float]: """Generate a hypothetical document and embed it.""" var_name = self.llm_chain.input_keys[0] result = self.llm_chain.generate([{var_name: text}]) documents = [generation.text for generation in result.generations[0]] embeddings = self.embed_documents(documents) return self.combine_embeddings(embeddings) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Call the internal llm chain.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() return self.llm_chain(inputs, callbacks=_run_manager.get_child()) [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, base_embeddings: Embeddings, prompt_key: str, **kwargs: Any, ) -> HypotheticalDocumentEmbedder: """Load and use LLMChain for a specific prompt key.""" prompt = PROMPT_MAP[prompt_key] llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs) @property def _chain_type(self) -> str: return "hyde_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/hyde/base.html
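A minimal sketch of HypotheticalDocumentEmbedder.from_llm; "web_search" is assumed to be one of the keys in PROMPT_MAP (defined in langchain.chains.hyde.prompts, not shown here):

from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI

hyde = HypotheticalDocumentEmbedder.from_llm(
    OpenAI(temperature=0),
    OpenAIEmbeddings(),
    prompt_key="web_search",  # assumed key; pick whichever PROMPT_MAP entry fits your task
)

# The query is first answered hypothetically by the LLM, then that answer is embedded.
vector = hyde.embed_query("What are the health benefits of green tea?")
print(len(vector))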
217673e0e9d1-0
Source code for langchain.chains.sql_database.base """Chain for interacting with SQL Database.""" from __future__ import annotations import warnings from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS from langchain.prompts.base import BasePromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.sql_database import SQLDatabase from langchain.tools.sql_database.prompt import QUERY_CHECKER INTERMEDIATE_STEPS_KEY = "intermediate_steps" [docs]class SQLDatabaseChain(Chain): """Chain for interacting with SQL Database. Example: .. code-block:: python from langchain import SQLDatabaseChain, OpenAI, SQLDatabase db = SQLDatabase(...) db_chain = SQLDatabaseChain.from_llm(OpenAI(), db) """ llm_chain: LLMChain llm: Optional[BaseLanguageModel] = None """[Deprecated] LLM wrapper to use.""" database: SQLDatabase = Field(exclude=True) """SQL Database to connect to.""" prompt: Optional[BasePromptTemplate] = None """[Deprecated] Prompt to use to translate natural language to SQL.""" top_k: int = 5 """Number of results to return from the query""" input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: return_intermediate_steps: bool = False
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
217673e0e9d1-1
return_intermediate_steps: bool = False """Whether or not to return the intermediate steps along with the final answer.""" return_direct: bool = False """Whether or not to return the result of querying the SQL table directly.""" use_query_checker: bool = False """Whether or not the query checker tool should be used to attempt to fix the initial SQL from the LLM.""" query_checker_prompt: Optional[BasePromptTemplate] = None """The prompt template that should be used by the query checker""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( "Directly instantiating an SQLDatabaseChain with an llm is deprecated. " "Please instantiate with llm_chain argument or using the from_llm " "class method." ) if "llm_chain" not in values and values["llm"] is not None: database = values["database"] prompt = values.get("prompt") or SQL_PROMPTS.get( database.dialect, PROMPT ) values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt) return values @property def input_keys(self) -> List[str]: """Return the singular input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the singular output key. :meta private: """ if not self.return_intermediate_steps:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
217673e0e9d1-2
:meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, INTERMEDIATE_STEPS_KEY] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() input_text = f"{inputs[self.input_key]}\nSQLQuery:" _run_manager.on_text(input_text, verbose=self.verbose) # If not present, then defaults to None which is all tables. table_names_to_use = inputs.get("table_names_to_use") table_info = self.database.get_table_info(table_names=table_names_to_use) llm_inputs = { "input": input_text, "top_k": str(self.top_k), "dialect": self.database.dialect, "table_info": table_info, "stop": ["\nSQLResult:"], } intermediate_steps: List = [] try: intermediate_steps.append(llm_inputs) # input: sql generation sql_cmd = self.llm_chain.predict( callbacks=_run_manager.get_child(), **llm_inputs, ).strip() if not self.use_query_checker: _run_manager.on_text(sql_cmd, color="green", verbose=self.verbose) intermediate_steps.append( sql_cmd ) # output: sql generation (no checker) intermediate_steps.append({"sql_cmd": sql_cmd}) # input: sql exec result = self.database.run(sql_cmd)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
217673e0e9d1-3
result = self.database.run(sql_cmd) intermediate_steps.append(str(result)) # output: sql exec else: query_checker_prompt = self.query_checker_prompt or PromptTemplate( template=QUERY_CHECKER, input_variables=["query", "dialect"] ) query_checker_chain = LLMChain( llm=self.llm_chain.llm, prompt=query_checker_prompt ) query_checker_inputs = { "query": sql_cmd, "dialect": self.database.dialect, } checked_sql_command: str = query_checker_chain.predict( callbacks=_run_manager.get_child(), **query_checker_inputs ).strip() intermediate_steps.append( checked_sql_command ) # output: sql generation (checker) _run_manager.on_text( checked_sql_command, color="green", verbose=self.verbose ) intermediate_steps.append( {"sql_cmd": checked_sql_command} ) # input: sql exec result = self.database.run(checked_sql_command) intermediate_steps.append(str(result)) # output: sql exec sql_cmd = checked_sql_command _run_manager.on_text("\nSQLResult: ", verbose=self.verbose) _run_manager.on_text(result, color="yellow", verbose=self.verbose) # If return direct, we just set the final result equal to # the result of the sql query result, otherwise try to get a human readable # final answer if self.return_direct: final_result = result else: _run_manager.on_text("\nAnswer:", verbose=self.verbose) input_text += f"{sql_cmd}\nSQLResult: {result}\nAnswer:" llm_inputs["input"] = input_text
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
217673e0e9d1-4
llm_inputs["input"] = input_text intermediate_steps.append(llm_inputs) # input: final answer final_result = self.llm_chain.predict( callbacks=_run_manager.get_child(), **llm_inputs, ).strip() intermediate_steps.append(final_result) # output: final answer _run_manager.on_text(final_result, color="green", verbose=self.verbose) chain_result: Dict[str, Any] = {self.output_key: final_result} if self.return_intermediate_steps: chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps return chain_result except Exception as exc: # Append intermediate steps to exception, to aid in logging and later # improvement of few shot prompt seeds exc.intermediate_steps = intermediate_steps # type: ignore raise exc @property def _chain_type(self) -> str: return "sql_database_chain" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, db: SQLDatabase, prompt: Optional[BasePromptTemplate] = None, **kwargs: Any, ) -> SQLDatabaseChain: prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT) llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(llm_chain=llm_chain, database=db, **kwargs) [docs]class SQLDatabaseSequentialChain(Chain): """Chain for querying SQL database that is a sequential chain. The chain is as follows: 1. Based on the query, determine which tables to use. 2. Based on those tables, call the normal SQL database chain.
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
217673e0e9d1-5
2. Based on those tables, call the normal SQL database chain. This is useful in cases where the number of tables in the database is large. """ decider_chain: LLMChain sql_chain: SQLDatabaseChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: return_intermediate_steps: bool = False [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, database: SQLDatabase, query_prompt: BasePromptTemplate = PROMPT, decider_prompt: BasePromptTemplate = DECIDER_PROMPT, **kwargs: Any, ) -> SQLDatabaseSequentialChain: """Load the necessary chains.""" sql_chain = SQLDatabaseChain.from_llm( llm, database, prompt=query_prompt, **kwargs ) decider_chain = LLMChain( llm=llm, prompt=decider_prompt, output_key="table_names" ) return cls(sql_chain=sql_chain, decider_chain=decider_chain, **kwargs) @property def input_keys(self) -> List[str]: """Return the singular input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the singular output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, INTERMEDIATE_STEPS_KEY] def _call( self, inputs: Dict[str, Any],
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
217673e0e9d1-6
def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _table_names = self.sql_chain.database.get_usable_table_names() table_names = ", ".join(_table_names) llm_inputs = { "query": inputs[self.input_key], "table_names": table_names, } _lowercased_table_names = [name.lower() for name in _table_names] table_names_from_chain = self.decider_chain.predict_and_parse(**llm_inputs) table_names_to_use = [ name for name in table_names_from_chain if name.lower() in _lowercased_table_names ] _run_manager.on_text("Table names to use:", end="\n", verbose=self.verbose) _run_manager.on_text( str(table_names_to_use), color="yellow", verbose=self.verbose ) new_inputs = { self.sql_chain.input_key: inputs[self.input_key], "table_names_to_use": table_names_to_use, } return self.sql_chain( new_inputs, callbacks=_run_manager.get_child(), return_only_outputs=True ) @property def _chain_type(self) -> str: return "sql_database_sequential_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/sql_database/base.html
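A minimal sketch of SQLDatabaseChain.from_llm against a local SQLite file; the connection string, database, and question are illustrative only:

from langchain import OpenAI, SQLDatabase
from langchain.chains.sql_database.base import SQLDatabaseChain

db = SQLDatabase.from_uri("sqlite:///Chinook.db")  # any SQLAlchemy-compatible URI works
db_chain = SQLDatabaseChain.from_llm(
    OpenAI(temperature=0),
    db,
    use_query_checker=True,          # run the QUERY_CHECKER prompt before executing the SQL
    return_intermediate_steps=True,  # expose generated SQL and raw results under "intermediate_steps"
)
result = db_chain("How many employees are there?")
print(result["result"])
print(result["intermediate_steps"])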
51265aa4aa43-0
Source code for langchain.chains.llm_math.base """Chain that interprets a prompt and executes python code to do math.""" from __future__ import annotations import math import re import warnings from typing import Any, Dict, List, Optional import numexpr from pydantic import Extra, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.llm_math.prompt import PROMPT from langchain.prompts.base import BasePromptTemplate [docs]class LLMMathChain(Chain): """Chain that interprets a prompt and executes python code to do math. Example: .. code-block:: python from langchain import LLMMathChain, OpenAI llm_math = LLMMathChain.from_llm(OpenAI()) """ llm_chain: LLMChain llm: Optional[BaseLanguageModel] = None """[Deprecated] LLM wrapper to use.""" prompt: BasePromptTemplate = PROMPT """[Deprecated] Prompt to use to translate to python if necessary.""" input_key: str = "question" #: :meta private: output_key: str = "answer" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html
51265aa4aa43-1
if "llm" in values: warnings.warn( "Directly instantiating an LLMMathChain with an llm is deprecated. " "Please instantiate with llm_chain argument or using the from_llm " "class method." ) if "llm_chain" not in values and values["llm"] is not None: prompt = values.get("prompt", PROMPT) values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt) return values @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Expect output key. :meta private: """ return [self.output_key] def _evaluate_expression(self, expression: str) -> str: try: local_dict = {"pi": math.pi, "e": math.e} output = str( numexpr.evaluate( expression.strip(), global_dict={}, # restrict access to globals local_dict=local_dict, # add common mathematical functions ) ) except Exception as e: raise ValueError( f'LLMMathChain._evaluate("{expression}") raised error: {e}.' " Please try again with a valid numerical expression" ) # Remove any leading and trailing brackets from the output return re.sub(r"^\[|\]$", "", output) def _process_llm_result( self, llm_output: str, run_manager: CallbackManagerForChainRun ) -> Dict[str, str]:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html
51265aa4aa43-2
) -> Dict[str, str]: run_manager.on_text(llm_output, color="green", verbose=self.verbose) llm_output = llm_output.strip() text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL) if text_match: expression = text_match.group(1) output = self._evaluate_expression(expression) run_manager.on_text("\nAnswer: ", verbose=self.verbose) run_manager.on_text(output, color="yellow", verbose=self.verbose) answer = "Answer: " + output elif llm_output.startswith("Answer:"): answer = llm_output elif "Answer:" in llm_output: answer = "Answer: " + llm_output.split("Answer:")[-1] else: raise ValueError(f"unknown format from LLM: {llm_output}") return {self.output_key: answer} async def _aprocess_llm_result( self, llm_output: str, run_manager: AsyncCallbackManagerForChainRun, ) -> Dict[str, str]: await run_manager.on_text(llm_output, color="green", verbose=self.verbose) llm_output = llm_output.strip() text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL) if text_match: expression = text_match.group(1) output = self._evaluate_expression(expression) await run_manager.on_text("\nAnswer: ", verbose=self.verbose) await run_manager.on_text(output, color="yellow", verbose=self.verbose) answer = "Answer: " + output elif llm_output.startswith("Answer:"): answer = llm_output
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html
51265aa4aa43-3
elif llm_output.startswith("Answer:"): answer = llm_output elif "Answer:" in llm_output: answer = "Answer: " + llm_output.split("Answer:")[-1] else: raise ValueError(f"unknown format from LLM: {llm_output}") return {self.output_key: answer} def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _run_manager.on_text(inputs[self.input_key]) llm_output = self.llm_chain.predict( question=inputs[self.input_key], stop=["```output"], callbacks=_run_manager.get_child(), ) return self._process_llm_result(llm_output, _run_manager) async def _acall( self, inputs: Dict[str, str], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() await _run_manager.on_text(inputs[self.input_key]) llm_output = await self.llm_chain.apredict( question=inputs[self.input_key], stop=["```output"], callbacks=_run_manager.get_child(), ) return await self._aprocess_llm_result(llm_output, _run_manager) @property def _chain_type(self) -> str: return "llm_math_chain" [docs] @classmethod def from_llm( cls,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html
51265aa4aa43-4
[docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate = PROMPT, **kwargs: Any, ) -> LLMMathChain: llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(llm_chain=llm_chain, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_math/base.html
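A short usage sketch of LLMMathChain.from_llm; the LLM is expected to return the expression inside a fenced text block, which _evaluate_expression then runs through numexpr locally:

from langchain import OpenAI
from langchain.chains.llm_math.base import LLMMathChain

llm_math = LLMMathChain.from_llm(OpenAI(temperature=0))
print(llm_math.run("What is 13 raised to the 0.3432 power?"))
# Output has the form "Answer: 2.41...", built by _process_llm_result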
dea33b5aa4e7-0
Source code for langchain.chains.llm_bash.base """Chain that interprets a prompt and executes bash code to perform bash operations.""" from __future__ import annotations import logging import warnings from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.llm_bash.prompt import PROMPT from langchain.prompts.base import BasePromptTemplate from langchain.schema import OutputParserException from langchain.utilities.bash import BashProcess logger = logging.getLogger(__name__) [docs]class LLMBashChain(Chain): """Chain that interprets a prompt and executes bash code to perform bash operations. Example: .. code-block:: python from langchain import LLMBashChain, OpenAI llm_bash = LLMBashChain.from_llm(OpenAI()) """ llm_chain: LLMChain llm: Optional[BaseLanguageModel] = None """[Deprecated] LLM wrapper to use.""" input_key: str = "question" #: :meta private: output_key: str = "answer" #: :meta private: prompt: BasePromptTemplate = PROMPT """[Deprecated]""" bash_process: BashProcess = Field(default_factory=BashProcess) #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def raise_deprecation(cls, values: Dict) -> Dict:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_bash/base.html
dea33b5aa4e7-1
def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( "Directly instantiating an LLMBashChain with an llm is deprecated. " "Please instantiate with llm_chain or using the from_llm class method." ) if "llm_chain" not in values and values["llm"] is not None: prompt = values.get("prompt", PROMPT) values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt) return values @root_validator def validate_prompt(cls, values: Dict) -> Dict: if values["llm_chain"].prompt.output_parser is None: raise ValueError( "The prompt used by llm_chain is expected to have an output_parser." ) return values @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Expect output key. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _run_manager.on_text(inputs[self.input_key], verbose=self.verbose) t = self.llm_chain.predict( question=inputs[self.input_key], callbacks=_run_manager.get_child() ) _run_manager.on_text(t, color="green", verbose=self.verbose)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_bash/base.html
dea33b5aa4e7-2
) _run_manager.on_text(t, color="green", verbose=self.verbose) t = t.strip() try: parser = self.llm_chain.prompt.output_parser command_list = parser.parse(t) # type: ignore[union-attr] except OutputParserException as e: _run_manager.on_chain_error(e, verbose=self.verbose) raise e if self.verbose: _run_manager.on_text("\nCode: ", verbose=self.verbose) _run_manager.on_text( str(command_list), color="yellow", verbose=self.verbose ) output = self.bash_process.run(command_list) _run_manager.on_text("\nAnswer: ", verbose=self.verbose) _run_manager.on_text(output, color="yellow", verbose=self.verbose) return {self.output_key: output} @property def _chain_type(self) -> str: return "llm_bash_chain" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate = PROMPT, **kwargs: Any, ) -> LLMBashChain: llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(llm_chain=llm_chain, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_bash/base.html
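A short usage sketch of LLMBashChain.from_llm; note that the parsed commands are executed by BashProcess on the local machine, so only run this where that is acceptable:

from langchain import OpenAI
from langchain.chains.llm_bash.base import LLMBashChain

llm_bash = LLMBashChain.from_llm(OpenAI(temperature=0))
# The prompt's output parser turns the LLM reply into the command list passed to BashProcess.run.
print(llm_bash.run("Please write a bash command that lists all files in the current directory."))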
eeda05917898-0
Source code for langchain.chains.qa_generation.base from __future__ import annotations import json from typing import Any, Dict, List, Optional from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR from langchain.prompts.base import BasePromptTemplate from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter [docs]class QAGenerationChain(Chain): llm_chain: LLMChain text_splitter: TextSplitter = Field( default=RecursiveCharacterTextSplitter(chunk_overlap=500) ) input_key: str = "text" output_key: str = "questions" k: Optional[int] = None [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[BasePromptTemplate] = None, **kwargs: Any, ) -> QAGenerationChain: _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm) chain = LLMChain(llm=llm, prompt=_prompt) return cls(llm_chain=chain, **kwargs) @property def _chain_type(self) -> str: raise NotImplementedError @property def input_keys(self) -> List[str]: return [self.input_key] @property def output_keys(self) -> List[str]: return [self.output_key] def _call( self, inputs: Dict[str, Any],
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_generation/base.html
eeda05917898-1
def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, List]: docs = self.text_splitter.create_documents([inputs[self.input_key]]) results = self.llm_chain.generate( [{"text": d.page_content} for d in docs], run_manager=run_manager ) qa = [json.loads(res[0].text) for res in results.generations] return {self.output_key: qa}
https://api.python.langchain.com/en/latest/_modules/langchain/chains/qa_generation/base.html
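A minimal sketch of QAGenerationChain.from_llm; each chunk produced by the text splitter goes through the prompt, and the JSON reply is parsed into question/answer pairs (the exact shape depends on the PROMPT_SELECTOR prompt, which is not shown here):

from langchain.chains.qa_generation.base import QAGenerationChain
from langchain.chat_models import ChatOpenAI

chain = QAGenerationChain.from_llm(ChatOpenAI(temperature=0))
text = (
    "LangChain is a framework for developing applications powered by language models. "
    "It provides chains, agents, and memory abstractions."
)
qa_pairs = chain.run(text)  # one parsed JSON object per generated chunk
print(qa_pairs)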
2106479fb6e7-0
Source code for langchain.chains.router.multi_prompt """Use a single chain to route an input to one of multiple llm chains.""" from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from langchain.base_language import BaseLanguageModel from langchain.chains import ConversationChain from langchain.chains.llm import LLMChain from langchain.chains.router.base import MultiRouteChain, RouterChain from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE from langchain.prompts import PromptTemplate [docs]class MultiPromptChain(MultiRouteChain): """A multi-route chain that uses an LLM router chain to choose amongst prompts.""" router_chain: RouterChain """Chain for deciding a destination chain and the input to it.""" destination_chains: Mapping[str, LLMChain] """Map of name to candidate chains that inputs can be routed to.""" default_chain: LLMChain """Default chain to use when router doesn't map input to one of the destinations.""" @property def output_keys(self) -> List[str]: return ["text"] [docs] @classmethod def from_prompts( cls, llm: BaseLanguageModel, prompt_infos: List[Dict[str, str]], default_chain: Optional[LLMChain] = None, **kwargs: Any, ) -> MultiPromptChain: """Convenience constructor for instantiating from destination prompts.""" destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos] destinations_str = "\n".join(destinations) router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_prompt.html
2106479fb6e7-1
router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format( destinations=destinations_str ) router_prompt = PromptTemplate( template=router_template, input_variables=["input"], output_parser=RouterOutputParser(), ) router_chain = LLMRouterChain.from_llm(llm, router_prompt) destination_chains = {} for p_info in prompt_infos: name = p_info["name"] prompt_template = p_info["prompt_template"] prompt = PromptTemplate(template=prompt_template, input_variables=["input"]) chain = LLMChain(llm=llm, prompt=prompt) destination_chains[name] = chain _default_chain = default_chain or ConversationChain(llm=llm, output_key="text") return cls( router_chain=router_chain, destination_chains=destination_chains, default_chain=_default_chain, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_prompt.html
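A minimal sketch of MultiPromptChain.from_prompts; the prompt_infos entries follow the name/description/prompt_template keys read by the constructor, and each template must expose an {input} variable:

from langchain.chains.router.multi_prompt import MultiPromptChain
from langchain.llms import OpenAI

prompt_infos = [
    {
        "name": "physics",
        "description": "Good for answering questions about physics",
        "prompt_template": "You are a physics professor. Answer concisely:\n{input}",
    },
    {
        "name": "math",
        "description": "Good for answering math questions",
        "prompt_template": "You are a mathematician. Show your work:\n{input}",
    },
]

chain = MultiPromptChain.from_prompts(OpenAI(temperature=0), prompt_infos)
# The router picks a destination by name, or falls back to the default ConversationChain.
print(chain.run("What is the speed of light in a vacuum?"))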
25f8972c872c-0
Source code for langchain.chains.router.multi_retrieval_qa """Use a single chain to route an input to one of multiple retrieval qa chains.""" from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from langchain.base_language import BaseLanguageModel from langchain.chains import ConversationChain from langchain.chains.base import Chain from langchain.chains.conversation.prompt import DEFAULT_TEMPLATE from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA from langchain.chains.router.base import MultiRouteChain from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser from langchain.chains.router.multi_retrieval_prompt import ( MULTI_RETRIEVAL_ROUTER_TEMPLATE, ) from langchain.chat_models import ChatOpenAI from langchain.prompts import PromptTemplate from langchain.schema import BaseRetriever [docs]class MultiRetrievalQAChain(MultiRouteChain): """A multi-route chain that uses an LLM router chain to choose amongst retrieval qa chains.""" router_chain: LLMRouterChain """Chain for deciding a destination chain and the input to it.""" destination_chains: Mapping[str, BaseRetrievalQA] """Map of name to candidate chains that inputs can be routed to.""" default_chain: Chain """Default chain to use when router doesn't map input to one of the destinations.""" @property def output_keys(self) -> List[str]: return ["result"] [docs] @classmethod def from_retrievers( cls, llm: BaseLanguageModel, retriever_infos: List[Dict[str, Any]], default_retriever: Optional[BaseRetriever] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_retrieval_qa.html
25f8972c872c-1
default_retriever: Optional[BaseRetriever] = None, default_prompt: Optional[PromptTemplate] = None, default_chain: Optional[Chain] = None, **kwargs: Any, ) -> MultiRetrievalQAChain: if default_prompt and not default_retriever: raise ValueError( "`default_retriever` must be specified if `default_prompt` is " "provided. Received only `default_prompt`." ) destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos] destinations_str = "\n".join(destinations) router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format( destinations=destinations_str ) router_prompt = PromptTemplate( template=router_template, input_variables=["input"], output_parser=RouterOutputParser(next_inputs_inner_key="query"), ) router_chain = LLMRouterChain.from_llm(llm, router_prompt) destination_chains = {} for r_info in retriever_infos: prompt = r_info.get("prompt") retriever = r_info["retriever"] chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever) name = r_info["name"] destination_chains[name] = chain if default_chain: _default_chain = default_chain elif default_retriever: _default_chain = RetrievalQA.from_llm( llm, prompt=default_prompt, retriever=default_retriever ) else: prompt_template = DEFAULT_TEMPLATE.replace("input", "query") prompt = PromptTemplate(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_retrieval_qa.html
25f8972c872c-2
prompt = PromptTemplate( template=prompt_template, input_variables=["history", "query"] ) _default_chain = ConversationChain( llm=ChatOpenAI(), prompt=prompt, input_key="query", output_key="result" ) return cls( router_chain=router_chain, destination_chains=destination_chains, default_chain=_default_chain, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/multi_retrieval_qa.html
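A minimal sketch of MultiRetrievalQAChain.from_retrievers; the two FAISS retrievers and their contents are stand-ins for whatever retrievers you actually route between:

from langchain.chains.router.multi_retrieval_qa import MultiRetrievalQAChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
personal = FAISS.from_texts(["I spent last summer hiking in Portugal."], embeddings).as_retriever()
product = FAISS.from_texts(["The widget ships with a two-year warranty."], embeddings).as_retriever()

retriever_infos = [
    {"name": "personal", "description": "Good for questions about the user's own life", "retriever": personal},
    {"name": "product docs", "description": "Good for questions about the product", "retriever": product},
]

chain = MultiRetrievalQAChain.from_retrievers(ChatOpenAI(temperature=0), retriever_infos)
print(chain.run("Where did I go on vacation last year?"))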
ae5e79cc289c-0
Source code for langchain.chains.router.base """Base classes for chain routing.""" from __future__ import annotations from abc import ABC from typing import Any, Dict, List, Mapping, NamedTuple, Optional from pydantic import Extra from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, Callbacks, ) from langchain.chains.base import Chain class Route(NamedTuple): destination: Optional[str] next_inputs: Dict[str, Any] [docs]class RouterChain(Chain, ABC): """Chain that outputs the name of a destination chain and the inputs to it.""" @property def output_keys(self) -> List[str]: return ["destination", "next_inputs"] [docs] def route(self, inputs: Dict[str, Any], callbacks: Callbacks = None) -> Route: result = self(inputs, callbacks=callbacks) return Route(result["destination"], result["next_inputs"]) [docs] async def aroute( self, inputs: Dict[str, Any], callbacks: Callbacks = None ) -> Route: result = await self.acall(inputs, callbacks=callbacks) return Route(result["destination"], result["next_inputs"]) [docs]class MultiRouteChain(Chain): """Use a single chain to route an input to one of multiple candidate chains.""" router_chain: RouterChain """Chain that routes inputs to destination chains.""" destination_chains: Mapping[str, Chain] """Chains that return final answer to inputs.""" default_chain: Chain """Default chain to use when none of the destination chains are suitable.""" silent_errors: bool = False """If True, use default_chain when an invalid destination name is provided.
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/base.html
ae5e79cc289c-1
"""If True, use default_chain when an invalid destination name is provided. Defaults to False.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Will be whatever keys the router chain prompt expects. :meta private: """ return self.router_chain.input_keys @property def output_keys(self) -> List[str]: """Will always return text key. :meta private: """ return [] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() route = self.router_chain.route(inputs, callbacks=callbacks) _run_manager.on_text( str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose ) if not route.destination: return self.default_chain(route.next_inputs, callbacks=callbacks) elif route.destination in self.destination_chains: return self.destination_chains[route.destination]( route.next_inputs, callbacks=callbacks ) elif self.silent_errors: return self.default_chain(route.next_inputs, callbacks=callbacks) else: raise ValueError( f"Received invalid destination chain name '{route.destination}'" ) async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/base.html
ae5e79cc289c-2
run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() route = await self.router_chain.aroute(inputs, callbacks=callbacks) await _run_manager.on_text( str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose ) if not route.destination: return await self.default_chain.acall( route.next_inputs, callbacks=callbacks ) elif route.destination in self.destination_chains: return await self.destination_chains[route.destination].acall( route.next_inputs, callbacks=callbacks ) elif self.silent_errors: return await self.default_chain.acall( route.next_inputs, callbacks=callbacks ) else: raise ValueError( f"Received invalid destination chain name '{route.destination}'" )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/base.html
4cae6545ff24-0
Source code for langchain.chains.router.llm_router """Base classes for LLM-powered router chains.""" from __future__ import annotations from typing import Any, Dict, List, Optional, Type, cast from pydantic import root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains import LLMChain from langchain.chains.router.base import RouterChain from langchain.output_parsers.json import parse_and_check_json_markdown from langchain.prompts import BasePromptTemplate from langchain.schema import BaseOutputParser, OutputParserException [docs]class LLMRouterChain(RouterChain): """A router chain that uses an LLM chain to perform routing.""" llm_chain: LLMChain """LLM chain used to perform routing""" @root_validator() def validate_prompt(cls, values: dict) -> dict: prompt = values["llm_chain"].prompt if prompt.output_parser is None: raise ValueError( "LLMRouterChain requires base llm_chain prompt to have an output" " parser that converts LLM text output to a dictionary with keys" " 'destination' and 'next_inputs'. Received a prompt with no output" " parser." ) return values @property def input_keys(self) -> List[str]: """Will be whatever keys the LLM chain prompt expects. :meta private: """ return self.llm_chain.input_keys def _validate_outputs(self, outputs: Dict[str, Any]) -> None: super()._validate_outputs(outputs) if not isinstance(outputs["next_inputs"], dict): raise ValueError def _call(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/llm_router.html
4cae6545ff24-1
raise ValueError def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() output = cast( Dict[str, Any], self.llm_chain.predict_and_parse(callbacks=callbacks, **inputs), ) return output async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() output = cast( Dict[str, Any], await self.llm_chain.apredict_and_parse(callbacks=callbacks, **inputs), ) return output [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, **kwargs: Any ) -> LLMRouterChain: """Convenience constructor.""" llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(llm_chain=llm_chain, **kwargs) class RouterOutputParser(BaseOutputParser[Dict[str, str]]): """Parser for output of router chain in the multi-prompt chain.""" default_destination: str = "DEFAULT" next_inputs_type: Type = str next_inputs_inner_key: str = "input" def parse(self, text: str) -> Dict[str, Any]: try:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/llm_router.html
4cae6545ff24-2
def parse(self, text: str) -> Dict[str, Any]: try: expected_keys = ["destination", "next_inputs"] parsed = parse_and_check_json_markdown(text, expected_keys) if not isinstance(parsed["destination"], str): raise ValueError("Expected 'destination' to be a string.") if not isinstance(parsed["next_inputs"], self.next_inputs_type): raise ValueError( f"Expected 'next_inputs' to be {self.next_inputs_type}." ) parsed["next_inputs"] = {self.next_inputs_inner_key: parsed["next_inputs"]} if ( parsed["destination"].strip().lower() == self.default_destination.lower() ): parsed["destination"] = None else: parsed["destination"] = parsed["destination"].strip() return parsed except Exception as e: raise OutputParserException( f"Parsing text\n{text}\n raised following error:\n{e}" )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/router/llm_router.html
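To make the routing contract concrete, a small sketch of what RouterOutputParser.parse returns; the fenced-JSON reply below is fabricated, matching the markdown format parse_and_check_json_markdown is assumed to expect:

from langchain.chains.router.llm_router import RouterOutputParser

parser = RouterOutputParser()
llm_text = '```json\n{"destination": "physics", "next_inputs": "Why is the sky blue?"}\n```'
print(parser.parse(llm_text))
# -> {"destination": "physics", "next_inputs": {"input": "Why is the sky blue?"}}
# A destination of "DEFAULT" is mapped to None, which MultiRouteChain routes to the default chain.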
eb024c8fdfbb-0
Source code for langchain.chains.conversational_retrieval.base """Chain for chatting with a vector database.""" from __future__ import annotations import warnings from abc import abstractmethod from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Tuple, Union from pydantic import Extra, Field, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, Callbacks, ) from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT from langchain.chains.llm import LLMChain from langchain.chains.question_answering import load_qa_chain from langchain.prompts.base import BasePromptTemplate from langchain.schema import BaseMessage, BaseRetriever, Document from langchain.vectorstores.base import VectorStore # Depending on the memory type and configuration, the chat history format may differ. # This needs to be consolidated. CHAT_TURN_TYPE = Union[Tuple[str, str], BaseMessage] _ROLE_MAP = {"human": "Human: ", "ai": "Assistant: "} def _get_chat_history(chat_history: List[CHAT_TURN_TYPE]) -> str: buffer = "" for dialogue_turn in chat_history: if isinstance(dialogue_turn, BaseMessage): role_prefix = _ROLE_MAP.get(dialogue_turn.type, f"{dialogue_turn.type}: ") buffer += f"\n{role_prefix}{dialogue_turn.content}" elif isinstance(dialogue_turn, tuple): human = "Human: " + dialogue_turn[0]
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
eb024c8fdfbb-1
human = "Human: " + dialogue_turn[0] ai = "Assistant: " + dialogue_turn[1] buffer += "\n" + "\n".join([human, ai]) else: raise ValueError( f"Unsupported chat history format: {type(dialogue_turn)}." f" Full chat history: {chat_history} " ) return buffer class BaseConversationalRetrievalChain(Chain): """Chain for chatting with an index.""" combine_docs_chain: BaseCombineDocumentsChain question_generator: LLMChain output_key: str = "answer" return_source_documents: bool = False return_generated_question: bool = False get_chat_history: Optional[Callable[[CHAT_TURN_TYPE], str]] = None """Return the source documents.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True allow_population_by_field_name = True @property def input_keys(self) -> List[str]: """Input keys.""" return ["question", "chat_history"] @property def output_keys(self) -> List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] if self.return_source_documents: _output_keys = _output_keys + ["source_documents"] if self.return_generated_question: _output_keys = _output_keys + ["generated_question"] return _output_keys @abstractmethod def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]: """Get docs.""" def _call( self,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
eb024c8fdfbb-2
"""Get docs.""" def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs["question"] get_chat_history = self.get_chat_history or _get_chat_history chat_history_str = get_chat_history(inputs["chat_history"]) if chat_history_str: callbacks = _run_manager.get_child() new_question = self.question_generator.run( question=question, chat_history=chat_history_str, callbacks=callbacks ) else: new_question = question docs = self._get_docs(new_question, inputs) new_inputs = inputs.copy() new_inputs["question"] = new_question new_inputs["chat_history"] = chat_history_str answer = self.combine_docs_chain.run( input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs ) output: Dict[str, Any] = {self.output_key: answer} if self.return_source_documents: output["source_documents"] = docs if self.return_generated_question: output["generated_question"] = new_question return output @abstractmethod async def _aget_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]: """Get docs.""" async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() question = inputs["question"]
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
eb024c8fdfbb-3
question = inputs["question"] get_chat_history = self.get_chat_history or _get_chat_history chat_history_str = get_chat_history(inputs["chat_history"]) if chat_history_str: callbacks = _run_manager.get_child() new_question = await self.question_generator.arun( question=question, chat_history=chat_history_str, callbacks=callbacks ) else: new_question = question docs = await self._aget_docs(new_question, inputs) new_inputs = inputs.copy() new_inputs["question"] = new_question new_inputs["chat_history"] = chat_history_str answer = await self.combine_docs_chain.arun( input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs ) output: Dict[str, Any] = {self.output_key: answer} if self.return_source_documents: output["source_documents"] = docs if self.return_generated_question: output["generated_question"] = new_question return output def save(self, file_path: Union[Path, str]) -> None: if self.get_chat_history: raise ValueError("Chain not savable when `get_chat_history` is not None.") super().save(file_path) [docs]class ConversationalRetrievalChain(BaseConversationalRetrievalChain): """Chain for chatting with an index.""" retriever: BaseRetriever """Index to connect to.""" max_tokens_limit: Optional[int] = None """If set, restricts the docs to return from store based on tokens, enforced only for StuffDocumentChain""" def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]: num_docs = len(docs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
eb024c8fdfbb-4
num_docs = len(docs) if self.max_tokens_limit and isinstance( self.combine_docs_chain, StuffDocumentsChain ): tokens = [ self.combine_docs_chain.llm_chain.llm.get_num_tokens(doc.page_content) for doc in docs ] token_count = sum(tokens[:num_docs]) while token_count > self.max_tokens_limit: num_docs -= 1 token_count -= tokens[num_docs] return docs[:num_docs] def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]: docs = self.retriever.get_relevant_documents(question) return self._reduce_tokens_below_limit(docs) async def _aget_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]: docs = await self.retriever.aget_relevant_documents(question) return self._reduce_tokens_below_limit(docs) [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, retriever: BaseRetriever, condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT, chain_type: str = "stuff", verbose: bool = False, condense_question_llm: Optional[BaseLanguageModel] = None, combine_docs_chain_kwargs: Optional[Dict] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> BaseConversationalRetrievalChain: """Load chain from LLM.""" combine_docs_chain_kwargs = combine_docs_chain_kwargs or {} doc_chain = load_qa_chain( llm, chain_type=chain_type, verbose=verbose, callbacks=callbacks,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
eb024c8fdfbb-5
chain_type=chain_type, verbose=verbose, callbacks=callbacks, **combine_docs_chain_kwargs, ) _llm = condense_question_llm or llm condense_question_chain = LLMChain( llm=_llm, prompt=condense_question_prompt, verbose=verbose, callbacks=callbacks, ) return cls( retriever=retriever, combine_docs_chain=doc_chain, question_generator=condense_question_chain, callbacks=callbacks, **kwargs, ) [docs]class ChatVectorDBChain(BaseConversationalRetrievalChain): """Chain for chatting with a vector database.""" vectorstore: VectorStore = Field(alias="vectorstore") top_k_docs_for_context: int = 4 search_kwargs: dict = Field(default_factory=dict) @property def _chain_type(self) -> str: return "chat-vector-db" @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: warnings.warn( "`ChatVectorDBChain` is deprecated - " "please use `from langchain.chains import ConversationalRetrievalChain`" ) return values def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]: vectordbkwargs = inputs.get("vectordbkwargs", {}) full_kwargs = {**self.search_kwargs, **vectordbkwargs} return self.vectorstore.similarity_search( question, k=self.top_k_docs_for_context, **full_kwargs ) async def _aget_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
eb024c8fdfbb-6
raise NotImplementedError("ChatVectorDBChain does not support async") [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, vectorstore: VectorStore, condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT, chain_type: str = "stuff", combine_docs_chain_kwargs: Optional[Dict] = None, callbacks: Callbacks = None, **kwargs: Any, ) -> BaseConversationalRetrievalChain: """Load chain from LLM.""" combine_docs_chain_kwargs = combine_docs_chain_kwargs or {} doc_chain = load_qa_chain( llm, chain_type=chain_type, callbacks=callbacks, **combine_docs_chain_kwargs, ) condense_question_chain = LLMChain( llm=llm, prompt=condense_question_prompt, callbacks=callbacks ) return cls( vectorstore=vectorstore, combine_docs_chain=doc_chain, question_generator=condense_question_chain, callbacks=callbacks, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversational_retrieval/base.html
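A minimal sketch of ConversationalRetrievalChain.from_llm with a toy FAISS retriever; chat_history is passed explicitly on every call, here as a list of (human, ai) tuples:

from langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

retriever = FAISS.from_texts(
    ["The project roadmap targets a beta release in Q3."],  # fabricated corpus
    OpenAIEmbeddings(),
).as_retriever()

qa = ConversationalRetrievalChain.from_llm(ChatOpenAI(temperature=0), retriever)

chat_history = []
first = qa({"question": "When is the beta planned?", "chat_history": chat_history})
chat_history.append(("When is the beta planned?", first["answer"]))

# The follow-up is condensed into a standalone question before retrieval.
second = qa({"question": "Which quarter was that again?", "chat_history": chat_history})
print(second["answer"])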
b70822a03554-0
Source code for langchain.chains.conversation.base """Chain that carries on a conversation and calls an LLM.""" from typing import Dict, List from pydantic import Extra, Field, root_validator from langchain.chains.conversation.prompt import PROMPT from langchain.chains.llm import LLMChain from langchain.memory.buffer import ConversationBufferMemory from langchain.prompts.base import BasePromptTemplate from langchain.schema import BaseMemory [docs]class ConversationChain(LLMChain): """Chain to have a conversation and load context from memory. Example: .. code-block:: python from langchain import ConversationChain, OpenAI conversation = ConversationChain(llm=OpenAI()) """ memory: BaseMemory = Field(default_factory=ConversationBufferMemory) """Default memory store.""" prompt: BasePromptTemplate = PROMPT """Default conversation prompt to use.""" input_key: str = "input" #: :meta private: output_key: str = "response" #: :meta private: class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Use this since some prompt vars come from history.""" return [self.input_key] @root_validator() def validate_prompt_input_variables(cls, values: Dict) -> Dict: """Validate that prompt input variables are consistent.""" memory_keys = values["memory"].memory_variables input_key = values["input_key"] if input_key in memory_keys: raise ValueError( f"The input key {input_key} was also found in the memory keys "
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversation/base.html
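A minimal usage sketch, assuming an OpenAI API key is configured; the default ConversationBufferMemory supplies the "history" prompt variable while "input" carries the user turn.

.. code-block:: python

    from langchain.chains import ConversationChain
    from langchain.llms import OpenAI

    conversation = ConversationChain(llm=OpenAI(temperature=0))

    # Each call appends to the buffer memory, so the second turn can refer back to the first.
    print(conversation.predict(input="Hi, my name is Ada."))
    print(conversation.predict(input="What is my name?"))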
b70822a03554-1
f"The input key {input_key} was also found in the memory keys " f"({memory_keys}) - please provide keys that don't overlap." ) prompt_variables = values["prompt"].input_variables expected_keys = memory_keys + [input_key] if set(expected_keys) != set(prompt_variables): raise ValueError( "Got unexpected prompt input variables. The prompt expects " f"{prompt_variables}, but got {memory_keys} as inputs from " f"memory, and {input_key} as the normal input key." ) return values
https://api.python.langchain.com/en/latest/_modules/langchain/chains/conversation/base.html
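A sketch of how validate_prompt_input_variables behaves when a custom prompt does not cover the memory keys; the prompt below deliberately omits the "history" variable that the default memory provides, so construction fails with the ValueError shown above.

.. code-block:: python

    from langchain.chains import ConversationChain
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate

    # Missing the "history" variable supplied by ConversationBufferMemory.
    bad_prompt = PromptTemplate(input_variables=["input"], template="{input}")

    try:
        ConversationChain(llm=OpenAI(), prompt=bad_prompt)
    except ValueError as err:
        print(err)  # "Got unexpected prompt input variables. ..."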
d93acfb16879-0
Source code for langchain.chains.graph_qa.cypher """Question answering over a graph.""" from __future__ import annotations import re from typing import Any, Dict, List, Optional from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.neo4j_graph import Neo4jGraph from langchain.prompts.base import BasePromptTemplate INTERMEDIATE_STEPS_KEY = "intermediate_steps" def extract_cypher(text: str) -> str: """ Extract Cypher code from a text. Args: text: Text to extract Cypher code from. Returns: Cypher code extracted from the text. """ # The pattern to find Cypher code enclosed in triple backticks pattern = r"```(.*?)```" # Find all matches in the input text matches = re.findall(pattern, text, re.DOTALL) return matches[0] if matches else text [docs]class GraphCypherQAChain(Chain): """Chain for question-answering against a graph by generating Cypher statements.""" graph: Neo4jGraph = Field(exclude=True) cypher_generation_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: top_k: int = 10 """Number of results to return from the query"""
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/cypher.html
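A small sketch of extract_cypher, which keeps only the text between the first pair of triple backticks and returns the input unchanged when no fenced block is found; the strings are illustrative.

.. code-block:: python

    from langchain.chains.graph_qa.cypher import extract_cypher

    fenced = "Here is the query:\n```\nMATCH (p:Person) RETURN p.name\n```"
    print(extract_cypher(fenced))                 # "\nMATCH (p:Person) RETURN p.name\n"
    print(extract_cypher("MATCH (n) RETURN n"))   # no backticks, returned as-is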
d93acfb16879-1
"""Number of results to return from the query""" return_intermediate_steps: bool = False """Whether or not to return the intermediate steps along with the final answer.""" return_direct: bool = False """Whether or not to return the result of querying the graph directly.""" @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys @property def _chain_type(self) -> str: return "graph_cypher_chain" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT, cypher_prompt: BasePromptTemplate = CYPHER_GENERATION_PROMPT, **kwargs: Any, ) -> GraphCypherQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt) return cls( qa_chain=qa_chain, cypher_generation_chain=cypher_generation_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/cypher.html
d93acfb16879-2
) -> Dict[str, Any]: """Generate Cypher statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] intermediate_steps: List = [] generated_cypher = self.cypher_generation_chain.run( {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks ) # Extract Cypher code if it is wrapped in backticks generated_cypher = extract_cypher(generated_cypher) _run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_cypher, color="green", end="\n", verbose=self.verbose ) intermediate_steps.append({"query": generated_cypher}) # Retrieve and limit the number of results context = self.graph.query(generated_cypher)[: self.top_k] if self.return_direct: final_result = context else: _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text( str(context), color="green", end="\n", verbose=self.verbose ) intermediate_steps.append({"context": context}) result = self.qa_chain( {"question": question, "context": context}, callbacks=callbacks, ) final_result = result[self.qa_chain.output_key] chain_result: Dict[str, Any] = {self.output_key: final_result} if self.return_intermediate_steps: chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps return chain_result
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/cypher.html
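A minimal end-to-end sketch for GraphCypherQAChain, assuming a reachable Neo4j instance and OpenAI credentials; the connection details and question are placeholders. Setting return_intermediate_steps=True surfaces the generated Cypher and query context recorded in _call above.

.. code-block:: python

    from langchain.chains import GraphCypherQAChain
    from langchain.chat_models import ChatOpenAI
    from langchain.graphs import Neo4jGraph

    # Placeholder connection details for an existing Neo4j database.
    graph = Neo4jGraph(url="bolt://localhost:7687", username="neo4j", password="password")

    chain = GraphCypherQAChain.from_llm(
        ChatOpenAI(temperature=0),
        graph=graph,
        verbose=True,
        return_intermediate_steps=True,
    )

    result = chain("Who acted in Top Gun?")
    print(result["result"])
    print(result["intermediate_steps"])  # [{"query": ...}, {"context": ...}]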
7e9c79648dc7-0
Source code for langchain.chains.graph_qa.nebulagraph """Question answering over a graph.""" from __future__ import annotations from typing import Any, Dict, List, Optional from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_QA_PROMPT, NGQL_GENERATION_PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.nebula_graph import NebulaGraph from langchain.prompts.base import BasePromptTemplate [docs]class NebulaGraphQAChain(Chain): """Chain for question-answering against a graph by generating nGQL statements.""" graph: NebulaGraph = Field(exclude=True) ngql_generation_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT, ngql_prompt: BasePromptTemplate = NGQL_GENERATION_PROMPT, **kwargs: Any,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/nebulagraph.html
7e9c79648dc7-1
**kwargs: Any, ) -> NebulaGraphQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) ngql_generation_chain = LLMChain(llm=llm, prompt=ngql_prompt) return cls( qa_chain=qa_chain, ngql_generation_chain=ngql_generation_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Generate nGQL statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] generated_ngql = self.ngql_generation_chain.run( {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks ) _run_manager.on_text("Generated nGQL:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_ngql, color="green", end="\n", verbose=self.verbose ) context = self.graph.query(generated_ngql) _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text( str(context), color="green", end="\n", verbose=self.verbose ) result = self.qa_chain( {"question": question, "context": context}, callbacks=callbacks, ) return {self.output_key: result[self.qa_chain.output_key]}
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/nebulagraph.html
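A usage sketch for NebulaGraphQAChain, assuming a running NebulaGraph cluster with a populated space; the space name, credentials, address, and question are placeholders.

.. code-block:: python

    from langchain.chains import NebulaGraphQAChain
    from langchain.chat_models import ChatOpenAI
    from langchain.graphs import NebulaGraph

    # Placeholder connection details for an existing NebulaGraph space.
    graph = NebulaGraph(
        space="langchain",
        username="root",
        password="nebula",
        address="127.0.0.1",
        port=9669,
    )

    chain = NebulaGraphQAChain.from_llm(ChatOpenAI(temperature=0), graph=graph, verbose=True)
    print(chain.run("How many people played in The Godfather II?"))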
9b0bec7b1ed5-0
Source code for langchain.chains.graph_qa.base """Question answering over a graph.""" from __future__ import annotations from typing import Any, Dict, List, Optional from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import ENTITY_EXTRACTION_PROMPT, PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.networkx_graph import NetworkxEntityGraph, get_entities from langchain.prompts.base import BasePromptTemplate [docs]class GraphQAChain(Chain): """Chain for question-answering against a graph.""" graph: NetworkxEntityGraph = Field(exclude=True) entity_extraction_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, qa_prompt: BasePromptTemplate = PROMPT, entity_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT, **kwargs: Any, ) -> GraphQAChain: """Initialize from LLM."""
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/base.html
9b0bec7b1ed5-1
) -> GraphQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) entity_chain = LLMChain(llm=llm, prompt=entity_prompt) return cls( qa_chain=qa_chain, entity_extraction_chain=entity_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Extract entities, look up info and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] entity_string = self.entity_extraction_chain.run(question) _run_manager.on_text("Entities Extracted:", end="\n", verbose=self.verbose) _run_manager.on_text( entity_string, color="green", end="\n", verbose=self.verbose ) entities = get_entities(entity_string) context = "" for entity in entities: triplets = self.graph.get_entity_knowledge(entity) context += "\n".join(triplets) _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text(context, color="green", end="\n", verbose=self.verbose) result = self.qa_chain( {"question": question, "context": context}, callbacks=_run_manager.get_child(), ) return {self.output_key: result[self.qa_chain.output_key]}
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/base.html
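A self-contained sketch of GraphQAChain over an in-memory NetworkxEntityGraph; the triple and question are illustrative and an OpenAI key is assumed.

.. code-block:: python

    from langchain.chains import GraphQAChain
    from langchain.graphs import NetworkxEntityGraph
    from langchain.graphs.networkx_graph import KnowledgeTriple
    from langchain.llms import OpenAI

    # Build a tiny knowledge graph of (subject, predicate, object) triples.
    graph = NetworkxEntityGraph()
    graph.add_triple(KnowledgeTriple("Ada Lovelace", "collaborated with", "Charles Babbage"))

    chain = GraphQAChain.from_llm(OpenAI(temperature=0), graph=graph, verbose=True)
    print(chain.run("Who did Ada Lovelace collaborate with?"))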
58db5ec6b7d3-0
Source code for langchain.chains.graph_qa.kuzu """Question answering over a graph.""" from __future__ import annotations from typing import Any, Dict, List, Optional from pydantic import Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_QA_PROMPT, KUZU_GENERATION_PROMPT from langchain.chains.llm import LLMChain from langchain.graphs.kuzu_graph import KuzuGraph from langchain.prompts.base import BasePromptTemplate [docs]class KuzuQAChain(Chain): """Chain for question-answering against a graph by generating Cypher statements for Kùzu. """ graph: KuzuGraph = Field(exclude=True) cypher_generation_chain: LLMChain qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: @property def input_keys(self) -> List[str]: """Return the input keys. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate = CYPHER_QA_PROMPT, cypher_prompt: BasePromptTemplate = KUZU_GENERATION_PROMPT,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/kuzu.html
58db5ec6b7d3-1
cypher_prompt: BasePromptTemplate = KUZU_GENERATION_PROMPT, **kwargs: Any, ) -> KuzuQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt) return cls( qa_chain=qa_chain, cypher_generation_chain=cypher_generation_chain, **kwargs, ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Generate Cypher statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] generated_cypher = self.cypher_generation_chain.run( {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks ) _run_manager.on_text("Generated Cypher:", end="\n", verbose=self.verbose) _run_manager.on_text( generated_cypher, color="green", end="\n", verbose=self.verbose ) context = self.graph.query(generated_cypher) _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) _run_manager.on_text( str(context), color="green", end="\n", verbose=self.verbose ) result = self.qa_chain( {"question": question, "context": context}, callbacks=callbacks, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/kuzu.html
58db5ec6b7d3-2
callbacks=callbacks, ) return {self.output_key: result[self.qa_chain.output_key]}
https://api.python.langchain.com/en/latest/_modules/langchain/chains/graph_qa/kuzu.html
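A usage sketch for KuzuQAChain, assuming the kuzu package is installed and a local database with a populated schema exists; the database path and question are illustrative.

.. code-block:: python

    import kuzu

    from langchain.chains import KuzuQAChain
    from langchain.chat_models import ChatOpenAI
    from langchain.graphs import KuzuGraph

    # Open a local Kùzu database; the path is a placeholder.
    db = kuzu.Database("./kuzu_db")
    graph = KuzuGraph(db)

    chain = KuzuQAChain.from_llm(ChatOpenAI(temperature=0), graph=graph, verbose=True)
    print(chain.run("Who played in The Godfather: Part II?"))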
810c04452234-0
Source code for langchain.chains.openai_functions.qa_with_structure from typing import Any, List, Optional, Type, Union from pydantic import BaseModel, Field from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.chains.openai_functions.utils import get_llm_kwargs from langchain.output_parsers.openai_functions import ( OutputFunctionsParser, PydanticOutputFunctionsParser, ) from langchain.prompts import PromptTemplate from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate from langchain.schema import BaseLLMOutputParser, HumanMessage, SystemMessage class AnswerWithSources(BaseModel): """An answer to the question being asked, with sources.""" answer: str = Field(..., description="Answer to the question that was asked") sources: List[str] = Field( ..., description="List of sources used to answer the question" ) [docs]def create_qa_with_structure_chain( llm: BaseLanguageModel, schema: Union[dict, Type[BaseModel]], output_parser: str = "base", prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None, ) -> LLMChain: """Create a question answering chain that returns an answer with sources. Args: llm: Language model to use for the chain. schema: Pydantic schema to use for the output. output_parser: Output parser to use. Should be one of `pydantic` or `base`. Defaults to `base`. prompt: Optional prompt to use for the chain. Returns: An LLMChain that produces an answer matching the given schema. """ if output_parser == "pydantic":
https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/qa_with_structure.html
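The chunk above is truncated, but the signature is enough for a hedged usage sketch. It assumes a function-calling capable OpenAI chat model (the model name is illustrative) and reuses the AnswerWithSources schema defined in this module; with output_parser="pydantic" the chain is expected to return a parsed AnswerWithSources instance.

.. code-block:: python

    from langchain.chains.openai_functions.qa_with_structure import (
        AnswerWithSources,
        create_qa_with_structure_chain,
    )
    from langchain.chat_models import ChatOpenAI

    llm = ChatOpenAI(model_name="gpt-3.5-turbo-0613", temperature=0)
    chain = create_qa_with_structure_chain(llm, AnswerWithSources, output_parser="pydantic")

    # The default prompt expects "context" and "question" inputs.
    result = chain.run(
        question="Who wrote Pride and Prejudice?",
        context="Jane Austen wrote Pride and Prejudice, first published in 1813.",
    )
    print(result.answer, result.sources)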