810c04452234-1
Returns: """ if output_parser == "pydantic": if not (isinstance(schema, type) and issubclass(schema, BaseModel)): raise ValueError( "Must provide a pydantic class for schema when output_parser is " "'pydantic'." ) _output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser( pydantic_schema=schema ) elif output_parser == "base": _output_parser = OutputFunctionsParser() else: raise ValueError( f"Got unexpected output_parser: {output_parser}. " f"Should be one of `pydantic` or `base`." ) if isinstance(schema, type) and issubclass(schema, BaseModel): schema_dict = schema.schema() else: schema_dict = schema function = { "name": schema_dict["title"], "description": schema_dict["description"], "parameters": schema_dict, } llm_kwargs = get_llm_kwargs(function) messages = [ SystemMessage( content=( "You are a world class algorithm to answer " "questions in a specific format." ) ), HumanMessage(content="Answer question using the following context"), HumanMessagePromptTemplate.from_template("{context}"), HumanMessagePromptTemplate.from_template("Question: {question}"), HumanMessage(content="Tips: Make sure to answer in the correct format"), ] prompt = prompt or ChatPromptTemplate(messages=messages) chain = LLMChain( llm=llm, prompt=prompt, llm_kwargs=llm_kwargs, output_parser=_output_parser, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/qa_with_structure.html
810c04452234-2
        output_parser=_output_parser,
    )
    return chain


def create_qa_with_sources_chain(llm: BaseLanguageModel, **kwargs: Any) -> LLMChain:
    """Create a question answering chain that returns an answer with sources.

    Args:
        llm: Language model to use for the chain.
        **kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.

    Returns:
        Chain (LLMChain) that can be used to answer questions with citations.
    """
    return create_qa_with_structure_chain(llm, AnswerWithSources, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/qa_with_structure.html
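The page above only shows how the chain is assembled. Below is a minimal usage sketch (not part of the original page): it wraps the QA chain in a StuffDocumentsChain so retrieved documents and their sources are formatted into the `{context}` variable. The model name, the `source` metadata key, and the prompt wording are illustrative assumptions.

.. code-block:: python

    from langchain.chains.combine_documents.stuff import StuffDocumentsChain
    from langchain.chains.openai_functions.qa_with_structure import (
        create_qa_with_sources_chain,
    )
    from langchain.chat_models import ChatOpenAI
    from langchain.prompts import PromptTemplate

    llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
    qa_chain = create_qa_with_sources_chain(llm)

    # Assumes each retrieved Document carries a "source" metadata field.
    doc_prompt = PromptTemplate(
        template="Content: {page_content}\nSource: {source}",
        input_variables=["page_content", "source"],
    )
    final_qa_chain = StuffDocumentsChain(
        llm_chain=qa_chain,
        document_variable_name="context",
        document_prompt=doc_prompt,
    )
    # answer = final_qa_chain.run(input_documents=docs, question="...")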
b8678fe13c81-0
Source code for langchain.chains.openai_functions.citation_fuzzy_match
from typing import Iterator, List

from pydantic import BaseModel, Field

from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
from langchain.output_parsers.openai_functions import (
    PydanticOutputFunctionsParser,
)
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema import HumanMessage, SystemMessage


class FactWithEvidence(BaseModel):
    """Class representing a single statement.

    Each fact has a body and a list of sources.
    If there are multiple facts, make sure to break them apart
    such that each one only uses a set of sources that are relevant to it.
    """

    fact: str = Field(..., description="Body of the sentence, as part of a response")
    substring_quote: List[str] = Field(
        ...,
        description=(
            "Each source should be a direct quote from the context, "
            "as a substring of the original content"
        ),
    )

    def _get_span(self, quote: str, context: str, errs: int = 100) -> Iterator[str]:
        import regex

        minor = quote
        major = context

        errs_ = 0
        s = regex.search(f"({minor}){{e<={errs_}}}", major)
        while s is None and errs_ <= errs:
            errs_ += 1
            s = regex.search(f"({minor}){{e<={errs_}}}", major)

        if s is not None:
            yield from s.spans()

    def get_spans(self, context: str) -> Iterator[str]:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/citation_fuzzy_match.html
b8678fe13c81-1
    def get_spans(self, context: str) -> Iterator[str]:
        for quote in self.substring_quote:
            yield from self._get_span(quote, context)


class QuestionAnswer(BaseModel):
    """A question and its answer as a list of facts, each of which should have a source.

    Each sentence contains a body and a list of sources.
    """

    question: str = Field(..., description="Question that was asked")
    answer: List[FactWithEvidence] = Field(
        ...,
        description=(
            "Body of the answer, each fact should be "
            "its separate object with a body and a list of sources"
        ),
    )


def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
    """Create a citation fuzzy match chain.

    Args:
        llm: Language model to use for the chain.

    Returns:
        Chain (LLMChain) that can be used to answer questions with citations.
    """
    output_parser = PydanticOutputFunctionsParser(pydantic_schema=QuestionAnswer)
    schema = QuestionAnswer.schema()
    function = {
        "name": schema["title"],
        "description": schema["description"],
        "parameters": schema,
    }
    llm_kwargs = get_llm_kwargs(function)
    messages = [
        SystemMessage(
            content=(
                "You are a world class algorithm to answer "
                "questions with correct and exact citations."
            )
        ),
        HumanMessage(content="Answer question using the following context"),
        HumanMessagePromptTemplate.from_template("{context}"),
        HumanMessagePromptTemplate.from_template("Question: {question}"),
        HumanMessage(
            content=(
                "Tips: Make sure to cite your sources, "
https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/citation_fuzzy_match.html
b8678fe13c81-2
content=( "Tips: Make sure to cite your sources, " "and use the exact words from the context." ) ), ] prompt = ChatPromptTemplate(messages=messages) chain = LLMChain( llm=llm, prompt=prompt, llm_kwargs=llm_kwargs, output_parser=output_parser, ) return chain
https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/citation_fuzzy_match.html
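A short usage sketch (added for orientation, not from the original page); the model name, question, and context are illustrative assumptions:

.. code-block:: python

    from langchain.chains.openai_functions.citation_fuzzy_match import (
        create_citation_fuzzy_match_chain,
    )
    from langchain.chat_models import ChatOpenAI

    llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
    chain = create_citation_fuzzy_match_chain(llm)

    context = "Alice is 30 years old. She lives in Paris and works as an engineer."
    result = chain.run(question="How old is Alice?", context=context)
    # result is a QuestionAnswer object; each FactWithEvidence carries substring
    # quotes that get_spans() can fuzzily locate back in the original context.
    print(result.answer[0].fact)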
ac91b3d4ddbe-0
Source code for langchain.chains.openai_functions.tagging
from typing import Any

from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import _convert_schema, get_llm_kwargs
from langchain.output_parsers.openai_functions import (
    JsonOutputFunctionsParser,
    PydanticOutputFunctionsParser,
)
from langchain.prompts import ChatPromptTemplate


def _get_tagging_function(schema: dict) -> dict:
    return {
        "name": "information_extraction",
        "description": "Extracts the relevant information from the passage.",
        "parameters": _convert_schema(schema),
    }


_TAGGING_TEMPLATE = """Extract the desired information from the following passage.

Passage:
{input}
"""


def create_tagging_chain(schema: dict, llm: BaseLanguageModel) -> Chain:
    """Creates a chain that extracts information from a passage.

    Args:
        schema: The schema of the entities to extract.
        llm: The language model to use.

    Returns:
        Chain (LLMChain) that can be used to extract information from a passage.
    """
    function = _get_tagging_function(schema)
    prompt = ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
    output_parser = JsonOutputFunctionsParser()
    llm_kwargs = get_llm_kwargs(function)
    chain = LLMChain(
        llm=llm,
        prompt=prompt,
        llm_kwargs=llm_kwargs,
        output_parser=output_parser,
    )
    return chain


def create_tagging_chain_pydantic(
    pydantic_schema: Any, llm: BaseLanguageModel
https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/tagging.html
ac91b3d4ddbe-1
    pydantic_schema: Any, llm: BaseLanguageModel
) -> Chain:
    """Creates a chain that extracts information from a passage.

    Args:
        pydantic_schema: The pydantic schema of the entities to extract.
        llm: The language model to use.

    Returns:
        Chain (LLMChain) that can be used to extract information from a passage.
    """
    openai_schema = pydantic_schema.schema()
    function = _get_tagging_function(openai_schema)
    prompt = ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
    output_parser = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
    llm_kwargs = get_llm_kwargs(function)
    chain = LLMChain(
        llm=llm,
        prompt=prompt,
        llm_kwargs=llm_kwargs,
        output_parser=output_parser,
    )
    return chain
https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/tagging.html
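A brief usage sketch (added here, not part of the original page); the schema, model name, and example sentence are illustrative assumptions:

.. code-block:: python

    from langchain.chains.openai_functions.tagging import create_tagging_chain
    from langchain.chat_models import ChatOpenAI

    schema = {
        "properties": {
            "sentiment": {"type": "string"},
            "language": {"type": "string"},
        }
    }
    llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
    chain = create_tagging_chain(schema, llm)
    chain.run("Estoy muy contento de haberte conocido.")
    # -> e.g. {"sentiment": "positive", "language": "Spanish"}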
cd623a55c510-0
Source code for langchain.chains.openai_functions.extraction
from typing import Any, List

from pydantic import BaseModel

from langchain.base_language import BaseLanguageModel
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import (
    _convert_schema,
    _resolve_schema_references,
    get_llm_kwargs,
)
from langchain.output_parsers.openai_functions import (
    JsonKeyOutputFunctionsParser,
    PydanticAttrOutputFunctionsParser,
)
from langchain.prompts import ChatPromptTemplate


def _get_extraction_function(entity_schema: dict) -> dict:
    return {
        "name": "information_extraction",
        "description": "Extracts the relevant information from the passage.",
        "parameters": {
            "type": "object",
            "properties": {
                "info": {"type": "array", "items": _convert_schema(entity_schema)}
            },
            "required": ["info"],
        },
    }


_EXTRACTION_TEMPLATE = """Extract and save the relevant entities mentioned\
 in the following passage together with their properties.

Passage:
{input}
"""


def create_extraction_chain(schema: dict, llm: BaseLanguageModel) -> Chain:
    """Creates a chain that extracts information from a passage.

    Args:
        schema: The schema of the entities to extract.
        llm: The language model to use.

    Returns:
        Chain that can be used to extract information from a passage.
    """
    function = _get_extraction_function(schema)
    prompt = ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE)
    output_parser = JsonKeyOutputFunctionsParser(key_name="info")
https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/extraction.html
cd623a55c510-1
    output_parser = JsonKeyOutputFunctionsParser(key_name="info")
    llm_kwargs = get_llm_kwargs(function)
    chain = LLMChain(
        llm=llm,
        prompt=prompt,
        llm_kwargs=llm_kwargs,
        output_parser=output_parser,
    )
    return chain


def create_extraction_chain_pydantic(
    pydantic_schema: Any, llm: BaseLanguageModel
) -> Chain:
    """Creates a chain that extracts information from a passage using pydantic schema.

    Args:
        pydantic_schema: The pydantic schema of the entities to extract.
        llm: The language model to use.

    Returns:
        Chain that can be used to extract information from a passage.
    """

    class PydanticSchema(BaseModel):
        info: List[pydantic_schema]  # type: ignore

    openai_schema = pydantic_schema.schema()
    openai_schema = _resolve_schema_references(
        openai_schema, openai_schema.get("definitions", {})
    )

    function = _get_extraction_function(openai_schema)
    prompt = ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE)
    output_parser = PydanticAttrOutputFunctionsParser(
        pydantic_schema=PydanticSchema, attr_name="info"
    )
    llm_kwargs = get_llm_kwargs(function)
    chain = LLMChain(
        llm=llm,
        prompt=prompt,
        llm_kwargs=llm_kwargs,
        output_parser=output_parser,
    )
    return chain
https://api.python.langchain.com/en/latest/_modules/langchain/chains/openai_functions/extraction.html
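For orientation, a minimal usage sketch (added, not from the original page); the schema fields, model name, and passage are illustrative assumptions:

.. code-block:: python

    from langchain.chains.openai_functions.extraction import create_extraction_chain
    from langchain.chat_models import ChatOpenAI

    schema = {
        "properties": {
            "name": {"type": "string"},
            "height": {"type": "integer"},
        },
        "required": ["name"],
    }
    llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
    chain = create_extraction_chain(schema, llm)
    chain.run("Alex is 5 feet tall. Claudia is one foot taller than Alex.")
    # -> a list of dicts, one per extracted entity (the "info" array)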
40755c90ee9d-0
Source code for langchain.chains.llm_checker.base
"""Chain for question-answering with self-verification."""
from __future__ import annotations

import warnings
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_checker.prompt import (
    CHECK_ASSERTIONS_PROMPT,
    CREATE_DRAFT_ANSWER_PROMPT,
    LIST_ASSERTIONS_PROMPT,
    REVISED_ANSWER_PROMPT,
)
from langchain.chains.sequential import SequentialChain
from langchain.prompts import PromptTemplate


def _load_question_to_checked_assertions_chain(
    llm: BaseLanguageModel,
    create_draft_answer_prompt: PromptTemplate,
    list_assertions_prompt: PromptTemplate,
    check_assertions_prompt: PromptTemplate,
    revised_answer_prompt: PromptTemplate,
) -> SequentialChain:
    create_draft_answer_chain = LLMChain(
        llm=llm,
        prompt=create_draft_answer_prompt,
        output_key="statement",
    )
    list_assertions_chain = LLMChain(
        llm=llm,
        prompt=list_assertions_prompt,
        output_key="assertions",
    )
    check_assertions_chain = LLMChain(
        llm=llm,
        prompt=check_assertions_prompt,
        output_key="checked_assertions",
    )
    revised_answer_chain = LLMChain(
        llm=llm,
        prompt=revised_answer_prompt,
        output_key="revised_statement",
    )
    chains = [
        create_draft_answer_chain,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html
40755c90ee9d-1
    )
    chains = [
        create_draft_answer_chain,
        list_assertions_chain,
        check_assertions_chain,
        revised_answer_chain,
    ]
    question_to_checked_assertions_chain = SequentialChain(
        chains=chains,
        input_variables=["question"],
        output_variables=["revised_statement"],
        verbose=True,
    )
    return question_to_checked_assertions_chain


class LLMCheckerChain(Chain):
    """Chain for question-answering with self-verification.

    Example:
        .. code-block:: python

            from langchain import OpenAI, LLMCheckerChain
            llm = OpenAI(temperature=0.7)
            checker_chain = LLMCheckerChain.from_llm(llm)
    """

    question_to_checked_assertions_chain: SequentialChain

    llm: Optional[BaseLanguageModel] = None
    """[Deprecated] LLM wrapper to use."""
    create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT
    """[Deprecated]"""
    list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT
    """[Deprecated]"""
    check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT
    """[Deprecated]"""
    revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT
    """[Deprecated] Prompt to use when questioning the documents."""

    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator(pre=True)
    def raise_deprecation(cls, values: Dict) -> Dict:
        if "llm" in values:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html
40755c90ee9d-2
if "llm" in values: warnings.warn( "Directly instantiating an LLMCheckerChain with an llm is deprecated. " "Please instantiate with question_to_checked_assertions_chain " "or using the from_llm class method." ) if ( "question_to_checked_assertions_chain" not in values and values["llm"] is not None ): question_to_checked_assertions_chain = ( _load_question_to_checked_assertions_chain( values["llm"], values.get( "create_draft_answer_prompt", CREATE_DRAFT_ANSWER_PROMPT ), values.get("list_assertions_prompt", LIST_ASSERTIONS_PROMPT), values.get("check_assertions_prompt", CHECK_ASSERTIONS_PROMPT), values.get("revised_answer_prompt", REVISED_ANSWER_PROMPT), ) ) values[ "question_to_checked_assertions_chain" ] = question_to_checked_assertions_chain return values @property def input_keys(self) -> List[str]: """Return the singular input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the singular output key. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] output = self.question_to_checked_assertions_chain(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html
40755c90ee9d-3
        output = self.question_to_checked_assertions_chain(
            {"question": question}, callbacks=_run_manager.get_child()
        )
        return {self.output_key: output["revised_statement"]}

    @property
    def _chain_type(self) -> str:
        return "llm_checker_chain"

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT,
        list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT,
        check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT,
        revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT,
        **kwargs: Any,
    ) -> LLMCheckerChain:
        question_to_checked_assertions_chain = (
            _load_question_to_checked_assertions_chain(
                llm,
                create_draft_answer_prompt,
                list_assertions_prompt,
                check_assertions_prompt,
                revised_answer_prompt,
            )
        )
        return cls(
            question_to_checked_assertions_chain=question_to_checked_assertions_chain,
            **kwargs,
        )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_checker/base.html
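Extending the docstring example above into a runnable sketch (added, not from the original page); the temperature and the sample question are illustrative:

.. code-block:: python

    from langchain.chains import LLMCheckerChain
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0.7)
    checker_chain = LLMCheckerChain.from_llm(llm, verbose=True)
    # Drafts an answer, lists its assertions, checks them, then revises.
    checker_chain.run("What type of mammal lays the biggest eggs?")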
96fa3bf0a236-0
Source code for langchain.chains.combine_documents.stuff
"""Chain that combines documents by stuffing into context."""
from typing import Any, Dict, List, Optional, Tuple

from pydantic import Extra, Field, root_validator

from langchain.callbacks.manager import Callbacks
from langchain.chains.combine_documents.base import (
    BaseCombineDocumentsChain,
    format_document,
)
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate


def _get_default_document_prompt() -> PromptTemplate:
    return PromptTemplate(input_variables=["page_content"], template="{page_content}")


class StuffDocumentsChain(BaseCombineDocumentsChain):
    """Chain that combines documents by stuffing into context."""

    llm_chain: LLMChain
    """LLM wrapper to use after formatting documents."""
    document_prompt: BasePromptTemplate = Field(
        default_factory=_get_default_document_prompt
    )
    """Prompt to use to format each document."""
    document_variable_name: str
    """The variable name in the llm_chain to put the documents in.
    If only one variable in the llm_chain, this need not be provided."""
    document_separator: str = "\n\n"
    """The string with which to join the formatted documents"""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator(pre=True)
    def get_default_document_variable_name(cls, values: Dict) -> Dict:
        """Get default document variable name, if not provided."""
        llm_chain_variables = values["llm_chain"].prompt.input_variables
        if "document_variable_name" not in values:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/stuff.html
96fa3bf0a236-1
if "document_variable_name" not in values: if len(llm_chain_variables) == 1: values["document_variable_name"] = llm_chain_variables[0] else: raise ValueError( "document_variable_name must be provided if there are " "multiple llm_chain_variables" ) else: if values["document_variable_name"] not in llm_chain_variables: raise ValueError( f"document_variable_name {values['document_variable_name']} was " f"not found in llm_chain input_variables: {llm_chain_variables}" ) return values def _get_inputs(self, docs: List[Document], **kwargs: Any) -> dict: # Format each document according to the prompt doc_strings = [format_document(doc, self.document_prompt) for doc in docs] # Join the documents together to put them in the prompt. inputs = { k: v for k, v in kwargs.items() if k in self.llm_chain.prompt.input_variables } inputs[self.document_variable_name] = self.document_separator.join(doc_strings) return inputs [docs] def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]: """Get the prompt length by formatting the prompt.""" inputs = self._get_inputs(docs, **kwargs) prompt = self.llm_chain.prompt.format(**inputs) return self.llm_chain.llm.get_num_tokens(prompt) [docs] def combine_docs( self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any ) -> Tuple[str, dict]: """Stuff all documents into one prompt and pass to LLM."""
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/stuff.html
96fa3bf0a236-2
"""Stuff all documents into one prompt and pass to LLM.""" inputs = self._get_inputs(docs, **kwargs) # Call predict on the LLM. return self.llm_chain.predict(callbacks=callbacks, **inputs), {} [docs] async def acombine_docs( self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any ) -> Tuple[str, dict]: """Stuff all documents into one prompt and pass to LLM.""" inputs = self._get_inputs(docs, **kwargs) # Call predict on the LLM. return await self.llm_chain.apredict(callbacks=callbacks, **inputs), {} @property def _chain_type(self) -> str: return "stuff_documents_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/stuff.html
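A minimal construction sketch (added, not from the original page); the prompt wording and document contents are illustrative assumptions:

.. code-block:: python

    from langchain.chains.combine_documents.stuff import StuffDocumentsChain
    from langchain.chains.llm import LLMChain
    from langchain.docstore.document import Document
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate

    prompt = PromptTemplate.from_template("Summarize this content:\n\n{context}")
    llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
    # document_variable_name could be omitted here because the prompt
    # has exactly one input variable.
    chain = StuffDocumentsChain(llm_chain=llm_chain, document_variable_name="context")

    docs = [Document(page_content="First document."), Document(page_content="Second.")]
    summary = chain.run(input_documents=docs)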
164b672cc53e-0
Source code for langchain.chains.combine_documents.refine
"""Combining documents by doing a first pass and then refining on more documents."""
from __future__ import annotations

from typing import Any, Dict, List, Tuple

from pydantic import Extra, Field, root_validator

from langchain.callbacks.manager import Callbacks
from langchain.chains.combine_documents.base import (
    BaseCombineDocumentsChain,
    format_document,
)
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate


def _get_default_document_prompt() -> PromptTemplate:
    return PromptTemplate(input_variables=["page_content"], template="{page_content}")


class RefineDocumentsChain(BaseCombineDocumentsChain):
    """Combine documents by doing a first pass and then refining on more documents."""

    initial_llm_chain: LLMChain
    """LLM chain to use on initial document."""
    refine_llm_chain: LLMChain
    """LLM chain to use when refining."""
    document_variable_name: str
    """The variable name in the initial_llm_chain to put the documents in.
    If only one variable in the initial_llm_chain, this need not be provided."""
    initial_response_name: str
    """The variable name to format the initial response in when refining."""
    document_prompt: BasePromptTemplate = Field(
        default_factory=_get_default_document_prompt
    )
    """Prompt to use to format each document."""
    return_intermediate_steps: bool = False
    """Return the results of the refine steps in the output."""

    @property
    def output_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/refine.html
164b672cc53e-1
"""Expect input key. :meta private: """ _output_keys = super().output_keys if self.return_intermediate_steps: _output_keys = _output_keys + ["intermediate_steps"] return _output_keys class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def get_return_intermediate_steps(cls, values: Dict) -> Dict: """For backwards compatibility.""" if "return_refine_steps" in values: values["return_intermediate_steps"] = values["return_refine_steps"] del values["return_refine_steps"] return values @root_validator(pre=True) def get_default_document_variable_name(cls, values: Dict) -> Dict: """Get default document variable name, if not provided.""" if "document_variable_name" not in values: llm_chain_variables = values["initial_llm_chain"].prompt.input_variables if len(llm_chain_variables) == 1: values["document_variable_name"] = llm_chain_variables[0] else: raise ValueError( "document_variable_name must be provided if there are " "multiple llm_chain input_variables" ) else: llm_chain_variables = values["initial_llm_chain"].prompt.input_variables if values["document_variable_name"] not in llm_chain_variables: raise ValueError( f"document_variable_name {values['document_variable_name']} was " f"not found in llm_chain input_variables: {llm_chain_variables}" ) return values [docs] def combine_docs(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/refine.html
164b672cc53e-2
                )
        return values

    def combine_docs(
        self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
    ) -> Tuple[str, dict]:
        """Combine by mapping first chain over all, then stuffing into final chain."""
        inputs = self._construct_initial_inputs(docs, **kwargs)
        res = self.initial_llm_chain.predict(callbacks=callbacks, **inputs)
        refine_steps = [res]
        for doc in docs[1:]:
            base_inputs = self._construct_refine_inputs(doc, res)
            inputs = {**base_inputs, **kwargs}
            res = self.refine_llm_chain.predict(callbacks=callbacks, **inputs)
            refine_steps.append(res)
        return self._construct_result(refine_steps, res)

    async def acombine_docs(
        self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
    ) -> Tuple[str, dict]:
        """Combine by mapping first chain over all, then stuffing into final chain."""
        inputs = self._construct_initial_inputs(docs, **kwargs)
        res = await self.initial_llm_chain.apredict(callbacks=callbacks, **inputs)
        refine_steps = [res]
        for doc in docs[1:]:
            base_inputs = self._construct_refine_inputs(doc, res)
            inputs = {**base_inputs, **kwargs}
            res = await self.refine_llm_chain.apredict(callbacks=callbacks, **inputs)
            refine_steps.append(res)
        return self._construct_result(refine_steps, res)

    def _construct_result(self, refine_steps: List[str], res: str) -> Tuple[str, dict]:
        if self.return_intermediate_steps:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/refine.html
164b672cc53e-3
        if self.return_intermediate_steps:
            extra_return_dict = {"intermediate_steps": refine_steps}
        else:
            extra_return_dict = {}
        return res, extra_return_dict

    def _construct_refine_inputs(self, doc: Document, res: str) -> Dict[str, Any]:
        return {
            self.document_variable_name: format_document(doc, self.document_prompt),
            self.initial_response_name: res,
        }

    def _construct_initial_inputs(
        self, docs: List[Document], **kwargs: Any
    ) -> Dict[str, Any]:
        base_info = {"page_content": docs[0].page_content}
        base_info.update(docs[0].metadata)
        document_info = {k: base_info[k] for k in self.document_prompt.input_variables}
        base_inputs: dict = {
            self.document_variable_name: self.document_prompt.format(**document_info)
        }
        inputs = {**base_inputs, **kwargs}
        return inputs

    @property
    def _chain_type(self) -> str:
        return "refine_documents_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/refine.html
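A minimal construction sketch (added, not from the original page); the two prompt templates and variable names are illustrative assumptions:

.. code-block:: python

    from langchain.chains.combine_documents.refine import RefineDocumentsChain
    from langchain.chains.llm import LLMChain
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate

    llm = OpenAI(temperature=0)
    initial_prompt = PromptTemplate.from_template(
        "Summarize this content:\n\n{context}"
    )
    refine_prompt = PromptTemplate.from_template(
        "Here is an existing summary:\n{prev_response}\n\n"
        "Refine it using this additional context:\n\n{context}"
    )
    chain = RefineDocumentsChain(
        initial_llm_chain=LLMChain(llm=llm, prompt=initial_prompt),
        refine_llm_chain=LLMChain(llm=llm, prompt=refine_prompt),
        document_variable_name="context",
        initial_response_name="prev_response",
    )
    # summary = chain.run(input_documents=docs)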
9fcb0e00f25f-0
Source code for langchain.chains.combine_documents.map_rerank
"""Combining documents by mapping a chain over them first, then reranking results."""
from __future__ import annotations

from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast

from pydantic import Extra, root_validator

from langchain.callbacks.manager import Callbacks
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.output_parsers.regex import RegexParser


class MapRerankDocumentsChain(BaseCombineDocumentsChain):
    """Combining documents by mapping a chain over them, then reranking results."""

    llm_chain: LLMChain
    """Chain to apply to each document individually."""
    document_variable_name: str
    """The variable name in the llm_chain to put the documents in.
    If only one variable in the llm_chain, this need not be provided."""
    rank_key: str
    """Key in output of llm_chain to rank on."""
    answer_key: str
    """Key in output of llm_chain to return as answer."""
    metadata_keys: Optional[List[str]] = None
    return_intermediate_steps: bool = False

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def output_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        _output_keys = super().output_keys
        if self.return_intermediate_steps:
            _output_keys = _output_keys + ["intermediate_steps"]
        if self.metadata_keys is not None:
            _output_keys += self.metadata_keys
        return _output_keys
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_rerank.html
9fcb0e00f25f-1
            _output_keys += self.metadata_keys
        return _output_keys

    @root_validator()
    def validate_llm_output(cls, values: Dict) -> Dict:
        """Validate that the combine chain outputs a dictionary."""
        output_parser = values["llm_chain"].prompt.output_parser
        if not isinstance(output_parser, RegexParser):
            raise ValueError(
                "Output parser of llm_chain should be a RegexParser,"
                f" got {output_parser}"
            )
        output_keys = output_parser.output_keys
        if values["rank_key"] not in output_keys:
            raise ValueError(
                f"Got {values['rank_key']} as key to rank on, but did not find "
                f"it in the llm_chain output keys ({output_keys})"
            )
        if values["answer_key"] not in output_keys:
            raise ValueError(
                f"Got {values['answer_key']} as key to return, but did not find "
                f"it in the llm_chain output keys ({output_keys})"
            )
        return values

    @root_validator(pre=True)
    def get_default_document_variable_name(cls, values: Dict) -> Dict:
        """Get default document variable name, if not provided."""
        if "document_variable_name" not in values:
            llm_chain_variables = values["llm_chain"].prompt.input_variables
            if len(llm_chain_variables) == 1:
                values["document_variable_name"] = llm_chain_variables[0]
            else:
                raise ValueError(
                    "document_variable_name must be provided if there are "
                    "multiple llm_chain input_variables"
                )
        else:
            llm_chain_variables = values["llm_chain"].prompt.input_variables
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_rerank.html
9fcb0e00f25f-2
        else:
            llm_chain_variables = values["llm_chain"].prompt.input_variables
            if values["document_variable_name"] not in llm_chain_variables:
                raise ValueError(
                    f"document_variable_name {values['document_variable_name']} was "
                    f"not found in llm_chain input_variables: {llm_chain_variables}"
                )
        return values

    def combine_docs(
        self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
    ) -> Tuple[str, dict]:
        """Combine documents in a map rerank manner.

        Combine by mapping first chain over all documents, then reranking the results.
        """
        results = self.llm_chain.apply_and_parse(
            # FYI - this is parallelized and so it is fast.
            [{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs],
            callbacks=callbacks,
        )
        return self._process_results(docs, results)

    async def acombine_docs(
        self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
    ) -> Tuple[str, dict]:
        """Combine documents in a map rerank manner.

        Combine by mapping first chain over all documents, then reranking the results.
        """
        results = await self.llm_chain.aapply_and_parse(
            # FYI - this is parallelized and so it is fast.
            [{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs],
            callbacks=callbacks,
        )
        return self._process_results(docs, results)

    def _process_results(
        self,
        docs: List[Document],
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_rerank.html
9fcb0e00f25f-3
    def _process_results(
        self,
        docs: List[Document],
        results: Sequence[Union[str, List[str], Dict[str, str]]],
    ) -> Tuple[str, dict]:
        typed_results = cast(List[dict], results)
        sorted_res = sorted(
            zip(typed_results, docs), key=lambda x: -int(x[0][self.rank_key])
        )
        output, document = sorted_res[0]
        extra_info = {}
        if self.metadata_keys is not None:
            for key in self.metadata_keys:
                extra_info[key] = document.metadata[key]
        if self.return_intermediate_steps:
            extra_info["intermediate_steps"] = results
        return output[self.answer_key], extra_info

    @property
    def _chain_type(self) -> str:
        return "map_rerank_documents_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_rerank.html
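A construction sketch (added, not from the original page): the llm_chain's prompt must carry a RegexParser exposing both the rank key and the answer key. The regex, prompt wording, and key names are illustrative assumptions and would need prompt tuning in practice.

.. code-block:: python

    from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
    from langchain.chains.llm import LLMChain
    from langchain.llms import OpenAI
    from langchain.output_parsers.regex import RegexParser
    from langchain.prompts import PromptTemplate

    parser = RegexParser(
        regex=r"Answer: (.*?)\nScore: (\d+)",
        output_keys=["answer", "score"],
    )
    prompt = PromptTemplate(
        template=(
            "Answer the question using only the context, then rate your "
            "confidence from 0 to 100.\n"
            "Context: {context}\nQuestion: {question}\n"
            "Answer: <answer>\nScore: <score>"
        ),
        input_variables=["context", "question"],
        output_parser=parser,
    )
    chain = MapRerankDocumentsChain(
        llm_chain=LLMChain(llm=OpenAI(temperature=0), prompt=prompt),
        document_variable_name="context",
        rank_key="score",
        answer_key="answer",
    )
    # best_answer = chain.run(input_documents=docs, question="...")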
a02be9c1074f-0
Source code for langchain.chains.combine_documents.base
"""Base interface for chains combining documents."""
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple

from pydantic import Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.docstore.document import Document
from langchain.prompts.base import BasePromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter


def format_document(doc: Document, prompt: BasePromptTemplate) -> str:
    """Format a document into a string based on a prompt template."""
    base_info = {"page_content": doc.page_content}
    base_info.update(doc.metadata)
    missing_metadata = set(prompt.input_variables).difference(base_info)
    if len(missing_metadata) > 0:
        required_metadata = [
            iv for iv in prompt.input_variables if iv != "page_content"
        ]
        raise ValueError(
            f"Document prompt requires documents to have metadata variables: "
            f"{required_metadata}. Received document with missing metadata: "
            f"{list(missing_metadata)}."
        )
    document_info = {k: base_info[k] for k in prompt.input_variables}
    return prompt.format(**document_info)


class BaseCombineDocumentsChain(Chain, ABC):
    """Base interface for chains combining documents."""

    input_key: str = "input_documents"  #: :meta private:
    output_key: str = "output_text"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/base.html
a02be9c1074f-1
""" return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]: """Return the prompt length given the documents passed in. Returns None if the method does not depend on the prompt length. """ return None @abstractmethod def combine_docs(self, docs: List[Document], **kwargs: Any) -> Tuple[str, dict]: """Combine documents into a single string.""" @abstractmethod async def acombine_docs( self, docs: List[Document], **kwargs: Any ) -> Tuple[str, dict]: """Combine documents into a single string asynchronously.""" def _call( self, inputs: Dict[str, List[Document]], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() docs = inputs[self.input_key] # Other keys are assumed to be needed for LLM prediction other_keys = {k: v for k, v in inputs.items() if k != self.input_key} output, extra_return_dict = self.combine_docs( docs, callbacks=_run_manager.get_child(), **other_keys ) extra_return_dict[self.output_key] = output return extra_return_dict async def _acall( self, inputs: Dict[str, List[Document]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, str]:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/base.html
a02be9c1074f-2
    ) -> Dict[str, str]:
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        docs = inputs[self.input_key]
        # Other keys are assumed to be needed for LLM prediction
        other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
        output, extra_return_dict = await self.acombine_docs(
            docs, callbacks=_run_manager.get_child(), **other_keys
        )
        extra_return_dict[self.output_key] = output
        return extra_return_dict


class AnalyzeDocumentChain(Chain):
    """Chain that splits a document, then analyzes it in pieces."""

    input_key: str = "input_document"  #: :meta private:
    text_splitter: TextSplitter = Field(default_factory=RecursiveCharacterTextSplitter)
    combine_docs_chain: BaseCombineDocumentsChain

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return output key.

        :meta private:
        """
        return self.combine_docs_chain.output_keys

    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        document = inputs[self.input_key]
        docs = self.text_splitter.create_documents([document])
        # Other keys are assumed to be needed for LLM prediction
        other_keys: Dict = {k: v for k, v in inputs.items() if k != self.input_key}
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/base.html
a02be9c1074f-3
        other_keys[self.combine_docs_chain.input_key] = docs
        return self.combine_docs_chain(
            other_keys, return_only_outputs=True, callbacks=_run_manager.get_child()
        )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/base.html
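A short usage sketch for AnalyzeDocumentChain (added, not from the original page); it assumes a summarization combine-docs chain and a `long_text` string supplied by the caller:

.. code-block:: python

    from langchain.chains import AnalyzeDocumentChain
    from langchain.chains.summarize import load_summarize_chain
    from langchain.llms import OpenAI

    summary_chain = load_summarize_chain(OpenAI(temperature=0), chain_type="map_reduce")
    # Splits the raw text with the default RecursiveCharacterTextSplitter,
    # then hands the pieces to the combine-docs chain.
    summarize_document_chain = AnalyzeDocumentChain(combine_docs_chain=summary_chain)
    # summary = summarize_document_chain.run(long_text)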
734c5fe808aa-0
Source code for langchain.chains.combine_documents.map_reduce
"""Combining documents by mapping a chain over them first, then combining results."""
from __future__ import annotations

from typing import Any, Callable, Dict, List, Optional, Protocol, Tuple

from pydantic import Extra, root_validator

from langchain.callbacks.manager import Callbacks
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document


class CombineDocsProtocol(Protocol):
    """Interface for the combine_docs method."""

    def __call__(self, docs: List[Document], **kwargs: Any) -> str:
        """Interface for the combine_docs method."""


def _split_list_of_docs(
    docs: List[Document], length_func: Callable, token_max: int, **kwargs: Any
) -> List[List[Document]]:
    new_result_doc_list = []
    _sub_result_docs = []
    for doc in docs:
        _sub_result_docs.append(doc)
        _num_tokens = length_func(_sub_result_docs, **kwargs)
        if _num_tokens > token_max:
            if len(_sub_result_docs) == 1:
                raise ValueError(
                    "A single document was longer than the context length,"
                    " we cannot handle this."
                )
            if len(_sub_result_docs) == 2:
                raise ValueError(
                    "A single document was so long it could not be combined "
                    "with another document, we cannot handle this."
                )
            new_result_doc_list.append(_sub_result_docs[:-1])
            _sub_result_docs = _sub_result_docs[-1:]
    new_result_doc_list.append(_sub_result_docs)
    return new_result_doc_list


def _collapse_docs(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_reduce.html
734c5fe808aa-1
    return new_result_doc_list


def _collapse_docs(
    docs: List[Document],
    combine_document_func: CombineDocsProtocol,
    **kwargs: Any,
) -> Document:
    result = combine_document_func(docs, **kwargs)
    combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()}
    for doc in docs[1:]:
        for k, v in doc.metadata.items():
            if k in combined_metadata:
                combined_metadata[k] += f", {v}"
            else:
                combined_metadata[k] = str(v)
    return Document(page_content=result, metadata=combined_metadata)


class MapReduceDocumentsChain(BaseCombineDocumentsChain):
    """Combining documents by mapping a chain over them, then combining results."""

    llm_chain: LLMChain
    """Chain to apply to each document individually."""
    combine_document_chain: BaseCombineDocumentsChain
    """Chain to use to combine results of applying llm_chain to documents."""
    collapse_document_chain: Optional[BaseCombineDocumentsChain] = None
    """Chain to use to collapse intermediary results if needed.
    If None, will use the combine_document_chain."""
    document_variable_name: str
    """The variable name in the llm_chain to put the documents in.
    If only one variable in the llm_chain, this need not be provided."""
    return_intermediate_steps: bool = False
    """Return the results of the map steps in the output."""

    @property
    def output_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        _output_keys = super().output_keys
        if self.return_intermediate_steps:
            _output_keys = _output_keys + ["intermediate_steps"]
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_reduce.html
734c5fe808aa-2
            _output_keys = _output_keys + ["intermediate_steps"]
        return _output_keys

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator(pre=True)
    def get_return_intermediate_steps(cls, values: Dict) -> Dict:
        """For backwards compatibility."""
        if "return_map_steps" in values:
            values["return_intermediate_steps"] = values["return_map_steps"]
            del values["return_map_steps"]
        return values

    @root_validator(pre=True)
    def get_default_document_variable_name(cls, values: Dict) -> Dict:
        """Get default document variable name, if not provided."""
        if "document_variable_name" not in values:
            llm_chain_variables = values["llm_chain"].prompt.input_variables
            if len(llm_chain_variables) == 1:
                values["document_variable_name"] = llm_chain_variables[0]
            else:
                raise ValueError(
                    "document_variable_name must be provided if there are "
                    "multiple llm_chain input_variables"
                )
        else:
            llm_chain_variables = values["llm_chain"].prompt.input_variables
            if values["document_variable_name"] not in llm_chain_variables:
                raise ValueError(
                    f"document_variable_name {values['document_variable_name']} was "
                    f"not found in llm_chain input_variables: {llm_chain_variables}"
                )
        return values

    @property
    def _collapse_chain(self) -> BaseCombineDocumentsChain:
        if self.collapse_document_chain is not None:
            return self.collapse_document_chain
        else:
            return self.combine_document_chain

    def combine_docs(
        self,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_reduce.html
734c5fe808aa-3
            return self.combine_document_chain

    def combine_docs(
        self,
        docs: List[Document],
        token_max: int = 3000,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Tuple[str, dict]:
        """Combine documents in a map reduce manner.

        Combine by mapping first chain over all documents, then reducing the results.
        This reducing can be done recursively if needed (if there are many documents).
        """
        results = self.llm_chain.apply(
            # FYI - this is parallelized and so it is fast.
            [{self.document_variable_name: d.page_content, **kwargs} for d in docs],
            callbacks=callbacks,
        )
        return self._process_results(
            results, docs, token_max, callbacks=callbacks, **kwargs
        )

    async def acombine_docs(
        self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
    ) -> Tuple[str, dict]:
        """Combine documents in a map reduce manner.

        Combine by mapping first chain over all documents, then reducing the results.
        This reducing can be done recursively if needed (if there are many documents).
        """
        results = await self.llm_chain.aapply(
            # FYI - this is parallelized and so it is fast.
            [{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs],
            callbacks=callbacks,
        )
        return await self._aprocess_results(
            results, docs, callbacks=callbacks, **kwargs
        )

    def _process_results_common(
        self,
        results: List[Dict],
        docs: List[Document],
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_reduce.html
734c5fe808aa-4
        self,
        results: List[Dict],
        docs: List[Document],
        token_max: int = 3000,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Tuple[List[Document], dict]:
        question_result_key = self.llm_chain.output_key
        result_docs = [
            Document(page_content=r[question_result_key], metadata=docs[i].metadata)
            # This uses metadata from the docs, and the textual results from `results`
            for i, r in enumerate(results)
        ]
        length_func = self.combine_document_chain.prompt_length
        num_tokens = length_func(result_docs, **kwargs)

        def _collapse_docs_func(docs: List[Document], **kwargs: Any) -> str:
            return self._collapse_chain.run(
                input_documents=docs, callbacks=callbacks, **kwargs
            )

        while num_tokens is not None and num_tokens > token_max:
            new_result_doc_list = _split_list_of_docs(
                result_docs, length_func, token_max, **kwargs
            )
            result_docs = []
            for docs in new_result_doc_list:
                new_doc = _collapse_docs(docs, _collapse_docs_func, **kwargs)
                result_docs.append(new_doc)
            num_tokens = length_func(result_docs, **kwargs)
        if self.return_intermediate_steps:
            _results = [r[self.llm_chain.output_key] for r in results]
            extra_return_dict = {"intermediate_steps": _results}
        else:
            extra_return_dict = {}
        return result_docs, extra_return_dict

    def _process_results(
        self,
        results: List[Dict],
        docs: List[Document],
        token_max: int = 3000,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_reduce.html
734c5fe808aa-5
        docs: List[Document],
        token_max: int = 3000,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Tuple[str, dict]:
        result_docs, extra_return_dict = self._process_results_common(
            results, docs, token_max, callbacks=callbacks, **kwargs
        )
        output = self.combine_document_chain.run(
            input_documents=result_docs, callbacks=callbacks, **kwargs
        )
        return output, extra_return_dict

    async def _aprocess_results(
        self,
        results: List[Dict],
        docs: List[Document],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Tuple[str, dict]:
        result_docs, extra_return_dict = self._process_results_common(
            results, docs, callbacks=callbacks, **kwargs
        )
        output = await self.combine_document_chain.arun(
            input_documents=result_docs, callbacks=callbacks, **kwargs
        )
        return output, extra_return_dict

    @property
    def _chain_type(self) -> str:
        return "map_reduce_documents_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/combine_documents/map_reduce.html
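A direct-construction sketch (added, not from the original page): one LLMChain maps over each document and a StuffDocumentsChain reduces the partial results. The prompt wording and variable names are illustrative assumptions.

.. code-block:: python

    from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
    from langchain.chains.combine_documents.stuff import StuffDocumentsChain
    from langchain.chains.llm import LLMChain
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate

    llm = OpenAI(temperature=0)
    # Map step: summarize each document on its own.
    map_chain = LLMChain(
        llm=llm, prompt=PromptTemplate.from_template("Summarize:\n\n{context}")
    )
    # Reduce step: stuff the partial summaries into one final prompt.
    reduce_chain = StuffDocumentsChain(
        llm_chain=LLMChain(
            llm=llm,
            prompt=PromptTemplate.from_template("Combine these summaries:\n\n{context}"),
        ),
        document_variable_name="context",
    )
    chain = MapReduceDocumentsChain(
        llm_chain=map_chain,
        combine_document_chain=reduce_chain,
        document_variable_name="context",
    )
    # summary = chain.run(input_documents=docs)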
19bccd66272b-0
Source code for langchain.chains.constitutional_ai.base
"""Chain for applying constitutional principles to the outputs of another chain."""
from typing import Any, Dict, List, Optional

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain.chains.constitutional_ai.principles import PRINCIPLES
from langchain.chains.constitutional_ai.prompts import CRITIQUE_PROMPT, REVISION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate


class ConstitutionalChain(Chain):
    """Chain for applying constitutional principles.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAI
            from langchain.chains import LLMChain, ConstitutionalChain
            from langchain.chains.constitutional_ai.models \
                import ConstitutionalPrinciple

            llm = OpenAI()

            qa_prompt = PromptTemplate(
                template="Q: {question} A:",
                input_variables=["question"],
            )
            qa_chain = LLMChain(llm=llm, prompt=qa_prompt)

            constitutional_chain = ConstitutionalChain.from_llm(
                llm=llm,
                chain=qa_chain,
                constitutional_principles=[
                    ConstitutionalPrinciple(
                        critique_request="Tell if this answer is good.",
                        revision_request="Give a better answer.",
                    )
                ],
            )

            constitutional_chain.run(question="What is the meaning of life?")
    """

    chain: LLMChain
    constitutional_principles: List[ConstitutionalPrinciple]
    critique_chain: LLMChain
https://api.python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/base.html
19bccd66272b-1
    critique_chain: LLMChain
    revision_chain: LLMChain
    return_intermediate_steps: bool = False

    @classmethod
    def get_principles(
        cls, names: Optional[List[str]] = None
    ) -> List[ConstitutionalPrinciple]:
        if names is None:
            return list(PRINCIPLES.values())
        else:
            return [PRINCIPLES[name] for name in names]

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        chain: LLMChain,
        critique_prompt: BasePromptTemplate = CRITIQUE_PROMPT,
        revision_prompt: BasePromptTemplate = REVISION_PROMPT,
        **kwargs: Any,
    ) -> "ConstitutionalChain":
        """Create a chain from an LLM."""
        critique_chain = LLMChain(llm=llm, prompt=critique_prompt)
        revision_chain = LLMChain(llm=llm, prompt=revision_prompt)
        return cls(
            chain=chain,
            critique_chain=critique_chain,
            revision_chain=revision_chain,
            **kwargs,
        )

    @property
    def input_keys(self) -> List[str]:
        """Defines the input keys."""
        return self.chain.input_keys

    @property
    def output_keys(self) -> List[str]:
        """Defines the output keys."""
        if self.return_intermediate_steps:
            return ["output", "critiques_and_revisions", "initial_output"]
        return ["output"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/base.html
19bccd66272b-2
    ) -> Dict[str, Any]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        response = self.chain.run(
            **inputs,
            callbacks=_run_manager.get_child("original"),
        )
        initial_response = response
        input_prompt = self.chain.prompt.format(**inputs)

        _run_manager.on_text(
            text="Initial response: " + response + "\n\n",
            verbose=self.verbose,
            color="yellow",
        )

        critiques_and_revisions = []
        for constitutional_principle in self.constitutional_principles:
            # Do critique
            raw_critique = self.critique_chain.run(
                input_prompt=input_prompt,
                output_from_model=response,
                critique_request=constitutional_principle.critique_request,
                callbacks=_run_manager.get_child("critique"),
            )
            critique = self._parse_critique(
                output_string=raw_critique,
            ).strip()

            # if the critique contains "No critique needed", then we're done
            # in this case, initial_output is the same as output,
            # but we'll keep it for consistency
            if "no critique needed" in critique.lower():
                critiques_and_revisions.append((critique, ""))
                continue

            # Do revision
            revision = self.revision_chain.run(
                input_prompt=input_prompt,
                output_from_model=response,
                critique_request=constitutional_principle.critique_request,
                critique=critique,
                revision_request=constitutional_principle.revision_request,
                callbacks=_run_manager.get_child("revision"),
            ).strip()
            response = revision
            critiques_and_revisions.append((critique, revision))

            _run_manager.on_text(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/base.html
19bccd66272b-3
            _run_manager.on_text(
                text=f"Applying {constitutional_principle.name}..." + "\n\n",
                verbose=self.verbose,
                color="green",
            )

            _run_manager.on_text(
                text="Critique: " + critique + "\n\n",
                verbose=self.verbose,
                color="blue",
            )

            _run_manager.on_text(
                text="Updated response: " + revision + "\n\n",
                verbose=self.verbose,
                color="yellow",
            )

        final_output: Dict[str, Any] = {"output": response}
        if self.return_intermediate_steps:
            final_output["initial_output"] = initial_response
            final_output["critiques_and_revisions"] = critiques_and_revisions

        return final_output

    @staticmethod
    def _parse_critique(output_string: str) -> str:
        if "Revision request:" not in output_string:
            return output_string
        output_string = output_string.split("Revision request:")[0]
        if "\n\n" in output_string:
            output_string = output_string.split("\n\n")[0]
        return output_string
https://api.python.langchain.com/en/latest/_modules/langchain/chains/constitutional_ai/base.html
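Continuing the class docstring example (reusing its llm and qa_chain), the sketch below (added, not from the original page) looks up a built-in principle by name and requests intermediate steps; the principle name "illegal" and the question are illustrative assumptions that depend on the installed principle registry:

.. code-block:: python

    # Built-in principles can be looked up by name instead of being written by hand.
    principles = ConstitutionalChain.get_principles(["illegal"])
    constitutional_chain = ConstitutionalChain.from_llm(
        llm=llm,
        chain=qa_chain,
        constitutional_principles=principles,
        return_intermediate_steps=True,
    )
    result = constitutional_chain({"question": "How do I pick a lock?"})
    # result["output"], result["initial_output"], result["critiques_and_revisions"]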
91ec7c5588d7-0
Source code for langchain.chains.pal.base
"""Implements Program-Aided Language Models.

As in https://arxiv.org/pdf/2211.10435.pdf.
"""
from __future__ import annotations

import warnings
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.pal.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain.chains.pal.math_prompt import MATH_PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.utilities import PythonREPL


class PALChain(Chain):
    """Implements Program-Aided Language Models."""

    llm_chain: LLMChain
    llm: Optional[BaseLanguageModel] = None
    """[Deprecated]"""
    prompt: BasePromptTemplate = MATH_PROMPT
    """[Deprecated]"""
    stop: str = "\n\n"
    get_answer_expr: str = "print(solution())"
    python_globals: Optional[Dict[str, Any]] = None
    python_locals: Optional[Dict[str, Any]] = None
    output_key: str = "result"  #: :meta private:
    return_intermediate_steps: bool = False

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator(pre=True)
    def raise_deprecation(cls, values: Dict) -> Dict:
        if "llm" in values:
            warnings.warn(
                "Directly instantiating an PALChain with an llm is deprecated. "
https://api.python.langchain.com/en/latest/_modules/langchain/chains/pal/base.html
91ec7c5588d7-1
"Directly instantiating an PALChain with an llm is deprecated. " "Please instantiate with llm_chain argument or using the one of " "the class method constructors from_math_prompt, " "from_colored_object_prompt." ) if "llm_chain" not in values and values["llm"] is not None: values["llm_chain"] = LLMChain(llm=values["llm"], prompt=MATH_PROMPT) return values @property def input_keys(self) -> List[str]: """Return the singular input key. :meta private: """ return self.prompt.input_variables @property def output_keys(self) -> List[str]: """Return the singular output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, "intermediate_steps"] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() code = self.llm_chain.predict( stop=[self.stop], callbacks=_run_manager.get_child(), **inputs ) _run_manager.on_text(code, color="green", end="\n", verbose=self.verbose) repl = PythonREPL(_globals=self.python_globals, _locals=self.python_locals) res = repl.run(code + f"\n{self.get_answer_expr}") output = {self.output_key: res.strip()} if self.return_intermediate_steps: output["intermediate_steps"] = code
https://api.python.langchain.com/en/latest/_modules/langchain/chains/pal/base.html
91ec7c5588d7-2
        if self.return_intermediate_steps:
            output["intermediate_steps"] = code
        return output

    @classmethod
    def from_math_prompt(cls, llm: BaseLanguageModel, **kwargs: Any) -> PALChain:
        """Load PAL from math prompt."""
        llm_chain = LLMChain(llm=llm, prompt=MATH_PROMPT)
        return cls(
            llm_chain=llm_chain,
            stop="\n\n",
            get_answer_expr="print(solution())",
            **kwargs,
        )

    @classmethod
    def from_colored_object_prompt(
        cls, llm: BaseLanguageModel, **kwargs: Any
    ) -> PALChain:
        """Load PAL from colored object prompt."""
        llm_chain = LLMChain(llm=llm, prompt=COLORED_OBJECT_PROMPT)
        return cls(
            llm_chain=llm_chain,
            stop="\n\n\n",
            get_answer_expr="print(answer)",
            **kwargs,
        )

    @property
    def _chain_type(self) -> str:
        return "pal_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/pal/base.html
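A short usage sketch (added, not from the original page); the model settings and word problem are illustrative, and note the chain executes model-generated Python locally via PythonREPL:

.. code-block:: python

    from langchain.chains import PALChain
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0, max_tokens=512)
    pal_chain = PALChain.from_math_prompt(llm, verbose=True)
    question = (
        "Jan has three times the number of pets as Marcia. "
        "Marcia has two more pets than Cindy. "
        "If Cindy has four pets, how many total pets do the three have?"
    )
    pal_chain.run(question)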
b6a6fe0ce24b-0
Source code for langchain.chains.retrieval_qa.base
"""Chain for question-answering against a vector database."""
from __future__ import annotations

import warnings
from abc import abstractmethod
from typing import Any, Dict, List, Optional

from pydantic import Extra, Field, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
from langchain.prompts import PromptTemplate
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.base import VectorStore


class BaseRetrievalQA(Chain):
    combine_documents_chain: BaseCombineDocumentsChain
    """Chain to use to combine the documents."""
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:
    return_source_documents: bool = False
    """Return the source documents."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True
        allow_population_by_field_name = True

    @property
    def input_keys(self) -> List[str]:
        """Return the input keys.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return the output keys.
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
b6a6fe0ce24b-1
    def output_keys(self) -> List[str]:
        """Return the output keys.

        :meta private:
        """
        _output_keys = [self.output_key]
        if self.return_source_documents:
            _output_keys = _output_keys + ["source_documents"]
        return _output_keys

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: Optional[PromptTemplate] = None,
        **kwargs: Any,
    ) -> BaseRetrievalQA:
        """Initialize from LLM."""
        _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
        llm_chain = LLMChain(llm=llm, prompt=_prompt)
        document_prompt = PromptTemplate(
            input_variables=["page_content"], template="Context:\n{page_content}"
        )
        combine_documents_chain = StuffDocumentsChain(
            llm_chain=llm_chain,
            document_variable_name="context",
            document_prompt=document_prompt,
        )

        return cls(combine_documents_chain=combine_documents_chain, **kwargs)

    @classmethod
    def from_chain_type(
        cls,
        llm: BaseLanguageModel,
        chain_type: str = "stuff",
        chain_type_kwargs: Optional[dict] = None,
        **kwargs: Any,
    ) -> BaseRetrievalQA:
        """Load chain from chain type."""
        _chain_type_kwargs = chain_type_kwargs or {}
        combine_documents_chain = load_qa_chain(
            llm, chain_type=chain_type, **_chain_type_kwargs
        )
        return cls(combine_documents_chain=combine_documents_chain, **kwargs)

    @abstractmethod
    def _get_docs(self, question: str) -> List[Document]:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
b6a6fe0ce24b-2
def _get_docs(self, question: str) -> List[Document]: """Get documents to do question answering over.""" def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run get_relevant_text and llm on input query. If chain has 'return_source_documents' as 'True', returns the retrieved documents as well under the key 'source_documents'. Example: .. code-block:: python res = indexqa({'query': 'This is my query'}) answer, docs = res['result'], res['source_documents'] """ _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] docs = self._get_docs(question) answer = self.combine_documents_chain.run( input_documents=docs, question=question, callbacks=_run_manager.get_child() ) if self.return_source_documents: return {self.output_key: answer, "source_documents": docs} else: return {self.output_key: answer} @abstractmethod async def _aget_docs(self, question: str) -> List[Document]: """Get documents to do question answering over.""" async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run get_relevant_text and llm on input query. If chain has 'return_source_documents' as 'True', returns the retrieved documents as well under the key 'source_documents'. Example:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
b6a6fe0ce24b-3
the retrieved documents as well under the key 'source_documents'. Example: .. code-block:: python res = indexqa({'query': 'This is my query'}) answer, docs = res['result'], res['source_documents'] """ _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() question = inputs[self.input_key] docs = await self._aget_docs(question) answer = await self.combine_documents_chain.arun( input_documents=docs, question=question, callbacks=_run_manager.get_child() ) if self.return_source_documents: return {self.output_key: answer, "source_documents": docs} else: return {self.output_key: answer} [docs]class RetrievalQA(BaseRetrievalQA): """Chain for question-answering against an index. Example: .. code-block:: python from langchain.llms import OpenAI from langchain.chains import RetrievalQA from langchain.vectorstores import FAISS from langchain.vectorstores.base import VectorStoreRetriever retriever = VectorStoreRetriever(vectorstore=FAISS(...)) retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=retriever) """ retriever: BaseRetriever = Field(exclude=True) def _get_docs(self, question: str) -> List[Document]: return self.retriever.get_relevant_documents(question) async def _aget_docs(self, question: str) -> List[Document]: return await self.retriever.aget_relevant_documents(question) @property def _chain_type(self) -> str: """Return the chain type."""
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
b6a6fe0ce24b-4
def _chain_type(self) -> str: """Return the chain type.""" return "retrieval_qa" [docs]class VectorDBQA(BaseRetrievalQA): """Chain for question-answering against a vector database.""" vectorstore: VectorStore = Field(exclude=True, alias="vectorstore") """Vector Database to connect to.""" k: int = 4 """Number of documents to query for.""" search_type: str = "similarity" """Search type to use over vectorstore. `similarity` or `mmr`.""" search_kwargs: Dict[str, Any] = Field(default_factory=dict) """Extra search args.""" @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: warnings.warn( "`VectorDBQA` is deprecated - " "please use `from langchain.chains import RetrievalQA`" ) return values @root_validator() def validate_search_type(cls, values: Dict) -> Dict: """Validate search type.""" if "search_type" in values: search_type = values["search_type"] if search_type not in ("similarity", "mmr"): raise ValueError(f"search_type of {search_type} not allowed.") return values def _get_docs(self, question: str) -> List[Document]: if self.search_type == "similarity": docs = self.vectorstore.similarity_search( question, k=self.k, **self.search_kwargs ) elif self.search_type == "mmr": docs = self.vectorstore.max_marginal_relevance_search( question, k=self.k, **self.search_kwargs )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
b6a6fe0ce24b-5
question, k=self.k, **self.search_kwargs ) else: raise ValueError(f"search_type of {self.search_type} not allowed.") return docs async def _aget_docs(self, question: str) -> List[Document]: raise NotImplementedError("VectorDBQA does not support async") @property def _chain_type(self) -> str: """Return the chain type.""" return "vector_db_qa"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
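A minimal usage sketch for ``RetrievalQA`` above, assuming a small list of already-chunked strings (``texts``) and an OpenAI API key in the environment; the texts and query are illustrative.

.. code-block:: python

    from langchain.chains import RetrievalQA
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.llms import OpenAI
    from langchain.vectorstores import FAISS

    texts = [
        "LangChain composes LLM calls into chains.",
        "FAISS stores dense vectors for similarity search.",
    ]
    vectorstore = FAISS.from_texts(texts, OpenAIEmbeddings())

    # "stuff" simply concatenates the retrieved documents into the prompt.
    qa = RetrievalQA.from_chain_type(
        llm=OpenAI(),
        chain_type="stuff",
        retriever=vectorstore.as_retriever(),
        return_source_documents=True,
    )
    result = qa({"query": "What does FAISS store?"})
    answer, sources = result["result"], result["source_documents"]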
fbcb60bc62e2-0
Source code for langchain.chains.llm_summarization_checker.base """Chain for summarization with self-verification.""" from __future__ import annotations import warnings from pathlib import Path from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.chains.sequential import SequentialChain from langchain.prompts.prompt import PromptTemplate PROMPTS_DIR = Path(__file__).parent / "prompts" CREATE_ASSERTIONS_PROMPT = PromptTemplate.from_file( PROMPTS_DIR / "create_facts.txt", ["summary"] ) CHECK_ASSERTIONS_PROMPT = PromptTemplate.from_file( PROMPTS_DIR / "check_facts.txt", ["assertions"] ) REVISED_SUMMARY_PROMPT = PromptTemplate.from_file( PROMPTS_DIR / "revise_summary.txt", ["checked_assertions", "summary"] ) ARE_ALL_TRUE_PROMPT = PromptTemplate.from_file( PROMPTS_DIR / "are_all_true_prompt.txt", ["checked_assertions"] ) def _load_sequential_chain( llm: BaseLanguageModel, create_assertions_prompt: PromptTemplate, check_assertions_prompt: PromptTemplate, revised_summary_prompt: PromptTemplate, are_all_true_prompt: PromptTemplate, verbose: bool = False, ) -> SequentialChain: chain = SequentialChain( chains=[ LLMChain( llm=llm, prompt=create_assertions_prompt, output_key="assertions", verbose=verbose, ), LLMChain(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html
fbcb60bc62e2-1
verbose=verbose, ), LLMChain( llm=llm, prompt=check_assertions_prompt, output_key="checked_assertions", verbose=verbose, ), LLMChain( llm=llm, prompt=revised_summary_prompt, output_key="revised_summary", verbose=verbose, ), LLMChain( llm=llm, output_key="all_true", prompt=are_all_true_prompt, verbose=verbose, ), ], input_variables=["summary"], output_variables=["all_true", "revised_summary"], verbose=verbose, ) return chain [docs]class LLMSummarizationCheckerChain(Chain): """Chain for summarization with self-verification. Example: .. code-block:: python from langchain import OpenAI, LLMSummarizationCheckerChain llm = OpenAI(temperature=0.0) checker_chain = LLMSummarizationCheckerChain.from_llm(llm) """ sequential_chain: SequentialChain llm: Optional[BaseLanguageModel] = None """[Deprecated] LLM wrapper to use.""" create_assertions_prompt: PromptTemplate = CREATE_ASSERTIONS_PROMPT """[Deprecated]""" check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT """[Deprecated]""" revised_summary_prompt: PromptTemplate = REVISED_SUMMARY_PROMPT """[Deprecated]""" are_all_true_prompt: PromptTemplate = ARE_ALL_TRUE_PROMPT """[Deprecated]""" input_key: str = "query" #: :meta private:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html
fbcb60bc62e2-2
input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: max_checks: int = 2 """Maximum number of times to check the assertions. Default to double-checking.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( "Directly instantiating an LLMSummarizationCheckerChain with an llm is " "deprecated. Please instantiate with" " sequential_chain argument or using the from_llm class method." ) if "sequential_chain" not in values and values["llm"] is not None: values["sequential_chain"] = _load_sequential_chain( values["llm"], values.get("create_assertions_prompt", CREATE_ASSERTIONS_PROMPT), values.get("check_assertions_prompt", CHECK_ASSERTIONS_PROMPT), values.get("revised_summary_prompt", REVISED_SUMMARY_PROMPT), values.get("are_all_true_prompt", ARE_ALL_TRUE_PROMPT), verbose=values.get("verbose", False), ) return values @property def input_keys(self) -> List[str]: """Return the singular input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return the singular output key. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, Any],
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html
fbcb60bc62e2-3
def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() all_true = False count = 0 output = None original_input = inputs[self.input_key] chain_input = original_input while not all_true and count < self.max_checks: output = self.sequential_chain( {"summary": chain_input}, callbacks=_run_manager.get_child() ) count += 1 if output["all_true"].strip() == "True": break if self.verbose: print(output["revised_summary"]) chain_input = output["revised_summary"] if not output: raise ValueError("No output from chain") return {self.output_key: output["revised_summary"].strip()} @property def _chain_type(self) -> str: return "llm_summarization_checker_chain" [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, create_assertions_prompt: PromptTemplate = CREATE_ASSERTIONS_PROMPT, check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT, revised_summary_prompt: PromptTemplate = REVISED_SUMMARY_PROMPT, are_all_true_prompt: PromptTemplate = ARE_ALL_TRUE_PROMPT, verbose: bool = False, **kwargs: Any, ) -> LLMSummarizationCheckerChain: chain = _load_sequential_chain( llm, create_assertions_prompt, check_assertions_prompt, revised_summary_prompt,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html
fbcb60bc62e2-4
create_assertions_prompt, check_assertions_prompt, revised_summary_prompt, are_all_true_prompt, verbose=verbose, ) return cls(sequential_chain=chain, verbose=verbose, **kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/llm_summarization_checker/base.html
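A minimal usage sketch for the summarization checker above; the summary text is illustrative and an OpenAI API key is assumed to be configured.

.. code-block:: python

    from langchain.chains import LLMSummarizationCheckerChain
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0.0)
    checker = LLMSummarizationCheckerChain.from_llm(llm, max_checks=2, verbose=True)

    # The chain extracts assertions from the summary, checks them,
    # and rewrites the summary until all assertions are judged true
    # or max_checks is reached.
    checker.run(
        "Mammoths lived alongside early humans, and the last of them "
        "died out roughly 4,000 years ago on Wrangel Island."
    )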
10ca50994435-0
Source code for langchain.chains.api.base """Chain that makes API calls and summarizes the responses to answer a question.""" from __future__ import annotations from typing import Any, Dict, List, Optional from pydantic import Field, root_validator from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.prompts import BasePromptTemplate from langchain.requests import TextRequestsWrapper [docs]class APIChain(Chain): """Chain that makes API calls and summarizes the responses to answer a question.""" api_request_chain: LLMChain api_answer_chain: LLMChain requests_wrapper: TextRequestsWrapper = Field(exclude=True) api_docs: str question_key: str = "question" #: :meta private: output_key: str = "output" #: :meta private: @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.question_key] @property def output_keys(self) -> List[str]: """Expect output key. :meta private: """ return [self.output_key] @root_validator(pre=True) def validate_api_request_prompt(cls, values: Dict) -> Dict: """Check that api request prompt expects the right variables.""" input_vars = values["api_request_chain"].prompt.input_variables expected_vars = {"question", "api_docs"} if set(input_vars) != expected_vars:
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/base.html
10ca50994435-1
if set(input_vars) != expected_vars: raise ValueError( f"Input variables should be {expected_vars}, got {input_vars}" ) return values @root_validator(pre=True) def validate_api_answer_prompt(cls, values: Dict) -> Dict: """Check that api answer prompt expects the right variables.""" input_vars = values["api_answer_chain"].prompt.input_variables expected_vars = {"question", "api_docs", "api_url", "api_response"} if set(input_vars) != expected_vars: raise ValueError( f"Input variables should be {expected_vars}, got {input_vars}" ) return values def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.question_key] api_url = self.api_request_chain.predict( question=question, api_docs=self.api_docs, callbacks=_run_manager.get_child(), ) _run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose) api_url = api_url.strip() api_response = self.requests_wrapper.get(api_url) _run_manager.on_text( api_response, color="yellow", end="\n", verbose=self.verbose ) answer = self.api_answer_chain.predict( question=question, api_docs=self.api_docs, api_url=api_url, api_response=api_response, callbacks=_run_manager.get_child(), ) return {self.output_key: answer} async def _acall(
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/base.html
10ca50994435-2
return {self.output_key: answer} async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() question = inputs[self.question_key] api_url = await self.api_request_chain.apredict( question=question, api_docs=self.api_docs, callbacks=_run_manager.get_child(), ) await _run_manager.on_text( api_url, color="green", end="\n", verbose=self.verbose ) api_url = api_url.strip() api_response = await self.requests_wrapper.aget(api_url) await _run_manager.on_text( api_response, color="yellow", end="\n", verbose=self.verbose ) answer = await self.api_answer_chain.apredict( question=question, api_docs=self.api_docs, api_url=api_url, api_response=api_response, callbacks=_run_manager.get_child(), ) return {self.output_key: answer} [docs] @classmethod def from_llm_and_api_docs( cls, llm: BaseLanguageModel, api_docs: str, headers: Optional[dict] = None, api_url_prompt: BasePromptTemplate = API_URL_PROMPT, api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT, **kwargs: Any, ) -> APIChain: """Load chain from just an LLM and the api docs.""" get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/base.html
10ca50994435-3
requests_wrapper = TextRequestsWrapper(headers=headers) get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt) return cls( api_request_chain=get_request_chain, api_answer_chain=get_answer_chain, requests_wrapper=requests_wrapper, api_docs=api_docs, **kwargs, ) @property def _chain_type(self) -> str: return "api_chain"
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/base.html
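A minimal usage sketch for ``APIChain`` above, using the Open-Meteo API documentation shipped with LangChain; the question is illustrative and an OpenAI API key is assumed.

.. code-block:: python

    from langchain.chains import APIChain
    from langchain.chains.api import open_meteo_docs
    from langchain.llms import OpenAI

    llm = OpenAI(temperature=0)
    chain = APIChain.from_llm_and_api_docs(
        llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True
    )
    # The first LLM call builds the request URL from the docs,
    # the requests wrapper performs the GET, and a second call
    # summarizes the response into an answer.
    chain.run("What is the current temperature in Munich, Germany in Celsius?")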
10e24b43e84b-0
Source code for langchain.chains.api.openapi.chain """Chain that makes API calls and summarizes the responses to answer a question.""" from __future__ import annotations import json from typing import Any, Dict, List, NamedTuple, Optional, cast from pydantic import BaseModel, Field from requests import Response from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks from langchain.chains.api.openapi.requests_chain import APIRequesterChain from langchain.chains.api.openapi.response_chain import APIResponderChain from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.requests import Requests from langchain.tools.openapi.utils.api_models import APIOperation class _ParamMapping(NamedTuple): """Mapping from parameter name to parameter value.""" query_params: List[str] body_params: List[str] path_params: List[str] [docs]class OpenAPIEndpointChain(Chain, BaseModel): """Chain interacts with an OpenAPI endpoint using natural language.""" api_request_chain: LLMChain api_response_chain: Optional[LLMChain] api_operation: APIOperation requests: Requests = Field(exclude=True, default_factory=Requests) param_mapping: _ParamMapping = Field(alias="param_mapping") return_intermediate_steps: bool = False instructions_key: str = "instructions" #: :meta private: output_key: str = "output" #: :meta private: max_text_length: Optional[int] = Field(ge=0) #: :meta private: @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.instructions_key] @property
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
10e24b43e84b-1
""" return [self.instructions_key] @property def output_keys(self) -> List[str]: """Expect output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, "intermediate_steps"] def _construct_path(self, args: Dict[str, str]) -> str: """Construct the path from the deserialized input.""" path = self.api_operation.base_url + self.api_operation.path for param in self.param_mapping.path_params: path = path.replace(f"{{{param}}}", str(args.pop(param, ""))) return path def _extract_query_params(self, args: Dict[str, str]) -> Dict[str, str]: """Extract the query params from the deserialized input.""" query_params = {} for param in self.param_mapping.query_params: if param in args: query_params[param] = args.pop(param) return query_params def _extract_body_params(self, args: Dict[str, str]) -> Optional[Dict[str, str]]: """Extract the request body params from the deserialized input.""" body_params = None if self.param_mapping.body_params: body_params = {} for param in self.param_mapping.body_params: if param in args: body_params[param] = args.pop(param) return body_params [docs] def deserialize_json_input(self, serialized_args: str) -> dict: """Use the serialized typescript dictionary. Resolve the path, query params dict, and optional requestBody dict. """ args: dict = json.loads(serialized_args) path = self._construct_path(args)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
10e24b43e84b-2
path = self._construct_path(args) body_params = self._extract_body_params(args) query_params = self._extract_query_params(args) return { "url": path, "data": body_params, "params": query_params, } def _get_output(self, output: str, intermediate_steps: dict) -> dict: """Return the output from the API call.""" if self.return_intermediate_steps: return { self.output_key: output, "intermediate_steps": intermediate_steps, } else: return {self.output_key: output} def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() intermediate_steps = {} instructions = inputs[self.instructions_key] instructions = instructions[: self.max_text_length] _api_arguments = self.api_request_chain.predict_and_parse( instructions=instructions, callbacks=_run_manager.get_child() ) api_arguments = cast(str, _api_arguments) intermediate_steps["request_args"] = api_arguments _run_manager.on_text( api_arguments, color="green", end="\n", verbose=self.verbose ) if api_arguments.startswith("ERROR"): return self._get_output(api_arguments, intermediate_steps) elif api_arguments.startswith("MESSAGE:"): return self._get_output( api_arguments[len("MESSAGE:") :], intermediate_steps ) try: request_args = self.deserialize_json_input(api_arguments) method = getattr(self.requests, self.api_operation.method.value)
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
10e24b43e84b-3
method = getattr(self.requests, self.api_operation.method.value) api_response: Response = method(**request_args) if api_response.status_code != 200: method_str = str(self.api_operation.method.value) response_text = ( f"{api_response.status_code}: {api_response.reason}" + f"\nFor {method_str.upper()} {request_args['url']}\n" + f"Called with args: {request_args['params']}" ) else: response_text = api_response.text except Exception as e: response_text = f"Error with message {str(e)}" response_text = response_text[: self.max_text_length] intermediate_steps["response_text"] = response_text _run_manager.on_text( response_text, color="blue", end="\n", verbose=self.verbose ) if self.api_response_chain is not None: _answer = self.api_response_chain.predict_and_parse( response=response_text, instructions=instructions, callbacks=_run_manager.get_child(), ) answer = cast(str, _answer) _run_manager.on_text(answer, color="yellow", end="\n", verbose=self.verbose) return self._get_output(answer, intermediate_steps) else: return self._get_output(response_text, intermediate_steps) [docs] @classmethod def from_url_and_method( cls, spec_url: str, path: str, method: str, llm: BaseLanguageModel, requests: Optional[Requests] = None, return_intermediate_steps: bool = False, **kwargs: Any # TODO: Handle async ) -> "OpenAPIEndpointChain":
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
10e24b43e84b-4
# TODO: Handle async ) -> "OpenAPIEndpointChain": """Create an OpenAPIEndpoint from a spec at the specified url.""" operation = APIOperation.from_openapi_url(spec_url, path, method) return cls.from_api_operation( operation, requests=requests, llm=llm, return_intermediate_steps=return_intermediate_steps, **kwargs, ) [docs] @classmethod def from_api_operation( cls, operation: APIOperation, llm: BaseLanguageModel, requests: Optional[Requests] = None, verbose: bool = False, return_intermediate_steps: bool = False, raw_response: bool = False, callbacks: Callbacks = None, **kwargs: Any # TODO: Handle async ) -> "OpenAPIEndpointChain": """Create an OpenAPIEndpointChain from an operation and a spec.""" param_mapping = _ParamMapping( query_params=operation.query_params, body_params=operation.body_params, path_params=operation.path_params, ) requests_chain = APIRequesterChain.from_llm_and_typescript( llm, typescript_definition=operation.to_typescript(), verbose=verbose, callbacks=callbacks, ) if raw_response: response_chain = None else: response_chain = APIResponderChain.from_llm( llm, verbose=verbose, callbacks=callbacks ) _requests = requests or Requests() return cls( api_request_chain=requests_chain, api_response_chain=response_chain, api_operation=operation, requests=_requests, param_mapping=param_mapping,
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
10e24b43e84b-5
requests=_requests, param_mapping=param_mapping, verbose=verbose, return_intermediate_steps=return_intermediate_steps, callbacks=callbacks, **kwargs, )
https://api.python.langchain.com/en/latest/_modules/langchain/chains/api/openapi/chain.html
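A minimal usage sketch for ``OpenAPIEndpointChain`` above. The spec URL, path, and method are hypothetical placeholders; substitute a real OpenAPI spec and operation, and note this assumes an OpenAI API key is configured.

.. code-block:: python

    from langchain.chains import OpenAPIEndpointChain
    from langchain.llms import OpenAI

    chain = OpenAPIEndpointChain.from_url_and_method(
        spec_url="https://example.com/openapi.json",  # hypothetical spec URL
        path="/search",                               # hypothetical path
        method="get",
        llm=OpenAI(),
        return_intermediate_steps=True,
    )
    output = chain("Find products related to trail running shoes")
    # The request arguments and raw response text are kept alongside the answer.
    output["output"], output["intermediate_steps"]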
1427fd0581e1-0
Source code for langchain.chat_models.google_palm """Wrapper around Google's PaLM Chat API.""" from __future__ import annotations import logging from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional from pydantic import BaseModel, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, HumanMessage, SystemMessage, ) from langchain.utils import get_from_dict_or_env if TYPE_CHECKING: import google.generativeai as genai logger = logging.getLogger(__name__) class ChatGooglePalmError(Exception): """Error raised when there is an issue with the Google PaLM API.""" pass def _truncate_at_stop_tokens( text: str, stop: Optional[List[str]], ) -> str: """Truncates text at the earliest stop token found.""" if stop is None: return text for stop_token in stop: stop_token_idx = text.find(stop_token) if stop_token_idx != -1: text = text[:stop_token_idx] return text def _response_to_result( response: genai.types.ChatResponse, stop: Optional[List[str]], ) -> ChatResult: """Converts a PaLM API response into a LangChain ChatResult.""" if not response.candidates:
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html
1427fd0581e1-1
if not response.candidates: raise ChatGooglePalmError("ChatResponse must have at least one candidate.") generations: List[ChatGeneration] = [] for candidate in response.candidates: author = candidate.get("author") if author is None: raise ChatGooglePalmError(f"ChatResponse must have an author: {candidate}") content = _truncate_at_stop_tokens(candidate.get("content", ""), stop) if content is None: raise ChatGooglePalmError(f"ChatResponse must have a content: {candidate}") if author == "ai": generations.append( ChatGeneration(text=content, message=AIMessage(content=content)) ) elif author == "human": generations.append( ChatGeneration( text=content, message=HumanMessage(content=content), ) ) else: generations.append( ChatGeneration( text=content, message=ChatMessage(role=author, content=content), ) ) return ChatResult(generations=generations) def _messages_to_prompt_dict( input_messages: List[BaseMessage], ) -> genai.types.MessagePromptDict: """Converts a list of LangChain messages into a PaLM API MessagePrompt structure.""" import google.generativeai as genai context: str = "" examples: List[genai.types.MessageDict] = [] messages: List[genai.types.MessageDict] = [] remaining = list(enumerate(input_messages)) while remaining: index, input_message = remaining.pop(0) if isinstance(input_message, SystemMessage): if index != 0:
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html
1427fd0581e1-2
if isinstance(input_message, SystemMessage): if index != 0: raise ChatGooglePalmError("System message must be first input message.") context = input_message.content elif isinstance(input_message, HumanMessage) and input_message.example: if messages: raise ChatGooglePalmError( "Message examples must come before other messages." ) _, next_input_message = remaining.pop(0) if isinstance(next_input_message, AIMessage) and next_input_message.example: examples.extend( [ genai.types.MessageDict( author="human", content=input_message.content ), genai.types.MessageDict( author="ai", content=next_input_message.content ), ] ) else: raise ChatGooglePalmError( "Human example message must be immediately followed by an " " AI example response." ) elif isinstance(input_message, AIMessage) and input_message.example: raise ChatGooglePalmError( "AI example message must be immediately preceded by a Human " "example message." ) elif isinstance(input_message, AIMessage): messages.append( genai.types.MessageDict(author="ai", content=input_message.content) ) elif isinstance(input_message, HumanMessage): messages.append( genai.types.MessageDict(author="human", content=input_message.content) ) elif isinstance(input_message, ChatMessage): messages.append( genai.types.MessageDict( author=input_message.role, content=input_message.content ) ) else: raise ChatGooglePalmError( "Messages without an explicit role not supported by PaLM API." )
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html
1427fd0581e1-3
"Messages without an explicit role not supported by PaLM API." ) return genai.types.MessagePromptDict( context=context, examples=examples, messages=messages, ) def _create_retry_decorator() -> Callable[[Any], Any]: """Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions""" import google.api_core.exceptions multiplier = 2 min_seconds = 1 max_seconds = 60 max_retries = 10 return retry( reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(google.api_core.exceptions.ResourceExhausted) | retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable) | retry_if_exception_type(google.api_core.exceptions.GoogleAPIError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def chat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator def _chat_with_retry(**kwargs: Any) -> Any: return llm.client.chat(**kwargs) return _chat_with_retry(**kwargs) async def achat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator async def _achat_with_retry(**kwargs: Any) -> Any:
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html
1427fd0581e1-4
async def _achat_with_retry(**kwargs: Any) -> Any: # Use the google.generativeai async chat API return await llm.client.chat_async(**kwargs) return await _achat_with_retry(**kwargs) [docs]class ChatGooglePalm(BaseChatModel, BaseModel): """Wrapper around Google's PaLM Chat API. To use you must have the google.generativeai Python package installed and either: 1. The ``GOOGLE_API_KEY`` environment variable set with your API key, or 2. Pass your API key using the google_api_key kwarg to the ChatGooglePalm constructor. Example: .. code-block:: python from langchain.chat_models import ChatGooglePalm chat = ChatGooglePalm() """ client: Any #: :meta private: model_name: str = "models/chat-bison-001" """Model name to use.""" google_api_key: Optional[str] = None temperature: Optional[float] = None """Run inference with this temperature. Must be in the closed interval [0.0, 1.0].""" top_p: Optional[float] = None """Decode using nucleus sampling: consider the smallest set of tokens whose probability sum is at least top_p. Must be in the closed interval [0.0, 1.0].""" top_k: Optional[int] = None """Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive.""" n: int = 1 """Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated."""
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html
1427fd0581e1-5
not return the full n completions if duplicates are generated.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate api key, python package exists, temperature, top_p, and top_k.""" google_api_key = get_from_dict_or_env( values, "google_api_key", "GOOGLE_API_KEY" ) try: import google.generativeai as genai genai.configure(api_key=google_api_key) except ImportError: raise ChatGooglePalmError( "Could not import google.generativeai python package. " "Please install it with `pip install google-generativeai`" ) values["client"] = genai if values["temperature"] is not None and not 0 <= values["temperature"] <= 1: raise ValueError("temperature must be in the range [0.0, 1.0]") if values["top_p"] is not None and not 0 <= values["top_p"] <= 1: raise ValueError("top_p must be in the range [0.0, 1.0]") if values["top_k"] is not None and values["top_k"] <= 0: raise ValueError("top_k must be positive") return values def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: prompt = _messages_to_prompt_dict(messages) response: genai.types.ChatResponse = chat_with_retry( self, model=self.model_name, prompt=prompt,
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html
1427fd0581e1-6
self, model=self.model_name, prompt=prompt, temperature=self.temperature, top_p=self.top_p, top_k=self.top_k, candidate_count=self.n, **kwargs, ) return _response_to_result(response, stop) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: prompt = _messages_to_prompt_dict(messages) response: genai.types.ChatResponse = await achat_with_retry( self, model=self.model_name, prompt=prompt, temperature=self.temperature, top_p=self.top_p, top_k=self.top_k, candidate_count=self.n, ) return _response_to_result(response, stop) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model_name": self.model_name, "temperature": self.temperature, "top_p": self.top_p, "top_k": self.top_k, "n": self.n, } @property def _llm_type(self) -> str: return "google-palm-chat"
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html
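A minimal usage sketch for ``ChatGooglePalm`` above, assuming ``GOOGLE_API_KEY`` is set in the environment and the ``google-generativeai`` package is installed; the messages are illustrative.

.. code-block:: python

    from langchain.chat_models import ChatGooglePalm
    from langchain.schema import HumanMessage, SystemMessage

    chat = ChatGooglePalm(temperature=0.3)
    messages = [
        SystemMessage(content="You are a concise assistant."),
        HumanMessage(content="Say hello in French."),
    ]
    # Returns an AIMessage built from the first PaLM candidate.
    chat(messages)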
4a5533d8c84c-0
Source code for langchain.chat_models.azure_openai """Azure OpenAI chat wrapper.""" from __future__ import annotations import logging from typing import Any, Dict, Mapping from pydantic import root_validator from langchain.chat_models.openai import ChatOpenAI from langchain.schema import ChatResult from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class AzureChatOpenAI(ChatOpenAI): """Wrapper around Azure OpenAI Chat Completion API. To use this class you must have a deployed model on Azure OpenAI. Use `deployment_name` in the constructor to refer to the "Model deployment name" in the Azure portal. In addition, you should have the ``openai`` python package installed, and the following environment variables set or passed to the constructor in lower case: - ``OPENAI_API_TYPE`` (default: ``azure``) - ``OPENAI_API_KEY`` - ``OPENAI_API_BASE`` - ``OPENAI_API_VERSION`` - ``OPENAI_PROXY`` For example, if you have `gpt-35-turbo` deployed, with the deployment name `35-turbo-dev`, the constructor should look like: .. code-block:: python AzureChatOpenAI( deployment_name="35-turbo-dev", openai_api_version="2023-03-15-preview", ) Be aware the API version may change. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. """ deployment_name: str = "" openai_api_type: str = "azure" openai_api_base: str = ""
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html
4a5533d8c84c-1
openai_api_base: str = "" openai_api_version: str = "" openai_api_key: str = "" openai_organization: str = "" openai_proxy: str = "" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY", ) values["openai_api_base"] = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", ) values["openai_api_version"] = get_from_dict_or_env( values, "openai_api_version", "OPENAI_API_VERSION", ) values["openai_api_type"] = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) try: import openai except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError(
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html
4a5533d8c84c-2
except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." ) if values["n"] < 1: raise ValueError("n must be at least 1.") if values["n"] > 1 and values["streaming"]: raise ValueError("n must be 1 when streaming.") return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return { **super()._default_params, "engine": self.deployment_name, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**self._default_params} @property def _invocation_params(self) -> Mapping[str, Any]: openai_creds = { "api_type": self.openai_api_type, "api_version": self.openai_api_version, } return {**openai_creds, **super()._invocation_params} @property def _llm_type(self) -> str: return "azure-openai-chat" def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: for res in response["choices"]: if res.get("finish_reason", None) == "content_filter": raise ValueError( "Azure has not provided the response due to a content" " filter being triggered" ) return super()._create_chat_result(response)
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html
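A minimal usage sketch for ``AzureChatOpenAI`` above. The deployment name, endpoint, and key are placeholders for your own Azure OpenAI resource, not values from the module.

.. code-block:: python

    from langchain.chat_models import AzureChatOpenAI
    from langchain.schema import HumanMessage

    chat = AzureChatOpenAI(
        deployment_name="35-turbo-dev",                          # your deployment name
        openai_api_base="https://<resource>.openai.azure.com/",  # your endpoint
        openai_api_version="2023-03-15-preview",
        openai_api_key="<azure-openai-key>",
    )
    chat([HumanMessage(content="Translate 'good morning' to Spanish.")])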
b3f7380b9c5e-0
Source code for langchain.chat_models.fake """Fake ChatModel for testing purposes.""" from typing import Any, List, Mapping, Optional from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.chat_models.base import SimpleChatModel from langchain.schema import BaseMessage [docs]class FakeListChatModel(SimpleChatModel): """Fake ChatModel for testing purposes.""" responses: List i: int = 0 @property def _llm_type(self) -> str: return "fake-list-chat-model" def _call( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Return the next canned response from ``responses``, advancing the internal index.""" response = self.responses[self.i] self.i += 1 return response @property def _identifying_params(self) -> Mapping[str, Any]: return {"responses": self.responses}
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/fake.html
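A minimal usage sketch for ``FakeListChatModel`` above, useful in unit tests that should not call a real provider; the canned responses are illustrative.

.. code-block:: python

    from langchain.chat_models.fake import FakeListChatModel
    from langchain.schema import HumanMessage

    fake_chat = FakeListChatModel(responses=["first canned reply", "second canned reply"])
    fake_chat([HumanMessage(content="anything")])  # AIMessage(content="first canned reply")
    fake_chat([HumanMessage(content="anything")])  # AIMessage(content="second canned reply")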
78621d32da49-0
Source code for langchain.chat_models.promptlayer_openai """PromptLayer wrapper.""" import datetime from typing import Any, List, Mapping, Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models import ChatOpenAI from langchain.schema import BaseMessage, ChatResult [docs]class PromptLayerChatOpenAI(ChatOpenAI): """Wrapper around OpenAI Chat large language models and PromptLayer. To use, you should have the ``openai`` and ``promptlayer`` python packages installed, and the environment variables ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` set with your OpenAI API key and PromptLayer API key respectively. All parameters that can be passed to the OpenAI LLM can also be passed here. PromptLayerChatOpenAI adds two optional parameters: ``pl_tags``: List of strings to tag the request with. ``return_pl_id``: If True, the PromptLayer request ID will be returned in the ``generation_info`` field of the ``Generation`` object. Example: .. code-block:: python from langchain.chat_models import PromptLayerChatOpenAI openai = PromptLayerChatOpenAI(model_name="gpt-3.5-turbo") """ pl_tags: Optional[List[str]] return_pl_id: Optional[bool] = False def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any ) -> ChatResult:
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html
78621d32da49-1
**kwargs: Any ) -> ChatResult: """Call ChatOpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request request_start_time = datetime.datetime.now().timestamp() generated_responses = super()._generate(messages, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() message_dicts, params = super()._create_message_dicts(messages, stop) for i, generation in enumerate(generated_responses.generations): response_dict, params = super()._create_message_dicts( [generation.message], stop ) params = {**params, **kwargs} pl_request_id = promptlayer_api_request( "langchain.PromptLayerChatOpenAI", "langchain", message_dicts, params, self.pl_tags, response_dict, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any ) -> ChatResult: """Call ChatOpenAI agenerate and then call PromptLayer to log.""" from promptlayer.utils import get_api_key, promptlayer_api_request_async request_start_time = datetime.datetime.now().timestamp()
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html
78621d32da49-2
request_start_time = datetime.datetime.now().timestamp() generated_responses = await super()._agenerate(messages, stop, run_manager) request_end_time = datetime.datetime.now().timestamp() message_dicts, params = super()._create_message_dicts(messages, stop) for i, generation in enumerate(generated_responses.generations): response_dict, params = super()._create_message_dicts( [generation.message], stop ) params = {**params, **kwargs} pl_request_id = await promptlayer_api_request_async( "langchain.PromptLayerChatOpenAI.async", "langchain", message_dicts, params, self.pl_tags, response_dict, request_start_time, request_end_time, get_api_key(), return_pl_id=self.return_pl_id, ) if self.return_pl_id: if generation.generation_info is None or not isinstance( generation.generation_info, dict ): generation.generation_info = {} generation.generation_info["pl_request_id"] = pl_request_id return generated_responses @property def _llm_type(self) -> str: return "promptlayer-openai-chat" @property def _identifying_params(self) -> Mapping[str, Any]: return { **super()._identifying_params, "pl_tags": self.pl_tags, "return_pl_id": self.return_pl_id, }
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html
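A minimal usage sketch for ``PromptLayerChatOpenAI`` above, assuming both ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` are set; the tag name and question are illustrative.

.. code-block:: python

    from langchain.chat_models import PromptLayerChatOpenAI
    from langchain.schema import HumanMessage

    chat = PromptLayerChatOpenAI(pl_tags=["docs-example"], return_pl_id=True)
    result = chat.generate([[HumanMessage(content="What is 2 + 2?")]])
    # With return_pl_id=True, each generation's generation_info carries
    # the PromptLayer request id under "pl_request_id".
    result.generations[0][0].generation_info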
c3077bb50a24-0
Source code for langchain.chat_models.vertexai """Wrapper around Google VertexAI chat-based models.""" from dataclasses import dataclass, field from typing import Any, Dict, List, Optional from pydantic import root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.llms.vertexai import _VertexAICommon, is_codey_model from langchain.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatResult, HumanMessage, SystemMessage, ) from langchain.utilities.vertexai import raise_vertex_import_error @dataclass class _MessagePair: """InputOutputTextPair represents a pair of input and output texts.""" question: HumanMessage answer: AIMessage @dataclass class _ChatHistory: """Represents a chat history: an optional system message plus a list of question/answer pairs.""" history: List[_MessagePair] = field(default_factory=list) system_message: Optional[SystemMessage] = None def _parse_chat_history(history: List[BaseMessage]) -> _ChatHistory: """Parse a sequence of messages into history. A sequence should be either (SystemMessage, HumanMessage, AIMessage, HumanMessage, AIMessage, ...) or (HumanMessage, AIMessage, HumanMessage, AIMessage, ...). CodeChat does not support SystemMessage. Args: history: The list of messages to re-create the history of the chat. Returns: A parsed chat history. Raises: ValueError: If a sequence of message is odd, or a human message is not followed
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html
c3077bb50a24-1
ValueError: If a sequence of message is odd, or a human message is not followed by a message from AI (e.g., Human, Human, AI or AI, AI, Human). """ if not history: return _ChatHistory() first_message = history[0] system_message = first_message if isinstance(first_message, SystemMessage) else None chat_history = _ChatHistory(system_message=system_message) messages_left = history[1:] if system_message else history if len(messages_left) % 2 != 0: raise ValueError( f"Amount of messages in history should be even, got {len(messages_left)}!" ) for question, answer in zip(messages_left[::2], messages_left[1::2]): if not isinstance(question, HumanMessage) or not isinstance(answer, AIMessage): raise ValueError( "A human message should follow a bot one, " f"got {question.type}, {answer.type}." ) chat_history.history.append(_MessagePair(question=question, answer=answer)) return chat_history [docs]class ChatVertexAI(_VertexAICommon, BaseChatModel): """Wrapper around Vertex AI large language models.""" model_name: str = "chat-bison" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" cls._try_init_vertexai(values) try: if is_codey_model(values["model_name"]): from vertexai.preview.language_models import CodeChatModel values["client"] = CodeChatModel.from_pretrained(values["model_name"]) else: from vertexai.preview.language_models import ChatModel
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html
c3077bb50a24-2
else: from vertexai.preview.language_models import ChatModel values["client"] = ChatModel.from_pretrained(values["model_name"]) except ImportError: raise_vertex_import_error() return values def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: """Generate next turn in the conversation. Args: messages: The history of the conversation as a list of messages. Code chat does not support context. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human. """ if not messages: raise ValueError( "You should provide at least one message to start the chat!" ) question = messages[-1] if not isinstance(question, HumanMessage): raise ValueError( f"Last message in the list should be from human, got {question.type}." ) history = _parse_chat_history(messages[:-1]) context = history.system_message.content if history.system_message else None params = {**self._default_params, **kwargs} if not self.is_codey_model: chat = self.client.start_chat(context=context, **params) else: chat = self.client.start_chat(**params) for pair in history.history: chat._history.append((pair.question.content, pair.answer.content))
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html
c3077bb50a24-3
chat._history.append((pair.question.content, pair.answer.content)) response = chat.send_message(question.content, **params) text = self._enforce_stop_words(response.text, stop) return ChatResult(generations=[ChatGeneration(message=AIMessage(content=text))]) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: raise NotImplementedError( """Vertex AI doesn't support async requests at the moment.""" )
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html
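A minimal usage sketch for ``ChatVertexAI`` above, assuming Google Cloud credentials and the Vertex AI SDK are already configured; the messages are illustrative.

.. code-block:: python

    from langchain.chat_models import ChatVertexAI
    from langchain.schema import HumanMessage, SystemMessage

    chat = ChatVertexAI(model_name="chat-bison")
    messages = [
        SystemMessage(content="Answer in one short sentence."),
        HumanMessage(content="What is Vertex AI?"),
    ]
    # The system message becomes the chat context; earlier human/AI
    # message pairs would be replayed as history before the final question.
    chat(messages)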
0b60ce3f167b-0
Source code for langchain.chat_models.anthropic from typing import Any, Dict, List, Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.llms.anthropic import _AnthropicCommon from langchain.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, HumanMessage, SystemMessage, ) [docs]class ChatAnthropic(BaseChatModel, _AnthropicCommon): r"""Wrapper around Anthropic's large language model. To use, you should have the ``anthropic`` python package installed, and the environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.chat_models import ChatAnthropic model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key") """ @property def _llm_type(self) -> str: """Return type of chat model.""" return "anthropic-chat" @property def lc_serializable(self) -> bool: return True def _convert_one_message_to_text(self, message: BaseMessage) -> str: if isinstance(message, ChatMessage): message_text = f"\n\n{message.role.capitalize()}: {message.content}" elif isinstance(message, HumanMessage): message_text = f"{self.HUMAN_PROMPT} {message.content}" elif isinstance(message, AIMessage): message_text = f"{self.AI_PROMPT} {message.content}"
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html
0b60ce3f167b-1
message_text = f"{self.AI_PROMPT} {message.content}" elif isinstance(message, SystemMessage): message_text = f"{self.HUMAN_PROMPT} <admin>{message.content}</admin>" else: raise ValueError(f"Got unknown type {message}") return message_text def _convert_messages_to_text(self, messages: List[BaseMessage]) -> str: """Format a list of strings into a single string with necessary newlines. Args: messages (List[BaseMessage]): List of BaseMessage to combine. Returns: str: Combined string with necessary newlines. """ return "".join( self._convert_one_message_to_text(message) for message in messages ) def _convert_messages_to_prompt(self, messages: List[BaseMessage]) -> str: """Format a list of messages into a full prompt for the Anthropic model Args: messages (List[BaseMessage]): List of BaseMessage to combine. Returns: str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags. """ messages = messages.copy() # don't mutate the original list if not self.AI_PROMPT: raise NameError("Please ensure the anthropic package is loaded") if not isinstance(messages[-1], AIMessage): messages.append(AIMessage(content="")) text = self._convert_messages_to_text(messages) return ( text.rstrip() ) # trim off the trailing ' ' that might come from the "Assistant: " def _generate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None,
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html
0b60ce3f167b-2
run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: prompt = self._convert_messages_to_prompt(messages) params: Dict[str, Any] = {"prompt": prompt, **self._default_params, **kwargs} if stop: params["stop_sequences"] = stop if self.streaming: completion = "" stream_resp = self.client.completion_stream(**params) for data in stream_resp: delta = data["completion"][len(completion) :] completion = data["completion"] if run_manager: run_manager.on_llm_new_token( delta, ) else: response = self.client.completion(**params) completion = response["completion"] message = AIMessage(content=completion) return ChatResult(generations=[ChatGeneration(message=message)]) async def _agenerate( self, messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult: prompt = self._convert_messages_to_prompt(messages) params: Dict[str, Any] = {"prompt": prompt, **self._default_params, **kwargs} if stop: params["stop_sequences"] = stop if self.streaming: completion = "" stream_resp = await self.client.acompletion_stream(**params) async for data in stream_resp: delta = data["completion"][len(completion) :] completion = data["completion"] if run_manager: await run_manager.on_llm_new_token( delta, ) else:
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html
0b60ce3f167b-3
delta, ) else: response = await self.client.acompletion(**params) completion = response["completion"] message = AIMessage(content=completion) return ChatResult(generations=[ChatGeneration(message=message)]) [docs] def get_num_tokens(self, text: str) -> int: """Calculate number of tokens.""" if not self.count_tokens: raise NameError("Please ensure the anthropic package is loaded") return self.count_tokens(text)
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html
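A minimal usage sketch for ``ChatAnthropic`` above; the model name is a placeholder, and the API key may instead come from the ``ANTHROPIC_API_KEY`` environment variable.

.. code-block:: python

    from langchain.chat_models import ChatAnthropic
    from langchain.schema import HumanMessage

    chat = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key")
    # Messages are flattened into Anthropic's Human:/Assistant: prompt
    # format before the completion call.
    chat([HumanMessage(content="Summarize the plot of Hamlet in one sentence.")])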
4dc84ea39083-0
Source code for langchain.chat_models.openai """OpenAI chat wrapper.""" from __future__ import annotations import logging import sys from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Union, ) from pydantic import Field, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.chat_models.base import BaseChatModel from langchain.schema import ( AIMessage, BaseMessage, ChatGeneration, ChatMessage, ChatResult, FunctionMessage, HumanMessage, SystemMessage, ) from langchain.utils import get_from_dict_or_env if TYPE_CHECKING: import tiktoken logger = logging.getLogger(__name__) def _import_tiktoken() -> Any: try: import tiktoken except ImportError: raise ValueError( "Could not import tiktoken python package. " "This is needed in order to calculate get_token_ids. " "Please install it with `pip install tiktoken`." ) return tiktoken def _create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]: import openai min_seconds = 1 max_seconds = 60 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry(
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html
4dc84ea39083-1
return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) async def acompletion_with_retry(llm: ChatOpenAI, **kwargs: Any) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: role = _dict["role"] if role == "user": return HumanMessage(content=_dict["content"]) elif role == "assistant": content = _dict["content"] or "" # OpenAI returns None for tool invocations if _dict.get("function_call"): additional_kwargs = {"function_call": dict(_dict["function_call"])} else: additional_kwargs = {} return AIMessage(content=content, additional_kwargs=additional_kwargs) elif role == "system": return SystemMessage(content=_dict["content"])
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html
4dc84ea39083-2
elif role == "system": return SystemMessage(content=_dict["content"]) elif role == "function": return FunctionMessage(content=_dict["content"], name=_dict["name"]) else: return ChatMessage(content=_dict["content"], role=role) def _convert_message_to_dict(message: BaseMessage) -> dict: if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} if "function_call" in message.additional_kwargs: message_dict["function_call"] = message.additional_kwargs["function_call"] elif isinstance(message, SystemMessage): message_dict = {"role": "system", "content": message.content} elif isinstance(message, FunctionMessage): message_dict = { "role": "function", "content": message.content, "name": message.name, } else: raise ValueError(f"Got unknown type {message}") if "name" in message.additional_kwargs: message_dict["name"] = message.additional_kwargs["name"] return message_dict [docs]class ChatOpenAI(BaseChatModel): """Wrapper around OpenAI Chat large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python
    Example:
        .. code-block:: python

            from langchain.chat_models import ChatOpenAI
            openai = ChatOpenAI(model_name="gpt-3.5-turbo")
    """

    @property
    def lc_secrets(self) -> Dict[str, str]:
        return {"openai_api_key": "OPENAI_API_KEY"}

    @property
    def lc_serializable(self) -> bool:
        return True

    client: Any  #: :meta private:
    model_name: str = Field(default="gpt-3.5-turbo", alias="model")
    """Model name to use."""
    temperature: float = 0.7
    """What sampling temperature to use."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    openai_api_key: Optional[str] = None
    openai_api_base: Optional[str] = None
    """Base URL path for API requests, leave blank if not using a proxy or service
    emulator."""
    openai_organization: Optional[str] = None
    # to support explicit proxy for OpenAI
    openai_proxy: Optional[str] = None
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout for requests to OpenAI completion API. Default is 600 seconds."""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    streaming: bool = False
    """Whether to stream the results or not."""
    n: int = 1
    """Number of chat completions to generate for each prompt."""
    max_tokens: Optional[int] = None
    """Maximum number of tokens to generate."""
    tiktoken_model_name: Optional[str] = None
    """The model name to pass to tiktoken when using this class.

    Tiktoken is used to count the number of tokens in documents to constrain
    them to be under a certain limit. By default, when set to None, this will
    be the same as the model name. However, there are some cases where you may
    want to use this class with a model name not supported by tiktoken. This
    can include when using Azure embeddings or when using one of the many model
    providers that expose an OpenAI-like API but with different models. In
    those cases, in order to avoid erroring when tiktoken is called, you can
    specify a model name to use here."""

    class Config:
        """Configuration for this pydantic object."""

        allow_population_by_field_name = True

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = cls.all_required_field_names()
        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name in extra:
                raise ValueError(f"Found {field_name} supplied twice.")
            if field_name not in all_required_field_names:
                logger.warning(
                    f"""WARNING! {field_name} is not a default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
        if invalid_model_kwargs:
            raise ValueError(
                f"Parameters {invalid_model_kwargs} should be specified explicitly. "
                f"Instead they were passed in as part of `model_kwargs` parameter."
            )

        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["openai_api_key"] = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        values["openai_organization"] = get_from_dict_or_env(
            values,
            "openai_organization",
            "OPENAI_ORGANIZATION",
            default="",
        )
        values["openai_api_base"] = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
            default="",
        )
        values["openai_proxy"] = get_from_dict_or_env(
            values,
            "openai_proxy",
            "OPENAI_PROXY",
            default="",
        )
        try:
            import openai
        except ImportError:
            raise ValueError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        try:
            values["client"] = openai.ChatCompletion
        except AttributeError:
            raise ValueError(
                "`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." ) if values["n"] < 1: raise ValueError("n must be at least 1.") if values["n"] > 1 and values["streaming"]: raise ValueError("n must be 1 when streaming.") return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return { "model": self.model_name, "request_timeout": self.request_timeout, "max_tokens": self.max_tokens, "stream": self.streaming, "n": self.n, "temperature": self.temperature, **self.model_kwargs, } def _create_retry_decorator(self) -> Callable[[Any], Any]: import openai min_seconds = 1 max_seconds = 60 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(self.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), )
    [docs] def completion_with_retry(self, **kwargs: Any) -> Any:
        """Use tenacity to retry the completion call."""
        retry_decorator = self._create_retry_decorator()

        @retry_decorator
        def _completion_with_retry(**kwargs: Any) -> Any:
            return self.client.create(**kwargs)

        return _completion_with_retry(**kwargs)

    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
        overall_token_usage: dict = {}
        for output in llm_outputs:
            if output is None:
                # Happens in streaming
                continue
            token_usage = output["token_usage"]
            for k, v in token_usage.items():
                if k in overall_token_usage:
                    overall_token_usage[k] += v
                else:
                    overall_token_usage[k] = v
        return {"token_usage": overall_token_usage, "model_name": self.model_name}

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs}
        if self.streaming:
            inner_completion = ""
            role = "assistant"
            params["stream"] = True
            function_call: Optional[dict] = None
            for stream_resp in self.completion_with_retry(
                messages=message_dicts, **params
            ):
                role = stream_resp["choices"][0]["delta"].get("role", role)
                token = stream_resp["choices"][0]["delta"].get("content") or ""
                inner_completion += token
                _function_call = stream_resp["choices"][0]["delta"].get("function_call")
                if _function_call:
                    if function_call is None:
                        function_call = _function_call
                    else:
                        function_call["arguments"] += _function_call["arguments"]
                if run_manager:
                    run_manager.on_llm_new_token(token)
            message = _convert_dict_to_message(
                {
                    "content": inner_completion,
                    "role": role,
                    "function_call": function_call,
                }
            )
            return ChatResult(generations=[ChatGeneration(message=message)])
        response = self.completion_with_retry(messages=message_dicts, **params)
        return self._create_chat_result(response)

    def _create_message_dicts(
        self, messages: List[BaseMessage], stop: Optional[List[str]]
    ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
        params = dict(self._invocation_params)
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        message_dicts = [_convert_message_to_dict(m) for m in messages]
        return message_dicts, params

    def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
        generations = []
        for res in response["choices"]:
            message = _convert_dict_to_message(res["message"])
            gen = ChatGeneration(message=message)
            generations.append(gen)
        llm_output = {"token_usage": response["usage"], "model_name": self.model_name}
        return ChatResult(generations=generations, llm_output=llm_output)

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs}
        if self.streaming:
            inner_completion = ""
            role = "assistant"
            params["stream"] = True
            function_call: Optional[dict] = None
            async for stream_resp in await acompletion_with_retry(
                self, messages=message_dicts, **params
            ):
                role = stream_resp["choices"][0]["delta"].get("role", role)
                token = stream_resp["choices"][0]["delta"].get("content", "")
                inner_completion += token or ""
                _function_call = stream_resp["choices"][0]["delta"].get("function_call")
                if _function_call:
                    if function_call is None:
                        function_call = _function_call
                    else:
                        function_call["arguments"] += _function_call["arguments"]
                if run_manager:
                    await run_manager.on_llm_new_token(token)
            message = _convert_dict_to_message(
                {
                    "content": inner_completion,
                    "role": role,
                    "function_call": function_call,
                }
            )
            return ChatResult(generations=[ChatGeneration(message=message)])
        else:
            response = await acompletion_with_retry(
                self, messages=message_dicts, **params
            )
            return self._create_chat_result(response)

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_name": self.model_name}, **self._default_params}

    @property
    def _invocation_params(self) -> Mapping[str, Any]:
        """Get the parameters used to invoke the model."""
        openai_creds: Dict[str, Any] = {
            "api_key": self.openai_api_key,
            "api_base": self.openai_api_base,
            "organization": self.openai_organization,
            "model": self.model_name,
        }
        if self.openai_proxy:
            import openai

            openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}  # type: ignore[assignment]  # noqa: E501
        return {**openai_creds, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "openai-chat"

    def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]:
        tiktoken_ = _import_tiktoken()
        if self.tiktoken_model_name is not None:
            model = self.tiktoken_model_name
        else:
            model = self.model_name
            if model == "gpt-3.5-turbo":
                # gpt-3.5-turbo may change over time.
                # Returning num tokens assuming gpt-3.5-turbo-0301.
                model = "gpt-3.5-turbo-0301"
            elif model == "gpt-4":
                # gpt-4 may change over time.
                # Returning num tokens assuming gpt-4-0314.
                model = "gpt-4-0314"
        # Returns the number of tokens used by a list of messages.
        try:
            encoding = tiktoken_.encoding_for_model(model)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            encoding = tiktoken_.get_encoding(model)
        return model, encoding

    [docs] def get_token_ids(self, text: str) -> List[int]:
        """Get the tokens present in the text with tiktoken package."""
        # tiktoken NOT supported for Python 3.7 or below
        if sys.version_info[1] <= 7:
            return super().get_token_ids(text)
        _, encoding_model = self._get_encoding_model()
        return encoding_model.encode(text)

    [docs] def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
        """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.

        Official documentation: https://github.com/openai/openai-cookbook/blob/
        main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
        if sys.version_info[1] <= 7:
            return super().get_num_tokens_from_messages(messages)
        model, encoding = self._get_encoding_model()
        if model.startswith("gpt-3.5-turbo"):
            # every message follows <im_start>{role/name}\n{content}<im_end>\n
            tokens_per_message = 4
            # if there's a name, the role is omitted
            tokens_per_name = -1
        elif model.startswith("gpt-4"):
            tokens_per_message = 3
            tokens_per_name = 1
        else:
            raise NotImplementedError(
                f"get_num_tokens_from_messages() is not presently implemented "
                f"for model {model}."
                "See https://github.com/openai/openai-python/blob/main/chatml.md for "
                "information on how messages are converted to tokens."
            )
        num_tokens = 0
        messages_dict = [_convert_message_to_dict(m) for m in messages]
        for message in messages_dict:
            num_tokens += tokens_per_message
            for key, value in message.items():
                num_tokens += len(encoding.encode(value))
                if key == "name":
                    num_tokens += tokens_per_name
        # every reply is primed with <im_start>assistant
        num_tokens += 3
        return num_tokens
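A minimal usage sketch of the token counting shown above (not part of the module source). It assumes the ``openai`` and ``tiktoken`` packages are installed and ``OPENAI_API_KEY`` is set in the environment; each message costs its encoded role and content plus a fixed per-message overhead, and the assistant reply priming adds three more tokens.

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
messages = [
    SystemMessage(content="You are a terse assistant."),
    HumanMessage(content="Summarize tenacity's retry policy in one sentence."),
]
# Estimate the prompt size before sending a request: 4 overhead tokens per
# message plus the encoded role and content, then 3 tokens priming the reply.
print(chat.get_num_tokens_from_messages(messages))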
https://api.python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html
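A minimal streaming sketch (not part of the module source, assuming a valid ``OPENAI_API_KEY`` in the environment). With ``streaming=True`` the generation loop above emits each delta through ``on_llm_new_token``, which ``StreamingStdOutCallbackHandler`` prints as it arrives; the return value is still a single assembled ``AIMessage``.

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage

chat = ChatOpenAI(
    model_name="gpt-3.5-turbo",
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],
    temperature=0,
)
# Tokens are printed to stdout as they stream; the full reply is returned.
reply = chat([HumanMessage(content="Name three uses of a conversation buffer.")])
print(reply.content)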
Source code for langchain.memory.simple

from typing import Any, Dict, List

from langchain.schema import BaseMemory


[docs]class SimpleMemory(BaseMemory):
    """Simple memory for storing context or other bits of information that shouldn't
    ever change between prompts.
    """

    memories: Dict[str, Any] = dict()

    @property
    def memory_variables(self) -> List[str]:
        return list(self.memories.keys())

    [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        return self.memories

    [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Nothing should be saved or changed, my memory is set in stone."""
        pass

    [docs] def clear(self) -> None:
        """Nothing to clear, got a memory like a vault."""
        pass
https://api.python.langchain.com/en/latest/_modules/langchain/memory/simple.html
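A minimal usage sketch of SimpleMemory (not part of the module source): the values passed in are returned verbatim by ``load_memory_variables``, and ``save_context`` and ``clear`` are deliberate no-ops.

from langchain.memory import SimpleMemory

memory = SimpleMemory(memories={"customer_tier": "enterprise", "region": "EU"})
print(memory.memory_variables)           # ['customer_tier', 'region']
print(memory.load_memory_variables({}))  # {'customer_tier': 'enterprise', 'region': 'EU'}
memory.save_context({"input": "hi"}, {"output": "hello"})  # no-op by design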
Source code for langchain.memory.buffer

from typing import Any, Dict, List, Optional

from pydantic import root_validator

from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from langchain.memory.utils import get_prompt_input_key
from langchain.schema import get_buffer_string


[docs]class ConversationBufferMemory(BaseChatMemory):
    """Buffer for storing conversation memory."""

    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    memory_key: str = "history"  #: :meta private:

    @property
    def buffer(self) -> Any:
        """String buffer of memory."""
        if self.return_messages:
            return self.chat_memory.messages
        else:
            return get_buffer_string(
                self.chat_memory.messages,
                human_prefix=self.human_prefix,
                ai_prefix=self.ai_prefix,
            )

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""
        return {self.memory_key: self.buffer}


[docs]class ConversationStringBufferMemory(BaseMemory):
    """Buffer for storing conversation memory."""

    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    """Prefix to use for AI generated responses."""
    buffer: str = ""
    output_key: Optional[str] = None
    input_key: Optional[str] = None
    memory_key: str = "history"  #: :meta private:

    @root_validator()
    def validate_chains(cls, values: Dict) -> Dict:
        """Validate that return messages is not True."""
        if values.get("return_messages", False):
            raise ValueError(
                "return_messages must be False for ConversationStringBufferMemory"
            )
        return values

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return history buffer."""
        return {self.memory_key: self.buffer}

    [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        if self.input_key is None:
            prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
        else:
            prompt_input_key = self.input_key
        if self.output_key is None:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            output_key = list(outputs.keys())[0]
        else:
            output_key = self.output_key
        human = f"{self.human_prefix}: " + inputs[prompt_input_key]
        ai = f"{self.ai_prefix}: " + outputs[output_key]
        self.buffer += "\n" + "\n".join([human, ai])

    [docs] def clear(self) -> None:
        """Clear memory contents."""
        self.buffer = ""
https://api.python.langchain.com/en/latest/_modules/langchain/memory/buffer.html
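A minimal usage sketch of the two buffer variants (not part of the module source): ConversationBufferMemory stores chat messages and renders them as a "Human:"/"AI:" transcript by default, while ConversationStringBufferMemory appends directly to a single string buffer.

from langchain.memory import ConversationBufferMemory, ConversationStringBufferMemory

memory = ConversationBufferMemory()
memory.save_context({"input": "hi"}, {"output": "hello"})
print(memory.load_memory_variables({}))  # {'history': 'Human: hi\nAI: hello'}

string_memory = ConversationStringBufferMemory()
string_memory.save_context({"input": "hi"}, {"output": "hello"})
print(string_memory.buffer)  # '\nHuman: hi\nAI: hello'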