ZapierNLARunAction.__doc__ = (
    ZapierNLAWrapper.run.__doc__ + ZapierNLARunAction.__doc__  # type: ignore
)


# other useful actions
class ZapierNLAListActions(BaseTool):
    """
    Args:
        None
    """

    name = "ZapierNLA_list_actions"
    description = BASE_ZAPIER_TOOL_PROMPT + (
        "This tool returns a list of the user's exposed actions."
    )
    api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper)

    def _run(
        self,
        _: str = "",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Zapier NLA tool to return a list of all exposed user actions."""
        return self.api_wrapper.list_as_str()

    async def _arun(
        self,
        _: str = "",
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Zapier NLA tool to return a list of all exposed user actions."""
        return await self.api_wrapper.alist_as_str()


ZapierNLAListActions.__doc__ = (
    ZapierNLAWrapper.list.__doc__ + ZapierNLAListActions.__doc__  # type: ignore
)

https://api.python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
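A minimal usage sketch of ZapierNLAListActions (not from the page above): it assumes a Zapier NLA API key is available in the ZAPIER_NLA_API_KEY environment variable, which ZapierNLAWrapper reads at construction time.

from langchain.tools.zapier.tool import ZapierNLAListActions
from langchain.utilities.zapier import ZapierNLAWrapper

# ZapierNLAWrapper reads ZAPIER_NLA_API_KEY from the environment;
# the tool ignores its input, so an empty string is passed.
tool = ZapierNLAListActions(api_wrapper=ZapierNLAWrapper())
print(tool.run(""))  # human-readable list of the user's exposed actions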
Source code for langchain.tools.json.tool

# flake8: noqa
"""Tools for working with JSON specs."""
from __future__ import annotations

import json
import re
from pathlib import Path
from typing import Dict, List, Optional, Union

from pydantic import BaseModel

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool


def _parse_input(text: str) -> List[Union[str, int]]:
    """Parse input of the form data["key1"][0]["key2"] into a list of keys."""
    _res = re.findall(r"\[.*?]", text)
    # strip the brackets and quotes, convert to int if possible
    res = [i[1:-1].replace('"', "") for i in _res]
    res = [int(i) if i.isdigit() else i for i in res]
    return res


class JsonSpec(BaseModel):
    """Base class for JSON spec."""

    dict_: Dict
    max_value_length: int = 200

    @classmethod
    def from_file(cls, path: Path) -> JsonSpec:
        """Create a JsonSpec from a file."""
        if not path.exists():
            raise FileNotFoundError(f"File not found: {path}")
        dict_ = json.loads(path.read_text())
        return cls(dict_=dict_)

    def keys(self, text: str) -> str:
        """Return the keys of the dict at the given path.

        Args:
            text: Python representation of the path to the dict (e.g. data["key1"][0]["key2"]).
        """
        try:
            items = _parse_input(text)
            val = self.dict_
            for i in items:
                if i:
                    val = val[i]
            if not isinstance(val, dict):
                raise ValueError(
                    f"Value at path `{text}` is not a dict, get the value directly."
                )
            return str(list(val.keys()))
        except Exception as e:
            return repr(e)

    def value(self, text: str) -> str:
        """Return the value of the dict at the given path.

        Args:
            text: Python representation of the path to the dict (e.g. data["key1"][0]["key2"]).
        """
        try:
            items = _parse_input(text)
            val = self.dict_
            for i in items:
                val = val[i]
            if isinstance(val, dict) and len(str(val)) > self.max_value_length:
                return "Value is a large dictionary, should explore its keys directly"
            str_val = str(val)
            if len(str_val) > self.max_value_length:
                str_val = str_val[: self.max_value_length] + "..."
            return str_val
        except Exception as e:
            return repr(e)


class JsonListKeysTool(BaseTool):
    """Tool for listing keys in a JSON spec."""

    name = "json_spec_list_keys"
    description = """
    Can be used to list all keys at a given path.
    Before calling this you should be SURE that the path to this exists.
    The input is a text representation of the path to the dict in Python syntax (e.g. data["key1"][0]["key2"]).
    """
    spec: JsonSpec

    def _run(
        self,
        tool_input: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        return self.spec.keys(tool_input)

    async def _arun(
        self,
        tool_input: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        return self._run(tool_input)


class JsonGetValueTool(BaseTool):
    """Tool for getting a value in a JSON spec."""

    name = "json_spec_get_value"
    description = """
    Can be used to see value in string format at a given path.
    Before calling this you should be SURE that the path to this exists.
    The input is a text representation of the path to the dict in Python syntax (e.g. data["key1"][0]["key2"]).
    """
    spec: JsonSpec

    def _run(
        self,
        tool_input: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        return self.spec.value(tool_input)

    async def _arun(
        self,
        tool_input: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        return self._run(tool_input)

https://api.python.langchain.com/en/latest/_modules/langchain/tools/json/tool.html
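A minimal usage sketch of JsonSpec with the two tools above; the sample data dict is made up for illustration:

from langchain.tools.json.tool import JsonGetValueTool, JsonListKeysTool, JsonSpec

# Hypothetical data to explore.
data = {"people": [{"name": "Ada", "age": 36}]}

spec = JsonSpec(dict_=data, max_value_length=200)
list_keys = JsonListKeysTool(spec=spec)
get_value = JsonGetValueTool(spec=spec)

print(list_keys.run("data"))                       # ['people']
print(get_value.run('data["people"][0]["name"]'))  # Ada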
Source code for langchain.tools.google_serper.tool

"""Tool for the Serper.dev Google Search API."""
from typing import Optional

from pydantic.fields import Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.google_serper import GoogleSerperAPIWrapper


class GoogleSerperRun(BaseTool):
    """Tool that adds the capability to query the Serper.dev Google search API."""

    name = "google_serper"
    description = (
        "A low-cost Google Search API. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query."
    )
    api_wrapper: GoogleSerperAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return str(self.api_wrapper.run(query))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        return (await self.api_wrapper.arun(query)).__str__()


class GoogleSerperResults(BaseTool):
    """Tool that has capability to query the Serper.dev Google Search API
    and get back json."""

    name = "Google Serper Results JSON"
    description = (
        "A low-cost Google Search API. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query. Output is a JSON object of the query results."
    )
    api_wrapper: GoogleSerperAPIWrapper = Field(default_factory=GoogleSerperAPIWrapper)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return str(self.api_wrapper.results(query))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        return (await self.api_wrapper.aresults(query)).__str__()

https://api.python.langchain.com/en/latest/_modules/langchain/tools/google_serper/tool.html
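A usage sketch, assuming a Serper.dev API key is available; GoogleSerperAPIWrapper reads it from the SERPER_API_KEY environment variable:

import os

from langchain.tools.google_serper.tool import GoogleSerperResults

os.environ["SERPER_API_KEY"] = "your-serper-api-key"  # placeholder value

# api_wrapper is built by default_factory, picking up the key from the environment.
tool = GoogleSerperResults()
print(tool.run("latest Python release"))  # stringified JSON of the query results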
Source code for langchain.prompts.few_shot

"""Prompt template that contains few shot examples."""
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.prompts.base import (
    DEFAULT_FORMATTER_MAPPING,
    StringPromptTemplate,
    check_valid_template,
)
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate


class FewShotPromptTemplate(StringPromptTemplate):
    """Prompt template that contains few shot examples."""

    @property
    def lc_serializable(self) -> bool:
        return False

    examples: Optional[List[dict]] = None
    """Examples to format into the prompt.
    Either this or example_selector should be provided."""

    example_selector: Optional[BaseExampleSelector] = None
    """ExampleSelector to choose the examples to format into the prompt.
    Either this or examples should be provided."""

    example_prompt: PromptTemplate
    """PromptTemplate used to format an individual example."""

    suffix: str
    """A prompt template string to put after the examples."""

    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""

    example_separator: str = "\n\n"
    """String separator used to join the prefix, the examples, and suffix."""

    prefix: str = ""
    """A prompt template string to put before the examples."""

    template_format: str = "f-string"
    """The format of the prompt template. Options are: 'f-string', 'jinja2'."""

    validate_template: bool = True
    """Whether or not to try validating the template."""

    @root_validator(pre=True)
    def check_examples_and_selector(cls, values: Dict) -> Dict:
        """Check that one and only one of examples/example_selector are provided."""
        examples = values.get("examples", None)
        example_selector = values.get("example_selector", None)
        if examples and example_selector:
            raise ValueError(
                "Only one of 'examples' and 'example_selector' should be provided"
            )
        if examples is None and example_selector is None:
            raise ValueError(
                "One of 'examples' and 'example_selector' should be provided"
            )
        return values

    @root_validator()
    def template_is_valid(cls, values: Dict) -> Dict:
        """Check that prefix, suffix and input variables are consistent."""
        if values["validate_template"]:
            check_valid_template(
                values["prefix"] + values["suffix"],
                values["template_format"],
                values["input_variables"] + list(values["partial_variables"]),
            )
        return values

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def _get_examples(self, **kwargs: Any) -> List[dict]:
        if self.examples is not None:
            return self.examples
        elif self.example_selector is not None:
            return self.example_selector.select_examples(kwargs)
        else:
            raise ValueError

    def format(self, **kwargs: Any) -> str:
        """Format the prompt with the inputs.

        Args:
            kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

        Example:

        .. code-block:: python

            prompt.format(variable1="foo")
        """
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        # Get the examples to use.
        examples = self._get_examples(**kwargs)
        examples = [
            {k: e[k] for k in self.example_prompt.input_variables} for e in examples
        ]
        # Format the examples.
        example_strings = [
            self.example_prompt.format(**example) for example in examples
        ]
        # Create the overall template.
        pieces = [self.prefix, *example_strings, self.suffix]
        template = self.example_separator.join([piece for piece in pieces if piece])
        # Format the template with the input variables.
        return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)

    @property
    def _prompt_type(self) -> str:
        """Return the prompt type key."""
        return "few_shot"

    def dict(self, **kwargs: Any) -> Dict:
        """Return a dictionary of the prompt."""
        if self.example_selector:
            raise ValueError("Saving an example selector is not currently supported")
        return super().dict(**kwargs)

https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot.html
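A short usage example of FewShotPromptTemplate, following the class as defined above:

from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate

example_prompt = PromptTemplate(
    input_variables=["word", "antonym"],
    template="Word: {word}\nAntonym: {antonym}",
)
examples = [
    {"word": "happy", "antonym": "sad"},
    {"word": "tall", "antonym": "short"},
]

prompt = FewShotPromptTemplate(
    examples=examples,
    example_prompt=example_prompt,
    prefix="Give the antonym of every input.",
    suffix="Word: {input}\nAntonym:",
    input_variables=["input"],
)
# The prefix, formatted examples, and suffix are joined by example_separator.
print(prompt.format(input="big"))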
Source code for langchain.prompts.loading

"""Load prompts from disk."""
import importlib
import json
import logging
from pathlib import Path
from typing import Union

import yaml

from langchain.output_parsers.regex import RegexParser
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseLLMOutputParser, NoOpOutputParser
from langchain.utilities.loading import try_load_from_hub

URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/"
logger = logging.getLogger(__name__)


def load_prompt_from_config(config: dict) -> BasePromptTemplate:
    """Load prompt from Config Dict."""
    if "_type" not in config:
        logger.warning("No `_type` key found, defaulting to `prompt`.")
    config_type = config.pop("_type", "prompt")

    if config_type not in type_to_loader_dict:
        raise ValueError(f"Loading {config_type} prompt not supported")

    prompt_loader = type_to_loader_dict[config_type]
    return prompt_loader(config)


def _load_template(var_name: str, config: dict) -> dict:
    """Load template from disk if applicable."""
    # Check if template_path exists in config.
    if f"{var_name}_path" in config:
        # If it does, make sure template variable doesn't also exist.
        if var_name in config:
            raise ValueError(
                f"Both `{var_name}_path` and `{var_name}` cannot be provided."
            )
        # Pop the template path from the config.
        template_path = Path(config.pop(f"{var_name}_path"))
        # Load the template.
        if template_path.suffix == ".txt":
            with open(template_path) as f:
                template = f.read()
        else:
            raise ValueError
        # Set the template variable to the extracted variable.
        config[var_name] = template
    return config


def _load_examples(config: dict) -> dict:
    """Load examples if necessary."""
    if isinstance(config["examples"], list):
        pass
    elif isinstance(config["examples"], str):
        with open(config["examples"]) as f:
            if config["examples"].endswith(".json"):
                examples = json.load(f)
            elif config["examples"].endswith((".yaml", ".yml")):
                examples = yaml.safe_load(f)
            else:
                raise ValueError(
                    "Invalid file format. Only json or yaml formats are supported."
                )
        config["examples"] = examples
    else:
        raise ValueError("Invalid examples format. Only list or string are supported.")
    return config


def _load_output_parser(config: dict) -> dict:
    """Load output parser."""
    if "output_parser" in config and config["output_parser"]:
        _config = config.pop("output_parser")
        output_parser_type = _config.pop("_type")
        if output_parser_type == "regex_parser":
            output_parser: BaseLLMOutputParser = RegexParser(**_config)
        elif output_parser_type == "default":
            output_parser = NoOpOutputParser(**_config)
        else:
            raise ValueError(f"Unsupported output parser {output_parser_type}")
        config["output_parser"] = output_parser
    return config


def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
    """Load the few shot prompt from the config."""
    # Load the suffix and prefix templates.
    config = _load_template("suffix", config)
    config = _load_template("prefix", config)
    # Load the example prompt.
    if "example_prompt_path" in config:
        if "example_prompt" in config:
            raise ValueError(
                "Only one of example_prompt and example_prompt_path should "
                "be specified."
            )
        config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
    else:
        config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
    # Load the examples.
    config = _load_examples(config)
    config = _load_output_parser(config)
    return FewShotPromptTemplate(**config)


def _load_prompt(config: dict) -> PromptTemplate:
    """Load the prompt template from config."""
    # Load the template from disk if necessary.
    config = _load_template("template", config)
    config = _load_output_parser(config)
    return PromptTemplate(**config)


def load_prompt(path: Union[str, Path]) -> BasePromptTemplate:
    """Unified method for loading a prompt from LangChainHub or local fs."""
    if hub_result := try_load_from_hub(
        path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"}
    ):
        return hub_result
    else:
        return _load_prompt_from_file(path)


def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate:
    """Load prompt from file."""
    # Convert file to Path object.
    if isinstance(file, str):
        file_path = Path(file)
    else:
        file_path = file
    # Load from either json or yaml.
    if file_path.suffix == ".json":
        with open(file_path) as f:
            config = json.load(f)
    elif file_path.suffix == ".yaml":
        with open(file_path, "r") as f:
            config = yaml.safe_load(f)
    elif file_path.suffix == ".py":
        spec = importlib.util.spec_from_loader(
            "prompt", loader=None, origin=str(file_path)
        )
        if spec is None:
            raise ValueError("could not load spec")
        helper = importlib.util.module_from_spec(spec)
        with open(file_path, "rb") as f:
            exec(f.read(), helper.__dict__)
        if not isinstance(helper.PROMPT, BasePromptTemplate):
            raise ValueError("Did not get object of type BasePromptTemplate.")
        return helper.PROMPT
    else:
        raise ValueError(f"Got unsupported file type {file_path.suffix}")
    # Load the prompt from the config now.
    return load_prompt_from_config(config)


type_to_loader_dict = {
    "prompt": _load_prompt,
    "few_shot": _load_few_shot_prompt,
    # "few_shot_with_templates": _load_few_shot_with_templates_prompt,
}

https://api.python.langchain.com/en/latest/_modules/langchain/prompts/loading.html
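A short example of the config schema that load_prompt accepts, written to a hypothetical joke_prompt.json:

import json

from langchain.prompts.loading import load_prompt

# Config matching the "prompt" loader above; the file name is hypothetical.
config = {
    "_type": "prompt",
    "input_variables": ["adjective"],
    "template": "Tell me a {adjective} joke.",
}
with open("joke_prompt.json", "w") as f:
    json.dump(config, f)

prompt = load_prompt("joke_prompt.json")
print(prompt.format(adjective="terrible"))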
Source code for langchain.prompts.few_shot_with_templates

"""Prompt template that contains few shot examples."""
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.prompts.base import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate


class FewShotPromptWithTemplates(StringPromptTemplate):
    """Prompt template that contains few shot examples."""

    examples: Optional[List[dict]] = None
    """Examples to format into the prompt.
    Either this or example_selector should be provided."""

    example_selector: Optional[BaseExampleSelector] = None
    """ExampleSelector to choose the examples to format into the prompt.
    Either this or examples should be provided."""

    example_prompt: PromptTemplate
    """PromptTemplate used to format an individual example."""

    suffix: StringPromptTemplate
    """A PromptTemplate to put after the examples."""

    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""

    example_separator: str = "\n\n"
    """String separator used to join the prefix, the examples, and suffix."""

    prefix: Optional[StringPromptTemplate] = None
    """A PromptTemplate to put before the examples."""

    template_format: str = "f-string"
    """The format of the prompt template. Options are: 'f-string', 'jinja2'."""

    validate_template: bool = True
    """Whether or not to try validating the template."""

    @root_validator(pre=True)
    def check_examples_and_selector(cls, values: Dict) -> Dict:
        """Check that one and only one of examples/example_selector are provided."""
        examples = values.get("examples", None)
        example_selector = values.get("example_selector", None)
        if examples and example_selector:
            raise ValueError(
                "Only one of 'examples' and 'example_selector' should be provided"
            )
        if examples is None and example_selector is None:
            raise ValueError(
                "One of 'examples' and 'example_selector' should be provided"
            )
        return values

    @root_validator()
    def template_is_valid(cls, values: Dict) -> Dict:
        """Check that prefix, suffix and input variables are consistent."""
        if values["validate_template"]:
            input_variables = values["input_variables"]
            expected_input_variables = set(values["suffix"].input_variables)
            expected_input_variables |= set(values["partial_variables"])
            if values["prefix"] is not None:
                expected_input_variables |= set(values["prefix"].input_variables)
            missing_vars = expected_input_variables.difference(input_variables)
            if missing_vars:
                raise ValueError(
                    f"Got input_variables={input_variables}, but based on "
                    f"prefix/suffix expected {expected_input_variables}"
                )
        return values

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def _get_examples(self, **kwargs: Any) -> List[dict]:
        if self.examples is not None:
            return self.examples
        elif self.example_selector is not None:
            return self.example_selector.select_examples(kwargs)
        else:
            raise ValueError

    def format(self, **kwargs: Any) -> str:
        """Format the prompt with the inputs.

        Args:
            kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

        Example:

        .. code-block:: python

            prompt.format(variable1="foo")
        """
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        # Get the examples to use.
        examples = self._get_examples(**kwargs)
        # Format the examples.
        example_strings = [
            self.example_prompt.format(**example) for example in examples
        ]
        # Create the overall prefix.
        if self.prefix is None:
            prefix = ""
        else:
            prefix_kwargs = {
                k: v for k, v in kwargs.items() if k in self.prefix.input_variables
            }
            for k in prefix_kwargs.keys():
                kwargs.pop(k)
            prefix = self.prefix.format(**prefix_kwargs)

        # Create the overall suffix
        suffix_kwargs = {
            k: v for k, v in kwargs.items() if k in self.suffix.input_variables
        }
        for k in suffix_kwargs.keys():
            kwargs.pop(k)
        suffix = self.suffix.format(
            **suffix_kwargs,
        )

        pieces = [prefix, *example_strings, suffix]
        template = self.example_separator.join([piece for piece in pieces if piece])
        # Format the template with the input variables.
        return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)

    @property
    def _prompt_type(self) -> str:
        """Return the prompt type key."""
        return "few_shot_with_templates"

    def dict(self, **kwargs: Any) -> Dict:
        """Return a dictionary of the prompt."""
        if self.example_selector:
            raise ValueError("Saving an example selector is not currently supported")
        return super().dict(**kwargs)

https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
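A usage sketch of FewShotPromptWithTemplates; unlike FewShotPromptTemplate, prefix and suffix are themselves prompt templates:

from langchain.prompts.few_shot_with_templates import FewShotPromptWithTemplates
from langchain.prompts.prompt import PromptTemplate

example_prompt = PromptTemplate(
    input_variables=["word", "antonym"],
    template="Word: {word}\nAntonym: {antonym}",
)
prefix = PromptTemplate(
    input_variables=["topic"], template="Give antonyms related to {topic}."
)
suffix = PromptTemplate(
    input_variables=["input"], template="Word: {input}\nAntonym:"
)

prompt = FewShotPromptWithTemplates(
    examples=[{"word": "hot", "antonym": "cold"}],
    example_prompt=example_prompt,
    prefix=prefix,
    suffix=suffix,
    input_variables=["topic", "input"],
)
# `topic` is routed to the prefix and `input` to the suffix by format().
print(prompt.format(topic="weather", input="humid"))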
Source code for langchain.prompts.prompt

"""Prompt schema definition."""
from __future__ import annotations

from pathlib import Path
from string import Formatter
from typing import Any, Dict, List, Union

from pydantic import root_validator

from langchain.prompts.base import (
    DEFAULT_FORMATTER_MAPPING,
    StringPromptTemplate,
    _get_jinja2_variables_from_template,
    check_valid_template,
)


class PromptTemplate(StringPromptTemplate):
    """Schema to represent a prompt for an LLM.

    Example:
        .. code-block:: python

            from langchain import PromptTemplate
            prompt = PromptTemplate(input_variables=["foo"], template="Say {foo}")
    """

    @property
    def lc_attributes(self) -> Dict[str, Any]:
        return {
            "template_format": self.template_format,
        }

    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""

    template: str
    """The prompt template."""

    template_format: str = "f-string"
    """The format of the prompt template. Options are: 'f-string', 'jinja2'."""

    validate_template: bool = True
    """Whether or not to try validating the template."""

    @property
    def _prompt_type(self) -> str:
        """Return the prompt type key."""
        return "prompt"

    def format(self, **kwargs: Any) -> str:
        """Format the prompt with the inputs.

        Args:
            kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

        Example:

        .. code-block:: python

            prompt.format(variable1="foo")
        """
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        return DEFAULT_FORMATTER_MAPPING[self.template_format](self.template, **kwargs)

    @root_validator()
    def template_is_valid(cls, values: Dict) -> Dict:
        """Check that template and input variables are consistent."""
        if values["validate_template"]:
            all_inputs = values["input_variables"] + list(values["partial_variables"])
            check_valid_template(
                values["template"], values["template_format"], all_inputs
            )
        return values

    @classmethod
    def from_examples(
        cls,
        examples: List[str],
        suffix: str,
        input_variables: List[str],
        example_separator: str = "\n\n",
        prefix: str = "",
        **kwargs: Any,
    ) -> PromptTemplate:
        """Take examples in list format with prefix and suffix to create a prompt.

        Intended to be used as a way to dynamically create a prompt from examples.

        Args:
            examples: List of examples to use in the prompt.
            suffix: String to go after the list of examples. Should generally
                set up the user's input.
            input_variables: A list of variable names the final prompt template
                will expect.
            example_separator: The separator to use in between examples. Defaults
                to two new line characters.
            prefix: String that should go before any examples. Generally includes
                instructions. Defaults to an empty string.

        Returns:
            The final prompt generated.
        """
        template = example_separator.join([prefix, *examples, suffix])
        return cls(input_variables=input_variables, template=template, **kwargs)

    @classmethod
    def from_file(
        cls, template_file: Union[str, Path], input_variables: List[str], **kwargs: Any
    ) -> PromptTemplate:
        """Load a prompt from a file.

        Args:
            template_file: The path to the file containing the prompt template.
            input_variables: A list of variable names the final prompt template
                will expect.

        Returns:
            The prompt loaded from the file.
        """
        with open(str(template_file), "r") as f:
            template = f.read()
        return cls(input_variables=input_variables, template=template, **kwargs)

    @classmethod
    def from_template(cls, template: str, **kwargs: Any) -> PromptTemplate:
        """Load a prompt template from a template."""
        if "template_format" in kwargs and kwargs["template_format"] == "jinja2":
            # Get the variables for the template
            input_variables = _get_jinja2_variables_from_template(template)
        else:
            input_variables = {
                v for _, v, _, _ in Formatter().parse(template) if v is not None
            }

        if "partial_variables" in kwargs:
            partial_variables = kwargs["partial_variables"]
            input_variables = {
                var for var in input_variables if var not in partial_variables
            }

        return cls(
            input_variables=list(sorted(input_variables)), template=template, **kwargs
        )


# For backwards compatibility.
Prompt = PromptTemplate

https://api.python.langchain.com/en/latest/_modules/langchain/prompts/prompt.html
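A quick example of from_template, which infers input_variables from the template string:

from langchain.prompts.prompt import PromptTemplate

prompt = PromptTemplate.from_template("Tell me a {adjective} joke about {content}.")
print(prompt.input_variables)  # ['adjective', 'content'] (sorted)
print(prompt.format(adjective="funny", content="chickens"))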
Source code for langchain.prompts.pipeline

from typing import Any, Dict, List, Tuple

from pydantic import root_validator

from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.chat import BaseChatPromptTemplate
from langchain.schema import PromptValue


def _get_inputs(inputs: dict, input_variables: List[str]) -> dict:
    return {k: inputs[k] for k in input_variables}


class PipelinePromptTemplate(BasePromptTemplate):
    """A prompt template for composing multiple prompts together.

    This can be useful when you want to reuse parts of prompts.
    A PipelinePrompt consists of two main parts:
    - final_prompt: This is the final prompt that is returned
    - pipeline_prompts: This is a list of tuples, consisting
      of a string (`name`) and a Prompt Template.
      Each PromptTemplate will be formatted and then passed
      to future prompt templates as a variable with
      the same name as `name`
    """

    final_prompt: BasePromptTemplate
    pipeline_prompts: List[Tuple[str, BasePromptTemplate]]

    @root_validator(pre=True)
    def get_input_variables(cls, values: Dict) -> Dict:
        """Get input variables."""
        created_variables = set()
        all_variables = set()
        for k, prompt in values["pipeline_prompts"]:
            created_variables.add(k)
            all_variables.update(prompt.input_variables)
        values["input_variables"] = list(all_variables.difference(created_variables))
        return values

    def format_prompt(self, **kwargs: Any) -> PromptValue:
        for k, prompt in self.pipeline_prompts:
            _inputs = _get_inputs(kwargs, prompt.input_variables)
            if isinstance(prompt, BaseChatPromptTemplate):
                kwargs[k] = prompt.format_messages(**_inputs)
            else:
                kwargs[k] = prompt.format(**_inputs)
        _inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
        return self.final_prompt.format_prompt(**_inputs)

    def format(self, **kwargs: Any) -> str:
        return self.format_prompt(**kwargs).to_string()

    @property
    def _prompt_type(self) -> str:
        raise ValueError

https://api.python.langchain.com/en/latest/_modules/langchain/prompts/pipeline.html
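A composition sketch for PipelinePromptTemplate: the two sub-prompts are formatted first and injected into the final prompt as variables:

from langchain.prompts.pipeline import PipelinePromptTemplate
from langchain.prompts.prompt import PromptTemplate

final_prompt = PromptTemplate.from_template("{introduction}\n\n{start}")
introduction = PromptTemplate.from_template("You are impersonating {person}.")
start = PromptTemplate.from_template("Q: {question}\nA:")

pipeline = PipelinePromptTemplate(
    final_prompt=final_prompt,
    pipeline_prompts=[("introduction", introduction), ("start", start)],
)
# input_variables is derived by the validator: {'person', 'question'}
print(pipeline.format(person="Ada Lovelace", question="What is an analytical engine?"))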
Source code for langchain.prompts.chat

"""Chat prompt template."""
from __future__ import annotations

from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Callable, List, Sequence, Tuple, Type, TypeVar, Union

from pydantic import Field, root_validator

from langchain.load.serializable import Serializable
from langchain.memory.buffer import get_buffer_string
from langchain.prompts.base import BasePromptTemplate, StringPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import (
    AIMessage,
    BaseMessage,
    ChatMessage,
    HumanMessage,
    PromptValue,
    SystemMessage,
)


class BaseMessagePromptTemplate(Serializable, ABC):
    @property
    def lc_serializable(self) -> bool:
        return True

    @abstractmethod
    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        """To messages."""

    @property
    @abstractmethod
    def input_variables(self) -> List[str]:
        """Input variables for this prompt template."""


class MessagesPlaceholder(BaseMessagePromptTemplate):
    """Prompt template that assumes variable is already list of messages."""

    variable_name: str

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        """To a BaseMessage."""
        value = kwargs[self.variable_name]
        if not isinstance(value, list):
            raise ValueError(
                f"variable {self.variable_name} should be a list of base messages, "
                f"got {value}"
            )
        for v in value:
            if not isinstance(v, BaseMessage):
                raise ValueError(
                    f"variable {self.variable_name} should be a list of base messages,"
                    f" got {value}"
                )
        return value

    @property
    def input_variables(self) -> List[str]:
        """Input variables for this prompt template."""
        return [self.variable_name]


MessagePromptTemplateT = TypeVar(
    "MessagePromptTemplateT", bound="BaseStringMessagePromptTemplate"
)


class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
    prompt: StringPromptTemplate
    additional_kwargs: dict = Field(default_factory=dict)

    @classmethod
    def from_template(
        cls: Type[MessagePromptTemplateT],
        template: str,
        template_format: str = "f-string",
        **kwargs: Any,
    ) -> MessagePromptTemplateT:
        prompt = PromptTemplate.from_template(template, template_format=template_format)
        return cls(prompt=prompt, **kwargs)

    @classmethod
    def from_template_file(
        cls: Type[MessagePromptTemplateT],
        template_file: Union[str, Path],
        input_variables: List[str],
        **kwargs: Any,
    ) -> MessagePromptTemplateT:
        prompt = PromptTemplate.from_file(template_file, input_variables)
        return cls(prompt=prompt, **kwargs)

    @abstractmethod
    def format(self, **kwargs: Any) -> BaseMessage:
        """To a BaseMessage."""

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        return [self.format(**kwargs)]

    @property
    def input_variables(self) -> List[str]:
        return self.prompt.input_variables


class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate):
    role: str

    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return ChatMessage(
            content=text, role=self.role, additional_kwargs=self.additional_kwargs
        )


class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return HumanMessage(content=text, additional_kwargs=self.additional_kwargs)


class AIMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return AIMessage(content=text, additional_kwargs=self.additional_kwargs)


class SystemMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return SystemMessage(content=text, additional_kwargs=self.additional_kwargs)


class ChatPromptValue(PromptValue):
    messages: List[BaseMessage]

    def to_string(self) -> str:
        """Return prompt as string."""
        return get_buffer_string(self.messages)

    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as messages."""
        return self.messages


class BaseChatPromptTemplate(BasePromptTemplate, ABC):
    def format(self, **kwargs: Any) -> str:
        return self.format_prompt(**kwargs).to_string()

    def format_prompt(self, **kwargs: Any) -> PromptValue:
        messages = self.format_messages(**kwargs)
        return ChatPromptValue(messages=messages)

    @abstractmethod
    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        """Format kwargs into a list of messages."""


class ChatPromptTemplate(BaseChatPromptTemplate, ABC):
    input_variables: List[str]
    messages: List[Union[BaseMessagePromptTemplate, BaseMessage]]

    @root_validator(pre=True)
    def validate_input_variables(cls, values: dict) -> dict:
        messages = values["messages"]
        input_vars = set()
        for message in messages:
            if isinstance(message, BaseMessagePromptTemplate):
                input_vars.update(message.input_variables)
        if "partial_variables" in values:
            input_vars = input_vars - set(values["partial_variables"])
        if "input_variables" in values:
            if input_vars != set(values["input_variables"]):
                raise ValueError(
                    "Got mismatched input_variables. "
                    f"Expected: {input_vars}. "
                    f"Got: {values['input_variables']}"
                )
        else:
            values["input_variables"] = list(input_vars)
        return values

    @classmethod
    def from_template(cls, template: str, **kwargs: Any) -> ChatPromptTemplate:
        prompt_template = PromptTemplate.from_template(template, **kwargs)
        message = HumanMessagePromptTemplate(prompt=prompt_template)
        return cls.from_messages([message])

    @classmethod
    def from_role_strings(
        cls, string_messages: List[Tuple[str, str]]
    ) -> ChatPromptTemplate:
        messages = [
            ChatMessagePromptTemplate(
                prompt=PromptTemplate.from_template(template), role=role
            )
            for role, template in string_messages
        ]
        return cls.from_messages(messages)

    @classmethod
    def from_strings(
        cls, string_messages: List[Tuple[Type[BaseMessagePromptTemplate], str]]
    ) -> ChatPromptTemplate:
        messages = [
            role(prompt=PromptTemplate.from_template(template))
            for role, template in string_messages
        ]
        return cls.from_messages(messages)

    @classmethod
    def from_messages(
        cls, messages: Sequence[Union[BaseMessagePromptTemplate, BaseMessage]]
    ) -> ChatPromptTemplate:
        input_vars = set()
        for message in messages:
            if isinstance(message, BaseMessagePromptTemplate):
                input_vars.update(message.input_variables)
        return cls(input_variables=list(input_vars), messages=messages)

    def format(self, **kwargs: Any) -> str:
        return self.format_prompt(**kwargs).to_string()

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        result = []
        for message_template in self.messages:
            if isinstance(message_template, BaseMessage):
                result.extend([message_template])
            elif isinstance(message_template, BaseMessagePromptTemplate):
                rel_params = {
                    k: v
                    for k, v in kwargs.items()
                    if k in message_template.input_variables
                }
                message = message_template.format_messages(**rel_params)
                result.extend(message)
            else:
                raise ValueError(f"Unexpected input: {message_template}")
        return result

    def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate:
        raise NotImplementedError

    @property
    def _prompt_type(self) -> str:
        return "chat"

    def save(self, file_path: Union[Path, str]) -> None:
        raise NotImplementedError

https://api.python.langchain.com/en/latest/_modules/langchain/prompts/chat.html
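A short usage example for ChatPromptTemplate built from message templates:

from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

chat_prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessagePromptTemplate.from_template("You are a {trait} assistant."),
        HumanMessagePromptTemplate.from_template("{question}"),
    ]
)
messages = chat_prompt.format_messages(trait="helpful", question="What is LangChain?")
for m in messages:
    print(type(m).__name__, "->", m.content)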
Source code for langchain.prompts.base

"""BasePrompt schema definition."""
from __future__ import annotations

import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, Optional, Set, Union

import yaml
from pydantic import Field, root_validator

from langchain.formatting import formatter
from langchain.load.serializable import Serializable
from langchain.schema import BaseMessage, BaseOutputParser, HumanMessage, PromptValue


def jinja2_formatter(template: str, **kwargs: Any) -> str:
    """Format a template using jinja2."""
    try:
        from jinja2 import Template
    except ImportError:
        raise ImportError(
            "jinja2 not installed, which is needed to use the jinja2_formatter. "
            "Please install it with `pip install jinja2`."
        )

    return Template(template).render(**kwargs)


def validate_jinja2(template: str, input_variables: List[str]) -> None:
    """
    Validate that the input variables are valid for the template.
    Raise an exception if missing or extra variables are found.

    Args:
        template: The template string.
        input_variables: The input variables.
    """
    input_variables_set = set(input_variables)
    valid_variables = _get_jinja2_variables_from_template(template)
    missing_variables = valid_variables - input_variables_set
    extra_variables = input_variables_set - valid_variables

    error_message = ""
    if missing_variables:
        error_message += f"Missing variables: {missing_variables} "

    if extra_variables:
        error_message += f"Extra variables: {extra_variables}"

    if error_message:
        raise KeyError(error_message.strip())


def _get_jinja2_variables_from_template(template: str) -> Set[str]:
    try:
        from jinja2 import Environment, meta
    except ImportError:
        raise ImportError(
            "jinja2 not installed, which is needed to use the jinja2_formatter. "
            "Please install it with `pip install jinja2`."
        )
    env = Environment()
    ast = env.parse(template)
    variables = meta.find_undeclared_variables(ast)
    return variables


DEFAULT_FORMATTER_MAPPING: Dict[str, Callable] = {
    "f-string": formatter.format,
    "jinja2": jinja2_formatter,
}

DEFAULT_VALIDATOR_MAPPING: Dict[str, Callable] = {
    "f-string": formatter.validate_input_variables,
    "jinja2": validate_jinja2,
}


def check_valid_template(
    template: str, template_format: str, input_variables: List[str]
) -> None:
    """Check that template string is valid."""
    if template_format not in DEFAULT_FORMATTER_MAPPING:
        valid_formats = list(DEFAULT_FORMATTER_MAPPING)
        raise ValueError(
            f"Invalid template format. Got `{template_format}`;"
            f" should be one of {valid_formats}"
        )
    try:
        validator_func = DEFAULT_VALIDATOR_MAPPING[template_format]
        validator_func(template, input_variables)
    except KeyError as e:
        raise ValueError(
            "Invalid prompt schema; check for mismatched or missing input parameters. "
            + str(e)
        )


class StringPromptValue(PromptValue):
    text: str

    def to_string(self) -> str:
        """Return prompt as string."""
        return self.text

    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as messages."""
        return [HumanMessage(content=self.text)]


class BasePromptTemplate(Serializable, ABC):
    """Base class for all prompt templates, returning a prompt."""

    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""
    output_parser: Optional[BaseOutputParser] = None
    """How to parse the output of calling an LLM on this formatted prompt."""
    partial_variables: Mapping[str, Union[str, Callable[[], str]]] = Field(
        default_factory=dict
    )

    @property
    def lc_serializable(self) -> bool:
        return True

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @abstractmethod
    def format_prompt(self, **kwargs: Any) -> PromptValue:
        """Create Chat Messages."""

    @root_validator()
    def validate_variable_names(cls, values: Dict) -> Dict:
        """Validate variable names do not include restricted names."""
        if "stop" in values["input_variables"]:
            raise ValueError(
                "Cannot have an input variable named 'stop', as it is used internally,"
                " please rename."
            )
        if "stop" in values["partial_variables"]:
            raise ValueError(
                "Cannot have a partial variable named 'stop', as it is used "
                "internally, please rename."
            )

        overall = set(values["input_variables"]).intersection(
            values["partial_variables"]
        )
        if overall:
            raise ValueError(
                f"Found overlapping input and partial variables: {overall}"
            )
        return values

    def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate:
        """Return a partial of the prompt template."""
        prompt_dict = self.__dict__.copy()
        prompt_dict["input_variables"] = list(
            set(self.input_variables).difference(kwargs)
        )
        prompt_dict["partial_variables"] = {**self.partial_variables, **kwargs}
        return type(self)(**prompt_dict)

    def _merge_partial_and_user_variables(self, **kwargs: Any) -> Dict[str, Any]:
        # Get partial params:
        partial_kwargs = {
            k: v if isinstance(v, str) else v()
            for k, v in self.partial_variables.items()
        }
        return {**partial_kwargs, **kwargs}

    @abstractmethod
    def format(self, **kwargs: Any) -> str:
        """Format the prompt with the inputs.

        Args:
            kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

        Example:

        .. code-block:: python

            prompt.format(variable1="foo")
        """

    @property
    def _prompt_type(self) -> str:
        """Return the prompt type key."""
        raise NotImplementedError

    def dict(self, **kwargs: Any) -> Dict:
        """Return dictionary representation of prompt."""
        prompt_dict = super().dict(**kwargs)
        prompt_dict["_type"] = self._prompt_type
        return prompt_dict

    def save(self, file_path: Union[Path, str]) -> None:
        """Save the prompt.

        Args:
            file_path: Path to directory to save prompt to.

        Example:

        .. code-block:: python

            prompt.save(file_path="path/prompt.yaml")
        """
        if self.partial_variables:
            raise ValueError("Cannot save prompt with partial variables.")

        # Convert file to Path object.
        if isinstance(file_path, str):
            save_path = Path(file_path)
        else:
            save_path = file_path

        directory_path = save_path.parent
        directory_path.mkdir(parents=True, exist_ok=True)

        # Fetch dictionary to save
        prompt_dict = self.dict()

        if save_path.suffix == ".json":
            with open(file_path, "w") as f:
                json.dump(prompt_dict, f, indent=4)
        elif save_path.suffix == ".yaml":
            with open(file_path, "w") as f:
                yaml.dump(prompt_dict, f, default_flow_style=False)
        else:
            raise ValueError(f"{save_path} must be json or yaml")


class StringPromptTemplate(BasePromptTemplate, ABC):
    """String prompt should expose the format method, returning a prompt."""

    def format_prompt(self, **kwargs: Any) -> PromptValue:
        """Create Chat Messages."""
        return StringPromptValue(text=self.format(**kwargs))

https://api.python.langchain.com/en/latest/_modules/langchain/prompts/base.html
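A quick illustration of partial(), defined above on BasePromptTemplate:

from langchain.prompts.prompt import PromptTemplate

prompt = PromptTemplate.from_template("{greeting}, {name}!")
# Bind `greeting` ahead of time; only `name` remains an input variable.
partial_prompt = prompt.partial(greeting="Hello")
print(partial_prompt.input_variables)       # ['name']
print(partial_prompt.format(name="world"))  # Hello, world!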
Source code for langchain.prompts.example_selector.semantic_similarity

"""Example selector that selects examples based on SemanticSimilarity."""
from __future__ import annotations

from typing import Any, Dict, List, Optional, Type

from pydantic import BaseModel, Extra

from langchain.embeddings.base import Embeddings
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.vectorstores.base import VectorStore


def sorted_values(values: Dict[str, str]) -> List[Any]:
    """Return a list of values in dict sorted by key."""
    return [values[val] for val in sorted(values)]


class SemanticSimilarityExampleSelector(BaseExampleSelector, BaseModel):
    """Example selector that selects examples based on SemanticSimilarity."""

    vectorstore: VectorStore
    """VectorStore that contains information about examples."""
    k: int = 4
    """Number of examples to select."""
    example_keys: Optional[List[str]] = None
    """Optional keys to filter examples to."""
    input_keys: Optional[List[str]] = None
    """Optional keys to filter input to. If provided, the search is based on
    the input variables instead of all variables."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def add_example(self, example: Dict[str, str]) -> str:
        """Add new example to vectorstore."""
        if self.input_keys:
            string_example = " ".join(
                sorted_values({key: example[key] for key in self.input_keys})
            )
        else:
            string_example = " ".join(sorted_values(example))
        ids = self.vectorstore.add_texts([string_example], metadatas=[example])
        return ids[0]

    def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
        """Select which examples to use based on semantic similarity."""
        # Get the docs with the highest similarity.
        if self.input_keys:
            input_variables = {key: input_variables[key] for key in self.input_keys}
        query = " ".join(sorted_values(input_variables))
        example_docs = self.vectorstore.similarity_search(query, k=self.k)
        # Get the examples from the metadata.
        # This assumes that examples are stored in metadata.
        examples = [dict(e.metadata) for e in example_docs]
        # If example keys are provided, filter examples to those keys.
        if self.example_keys:
            examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
        return examples

    @classmethod
    def from_examples(
        cls,
        examples: List[dict],
        embeddings: Embeddings,
        vectorstore_cls: Type[VectorStore],
        k: int = 4,
        input_keys: Optional[List[str]] = None,
        **vectorstore_cls_kwargs: Any,
    ) -> SemanticSimilarityExampleSelector:
        """Create k-shot example selector using example list and embeddings.

        Reshuffles examples dynamically based on query similarity.

        Args:
            examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
            vectorstore_cls: A vector store DB interface class, e.g. FAISS.
            k: Number of examples to select
            input_keys: If provided, the search is based on the input variables
                instead of all variables.
            vectorstore_cls_kwargs: optional kwargs containing url for vector store

        Returns:
            The ExampleSelector instantiated, backed by a vector store.
        """
        if input_keys:
            string_examples = [
                " ".join(sorted_values({k: eg[k] for k in input_keys}))
                for eg in examples
            ]
        else:
            string_examples = [" ".join(sorted_values(eg)) for eg in examples]
        vectorstore = vectorstore_cls.from_texts(
            string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
        )
        return cls(vectorstore=vectorstore, k=k, input_keys=input_keys)


class MaxMarginalRelevanceExampleSelector(SemanticSimilarityExampleSelector):
    """ExampleSelector that selects examples based on Max Marginal Relevance.

    This was shown to improve performance in this paper:
    https://arxiv.org/pdf/2211.13892.pdf
    """

    fetch_k: int = 20
    """Number of examples to fetch to rerank."""

    def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
        """Select which examples to use based on semantic similarity."""
        # Get the docs with the highest similarity.
        if self.input_keys:
            input_variables = {key: input_variables[key] for key in self.input_keys}
        query = " ".join(sorted_values(input_variables))
        example_docs = self.vectorstore.max_marginal_relevance_search(
            query, k=self.k, fetch_k=self.fetch_k
        )
        # Get the examples from the metadata.
        # This assumes that examples are stored in metadata.
        examples = [dict(e.metadata) for e in example_docs]
        # If example keys are provided, filter examples to those keys.
        if self.example_keys:
            examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
        return examples

    @classmethod
    def from_examples(
        cls,
        examples: List[dict],
        embeddings: Embeddings,
        vectorstore_cls: Type[VectorStore],
        k: int = 4,
        input_keys: Optional[List[str]] = None,
        fetch_k: int = 20,
        **vectorstore_cls_kwargs: Any,
    ) -> MaxMarginalRelevanceExampleSelector:
        """Create k-shot example selector using example list and embeddings.

        Reshuffles examples dynamically based on query similarity.

        Args:
            examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
            vectorstore_cls: A vector store DB interface class, e.g. FAISS.
            k: Number of examples to select
            input_keys: If provided, the search is based on the input variables
                instead of all variables.
            vectorstore_cls_kwargs: optional kwargs containing url for vector store

        Returns:
            The ExampleSelector instantiated, backed by a vector store.
        """
        if input_keys:
            string_examples = [
                " ".join(sorted_values({k: eg[k] for k in input_keys}))
                for eg in examples
            ]
        else:
            string_examples = [" ".join(sorted_values(eg)) for eg in examples]
        vectorstore = vectorstore_cls.from_texts(
            string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
        )
        return cls(vectorstore=vectorstore, k=k, fetch_k=fetch_k, input_keys=input_keys)

https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/semantic_similarity.html
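A usage sketch of SemanticSimilarityExampleSelector. It assumes an OPENAI_API_KEY in the environment and the faiss-cpu package installed; any Embeddings/VectorStore pair would work:

from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts.example_selector.semantic_similarity import (
    SemanticSimilarityExampleSelector,
)
from langchain.vectorstores import FAISS

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
    {"input": "sunny", "output": "gloomy"},
]

selector = SemanticSimilarityExampleSelector.from_examples(
    examples,
    OpenAIEmbeddings(),  # requires OPENAI_API_KEY
    FAISS,               # requires `pip install faiss-cpu`
    k=1,
)
print(selector.select_examples({"input": "joyful"}))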
Source code for langchain.prompts.example_selector.ngram_overlap

"""Select and order examples based on ngram overlap score (sentence_bleu score).

https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
from typing import Dict, List

import numpy as np
from pydantic import BaseModel, root_validator

from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate


def ngram_overlap_score(source: List[str], example: List[str]) -> float:
    """Compute ngram overlap score of source and example as sentence_bleu score.

    Use sentence_bleu with method1 smoothing function and auto reweighting.
    Return float value between 0.0 and 1.0 inclusive.
    https://www.nltk.org/_modules/nltk/translate/bleu_score.html
    https://aclanthology.org/P02-1040.pdf
    """
    from nltk.translate.bleu_score import (  # type: ignore
        SmoothingFunction,
        sentence_bleu,
    )

    hypotheses = source[0].split()
    references = [s.split() for s in example]

    return float(
        sentence_bleu(
            references,
            hypotheses,
            smoothing_function=SmoothingFunction().method1,
            auto_reweigh=True,
        )
    )


class NGramOverlapExampleSelector(BaseExampleSelector, BaseModel):
    """Select and order examples based on ngram overlap score (sentence_bleu score).

    https://www.nltk.org/_modules/nltk/translate/bleu_score.html
    https://aclanthology.org/P02-1040.pdf
    """

    examples: List[dict]
    """A list of the examples that the prompt template expects."""

    example_prompt: PromptTemplate
    """Prompt template used to format the examples."""

    threshold: float = -1.0
    """Threshold at which algorithm stops. Set to -1.0 by default.

    For negative threshold:
    select_examples sorts examples by ngram_overlap_score, but excludes none.
    For threshold greater than 1.0:
    select_examples excludes all examples, and returns an empty list.
    For threshold equal to 0.0:
    select_examples sorts examples by ngram_overlap_score,
    and excludes examples with no ngram overlap with input.
    """

    @root_validator(pre=True)
    def check_dependencies(cls, values: Dict) -> Dict:
        """Check that valid dependencies exist."""
        try:
            from nltk.translate.bleu_score import (  # noqa: disable=F401
                SmoothingFunction,
                sentence_bleu,
            )
        except ImportError as e:
            raise ValueError(
                "Not all the correct dependencies for this ExampleSelector exist"
            ) from e

        return values

    def add_example(self, example: Dict[str, str]) -> None:
        """Add new example to list."""
        self.examples.append(example)

    def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
        """Return list of examples sorted by ngram_overlap_score with input.

        Descending order.
        Excludes any examples with ngram_overlap_score less than or equal to threshold.
        """
        inputs = list(input_variables.values())
        examples = []
        k = len(self.examples)
        score = [0.0] * k
        first_prompt_template_key = self.example_prompt.input_variables[0]

        for i in range(k):
            score[i] = ngram_overlap_score(
                inputs, [self.examples[i][first_prompt_template_key]]
            )

        while True:
            arg_max = np.argmax(score)
            if (score[arg_max] < self.threshold) or abs(
                score[arg_max] - self.threshold
            ) < 1e-9:
                break

            examples.append(self.examples[arg_max])
            score[arg_max] = self.threshold - 1.0

        return examples

https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/ngram_overlap.html
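A usage sketch for NGramOverlapExampleSelector, assuming nltk is installed (`pip install nltk`):

from langchain.prompts.example_selector.ngram_overlap import (
    NGramOverlapExampleSelector,
)
from langchain.prompts.prompt import PromptTemplate

example_prompt = PromptTemplate(
    input_variables=["input", "output"],
    template="Input: {input}\nOutput: {output}",
)
examples = [
    {"input": "See Spot run.", "output": "Ver correr a Spot."},
    {"input": "My dog barks.", "output": "Mi perro ladra."},
]

selector = NGramOverlapExampleSelector(
    examples=examples,
    example_prompt=example_prompt,
    threshold=0.0,  # exclude examples with no ngram overlap with the input
)
# Scoring uses the first example_prompt input variable ("input" here);
# the key of the query dict is arbitrary since only its values are used.
print(selector.select_examples({"sentence": "Spot can run fast."}))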
Source code for langchain.prompts.example_selector.length_based

"""Select examples based on length."""
import re
from typing import Callable, Dict, List

from pydantic import BaseModel, validator

from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate


def _get_length_based(text: str) -> int:
    return len(re.split("\n| ", text))


class LengthBasedExampleSelector(BaseExampleSelector, BaseModel):
    """Select examples based on length."""

    examples: List[dict]
    """A list of the examples that the prompt template expects."""

    example_prompt: PromptTemplate
    """Prompt template used to format the examples."""

    get_text_length: Callable[[str], int] = _get_length_based
    """Function to measure prompt length. Defaults to word count."""

    max_length: int = 2048
    """Max length for the prompt, beyond which examples are cut."""

    example_text_lengths: List[int] = []  #: :meta private:

    def add_example(self, example: Dict[str, str]) -> None:
        """Add new example to list."""
        self.examples.append(example)
        string_example = self.example_prompt.format(**example)
        self.example_text_lengths.append(self.get_text_length(string_example))

    @validator("example_text_lengths", always=True)
    def calculate_example_text_lengths(cls, v: List[int], values: Dict) -> List[int]:
        """Calculate text lengths if they don't exist."""
        # Check if text lengths were passed in
        if v:
            return v
        # If they were not, calculate them
        example_prompt = values["example_prompt"]
        get_text_length = values["get_text_length"]
        string_examples = [example_prompt.format(**eg) for eg in values["examples"]]
        return [get_text_length(eg) for eg in string_examples]

    def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
        """Select which examples to use based on the input lengths."""
        inputs = " ".join(input_variables.values())
        remaining_length = self.max_length - self.get_text_length(inputs)
        i = 0
        examples = []
        while remaining_length > 0 and i < len(self.examples):
            new_length = remaining_length - self.example_text_lengths[i]
            if new_length < 0:
                break
            else:
                examples.append(self.examples[i])
                remaining_length = new_length
            i += 1
        return examples

https://api.python.langchain.com/en/latest/_modules/langchain/prompts/example_selector/length_based.html
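A short example of LengthBasedExampleSelector; with a small max_length, only the examples that fit are kept:

from langchain.prompts.example_selector.length_based import (
    LengthBasedExampleSelector,
)
from langchain.prompts.prompt import PromptTemplate

example_prompt = PromptTemplate(
    input_variables=["word", "antonym"],
    template="Word: {word}\nAntonym: {antonym}",
)
examples = [
    {"word": "happy", "antonym": "sad"},
    {"word": "energetic", "antonym": "lethargic"},
    {"word": "sunny", "antonym": "gloomy"},
]

selector = LengthBasedExampleSelector(
    examples=examples,
    example_prompt=example_prompt,
    max_length=10,  # measured in words by the default get_text_length
)
print(selector.select_examples({"input": "big"}))  # only the first two examples fit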
1ac7ca91b95f-0
Source code for langchain.llms.llamacpp """Wrapper around llama.cpp.""" import logging from typing import Any, Dict, Generator, List, Optional from pydantic import Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM logger = logging.getLogger(__name__) [docs]class LlamaCpp(LLM): """Wrapper around the llama.cpp model. To use, you should have the llama-cpp-python library installed, and provide the path to the Llama model as a named parameter to the constructor. Check out: https://github.com/abetlen/llama-cpp-python Example: .. code-block:: python from langchain.llms import LlamaCppEmbeddings llm = LlamaCppEmbeddings(model_path="/path/to/llama/model") """ client: Any #: :meta private: model_path: str """The path to the Llama model file.""" lora_base: Optional[str] = None """The path to the Llama LoRA base model.""" lora_path: Optional[str] = None """The path to the Llama LoRA. If None, no LoRa is loaded.""" n_ctx: int = Field(512, alias="n_ctx") """Token context window.""" n_parts: int = Field(-1, alias="n_parts") """Number of parts to split the model into. If -1, the number of parts is automatically determined.""" seed: int = Field(-1, alias="seed") """Seed. If -1, a random seed is used.""" f16_kv: bool = Field(True, alias="f16_kv")
"""Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" n_threads: Optional[int] = Field(None, alias="n_threads") """Number of threads to use. If None, the number of threads is automatically determined.""" n_batch: Optional[int] = Field(8, alias="n_batch") """Number of tokens to process in parallel. Should be a number between 1 and n_ctx.""" n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers") """Number of layers to be loaded into gpu memory. Default None.""" suffix: Optional[str] = Field(None) """A suffix to append to the generated text. If None, no suffix is appended.""" max_tokens: Optional[int] = 256 """The maximum number of tokens to generate.""" temperature: Optional[float] = 0.8 """The temperature to use for sampling.""" top_p: Optional[float] = 0.95 """The top-p value to use for sampling.""" logprobs: Optional[int] = Field(None) """The number of logprobs to return. If None, no logprobs are returned.""" echo: Optional[bool] = False """Whether to echo the prompt.""" stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered.""" repeat_penalty: Optional[float] = 1.1 """The penalty to apply to repeated tokens.""" top_k: Optional[int] = 40 """The top-k value to use for sampling.""" last_n_tokens_size: Optional[int] = 64 """The number of tokens to look back when applying the repeat_penalty.""" use_mmap: Optional[bool] = True """Whether to keep the model loaded in RAM""" streaming: bool = True """Whether to stream the results, token by token.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that llama-cpp-python library is installed.""" model_path = values["model_path"] model_param_names = [ "lora_path", "lora_base", "n_ctx", "n_parts", "seed", "f16_kv", "logits_all", "vocab_only", "use_mlock", "n_threads", "n_batch", "use_mmap", "last_n_tokens_size", ] model_params = {k: values[k] for k in model_param_names} # For backwards compatibility, only include if non-null. if values["n_gpu_layers"] is not None: model_params["n_gpu_layers"] = values["n_gpu_layers"] try: from llama_cpp import Llama values["client"] = Llama(model_path, **model_params) except ImportError: raise ModuleNotFoundError(
"Could not import llama-cpp-python library. " "Please install the llama-cpp-python library to " "use this model: pip install llama-cpp-python" ) except Exception as e: raise ValueError( f"Could not load Llama model from path: {model_path}. " f"Received error {e}" ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling llama_cpp.""" return { "suffix": self.suffix, "max_tokens": self.max_tokens, "temperature": self.temperature, "top_p": self.top_p, "logprobs": self.logprobs, "echo": self.echo, "stop_sequences": self.stop, # key here is convention among LLM classes "repeat_penalty": self.repeat_penalty, "top_k": self.top_k, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model_path": self.model_path}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "llamacpp" def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]: """ Performs sanity check, preparing parameters in format needed by llama_cpp. Args: stop (Optional[List[str]]): List of stop sequences for llama_cpp. Returns: Dictionary containing the combined parameters. """
# Raise error if stop sequences are in both input and default params if self.stop and stop is not None: raise ValueError("`stop` found in both the input and default params.") params = self._default_params # llama_cpp expects the "stop" key not this, so we remove it: params.pop("stop_sequences") # then sets it as configured, or default to an empty list: params["stop"] = self.stop or stop or [] return params def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call the Llama model and return the output. Args: prompt: The prompt to use for generation. stop: A list of strings to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp(model_path="/path/to/local/llama/model.bin") llm("This is a prompt.") """ if self.streaming: # If streaming is enabled, we use the stream # method that yields as they are generated # and return the combined strings from the first choice's text: combined_text_output = "" for token in self.stream(prompt=prompt, stop=stop, run_manager=run_manager): combined_text_output += token["choices"][0]["text"] return combined_text_output else: params = self._get_parameters(stop)
params = {**params, **kwargs} result = self.client(prompt=prompt, **params) return result["choices"][0]["text"] [docs] def stream( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> Generator[Dict, None, None]: """Yields result objects as they are generated in real time. BETA: this is a beta feature while we figure out the right abstraction. Once that happens, this interface could change. It also calls the callback manager's on_llm_new_token event with similar parameters to the OpenAI LLM class method of the same name. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens being generated. Yields: Dictionary-like objects containing a string token and metadata. See llama-cpp-python docs and below for more. Example: .. code-block:: python from langchain.llms import LlamaCpp llm = LlamaCpp( model_path="/path/to/local/model.bin", temperature = 0.5 ) for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'", stop=["'","\n"]): result = chunk["choices"][0] print(result["text"], end='', flush=True) """ params = self._get_parameters(stop)
result = self.client(prompt=prompt, stream=True, **params) for chunk in result: token = chunk["choices"][0]["text"] log_probs = chunk["choices"][0].get("logprobs", None) if run_manager: run_manager.on_llm_new_token( token=token, verbose=self.verbose, log_probs=log_probs ) yield chunk [docs] def get_num_tokens(self, text: str) -> int: tokenized_text = self.client.tokenize(text.encode("utf-8")) return len(tokenized_text)
Source code for langchain.llms.aleph_alpha """Wrapper around Aleph Alpha APIs.""" from typing import Any, Dict, List, Optional, Sequence from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env [docs]class AlephAlpha(LLM): """Wrapper around Aleph Alpha large language models. To use, you should have the ``aleph_alpha_client`` python package installed, and the environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Parameters are explained more in depth here: https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10 Example: .. code-block:: python from langchain.llms import AlephAlpha aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = "luminous-base" """Model name to use.""" maximum_tokens: int = 64 """The maximum number of tokens to be generated.""" temperature: float = 0.0 """A non-negative float that tunes the degree of randomness in generation.""" top_k: int = 0 """Number of most likely tokens to consider at each step.""" top_p: float = 0.0 """Total probability mass of tokens to consider at each step."""
presence_penalty: float = 0.0 """Penalizes repeated tokens.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency.""" repetition_penalties_include_prompt: Optional[bool] = False """Flag deciding whether presence penalty or frequency penalty are updated from the prompt.""" use_multiplicative_presence_penalty: Optional[bool] = False """Flag deciding whether presence penalty is applied multiplicatively (True) or additively (False).""" penalty_bias: Optional[str] = None """Penalty bias for the completion.""" penalty_exceptions: Optional[List[str]] = None """List of strings that may be generated without penalty, regardless of other penalty settings""" penalty_exceptions_include_stop_sequences: Optional[bool] = None """Should stop_sequences be included in penalty_exceptions.""" best_of: Optional[int] = None """returns the one with the "best of" results (highest log probability per token) """ n: int = 1 """How many completions to generate for each prompt.""" logit_bias: Optional[Dict[int, float]] = None """The logit bias allows to influence the likelihood of generating tokens.""" log_probs: Optional[int] = None """Number of top log probabilities to be returned for each generated token.""" tokens: Optional[bool] = False """return tokens of completion.""" disable_optimizations: Optional[bool] = False minimum_tokens: Optional[int] = 0 """Generate at least this number of tokens.""" echo: bool = False """Echo the prompt in the completion."""
use_multiplicative_frequency_penalty: bool = False sequence_penalty: float = 0.0 sequence_penalty_min_length: int = 2 use_multiplicative_sequence_penalty: bool = False completion_bias_inclusion: Optional[Sequence[str]] = None completion_bias_inclusion_first_token_only: bool = False completion_bias_exclusion: Optional[Sequence[str]] = None completion_bias_exclusion_first_token_only: bool = False """Only consider the first token for the completion_bias_exclusion.""" contextual_control_threshold: Optional[float] = None """If set to None, attention control parameters only apply to those tokens that have explicitly been set in the request. If set to a non-None value, control parameters are also applied to similar tokens. """ control_log_additive: Optional[bool] = True """True: apply control by adding the log(control_factor) to attention scores. False: (attention_scores - attention_scores.min(-1)) * control_factor """ repetition_penalties_include_completion: bool = True """Flag deciding whether presence penalty or frequency penalty are updated from the completion.""" raw_completion: bool = False """Force the raw completion of the model to be returned.""" aleph_alpha_api_key: Optional[str] = None """API key for Aleph Alpha API.""" stop_sequences: Optional[List[str]] = None """Stop sequences to use.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exist in environment."""
aleph_alpha_api_key = get_from_dict_or_env( values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY" ) try: import aleph_alpha_client values["client"] = aleph_alpha_client.Client(token=aleph_alpha_api_key) except ImportError: raise ImportError( "Could not import aleph_alpha_client python package. " "Please install it with `pip install aleph_alpha_client`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling the Aleph Alpha API.""" return { "maximum_tokens": self.maximum_tokens, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, "n": self.n, "repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501 "use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501 "penalty_bias": self.penalty_bias, "penalty_exceptions": self.penalty_exceptions, "penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501 "best_of": self.best_of, "logit_bias": self.logit_bias, "log_probs": self.log_probs, "tokens": self.tokens, "disable_optimizations": self.disable_optimizations, "minimum_tokens": self.minimum_tokens, "echo": self.echo,
"use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501 "sequence_penalty": self.sequence_penalty, "sequence_penalty_min_length": self.sequence_penalty_min_length, "use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501 "completion_bias_inclusion": self.completion_bias_inclusion, "completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501 "completion_bias_exclusion": self.completion_bias_exclusion, "completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501 "contextual_control_threshold": self.contextual_control_threshold, "control_log_additive": self.control_log_additive, "repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501 "raw_completion": self.raw_completion, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "aleph_alpha" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Aleph Alpha's completion endpoint. Args: prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = aleph_alpha("Tell me a joke.") """ from aleph_alpha_client import CompletionRequest, Prompt params = self._default_params if self.stop_sequences is not None and stop is not None: raise ValueError( "stop sequences found in both the input and default params." ) elif self.stop_sequences is not None: params["stop_sequences"] = self.stop_sequences else: params["stop_sequences"] = stop params = {**params, **kwargs} request = CompletionRequest(prompt=Prompt.from_text(prompt), **params) response = self.client.complete(model=self.model, request=request) text = response.completions[0].completion # If stop tokens are provided, Aleph Alpha's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop_sequences is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
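A short usage sketch for the wrapper above (model name and stop sequence are illustrative; the key can also come from ``ALEPH_ALPHA_API_KEY``):

.. code-block:: python

    from langchain.llms import AlephAlpha

    llm = AlephAlpha(
        model="luminous-base",
        maximum_tokens=64,
        temperature=0.3,
        stop_sequences=["Q:"],
        aleph_alpha_api_key="my-api-key",  # placeholder
    )
    # stop_sequences set on the instance conflict with a per-call `stop`,
    # so pass one or the other, not both.
    print(llm("Q: What is the largest planet?\nA:"))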
Source code for langchain.llms.baseten """Wrapper around Baseten deployed model API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Field from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM logger = logging.getLogger(__name__) [docs]class Baseten(LLM): """Use your Baseten models in Langchain To use, you should have the ``baseten`` python package installed, and run ``baseten.login()`` with your Baseten API key. The required ``model`` param can be either a model id or model version id. Using a model version ID will result in slightly faster invocation. Any other model parameters can also be passed in with the format input={model_param: value, ...} The Baseten model must accept a dictionary of input with the key "prompt" and return a dictionary with a key "data" which maps to a list of response strings. Example: .. code-block:: python from langchain.llms import Baseten my_model = Baseten(model="MODEL_ID") output = my_model("prompt") """ model: str input: Dict[str, Any] = Field(default_factory=dict) model_kwargs: Dict[str, Any] = Field(default_factory=dict) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of model.""" return "baseten"
def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call to Baseten deployed model endpoint.""" try: import baseten except ImportError as exc: raise ValueError( "Could not import Baseten Python package. " "Please install it with `pip install baseten`." ) from exc # get the model and version try: model = baseten.deployed_model_version_id(self.model) response = model.predict({"prompt": prompt}) except baseten.common.core.ApiError: model = baseten.deployed_model_id(self.model) response = model.predict({"prompt": prompt}) return "".join(response)
Source code for langchain.llms.google_palm """Wrapper around Google's PaLM Text APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import BaseModel, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun, ) from langchain.llms import BaseLLM from langchain.schema import Generation, LLMResult from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator() -> Callable[[Any], Any]: """Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions""" try: import google.api_core.exceptions except ImportError: raise ImportError( "Could not import google-api-core python package. " "Please install it with `pip install google-api-core`." ) multiplier = 2 min_seconds = 1 max_seconds = 60 max_retries = 10 return retry( reraise=True, stop=stop_after_attempt(max_retries), wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(google.api_core.exceptions.ResourceExhausted) | retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable) | retry_if_exception_type(google.api_core.exceptions.GoogleAPIError) ), before_sleep=before_sleep_log(logger, logging.WARNING), )
def generate_with_retry(llm: GooglePalm, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator def _generate_with_retry(**kwargs: Any) -> Any: return llm.client.generate_text(**kwargs) return _generate_with_retry(**kwargs) def _strip_erroneous_leading_spaces(text: str) -> str: """Strip erroneous leading spaces from text. The PaLM API will sometimes erroneously return a single leading space in all lines > 1. This function strips that space. """ has_leading_space = all(not line or line[0] == " " for line in text.split("\n")[1:]) if has_leading_space: return text.replace("\n ", "\n") else: return text [docs]class GooglePalm(BaseLLM, BaseModel): client: Any #: :meta private: google_api_key: Optional[str] model_name: str = "models/text-bison-001" """Model name to use.""" temperature: float = 0.7 """Run inference with this temperature. Must be in the closed interval [0.0, 1.0].""" top_p: Optional[float] = None """Decode using nucleus sampling: consider the smallest set of tokens whose probability sum is at least top_p. Must be in the closed interval [0.0, 1.0].""" top_k: Optional[int] = None """Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive."""
max_output_tokens: Optional[int] = None """Maximum number of tokens to include in a candidate. Must be greater than zero. If unset, will default to 64.""" n: int = 1 """Number of chat completions to generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exist in environment.""" google_api_key = get_from_dict_or_env( values, "google_api_key", "GOOGLE_API_KEY" ) try: import google.generativeai as genai genai.configure(api_key=google_api_key) except ImportError: raise ImportError( "Could not import google-generativeai python package. " "Please install it with `pip install google-generativeai`." ) values["client"] = genai if values["temperature"] is not None and not 0 <= values["temperature"] <= 1: raise ValueError("temperature must be in the range [0.0, 1.0]") if values["top_p"] is not None and not 0 <= values["top_p"] <= 1: raise ValueError("top_p must be in the range [0.0, 1.0]") if values["top_k"] is not None and values["top_k"] <= 0: raise ValueError("top_k must be positive") if values["max_output_tokens"] is not None and values["max_output_tokens"] <= 0: raise ValueError("max_output_tokens must be greater than zero") return values def _generate(
self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: generations = [] for prompt in prompts: completion = generate_with_retry( self, model=self.model_name, prompt=prompt, stop_sequences=stop, temperature=self.temperature, top_p=self.top_p, top_k=self.top_k, max_output_tokens=self.max_output_tokens, candidate_count=self.n, **kwargs, ) prompt_generations = [] for candidate in completion.candidates: raw_text = candidate["output"] stripped_text = _strip_erroneous_leading_spaces(raw_text) prompt_generations.append(Generation(text=stripped_text)) generations.append(prompt_generations) return LLMResult(generations=generations) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: raise NotImplementedError() @property def _llm_type(self) -> str: """Return type of llm.""" return "google_palm"
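A brief usage sketch (assumes ``GOOGLE_API_KEY`` is set in the environment; the parameter values are illustrative):

.. code-block:: python

    from langchain.llms import GooglePalm

    llm = GooglePalm(temperature=0.2, max_output_tokens=128)
    # _generate runs once per prompt; results come back as an LLMResult.
    result = llm.generate(["Explain the difference between a list and a tuple."])
    print(result.generations[0][0].text)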
Source code for langchain.llms.stochasticai """Wrapper around StochasticAI APIs.""" import logging import time from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class StochasticAI(LLM): """Wrapper around StochasticAI large language models. To use, you should have the environment variable ``STOCHASTICAI_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import StochasticAI stochasticai = StochasticAI(api_url="") """ api_url: str = "" """The URL of the deployed StochasticAI model endpoint.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" stochasticai_api_key: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.")
logger.warning( f"""{field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" stochasticai_api_key = get_from_dict_or_env( values, "stochasticai_api_key", "STOCHASTICAI_API_KEY" ) values["stochasticai_api_key"] = stochasticai_api_key return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"endpoint_url": self.api_url}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "stochasticai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to StochasticAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = StochasticAI("Tell me a joke.") """
params = self.model_kwargs or {} params = {**params, **kwargs} response_post = requests.post( url=self.api_url, json={"prompt": prompt, "params": params}, headers={ "apiKey": f"{self.stochasticai_api_key}", "Accept": "application/json", "Content-Type": "application/json", }, ) response_post.raise_for_status() response_post_json = response_post.json() completed = False while not completed: response_get = requests.get( url=response_post_json["data"]["responseUrl"], headers={ "apiKey": f"{self.stochasticai_api_key}", "Accept": "application/json", "Content-Type": "application/json", }, ) response_get.raise_for_status() response_get_json = response_get.json()["data"] text = response_get_json.get("completion") completed = text is not None time.sleep(0.5) text = text[0] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text
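A usage sketch for the submit-then-poll flow above; the deployment URL is a placeholder copied from the Stochastic dashboard, and the key is read from ``STOCHASTICAI_API_KEY`` if not passed:

.. code-block:: python

    from langchain.llms import StochasticAI

    llm = StochasticAI(
        api_url="https://api.stochastic.ai/v1/modelApi/submit/your-model",  # placeholder
        max_tokens=128,  # unknown fields are moved into model_kwargs by build_extra
    )
    print(llm("Tell me a joke."))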
Source code for langchain.llms.cohere """Wrapper around Cohere APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import Extra, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(llm: Cohere) -> Callable[[Any], Any]: import cohere min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type(cohere.error.CohereError)), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.generate(**kwargs) return _completion_with_retry(**kwargs) [docs]class Cohere(LLM): """Wrapper around Cohere large language models.
To use, you should have the ``cohere`` python package installed, and the environment variable ``COHERE_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Cohere cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = None """Model name to use.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.75 """A non-negative float that tunes the degree of randomness in generation.""" k: int = 0 """Number of most likely tokens to consider at each step.""" p: int = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency. Between 0 and 1.""" presence_penalty: float = 0.0 """Penalizes repeated tokens. Between 0 and 1.""" truncate: Optional[str] = None """Specify how the client handles inputs longer than the maximum token length: Truncate from START, END or NONE""" max_retries: int = 10 """Maximum number of retries to make when generating.""" cohere_api_key: Optional[str] = None stop: Optional[List[str]] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator()
def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exist in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ImportError( "Could not import cohere python package. " "Please install it with `pip install cohere`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_tokens": self.max_tokens, "temperature": self.temperature, "k": self.k, "p": self.p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "truncate": self.truncate, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "cohere" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Cohere's generate endpoint. Args:
prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = cohere("Tell me a joke.") """ params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop params = {**params, **kwargs} response = completion_with_retry( self, model=self.model, prompt=prompt, **params ) text = response.generations[0].text # If stop tokens are provided, Cohere's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
Source code for langchain.llms.openlm from typing import Any, Dict from pydantic import root_validator from langchain.llms.openai import BaseOpenAI [docs]class OpenLM(BaseOpenAI): @property def _invocation_params(self) -> Dict[str, Any]: return {**{"model": self.model_name}, **super()._invocation_params} @root_validator() def validate_environment(cls, values: Dict) -> Dict: try: import openlm values["client"] = openlm.Completion except ImportError: raise ValueError( "Could not import openlm python package. " "Please install it with `pip install openlm`." ) if values["streaming"]: raise ValueError("Streaming not supported with openlm") return values
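A minimal sketch, assuming the provider key for the chosen model is configured as openlm expects (the model name is illustrative):

.. code-block:: python

    from langchain.llms import OpenLM

    # openlm routes OpenAI-style completion calls to multiple providers.
    llm = OpenLM(model_name="text-davinci-003")
    print(llm("The first person to walk on the moon was "))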
Source code for langchain.llms.replicate """Wrapper around Replicate API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class Replicate(LLM): """Wrapper around Replicate models. To use, you should have the ``replicate`` python package installed, and the environment variable ``REPLICATE_API_TOKEN`` set with your API token. You can find your token here: https://replicate.com/account The model param is required, but any other model parameters can also be passed in with the format input={model_param: value, ...} Example: .. code-block:: python from langchain.llms import Replicate replicate = Replicate(model="stability-ai/stable-diffusion: \ 27b93a2413e7f36cd83da926f365628\ 0b2931564ff050bf9575f1fdf9bcd7478", input={"image_dimensions": "512x512"}) """ model: str input: Dict[str, Any] = Field(default_factory=dict) model_kwargs: Dict[str, Any] = Field(default_factory=dict) replicate_api_token: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exist in environment.""" replicate_api_token = get_from_dict_or_env( values, "replicate_api_token", "REPLICATE_API_TOKEN" ) values["replicate_api_token"] = replicate_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model": self.model, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of model.""" return "replicate" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call to replicate endpoint.""" try: import replicate as replicate_python except ImportError:
raise ImportError( "Could not import replicate python package. " "Please install it with `pip install replicate`." ) # get the model and version model_str, version_str = self.model.split(":") model = replicate_python.models.get(model_str) version = model.versions.get(version_str) # sort through the openapi schema to get the name of the first input input_properties = sorted( version.openapi_schema["components"]["schemas"]["Input"][ "properties" ].items(), key=lambda item: item[1].get("x-order", 0), ) first_input_name = input_properties[0][0] inputs = {first_input_name: prompt, **self.input} iterator = replicate_python.run(self.model, input={**inputs, **kwargs}) return "".join([output for output in iterator])
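A usage sketch for the wrapper above; the ``owner/name:version-id`` string is a placeholder — real version hashes are listed on a model's API page at replicate.com:

.. code-block:: python

    from langchain.llms import Replicate

    llm = Replicate(
        model="owner/model-name:version-id",  # placeholder; the version is required
        input={"temperature": 0.5, "max_length": 200},  # assumed model params
    )
    print(llm("Explain containers to a five-year-old."))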
Source code for langchain.llms.predictionguard """Wrapper around Prediction Guard APIs.""" import logging from typing import Any, Dict, List, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class PredictionGuard(LLM): """Wrapper around Prediction Guard large language models. To use, you should have the ``predictionguard`` python package installed, and the environment variable ``PREDICTIONGUARD_TOKEN`` set with your access token, or pass it as a named parameter to the constructor. To use Prediction Guard's API along with OpenAI models, set the environment variable ``OPENAI_API_KEY`` with your OpenAI API key as well. Example: .. code-block:: python pgllm = PredictionGuard(model="MPT-7B-Instruct", token="my-access-token", output={ "type": "boolean" }) """ client: Any #: :meta private: model: Optional[str] = "MPT-7B-Instruct" """Model name to use.""" output: Optional[Dict[str, Any]] = None """The output type or structure for controlling the LLM output.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.75 """A non-negative float that tunes the degree of randomness in generation.""" token: Optional[str] = None """Your Prediction Guard access token.""" stop: Optional[List[str]] = None
class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the access token and python package exist in environment.""" token = get_from_dict_or_env(values, "token", "PREDICTIONGUARD_TOKEN") try: import predictionguard as pg values["client"] = pg.Client(token=token) except ImportError: raise ImportError( "Could not import predictionguard python package. " "Please install it with `pip install predictionguard`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling the Prediction Guard API.""" return { "max_tokens": self.max_tokens, "temperature": self.temperature, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "predictionguard" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Prediction Guard's model API. Args: prompt: The prompt to pass into the model.
Example: .. code-block:: python response = pgllm("Tell me a joke.") """ import predictionguard as pg params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop response = pg.Completion.create( model=self.model, prompt=prompt, output=self.output, temperature=params["temperature"], max_tokens=params["max_tokens"], **kwargs, ) text = response["choices"][0]["text"] # If stop tokens are provided, Prediction Guard's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
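A sketch of controlled output, mirroring the boolean example in the class docstring (the token is read from ``PREDICTIONGUARD_TOKEN`` if not passed):

.. code-block:: python

    from langchain.llms import PredictionGuard

    pgllm = PredictionGuard(
        model="MPT-7B-Instruct",
        output={"type": "boolean"},  # constrain the completion shape
    )
    print(pgllm("Is the following review positive? 'I loved every minute of it.'"))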
Source code for langchain.llms.manifest """Wrapper around HazyResearch's Manifest library.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM [docs]class ManifestWrapper(LLM): """Wrapper around HazyResearch's Manifest library.""" client: Any #: :meta private: llm_kwargs: Optional[Dict] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that python package exists in environment.""" try: from manifest import Manifest if not isinstance(values["client"], Manifest): raise ValueError except ImportError: raise ValueError( "Could not import manifest python package. " "Please install it with `pip install manifest-ml`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]: kwargs = self.llm_kwargs or {} return {**self.client.client.get_model_params(), **kwargs} @property def _llm_type(self) -> str: """Return type of llm.""" return "manifest" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to LLM through Manifest.""" if stop is not None and len(stop) != 1: raise NotImplementedError(
f"Manifest currently only supports a single stop token, got {stop}" ) params = self.llm_kwargs or {} params = {**params, **kwargs} if stop is not None: params["stop_token"] = stop return self.client.run(prompt, **params)
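A minimal wiring sketch; the Manifest constructor arguments assume an OpenAI-backed client, and ``client_connection`` is a placeholder key:

.. code-block:: python

    from manifest import Manifest
    from langchain.llms import ManifestWrapper

    manifest = Manifest(client_name="openai", client_connection="my-openai-api-key")
    llm = ManifestWrapper(
        client=manifest,
        llm_kwargs={"temperature": 0.0, "max_tokens": 256},
    )
    print(llm("Tell me a joke."))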
Source code for langchain.llms.sagemaker_endpoint """Wrapper around Sagemaker InvokeEndpoint API.""" from abc import abstractmethod from typing import Any, Dict, Generic, List, Mapping, Optional, TypeVar, Union from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens INPUT_TYPE = TypeVar("INPUT_TYPE", bound=Union[str, List[str]]) OUTPUT_TYPE = TypeVar("OUTPUT_TYPE", bound=Union[str, List[List[float]]]) class ContentHandlerBase(Generic[INPUT_TYPE, OUTPUT_TYPE]): """A handler class to transform input from LLM to a format that SageMaker endpoint expects. Similarly, the class also handles transforming output from the SageMaker endpoint to a format that LLM class expects. """ """ Example: .. code-block:: python class ContentHandler(ContentHandlerBase): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes: input_str = json.dumps({"prompt": prompt, **model_kwargs}) return input_str.encode('utf-8') def transform_output(self, output: bytes) -> str: response_json = json.loads(output.read().decode("utf-8")) return response_json[0]["generated_text"] """ content_type: Optional[str] = "text/plain" """The MIME type of the input data passed to endpoint""" accepts: Optional[str] = "text/plain" """The MIME type of the response data returned from endpoint""" @abstractmethod
def transform_input(self, prompt: INPUT_TYPE, model_kwargs: Dict) -> bytes: """Transforms the input to a format that model can accept as the request Body. Should return bytes or seekable file like object in the format specified in the content_type request header. """ @abstractmethod def transform_output(self, output: bytes) -> OUTPUT_TYPE: """Transforms the output from the model to string that the LLM class expects. """ class LLMContentHandler(ContentHandlerBase[str, str]): """Content handler for LLM class.""" [docs]class SagemakerEndpoint(LLM): """Wrapper around custom Sagemaker Inference Endpoints. To use, you must supply the endpoint name from your deployed Sagemaker model & the region where it is deployed. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Sagemaker endpoint. See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html """ """ Example: .. code-block:: python from langchain import SagemakerEndpoint endpoint_name = ( "my-endpoint-name" ) region_name = ( "us-west-2" ) credentials_profile_name = ( "default" )
se = SagemakerEndpoint( endpoint_name=endpoint_name, region_name=region_name, credentials_profile_name=credentials_profile_name ) """ client: Any #: :meta private: endpoint_name: str = "" """The name of the endpoint from the deployed Sagemaker model. Must be unique within an AWS Region.""" region_name: str = "" """The aws region where the Sagemaker model is deployed, eg. `us-west-2`.""" credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ content_handler: LLMContentHandler """The content handler class that provides an input and output transform functions to handle formats between LLM and the endpoint. """ """ Example: .. code-block:: python from langchain.llms.sagemaker_endpoint import LLMContentHandler class ContentHandler(LLMContentHandler): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes: input_str = json.dumps({"prompt": prompt, **model_kwargs}) return input_str.encode('utf-8') def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8")) return response_json[0]["generated_text"] """ model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" endpoint_kwargs: Optional[Dict] = None """Optional attributes passed to the invoke_endpoint function. See `boto3`_. docs for more info. .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html> """ class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials and python package exist in environment.""" try: import boto3 try: if values["credentials_profile_name"] is not None: session = boto3.Session( profile_name=values["credentials_profile_name"] ) else: # use default credentials session = boto3.Session() values["client"] = session.client( "sagemaker-runtime", region_name=values["region_name"] ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e except ImportError: raise ImportError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_name": self.endpoint_name}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "sagemaker_endpoint" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Sagemaker inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = se("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} _model_kwargs = {**_model_kwargs, **kwargs} _endpoint_kwargs = self.endpoint_kwargs or {} body = self.content_handler.transform_input(prompt, _model_kwargs) content_type = self.content_handler.content_type accepts = self.content_handler.accepts # send request try: response = self.client.invoke_endpoint( EndpointName=self.endpoint_name, Body=body, ContentType=content_type, Accept=accepts, **_endpoint_kwargs, ) except Exception as e: raise ValueError(f"Error raised by inference endpoint: {e}") text = self.content_handler.transform_output(response["Body"])
if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to the sagemaker endpoint. text = enforce_stop_tokens(text, stop) return text
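Putting the pieces above together; the endpoint name, region, and JSON shapes are placeholders — the request and response formats must match whatever the deployed container actually expects:

.. code-block:: python

    import json
    from typing import Dict

    from langchain.llms import SagemakerEndpoint
    from langchain.llms.sagemaker_endpoint import LLMContentHandler

    class ContentHandler(LLMContentHandler):
        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
            # Assumed request shape; adjust to your container's schema.
            return json.dumps({"inputs": prompt, "parameters": model_kwargs}).encode("utf-8")

        def transform_output(self, output: bytes) -> str:
            # Assumed response shape; adjust to your container's schema.
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json[0]["generated_text"]

    llm = SagemakerEndpoint(
        endpoint_name="my-endpoint-name",  # placeholder
        region_name="us-west-2",
        model_kwargs={"temperature": 0.7, "max_new_tokens": 128},
        content_handler=ContentHandler(),
    )
    print(llm("Tell me a joke."))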
Source code for langchain.llms.gooseai """Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class GooseAI(LLM): """Wrapper around GooseAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the model's maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens."""
n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not a default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exist in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try:
import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params:
raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop params = {**params, **kwargs} response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text
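A short usage sketch (the key is read from ``GOOSEAI_API_KEY`` if not passed; the model name mirrors the class docstring):

.. code-block:: python

    from langchain.llms import GooseAI

    llm = GooseAI(model_name="gpt-neo-20b", temperature=0.7, max_tokens=64)
    print(llm("Once upon a time, "))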
https://api.python.langchain.com/en/latest/_modules/langchain/llms/gooseai.html
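A minimal usage sketch for the wrapper above, assuming the ``openai`` package is installed and ``GOOSEAI_API_KEY`` is exported; the model name and the extra ``repetition_penalty`` kwarg are illustrative, not prescribed by this module:

from langchain.llms import GooseAI

# repetition_penalty is not a declared field, so the build_extra validator
# logs a warning and moves it into model_kwargs, from where _default_params
# forwards it to the create call.
llm = GooseAI(model_name="gpt-neo-20b", temperature=0.3, repetition_penalty=1.2)
print(llm("Explain what an LLM wrapper does in one sentence."))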
Source code for langchain.llms.textgen

"""Wrapper around text-generation-webui."""
import logging
from typing import Any, Dict, List, Optional

import requests
from pydantic import Field

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM

logger = logging.getLogger(__name__)

[docs]class TextGen(LLM):
    """Wrapper around the text-generation-webui model.

    To use, you should have the text-generation-webui installed, a model
    loaded, and --api added as a command-line option.

    Suggested installation: use the one-click installer for your OS:
    https://github.com/oobabooga/text-generation-webui#one-click-installers

    Parameters below taken from the text-generation-webui api example:
    https://github.com/oobabooga/text-generation-webui/blob/main/api-examples/api-example.py

    Example:
        .. code-block:: python

            from langchain.llms import TextGen
            llm = TextGen(model_url="http://localhost:8500")
    """

    model_url: str
    """The full URL to the textgen webui, including http[s]://host:port."""

    max_new_tokens: Optional[int] = 250
    """The maximum number of tokens to generate."""

    do_sample: bool = Field(True, alias="do_sample")
    """Whether to use sampling."""

    temperature: Optional[float] = 1.3
    """Primary factor to control the randomness of outputs. 0 = deterministic
    (only the most likely token is used). Higher value = more randomness."""

    top_p: Optional[float] = 0.1
    """If not set to 1, select tokens with probabilities adding up to less than this
    number. Higher value = higher range of possible random results."""

    typical_p: Optional[float] = 1
    """If not set to 1, select only tokens that are at least this much more likely to
    appear than random tokens, given the prior text."""

    epsilon_cutoff: Optional[float] = 0  # In units of 1e-4
    """Epsilon cutoff."""

    eta_cutoff: Optional[float] = 0  # In units of 1e-4
    """Eta cutoff."""

    repetition_penalty: Optional[float] = 1.18
    """Exponential penalty factor for repeating prior tokens. 1 means no penalty,
    higher value = less repetition, lower value = more repetition."""

    top_k: Optional[float] = 40
    """Similar to top_p, but select instead only the top_k most likely tokens.
    Higher value = higher range of possible random results."""

    min_length: Optional[int] = 0
    """Minimum generation length in tokens."""

    no_repeat_ngram_size: Optional[int] = 0
    """If not set to 0, specifies the length of token sets that are completely blocked
    from repeating at all. Higher values = blocks larger phrases, lower values =
    blocks words or letters from repeating. Only 0 or high values are a good idea
    in most cases."""

    num_beams: Optional[int] = 1
    """Number of beams."""

    penalty_alpha: Optional[float] = 0
    """Penalty alpha."""

    length_penalty: Optional[float] = 1
    """Length penalty."""

    early_stopping: bool = Field(False, alias="early_stopping")
    """Whether to stop early."""

    seed: int = Field(-1, alias="seed")
    """Seed (-1 for random)."""

    add_bos_token: bool = Field(True, alias="add_bos_token")
    """Add the bos_token to the beginning of prompts.
    Disabling this can make the replies more creative."""

    truncation_length: Optional[int] = 2048
    """Truncate the prompt up to this length. The leftmost tokens are removed if
    the prompt exceeds this length. Most models require this to be at most 2048."""

    ban_eos_token: bool = Field(False, alias="ban_eos_token")
    """Ban the eos_token. Forces the model to never end the generation prematurely."""

    skip_special_tokens: bool = Field(True, alias="skip_special_tokens")
    """Skip special tokens. Some specific models need this unset."""

    stopping_strings: Optional[List[str]] = []
    """A list of strings to stop generation when encountered."""

    streaming: bool = False
    """Whether to stream the results, token by token (currently unimplemented)."""

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling textgen."""
        return {
            "max_new_tokens": self.max_new_tokens,
            "do_sample": self.do_sample,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "typical_p": self.typical_p,
            "epsilon_cutoff": self.epsilon_cutoff,
            "eta_cutoff": self.eta_cutoff,
            "repetition_penalty": self.repetition_penalty,
            "top_k": self.top_k,
            "min_length": self.min_length,
            "no_repeat_ngram_size": self.no_repeat_ngram_size,
            "num_beams": self.num_beams,
            "penalty_alpha": self.penalty_alpha,
            "length_penalty": self.length_penalty,
            "early_stopping": self.early_stopping,
            "seed": self.seed,
            "add_bos_token": self.add_bos_token,
            "truncation_length": self.truncation_length,
            "ban_eos_token": self.ban_eos_token,
            "skip_special_tokens": self.skip_special_tokens,
            "stopping_strings": self.stopping_strings,
        }

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_url": self.model_url}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "textgen"

    def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
        """Perform a sanity check and prepare parameters in the format textgen needs.

        Args:
            stop (Optional[List[str]]): List of stop sequences for textgen.

        Returns:
            Dictionary containing the combined parameters.
        """
        # Raise an error if stop sequences appear in both the input and the
        # default params.
        if self.stopping_strings and stop is not None:
            raise ValueError("`stop` found in both the input and default params.")

        params = self._default_params
        # Use whichever is configured, or default to an empty list.
        params["stop"] = self.stopping_strings or stop or []
        return params

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call the textgen web API and return the output.

        Args:
            prompt: The prompt to use for generation.
            stop: A list of strings to stop generation when encountered.

        Returns:
            The generated text.

        Example:
            .. code-block:: python

                from langchain.llms import TextGen
                llm = TextGen(model_url="http://localhost:5000")
                llm("Write a story about llamas.")
        """
        if self.streaming:
            raise ValueError("`streaming` option currently unsupported.")

        url = f"{self.model_url}/api/v1/generate"
        params = self._get_parameters(stop)
        request = params.copy()
        request["prompt"] = prompt
        response = requests.post(url, json=request)

        if response.status_code == 200:
            result = response.json()["results"][0]["text"]
            print(prompt + result)
        else:
            print(f"ERROR: response: {response}")
            result = ""

        return result

https://api.python.langchain.com/en/latest/_modules/langchain/llms/textgen.html
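A short sketch of how the stop handling above behaves, assuming a text-generation-webui server is running locally with --api enabled (the URL, stop string, and prompt are placeholders):

from langchain.llms import TextGen

llm = TextGen(model_url="http://localhost:5000", stopping_strings=["\n\n"])
# stopping_strings is already configured on the instance, so also passing
# `stop` to the call would raise ValueError inside _get_parameters;
# supply stop sequences one way or the other, not both.
text = llm("List three facts about llamas:")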
Source code for langchain.llms.nlpcloud

"""Wrapper around NLPCloud APIs."""
from typing import Any, Dict, List, Mapping, Optional

from pydantic import Extra, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env

[docs]class NLPCloud(LLM):
    """Wrapper around NLPCloud large language models.

    To use, you should have the ``nlpcloud`` python package installed, and the
    environment variable ``NLPCLOUD_API_KEY`` set with your API key.

    Example:
        .. code-block:: python

            from langchain.llms import NLPCloud
            nlpcloud = NLPCloud(model="gpt-neox-20b")
    """

    client: Any  #: :meta private:

    model_name: str = "finetuned-gpt-neox-20b"
    """Model name to use."""

    temperature: float = 0.7
    """What sampling temperature to use."""

    min_length: int = 1
    """The minimum number of tokens to generate in the completion."""

    max_length: int = 256
    """The maximum number of tokens to generate in the completion."""

    length_no_input: bool = True
    """Whether min_length and max_length should include the length of the input."""

    remove_input: bool = True
    """Remove the input text from the API response."""

    remove_end_sequence: bool = True
    """Whether or not to remove the end sequence token."""

    bad_words: List[str] = []
    """List of tokens not allowed to be generated."""

    top_p: int = 1
    """Total probability mass of tokens to consider at each step."""

    top_k: int = 50
    """The number of highest probability tokens to keep for top-k filtering."""

    repetition_penalty: float = 1.0
    """Penalizes repeated tokens. 1.0 means no penalty."""

    length_penalty: float = 1.0
    """Exponential penalty to the length."""

    do_sample: bool = True
    """Whether to use sampling (True) or greedy decoding."""

    num_beams: int = 1
    """Number of beams for beam search."""

    early_stopping: bool = False
    """Whether to stop beam search at num_beams sentences."""

    num_return_sequences: int = 1
    """How many completions to generate for each prompt."""

    nlpcloud_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key and python package exist in the environment."""
        nlpcloud_api_key = get_from_dict_or_env(
            values, "nlpcloud_api_key", "NLPCLOUD_API_KEY"
        )
        try:
            import nlpcloud

            values["client"] = nlpcloud.Client(
                values["model_name"], nlpcloud_api_key, gpu=True, lang="en"
            )
        except ImportError:
            raise ImportError(
                "Could not import nlpcloud python package. "
                "Please install it with `pip install nlpcloud`."
            )
        return values

    @property
    def _default_params(self) -> Mapping[str, Any]:
        """Get the default parameters for calling the NLPCloud API."""
        return {
            "temperature": self.temperature,
            "min_length": self.min_length,
            "max_length": self.max_length,
            "length_no_input": self.length_no_input,
            "remove_input": self.remove_input,
            "remove_end_sequence": self.remove_end_sequence,
            "bad_words": self.bad_words,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "repetition_penalty": self.repetition_penalty,
            "length_penalty": self.length_penalty,
            "do_sample": self.do_sample,
            "num_beams": self.num_beams,
            "early_stopping": self.early_stopping,
            "num_return_sequences": self.num_return_sequences,
        }

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_name": self.model_name}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "nlpcloud"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to NLPCloud's create endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: At most one stop sequence is supported per generation.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = nlpcloud("Tell me a joke.")
        """
        if stop and len(stop) > 1:
            raise ValueError(
                "NLPCloud only supports a single stop sequence per generation. "
                "Pass in a list of length 1."
            )
        elif stop and len(stop) == 1:
            end_sequence = stop[0]
        else:
            end_sequence = None
        params = {**self._default_params, **kwargs}
        response = self.client.generation(prompt, end_sequence=end_sequence, **params)
        return response["generated_text"]

https://api.python.langchain.com/en/latest/_modules/langchain/llms/nlpcloud.html
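A minimal sketch of the single-stop-sequence constraint implemented by _call above, assuming ``nlpcloud`` is installed and ``NLPCLOUD_API_KEY`` is exported (the stop string is a placeholder):

from langchain.llms import NLPCloud

nlpcloud = NLPCloud(model_name="finetuned-gpt-neox-20b")
# _call forwards a single stop sequence as end_sequence; a list of length
# greater than one raises ValueError.
joke = nlpcloud("Tell me a joke.", stop=["\n"])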
Source code for langchain.llms.bananadev

"""Wrapper around Banana API."""
import logging
from typing import Any, Dict, List, Mapping, Optional

from pydantic import Extra, Field, root_validator

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)

[docs]class Banana(LLM):
    """Wrapper around Banana large language models.

    To use, you should have the ``banana-dev`` python package installed,
    and the environment variable ``BANANA_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import Banana
            banana = Banana(model_key="")
    """

    model_key: str = ""
    """Model endpoint to use."""

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""

    banana_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}

        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                logger.warning(
                    f"""{field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key exists in the environment."""
        banana_api_key = get_from_dict_or_env(
            values, "banana_api_key", "BANANA_API_KEY"
        )
        values["banana_api_key"] = banana_api_key
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            **{"model_key": self.model_key},
            **{"model_kwargs": self.model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "bananadev"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call to the Banana endpoint."""
        try:
            import banana_dev as banana
        except ImportError:
            raise ImportError(
                "Could not import banana-dev python package. "
                "Please install it with `pip install banana-dev`."
            )
        params = self.model_kwargs or {}
        params = {**params, **kwargs}
        api_key = self.banana_api_key
        model_key = self.model_key
        model_inputs = {
            # a json payload specific to your model
            "prompt": prompt,
            **params,
        }
        response = banana.run(api_key, model_key, model_inputs)
        try:
            text = response["modelOutputs"][0]["output"]
        except (KeyError, TypeError):
            # The response may lack "modelOutputs" entirely, so fall back to
            # the raw response when building the error message.
            returned = response.get("modelOutputs", response)
            raise ValueError(
                "Response should be of schema: {'output': 'text'}."
                f"\nResponse was: {returned}"
                "\nTo fix this:"
                "\n- fork the source repo of the Banana model"
                "\n- modify app.py to return the above schema"
                "\n- deploy that as a custom repo"
            )
        if stop is not None:
            # The stop tokens are not enforced by the model parameters,
            # so apply them to the returned text here.
            text = enforce_stop_tokens(text, stop)
        return text

https://api.python.langchain.com/en/latest/_modules/langchain/llms/bananadev.html
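A hedged sketch of calling a Banana deployment, assuming ``banana-dev`` is installed, ``BANANA_API_KEY`` is exported, and the deployed model returns the {'output': 'text'} schema the wrapper expects; the model key is a placeholder:

from langchain.llms import Banana

llm = Banana(model_key="your-model-key")  # placeholder deployment key
# The endpoint must return {"modelOutputs": [{"output": "..."}]},
# otherwise _call raises the ValueError shown above.
print(llm("Say hello."))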