Source code for langchain.memory.entity
[docs]class ConversationEntityMemory(BaseChatMemory):
"""Entity extractor & summarizer to memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT
entity_cache: List[str] = []
k: int = 3
chat_history_key: str = "history"
entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore)
@property
def buffer(self) -> List[BaseMessage]:
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return ["entities", self.chat_history_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=inputs[prompt_input_key],
)
if output.strip() == "NONE":
entities = []
else:
entities = [w.strip() for w in output.split(",")]
entity_summaries = {}
for entity in entities:
entity_summaries[entity] = self.entity_store.get(entity, "")
self.entity_cache = entities
if self.return_messages:
buffer: Any = self.buffer[-self.k * 2 :]
else:
buffer = buffer_string
return {
self.chat_history_key: buffer,
"entities": entity_summaries,
}
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
input_data = inputs[prompt_input_key]
chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
for entity in self.entity_cache:
existing_summary = self.entity_store.get(entity, "")
output = chain.predict(
summary=existing_summary,
entity=entity,
history=buffer_string,
input=input_data,
)
self.entity_store.set(entity, output.strip())
[docs] def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
self.entity_cache.clear()
self.entity_store.clear()
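A minimal usage sketch for ConversationEntityMemory (illustrative, not part of the original page; assumes an OpenAI API key is configured in the environment):
from langchain.llms import OpenAI
from langchain.memory import ConversationEntityMemory

memory = ConversationEntityMemory(llm=OpenAI(temperature=0))
# save_context triggers entity extraction; load_memory_variables returns
# the windowed history plus per-entity summaries under the "entities" key
memory.save_context({"input": "Deven is working on a hackathon project"}, {"output": "Sounds fun!"})
print(memory.load_memory_variables({"input": "What is Deven working on?"}))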
Source code for langchain.memory.buffer
from typing import Any, Dict, List, Optional
from pydantic import root_validator
from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from langchain.memory.utils import get_prompt_input_key
from langchain.schema import get_buffer_string
[docs]class ConversationBufferMemory(BaseChatMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
@property
def buffer(self) -> Any:
"""String buffer of memory."""
if self.return_messages:
return self.chat_memory.messages
else:
return get_buffer_string(
self.chat_memory.messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
[docs]class ConversationStringBufferMemory(BaseMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
"""Prefix to use for AI generated responses."""
buffer: str = ""
output_key: Optional[str] = None
input_key: Optional[str] = None
memory_key: str = "history" #: :meta private:
@root_validator()
def validate_chains(cls, values: Dict) -> Dict:
"""Validate that return messages is not True."""
if values.get("return_messages", False):
raise ValueError(
"return_messages must be False for ConversationStringBufferMemory"
)
return values
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
self.buffer += "\n" + "\n".join([human, ai])
[docs] def clear(self) -> None:
"""Clear memory contents."""
self.buffer = ""
Source code for langchain.memory.readonly
from typing import Any, Dict, List
from langchain.schema import BaseMemory
[docs]class ReadOnlySharedMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
[docs] def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
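A short sketch of wrapping a memory read-only (illustrative, not from the original page):
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory

shared = ConversationBufferMemory()
shared.save_context({"input": "hi"}, {"output": "hello!"})
readonly = ReadOnlySharedMemory(memory=shared)
readonly.save_context({"input": "x"}, {"output": "y"})  # no-op by design
print(readonly.load_memory_variables({}))  # same history as the shared memory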
Source code for langchain.memory.summary
from __future__ import annotations
from typing import Any, Dict, List, Type
from pydantic import BaseModel, root_validator
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import SUMMARY_PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
SystemMessage,
get_buffer_string,
)
class SummarizerMixin(BaseModel):
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
prompt: BasePromptTemplate = SUMMARY_PROMPT
summary_message_cls: Type[BaseMessage] = SystemMessage
def predict_new_summary(
self, messages: List[BaseMessage], existing_summary: str
) -> str:
new_lines = get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
chain = LLMChain(llm=self.llm, prompt=self.prompt)
return chain.predict(summary=existing_summary, new_lines=new_lines)
[docs]class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
"""Conversation summarizer to memory."""
buffer: str = ""
memory_key: str = "history" #: :meta private:
[docs] @classmethod
def from_messages(
cls,
llm: BaseLanguageModel,
chat_memory: BaseChatMessageHistory,
*,
summarize_step: int = 2,
**kwargs: Any,
) -> ConversationSummaryMemory:
obj = cls(llm=llm, chat_memory=chat_memory, **kwargs)
for i in range(0, len(obj.chat_memory.messages), summarize_step):
obj.buffer = obj.predict_new_summary(
obj.chat_memory.messages[i : i + summarize_step], obj.buffer
)
return obj
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
if self.return_messages:
buffer: Any = [self.summary_message_cls(content=self.buffer)]
else:
buffer = self.buffer
return {self.memory_key: buffer}
@root_validator()
def validate_prompt_input_variables(cls, values: Dict) -> Dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values["prompt"].input_variables
expected_keys = {"summary", "new_lines"}
if expected_keys != set(prompt_variables):
raise ValueError(
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
return values
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self.buffer = self.predict_new_summary(
self.chat_memory.messages[-2:], self.buffer
)
[docs] def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.buffer = ""
Source code for langchain.memory.vectorstore
"""Class for a VectorStore-backed memory object."""
from typing import Any, Dict, List, Optional, Union
from pydantic import Field
from langchain.memory.chat_memory import BaseMemory
from langchain.memory.utils import get_prompt_input_key
from langchain.schema import Document
from langchain.vectorstores.base import VectorStoreRetriever
[docs]class VectorStoreRetrieverMemory(BaseMemory):
"""Class for a VectorStore-backed memory object."""
retriever: VectorStoreRetriever = Field(exclude=True)
"""VectorStoreRetriever object to connect to."""
memory_key: str = "history" #: :meta private:
"""Key name to locate the memories in the result of load_memory_variables."""
input_key: Optional[str] = None
"""Key name to index the inputs to load_memory_variables."""
return_docs: bool = False
"""Whether or not to return the result of querying the database directly."""
@property
def memory_variables(self) -> List[str]:
"""The list of keys emitted from the load_memory_variables method."""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
[docs] def load_memory_variables(
self, inputs: Dict[str, Any]
) -> Dict[str, Union[List[Document], str]]:
"""Return history buffer."""
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = self.retriever.get_relevant_documents(query)
result: Union[List[Document], str]
if not self.return_docs:
result = "\n".join([doc.page_content for doc in docs])
else:
result = docs
return {self.memory_key: result}
def _form_documents(
self, inputs: Dict[str, Any], outputs: Dict[str, str]
) -> List[Document]:
"""Format context from this conversation to buffer."""
# Each document should only include the current turn, not the chat history
filtered_inputs = {k: v for k, v in inputs.items() if k != self.memory_key}
texts = [
f"{k}: {v}"
for k, v in list(filtered_inputs.items()) + list(outputs.items())
]
page_content = "\n".join(texts)
return [Document(page_content=page_content)]
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
self.retriever.add_documents(documents)
[docs] def clear(self) -> None:
"""Nothing to clear."""
Source code for langchain.memory.simple
from typing import Any, Dict, List
from langchain.schema import BaseMemory
[docs]class SimpleMemory(BaseMemory):
"""Simple memory for storing context or other bits of information that shouldn't
ever change between prompts.
"""
memories: Dict[str, Any] = dict()
@property
def memory_variables(self) -> List[str]:
return list(self.memories.keys())
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
return self.memories
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed, my memory is set in stone."""
pass
[docs] def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
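A one-line sketch (illustrative): SimpleMemory just echoes fixed key/value pairs into every prompt.
from langchain.memory import SimpleMemory

memory = SimpleMemory(memories={"project": "Apollo"})
print(memory.load_memory_variables({}))  # {'project': 'Apollo'}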
Source code for langchain.memory.buffer_window
from typing import Any, Dict, List
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import BaseMessage, get_buffer_string
[docs]class ConversationBufferWindowMemory(BaseChatMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
k: int = 5
@property
def buffer(self) -> List[BaseMessage]:
"""String buffer of memory."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
buffer: Any = self.buffer[-self.k * 2 :] if self.k > 0 else []
if not self.return_messages:
buffer = get_buffer_string(
buffer,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: buffer}
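A minimal usage sketch showing the window behavior (illustrative, not from the original page):
from langchain.memory import ConversationBufferWindowMemory

memory = ConversationBufferWindowMemory(k=1)
memory.save_context({"input": "first"}, {"output": "one"})
memory.save_context({"input": "second"}, {"output": "two"})
# only the last k=1 exchange (two messages) is returned
print(memory.load_memory_variables({}))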
Source code for langchain.memory.combined
import warnings
from typing import Any, Dict, List, Set
from pydantic import validator
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import BaseMemory
[docs]class CombinedMemory(BaseMemory):
"""Class for combining multiple memories' data together."""
memories: List[BaseMemory]
"""For tracking all the memories that should be accessed."""
@validator("memories")
def check_repeated_memory_variable(
cls, value: List[BaseMemory]
) -> List[BaseMemory]:
all_variables: Set[str] = set()
for val in value:
overlap = all_variables.intersection(val.memory_variables)
if overlap:
raise ValueError(
f"The same variables {overlap} are found in multiple"
"memory object, which is not allowed by CombinedMemory."
)
all_variables |= set(val.memory_variables)
return value
@validator("memories")
def check_input_key(cls, value: List[BaseMemory]) -> List[BaseMemory]:
"""Check that if memories are of type BaseChatMemory that input keys exist."""
for val in value:
if isinstance(val, BaseChatMemory):
if val.input_key is None:
warnings.warn(
"When using CombinedMemory, "
"input keys should be so the input is known. "
f" Was not set on {val}"
)
return value
@property
def memory_variables(self) -> List[str]:
"""All the memory variables that this instance provides."""
"""Collected from the all the linked memories."""
memory_variables = []
for memory in self.memories:
memory_variables.extend(memory.memory_variables)
return memory_variables
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load all vars from sub-memories."""
memory_data: Dict[str, Any] = {}
# Collect vars from all sub-memories
for memory in self.memories:
data = memory.load_memory_variables(inputs)
memory_data = {
**memory_data,
**data,
}
return memory_data
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this session for every memory."""
# Save context for all sub-memories
for memory in self.memories:
memory.save_context(inputs, outputs)
[docs] def clear(self) -> None:
"""Clear context from this session for every memory."""
for memory in self.memories:
memory.clear()
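A minimal usage sketch for CombinedMemory (illustrative; the memory_key and input_key values are arbitrary, but memory keys must not overlap across sub-memories):
from langchain.memory import CombinedMemory, ConversationBufferMemory, ConversationBufferWindowMemory

memory = CombinedMemory(memories=[
    ConversationBufferMemory(memory_key="full_history", input_key="input"),
    ConversationBufferWindowMemory(memory_key="recent_history", input_key="input", k=2),
])
memory.save_context({"input": "hi"}, {"output": "hello!"})
print(memory.load_memory_variables({}))  # both keys are present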
Source code for langchain.memory.chat_message_histories.cassandra
import json
import logging
from typing import List
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
_message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_KEYSPACE_NAME = "chat_history"
DEFAULT_TABLE_NAME = "message_store"
DEFAULT_USERNAME = "cassandra"
DEFAULT_PASSWORD = "cassandra"
DEFAULT_PORT = 9042
[docs]class CassandraChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in Cassandra.
Args:
contact_points: list of IPs to connect to the Cassandra cluster
session_id: arbitrary key that is used to store the messages
of a single chat session.
port: port to connect to Cassandra cluster
username: username to connect to Cassandra cluster
password: password to connect to Cassandra cluster
keyspace_name: name of the keyspace to use
table_name: name of the table to use
"""
def __init__(
self,
contact_points: List[str],
session_id: str,
port: int = DEFAULT_PORT,
username: str = DEFAULT_USERNAME,
password: str = DEFAULT_PASSWORD,
keyspace_name: str = DEFAULT_KEYSPACE_NAME,
table_name: str = DEFAULT_TABLE_NAME,
):
self.contact_points = contact_points
self.session_id = session_id
self.port = port
self.username = username
self.password = password
self.keyspace_name = keyspace_name
self.table_name = table_name
try:
from cassandra import (
AuthenticationFailed,
OperationTimedOut,
UnresolvableContactPoints,
)
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
except ImportError:
raise ValueError(
"Could not import cassandra-driver python package. "
"Please install it with `pip install cassandra-driver`."
)
self.cluster: Cluster = Cluster(
contact_points,
port=port,
auth_provider=PlainTextAuthProvider(
username=self.username, password=self.password
),
)
try:
self.session = self.cluster.connect()
except (
AuthenticationFailed,
UnresolvableContactPoints,
OperationTimedOut,
) as error:
logger.error(
"Unable to establish connection with \
cassandra chat message history database"
)
raise error
self._prepare_cassandra()
def _prepare_cassandra(self) -> None:
"""Create the keyspace and table if they don't exist yet"""
from cassandra import OperationTimedOut, Unavailable
try:
self.session.execute(
f"""CREATE KEYSPACE IF NOT EXISTS
{self.keyspace_name} WITH REPLICATION =
{{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }};"""
)
except (OperationTimedOut, Unavailable) as error:
logger.error(
f"Unable to create cassandra \
chat message history keyspace: {self.keyspace_name}."
)
raise error
self.session.set_keyspace(self.keyspace_name)
try:
self.session.execute(
f"""CREATE TABLE IF NOT EXISTS
{self.table_name} (id UUID, session_id varchar,
history text, PRIMARY KEY ((session_id), id) );"""
)
except (OperationTimedOut, Unavailable) as error:
logger.error(
f"Unable to create cassandra \
chat message history table: {self.table_name}"
)
raise error
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from Cassandra"""
from cassandra import ReadFailure, ReadTimeout, Unavailable
try:
rows = self.session.execute(
f"""SELECT * FROM {self.table_name}
WHERE session_id = '{self.session_id}' ;"""
)
except (Unavailable, ReadTimeout, ReadFailure) as error:
logger.error("Unable to Retreive chat history messages from cassadra")
raise error
if rows:
items = [json.loads(row.history) for row in rows]
else:
items = []
messages = messages_from_dict(items)
return messages
[docs] def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in Cassandra"""
import uuid
from cassandra import Unavailable, WriteFailure, WriteTimeout
try:
self.session.execute(
"""INSERT INTO message_store
(id, session_id, history) VALUES (%s, %s, %s);""",
(uuid.uuid4(), self.session_id, json.dumps(_message_to_dict(message))),
)
except (Unavailable, WriteTimeout, WriteFailure) as error:
logger.error("Unable to write chat history messages to cassandra")
raise error
[docs] def clear(self) -> None:
"""Clear session memory from Cassandra"""
from cassandra import OperationTimedOut, Unavailable
try:
self.session.execute(
f"DELETE FROM {self.table_name} WHERE session_id = '{self.session_id}';"
)
except (Unavailable, OperationTimedOut) as error:
logger.error("Unable to clear chat history messages from cassandra")
raise error
def __del__(self) -> None:
if self.session:
self.session.shutdown()
if self.cluster:
self.cluster.shutdown()
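A minimal usage sketch (illustrative, not from the original page; assumes a reachable Cassandra node at the given contact point):
from langchain.memory.chat_message_histories.cassandra import CassandraChatMessageHistory

history = CassandraChatMessageHistory(contact_points=["127.0.0.1"], session_id="demo-session")
history.add_user_message("hi")
history.add_ai_message("hello!")
print(history.messages)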
Source code for langchain.memory.chat_message_histories.cosmos_db
"""Azure CosmosDB Memory History."""
from __future__ import annotations
import logging
from types import TracebackType
from typing import TYPE_CHECKING, Any, List, Optional, Type
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
messages_from_dict,
messages_to_dict,
)
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from azure.cosmos import ContainerProxy
[docs]class CosmosDBChatMessageHistory(BaseChatMessageHistory):
"""Chat history backed by Azure CosmosDB."""
def __init__(
self,
cosmos_endpoint: str,
cosmos_database: str,
cosmos_container: str,
session_id: str,
user_id: str,
credential: Any = None,
connection_string: Optional[str] = None,
ttl: Optional[int] = None,
cosmos_client_kwargs: Optional[dict] = None,
):
"""
Initializes a new instance of the CosmosDBChatMessageHistory class.
Make sure to call prepare_cosmos or use the context manager to make
sure your database is ready.
Either a credential or a connection string must be provided.
:param cosmos_endpoint: The connection endpoint for the Azure Cosmos DB account.
:param cosmos_database: The name of the database to use.
:param cosmos_container: The name of the container to use.
:param session_id: The session ID to use, can be overwritten while loading.
:param user_id: The user ID to use, can be overwritten while loading.
:param credential: The credential to use to authenticate to Azure Cosmos DB.
:param connection_string: The connection string to use to authenticate.
:param ttl: The time to live (in seconds) to use for documents in the container.
:param cosmos_client_kwargs: Additional kwargs to pass to the CosmosClient.
"""
self.cosmos_endpoint = cosmos_endpoint
self.cosmos_database = cosmos_database
self.cosmos_container = cosmos_container
self.credential = credential
self.conn_string = connection_string
self.session_id = session_id
self.user_id = user_id
self.ttl = ttl
self.messages: List[BaseMessage] = []
try:
from azure.cosmos import ( # pylint: disable=import-outside-toplevel # noqa: E501
CosmosClient,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
) from exc
if self.credential:
self._client = CosmosClient(
url=self.cosmos_endpoint,
credential=self.credential,
**cosmos_client_kwargs or {},
)
elif self.conn_string:
self._client = CosmosClient.from_connection_string(
conn_str=self.conn_string,
**cosmos_client_kwargs or {},
)
else:
raise ValueError("Either a connection string or a credential must be set.")
self._container: Optional[ContainerProxy] = None
[docs] def prepare_cosmos(self) -> None:
"""Prepare the CosmosDB client.
Use this function or the context manager to make sure your database is ready.
"""
try:
from azure.cosmos import ( # pylint: disable=import-outside-toplevel # noqa: E501
PartitionKey,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
) from exc
database = self._client.create_database_if_not_exists(self.cosmos_database)
self._container = database.create_container_if_not_exists(
self.cosmos_container,
partition_key=PartitionKey("/user_id"),
default_ttl=self.ttl,
)
self.load_messages()
def __enter__(self) -> "CosmosDBChatMessageHistory":
"""Context manager entry point."""
self._client.__enter__()
self.prepare_cosmos()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""Context manager exit"""
self.upsert_messages()
self._client.__exit__(exc_type, exc_val, traceback)
[docs] def load_messages(self) -> None:
"""Retrieve the messages from Cosmos"""
if not self._container:
raise ValueError("Container not initialized")
try:
from azure.cosmos.exceptions import ( # pylint: disable=import-outside-toplevel # noqa: E501
CosmosHttpResponseError,
)
except ImportError as exc:
raise ImportError(
"You must install the azure-cosmos package to use the CosmosDBChatMessageHistory." # noqa: E501
) from exc
try:
item = self._container.read_item(
item=self.session_id, partition_key=self.user_id
)
except CosmosHttpResponseError:
logger.info("no session found")
return
if "messages" in item and len(item["messages"]) > 0:
self.messages = messages_from_dict(item["messages"])
[docs] def add_message(self, message: BaseMessage) -> None:
"""Add a self-created message to the store"""
self.messages.append(message)
self.upsert_messages()
[docs] def upsert_messages(self) -> None:
"""Update the cosmosdb item."""
if not self._container:
raise ValueError("Container not initialized")
self._container.upsert_item(
body={
"id": self.session_id,
"user_id": self.user_id,
"messages": messages_to_dict(self.messages),
}
)
[docs] def clear(self) -> None:
"""Clear session memory from this memory and cosmos."""
self.messages = []
if self._container:
self._container.delete_item(
item=self.session_id, partition_key=self.user_id
)
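A minimal usage sketch (illustrative; the endpoint and connection string are placeholders). The context manager calls prepare_cosmos() on entry and upsert_messages() on exit:
from langchain.memory.chat_message_histories.cosmos_db import CosmosDBChatMessageHistory

history = CosmosDBChatMessageHistory(
    cosmos_endpoint="https://<account>.documents.azure.com:443/",
    cosmos_database="chat",
    cosmos_container="messages",
    session_id="demo-session",
    user_id="demo-user",
    connection_string="<connection-string>",
)
with history:
    history.add_user_message("hi")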
Source code for langchain.memory.chat_message_histories.in_memory
from typing import List
from pydantic import BaseModel
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
)
[docs]class ChatMessageHistory(BaseChatMessageHistory, BaseModel):
messages: List[BaseMessage] = []
[docs] def add_message(self, message: BaseMessage) -> None:
"""Add a self-created message to the store"""
self.messages.append(message)
[docs] def clear(self) -> None:
self.messages = []
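A minimal usage sketch (illustrative, not from the original page):
from langchain.memory import ChatMessageHistory

history = ChatMessageHistory()
history.add_user_message("hi")
history.add_ai_message("hello!")
print(history.messages)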
Source code for langchain.memory.chat_message_histories.file
import json
import logging
from pathlib import Path
from typing import List
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
messages_from_dict,
messages_to_dict,
)
logger = logging.getLogger(__name__)
[docs]class FileChatMessageHistory(BaseChatMessageHistory):
"""
Chat message history that stores history in a local file.
Args:
file_path: path of the local file to store the messages.
"""
def __init__(self, file_path: str):
self.file_path = Path(file_path)
if not self.file_path.exists():
self.file_path.touch()
self.file_path.write_text(json.dumps([]))
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from the local file"""
items = json.loads(self.file_path.read_text())
messages = messages_from_dict(items)
return messages
[docs] def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in the local file"""
messages = messages_to_dict(self.messages)
messages.append(messages_to_dict([message])[0])
self.file_path.write_text(json.dumps(messages))
[docs] def clear(self) -> None:
"""Clear session memory from the local file"""
self.file_path.write_text(json.dumps([]))
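A minimal usage sketch (illustrative; the file name is arbitrary):
from langchain.memory.chat_message_histories.file import FileChatMessageHistory

history = FileChatMessageHistory("chat_history.json")
history.add_user_message("hi")
print(history.messages)  # messages persist across processes via the JSON file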
Source code for langchain.memory.chat_message_histories.redis
import json
import logging
from typing import List, Optional
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
_message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
[docs]class RedisChatMessageHistory(BaseChatMessageHistory):
def __init__(
self,
session_id: str,
url: str = "redis://localhost:6379/0",
key_prefix: str = "message_store:",
ttl: Optional[int] = None,
):
try:
import redis
except ImportError:
raise ImportError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
self.redis_client = redis.Redis.from_url(url=url)
except redis.exceptions.ConnectionError as error:
logger.error(error)
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
@property
def key(self) -> str:
"""Construct the record key to use"""
return self.key_prefix + self.session_id
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from Redis"""
_items = self.redis_client.lrange(self.key, 0, -1)
items = [json.loads(m.decode("utf-8")) for m in _items[::-1]]
messages = messages_from_dict(items)
return messages
[docs] def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in Redis"""
self.redis_client.lpush(self.key, json.dumps(_message_to_dict(message)))
if self.ttl:
self.redis_client.expire(self.key, self.ttl)
[docs] def clear(self) -> None:
"""Clear session memory from Redis"""
self.redis_client.delete(self.key)
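A minimal usage sketch (illustrative; assumes a local Redis server):
from langchain.memory.chat_message_histories.redis import RedisChatMessageHistory

history = RedisChatMessageHistory(session_id="demo-session", url="redis://localhost:6379/0", ttl=600)
history.add_user_message("hi")
print(history.messages)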
Source code for langchain.memory.chat_message_histories.postgres
import json
import logging
from typing import List
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
_message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_CONNECTION_STRING = "postgresql://postgres:mypassword@localhost/chat_history"
[docs]class PostgresChatMessageHistory(BaseChatMessageHistory):
def __init__(
self,
session_id: str,
connection_string: str = DEFAULT_CONNECTION_STRING,
table_name: str = "message_store",
):
import psycopg
from psycopg.rows import dict_row
try:
self.connection = psycopg.connect(connection_string)
self.cursor = self.connection.cursor(row_factory=dict_row)
except psycopg.OperationalError as error:
logger.error(error)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""CREATE TABLE IF NOT EXISTS {self.table_name} (
id SERIAL PRIMARY KEY,
session_id TEXT NOT NULL,
message JSONB NOT NULL
);"""
self.cursor.execute(create_table_query)
self.connection.commit()
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from PostgreSQL"""
query = f"SELECT message FROM {self.table_name} WHERE session_id = %s;"
self.cursor.execute(query, (self.session_id,))
items = [record["message"] for record in self.cursor.fetchall()]
messages = messages_from_dict(items)
return messages
[docs] def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in PostgreSQL"""
from psycopg import sql
query = sql.SQL("INSERT INTO {} (session_id, message) VALUES (%s, %s);").format(
sql.Identifier(self.table_name)
)
self.cursor.execute(
query, (self.session_id, json.dumps(_message_to_dict(message)))
)
self.connection.commit()
[docs] def clear(self) -> None:
"""Clear session memory from PostgreSQL"""
query = f"DELETE FROM {self.table_name} WHERE session_id = %s;"
self.cursor.execute(query, (self.session_id,))
self.connection.commit()
def __del__(self) -> None:
if self.cursor:
self.cursor.close()
if self.connection:
self.connection.close()
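A minimal usage sketch (illustrative; the connection string is a placeholder for a reachable PostgreSQL instance):
from langchain.memory.chat_message_histories.postgres import PostgresChatMessageHistory

history = PostgresChatMessageHistory(
    session_id="demo-session",
    connection_string="postgresql://postgres:mypassword@localhost/chat_history",
)
history.add_user_message("hi")
print(history.messages)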
Source code for langchain.memory.chat_message_histories.dynamodb
import logging
from typing import List, Optional
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
_message_to_dict,
messages_from_dict,
messages_to_dict,
)
logger = logging.getLogger(__name__)
[docs]class DynamoDBChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in AWS DynamoDB.
This class expects that a DynamoDB table with name `table_name`
and a partition Key of `SessionId` is present.
Args:
table_name: name of the DynamoDB table
session_id: arbitrary key that is used to store the messages
of a single chat session.
endpoint_url: URL of the AWS endpoint to connect to. This argument
is optional and useful for test purposes, like using Localstack.
If you plan to use AWS cloud service, you normally don't have to
worry about setting the endpoint_url.
"""
def __init__(
self, table_name: str, session_id: str, endpoint_url: Optional[str] = None
):
import boto3
if endpoint_url:
client = boto3.resource("dynamodb", endpoint_url=endpoint_url)
else:
client = boto3.resource("dynamodb")
self.table = client.Table(table_name)
self.session_id = session_id
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from DynamoDB"""
from botocore.exceptions import ClientError
response = None
try:
response = self.table.get_item(Key={"SessionId": self.session_id})
except ClientError as error:
if error.response["Error"]["Code"] == "ResourceNotFoundException":
logger.warning("No record found with session id: %s", self.session_id)
else:
logger.error(error)
if response and "Item" in response:
items = response["Item"]["History"]
else:
items = []
messages = messages_from_dict(items)
return messages
[docs] def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in DynamoDB"""
from botocore.exceptions import ClientError
messages = messages_to_dict(self.messages)
_message = _message_to_dict(message)
messages.append(_message)
try:
self.table.put_item(
Item={"SessionId": self.session_id, "History": messages}
)
except ClientError as err:
logger.error(err)
[docs] def clear(self) -> None:
"""Clear session memory from DynamoDB"""
from botocore.exceptions import ClientError
try:
self.table.delete_item(Key={"SessionId": self.session_id})
except ClientError as err:
logger.error(err)
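A minimal usage sketch (illustrative; assumes a DynamoDB table named "message_store" with partition key SessionId already exists and AWS credentials are configured):
from langchain.memory.chat_message_histories.dynamodb import DynamoDBChatMessageHistory

history = DynamoDBChatMessageHistory(table_name="message_store", session_id="demo-session")
history.add_user_message("hi")
print(history.messages)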
Source code for langchain.memory.chat_message_histories.momento
from __future__ import annotations
import json
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Optional
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
_message_to_dict,
messages_from_dict,
)
from langchain.utils import get_from_env
if TYPE_CHECKING:
import momento
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists
):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(f"Unexpected response cache creation: {create_cache_response}")
[docs]class MomentoChatMessageHistory(BaseChatMessageHistory):
"""Chat message history cache that uses Momento as a backend.
See https://gomomento.com/"""
def __init__(
self,
session_id: str,
cache_client: momento.CacheClient,
cache_name: str,
*,
key_prefix: str = "message_store:",
ttl: Optional[timedelta] = None,
ensure_cache_exists: bool = True,
):
"""Instantiate a chat message history cache that uses Momento as a backend.
Note: to instantiate the cache client passed to MomentoChatMessageHistory,
you must have a Momento account at https://gomomento.com/.
Args:
session_id (str): The session ID to use for this chat session.
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the messages.
key_prefix (str, optional): The prefix to apply to the cache key.
Defaults to "message_store:".
ttl (Optional[timedelta], optional): The TTL to use for the messages.
Defaults to None, i.e. the default TTL of the cache will be used.
ensure_cache_exists (bool, optional): Create the cache if it doesn't exist.
Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClientObject
"""
try:
from momento import CacheClient
from momento.requests import CollectionTtl
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if not isinstance(cache_client, CacheClient):
raise TypeError("cache_client must be a momento.CacheClient object.")
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.key = key_prefix + session_id
self.cache_client = cache_client
self.cache_name = cache_name
if ttl is not None:
self.ttl = CollectionTtl.of(ttl)
else:
self.ttl = CollectionTtl.from_cache_ttl()
[docs] @classmethod
def from_client_params(
cls,
session_id: str,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> MomentoChatMessageHistory:
"""Construct cache from CacheClient parameters."""
try:
from momento import CacheClient, Configurations, CredentialProvider
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if configuration is None:
configuration = Configurations.Laptop.v1()
auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
credentials = CredentialProvider.from_string(auth_token)
cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
return cls(session_id, cache_client, cache_name, ttl=ttl, **kwargs)
@property
def messages(self) -> list[BaseMessage]: # type: ignore[override]
"""Retrieve the messages from Momento.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
Returns:
list[BaseMessage]: List of cached messages
"""
from momento.responses import CacheListFetch
fetch_response = self.cache_client.list_fetch(self.cache_name, self.key)
if isinstance(fetch_response, CacheListFetch.Hit):
items = [json.loads(m) for m in fetch_response.value_list_string]
return messages_from_dict(items)
elif isinstance(fetch_response, CacheListFetch.Miss):
return []
elif isinstance(fetch_response, CacheListFetch.Error):
raise fetch_response.inner_exception
else:
raise Exception(f"Unexpected response: {fetch_response}")
[docs] def add_message(self, message: BaseMessage) -> None:
"""Store a message in the cache.
Args:
message (BaseMessage): The message object to store.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheListPushBack
item = json.dumps(_message_to_dict(message))
push_response = self.cache_client.list_push_back(
self.cache_name, self.key, item, ttl=self.ttl
)
if isinstance(push_response, CacheListPushBack.Success):
return None
elif isinstance(push_response, CacheListPushBack.Error):
raise push_response.inner_exception
else:
raise Exception(f"Unexpected response: {push_response}")
[docs] def clear(self) -> None:
"""Remove the session's messages from the cache.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheDelete
delete_response = self.cache_client.delete(self.cache_name, self.key)
if isinstance(delete_response, CacheDelete.Success):
return None
elif isinstance(delete_response, CacheDelete.Error):
raise delete_response.inner_exception
else:
raise Exception(f"Unexpected response: {delete_response}")
Source code for langchain.memory.chat_message_histories.mongodb
import json
import logging
from typing import List
from langchain.schema import (
BaseChatMessageHistory,
BaseMessage,
_message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_DBNAME = "chat_history"
DEFAULT_COLLECTION_NAME = "message_store"
[docs]class MongoDBChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in MongoDB.
Args:
connection_string: connection string to connect to MongoDB
session_id: arbitrary key that is used to store the messages
of a single chat session.
database_name: name of the database to use
collection_name: name of the collection to use
"""
def __init__(
self,
connection_string: str,
session_id: str,
database_name: str = DEFAULT_DBNAME,
collection_name: str = DEFAULT_COLLECTION_NAME,
):
from pymongo import MongoClient, errors
self.connection_string = connection_string
self.session_id = session_id
self.database_name = database_name
self.collection_name = collection_name
try:
self.client: MongoClient = MongoClient(connection_string)
except errors.ConnectionFailure as error:
logger.error(error)
self.db = self.client[database_name]
self.collection = self.db[collection_name]
self.collection.create_index("SessionId")
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from MongoDB"""
from pymongo import errors
cursor = None
try:
cursor = self.collection.find({"SessionId": self.session_id})
except errors.OperationFailure as error:
logger.error(error)
if cursor:
items = [json.loads(document["History"]) for document in cursor]
else:
items = []
messages = messages_from_dict(items)
return messages
[docs] def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in MongoDB"""
from pymongo import errors
try:
self.collection.insert_one(
{
"SessionId": self.session_id,
"History": json.dumps(_message_to_dict(message)),
}
)
except errors.WriteError as err:
logger.error(err)
[docs] def clear(self) -> None:
"""Clear session memory from MongoDB"""
from pymongo import errors
try:
self.collection.delete_many({"SessionId": self.session_id})
except errors.WriteError as err:
logger.error(err)
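A minimal usage sketch (illustrative; the connection string is a placeholder for a reachable MongoDB instance):
from langchain.memory.chat_message_histories.mongodb import MongoDBChatMessageHistory

history = MongoDBChatMessageHistory(
    connection_string="mongodb://localhost:27017/",
    session_id="demo-session",
)
history.add_user_message("hi")
print(history.messages)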
Source code for langchain.llms.deepinfra
"""Wrapper around DeepInfra APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
DEFAULT_MODEL_ID = "google/flan-t5-xl"
[docs]class DeepInfra(LLM):
"""Wrapper around DeepInfra deployed models.
To use, you should have the ``requests`` python package installed, and the
environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import DeepInfra
di = DeepInfra(model_id="google/flan-t5-xl",
deepinfra_api_token="my-api-key")
"""
model_id: str = DEFAULT_MODEL_ID
model_kwargs: Optional[dict] = None
deepinfra_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
deepinfra_api_token = get_from_dict_or_env(
values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN"
)
values["deepinfra_api_token"] = deepinfra_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "deepinfra"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to DeepInfra's inference API endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = di("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
_model_kwargs = {**_model_kwargs, **kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"bearer {self.deepinfra_api_token}",
"Content-Type": "application/json",
}
try:
res = requests.post(
f"https://api.deepinfra.com/v1/inference/{self.model_id}",
headers=headers,
json={"input": prompt, **_model_kwargs},
)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
if res.status_code != 200:
raise ValueError(
"Error raised by inference API HTTP code: %s, %s"
% (res.status_code, res.text)
)
try:
t = res.json()
text = t["results"][0]["generated_text"]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {res.text}"
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
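A minimal usage sketch (illustrative; reads DEEPINFRA_API_TOKEN from the environment):
from langchain.llms import DeepInfra

llm = DeepInfra(model_id="google/flan-t5-xl")
print(llm("What is the capital of France?", stop=["\n"]))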
Source code for langchain.llms.rwkv
"""Wrapper for the RWKV model.
Based on https://github.com/saharNooby/rwkv.cpp/blob/master/rwkv/chat_with_bot.py
https://github.com/BlinkDL/ChatRWKV/blob/main/v2/chat.py
"""
from typing import Any, Dict, List, Mapping, Optional, Set
from pydantic import BaseModel, Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
[docs]class RWKV(LLM, BaseModel):
r"""Wrapper around RWKV language models.
To use, you should have the ``rwkv`` python package installed, the
pre-trained model file, and the model's config information.
Example:
.. code-block:: python
from langchain.llms import RWKV
model = RWKV(model="./models/rwkv-3b-fp16.bin", strategy="cpu fp32")
# Simplest invocation
response = model("Once upon a time, ")
"""
model: str
"""Path to the pre-trained RWKV model file."""
tokens_path: str
"""Path to the RWKV tokens file."""
strategy: str = "cpu fp32"
"""Token context window."""
rwkv_verbose: bool = True
"""Print debug information."""
temperature: float = 1.0
"""The temperature to use for sampling."""
top_p: float = 0.5
"""The top-p value to use for sampling."""
penalty_alpha_frequency: float = 0.4
"""Positive values penalize new tokens based on their existing frequency
in the text so far, decreasing the model's likelihood to repeat the same
line verbatim.."""
penalty_alpha_presence: float = 0.4
"""Positive values penalize new tokens based on whether they appear
in the text so far, increasing the model's likelihood to talk about
new topics.."""
CHUNK_LEN: int = 256
"""Batch size for prompt processing."""
max_tokens_per_generation: int = 256
"""Maximum number of tokens to generate."""
client: Any = None #: :meta private:
tokenizer: Any = None #: :meta private:
pipeline: Any = None #: :meta private:
model_tokens: Any = None #: :meta private:
model_state: Any = None #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"verbose": self.verbose,
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_alpha_frequency": self.penalty_alpha_frequency,
"penalty_alpha_presence": self.penalty_alpha_presence,
"CHUNK_LEN": self.CHUNK_LEN,
"max_tokens_per_generation": self.max_tokens_per_generation,
}
@staticmethod
def _rwkv_param_names() -> Set[str]:
"""Get the identifying parameters."""
return {
"verbose",
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
import tokenizers
except ImportError:
raise ImportError(
"Could not import tokenizers python package. "
"Please install it with `pip install tokenizers`."
)
try:
from rwkv.model import RWKV as RWKVMODEL
from rwkv.utils import PIPELINE
values["tokenizer"] = tokenizers.Tokenizer.from_file(values["tokens_path"])
rwkv_keys = cls._rwkv_param_names()
model_kwargs = {k: v for k, v in values.items() if k in rwkv_keys}
model_kwargs["verbose"] = values["rwkv_verbose"]
values["client"] = RWKVMODEL(
values["model"], strategy=values["strategy"], **model_kwargs
)
values["pipeline"] = PIPELINE(values["client"], values["tokens_path"])
except ImportError:
raise ValueError(
"Could not import rwkv python package. "
"Please install it with `pip install rwkv`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**self._default_params,
**{k: v for k, v in self.__dict__.items() if k in RWKV._rwkv_param_names()},
}
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "rwkv-4"
def run_rnn(self, _tokens: List[str], newline_adj: int = 0) -> Any:
AVOID_REPEAT_TOKENS = []
AVOID_REPEAT = ",:?!"
for i in AVOID_REPEAT:
dd = self.pipeline.encode(i)
assert len(dd) == 1
AVOID_REPEAT_TOKENS += dd
tokens = [int(x) for x in _tokens]
self.model_tokens += tokens
out: Any = None
while len(tokens) > 0:
out, self.model_state = self.client.forward(
tokens[: self.CHUNK_LEN], self.model_state
)
tokens = tokens[self.CHUNK_LEN :]
END_OF_LINE = 187
out[END_OF_LINE] += newline_adj # adjust \n probability
if self.model_tokens[-1] in AVOID_REPEAT_TOKENS:
out[self.model_tokens[-1]] = -999999999
return out
def rwkv_generate(self, prompt: str) -> str:
self.model_state = None
self.model_tokens = []
logits = self.run_rnn(self.tokenizer.encode(prompt).ids)
begin = len(self.model_tokens)
out_last = begin
occurrence: Dict = {}
decoded = ""
for i in range(self.max_tokens_per_generation):
for n in occurrence:
logits[n] -= (
self.penalty_alpha_presence
+ occurrence[n] * self.penalty_alpha_frequency
)
token = self.pipeline.sample_logits(
logits, temperature=self.temperature, top_p=self.top_p
)
END_OF_TEXT = 0
if token == END_OF_TEXT:
break
if token not in occurrence:
occurrence[token] = 1
else:
occurrence[token] += 1
logits = self.run_rnn([token])
xxx = self.tokenizer.decode(self.model_tokens[out_last:])
if "\ufffd" not in xxx: # avoid utf-8 display issues
decoded += xxx
out_last = begin + i + 1
if i >= self.max_tokens_per_generation - 100:
break
return decoded
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""RWKV generation
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text = self.rwkv_generate(prompt)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
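A minimal usage sketch for the wrapper above, assuming the rwkv and tokenizers packages are installed and that model weights plus a tokenizer JSON have been downloaded locally (both file paths below are hypothetical):

from langchain.llms import RWKV

# Hypothetical local paths; download a checkpoint and tokenizer file first.
model = RWKV(
    model="./rwkv-4-raven-7b.pth",
    tokens_path="./20B_tokenizer.json",
    strategy="cpu fp32",  # any valid rwkv strategy string
)
print(model("Once upon a time, "))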
Source code for langchain.llms.sagemaker_endpoint
"""Wrapper around Sagemaker InvokeEndpoint API."""
from abc import abstractmethod
from typing import Any, Dict, Generic, List, Mapping, Optional, TypeVar, Union
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
INPUT_TYPE = TypeVar("INPUT_TYPE", bound=Union[str, List[str]])
OUTPUT_TYPE = TypeVar("OUTPUT_TYPE", bound=Union[str, List[List[float]]])
class ContentHandlerBase(Generic[INPUT_TYPE, OUTPUT_TYPE]):
"""A handler class to transform input from LLM to a
format that the SageMaker endpoint expects. Similarly,
the class also handles transforming output from the
SageMaker endpoint to a format that the LLM class expects.
"""
"""
Example:
.. code-block:: python
class ContentHandler(ContentHandlerBase):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({"prompt": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
content_type: Optional[str] = "text/plain"
"""The MIME type of the input data passed to endpoint"""
accepts: Optional[str] = "text/plain"
"""The MIME type of the response data returned from endpoint"""
@abstractmethod
def transform_input(self, prompt: INPUT_TYPE, model_kwargs: Dict) -> bytes:
"""Transforms the input to a format that model can accept
as the request Body. Should return bytes or seekable file
like object in the format specified in the content_type
request header.
"""
@abstractmethod
def transform_output(self, output: bytes) -> OUTPUT_TYPE:
"""Transforms the output from the model to string that
the LLM class expects.
"""
class LLMContentHandler(ContentHandlerBase[str, str]):
"""Content handler for LLM class."""
[docs]class SagemakerEndpoint(LLM):
"""Wrapper around custom Sagemaker Inference Endpoints.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
"""
"""
Example:
.. code-block:: python
from langchain import SagemakerEndpoint
endpoint_name = (
"my-endpoint-name"
)
region_name = (
"us-west-2"
)
credentials_profile_name = (
"default"
)
se = SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region_name,
credentials_profile_name=credentials_profile_name
)
"""
client: Any #: :meta private:
endpoint_name: str = ""
"""The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region."""
region_name: str = ""
"""The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
content_handler: LLMContentHandler
"""The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
"""
"""
Example:
.. code-block:: python
from langchain.llms.sagemaker_endpoint import LLMContentHandler
class ContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({"prompt": prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
endpoint_kwargs: Optional[Dict] = None
"""Optional attributes passed to the invoke_endpoint
function. See the `boto3`_ docs for more info.
.. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values["credentials_profile_name"] is not None:
session = boto3.Session(
profile_name=values["credentials_profile_name"]
)
else:
# use default credentials
session = boto3.Session()
values["client"] = session.client(
"sagemaker-runtime", region_name=values["region_name"]
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_name": self.endpoint_name},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "sagemaker_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Sagemaker inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = se("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
_model_kwargs = {**_model_kwargs, **kwargs}
_endpoint_kwargs = self.endpoint_kwargs or {}
body = self.content_handler.transform_input(prompt, _model_kwargs)
content_type = self.content_handler.content_type
accepts = self.content_handler.accepts
# send request
try:
response = self.client.invoke_endpoint(
EndpointName=self.endpoint_name,
Body=body,
ContentType=content_type,
Accept=accepts,
**_endpoint_kwargs,
)
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
text = self.content_handler.transform_output(response["Body"])
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to the sagemaker endpoint.
text = enforce_stop_tokens(text, stop)
return text
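A minimal end-to-end sketch of the pieces above, assuming a deployed endpoint whose container accepts and returns the JSON shapes shown in the docstrings (the endpoint name, region, and response schema are all assumptions):

import json
from typing import Dict

from langchain import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler

class JSONContentHandler(LLMContentHandler):
    content_type = "application/json"
    accepts = "application/json"

    def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
        # Request shape is container-specific; this mirrors the docstring example.
        return json.dumps({"prompt": prompt, **model_kwargs}).encode("utf-8")

    def transform_output(self, output: bytes) -> str:
        # `output` is the boto3 streaming body; schema mirrors the docstring example.
        response_json = json.loads(output.read().decode("utf-8"))
        return response_json[0]["generated_text"]

se = SagemakerEndpoint(
    endpoint_name="my-endpoint-name",  # hypothetical endpoint
    region_name="us-west-2",
    content_handler=JSONContentHandler(),
)
print(se("Tell me a joke."))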
Source code for langchain.llms.petals
"""Wrapper around Petals API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class Petals(LLM):
"""Wrapper around Petals Bloom models.
To use, you should have the ``petals`` python package installed, and the
environment variable ``HUGGINGFACE_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import Petals
petals = Petals()
"""
client: Any
"""The client to use for the API calls."""
tokenizer: Any
"""The tokenizer to use for the API calls."""
model_name: str = "bigscience/bloom-petals"
"""The model to use."""
temperature: float = 0.7
"""What sampling temperature to use"""
max_new_tokens: int = 256
"""The maximum number of new tokens to generate in the completion."""
top_p: float = 0.9
"""The cumulative probability for top-p sampling."""
top_k: Optional[int] = None
"""The number of highest probability vocabulary tokens
to keep for top-k-filtering."""
do_sample: bool = True
"""Whether or not to use sampling; use greedy decoding otherwise."""
max_length: Optional[int] = None
"""The maximum length of the sequence to be generated."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call
not explicitly specified."""
huggingface_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transfered to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingface_api_key = get_from_dict_or_env(
values, "huggingface_api_key", "HUGGINGFACE_API_KEY"
)
try:
from petals import DistributedBloomForCausalLM
from transformers import BloomTokenizerFast
model_name = values["model_name"]
values["tokenizer"] = BloomTokenizerFast.from_pretrained(model_name)
values["client"] = DistributedBloomForCausalLM.from_pretrained(model_name)
values["huggingface_api_key"] = huggingface_api_key
except ImportError:
raise ValueError(
"Could not import transformers or petals python package."
"Please install with `pip install -U transformers petals`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Petals API."""
normal_params = {
"temperature": self.temperature,
"max_new_tokens": self.max_new_tokens,
"top_p": self.top_p,
"top_k": self.top_k,
"do_sample": self.do_sample,
"max_length": self.max_length,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "petals"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Petals API."""
params = self._default_params
params = {**params, **kwargs}
inputs = self.tokenizer(prompt, return_tensors="pt")["input_ids"]
outputs = self.client.generate(inputs, **params)
text = self.tokenizer.decode(outputs[0])
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
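A minimal usage sketch, assuming the petals and transformers packages are installed and HUGGINGFACE_API_KEY is set in the environment (constructing the wrapper downloads the tokenizer and joins the swarm, which can be slow):

from langchain.llms import Petals

llm = Petals(model_name="bigscience/bloom-petals", max_new_tokens=64)
print(llm("The capital of France is"))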
Source code for langchain.llms.writer
"""Wrapper around Writer APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
[docs]class Writer(LLM):
"""Wrapper around Writer large language models.
To use, you should have the environment variable ``WRITER_API_KEY`` and
``WRITER_ORG_ID`` set with your API key and organization ID respectively.
Example:
.. code-block:: python
from langchain import Writer
writer = Writer(model_id="palmyra-base")
"""
writer_org_id: Optional[str] = None
"""Writer organization ID."""
model_id: str = "palmyra-instruct"
"""Model name to use."""
min_tokens: Optional[int] = None
"""Minimum number of tokens to generate."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
temperature: Optional[float] = None
"""What sampling temperature to use."""
top_p: Optional[float] = None
"""Total probability mass of tokens to consider at each step."""
stop: Optional[List[str]] = None
"""Sequences when completion generation will stop."""
presence_penalty: Optional[float] = None
"""Penalizes repeated tokens regardless of frequency."""
repetition_penalty: Optional[float] = None
"""Penalizes repeated tokens according to frequency."""
best_of: Optional[int] = None
"""Generates this many completions server-side and returns the "best"."""
logprobs: bool = False
"""Whether to return log probabilities."""
n: Optional[int] = None
"""How many completions to generate."""
writer_api_key: Optional[str] = None
"""Writer API key."""
base_url: Optional[str] = None
"""Base url to use, if None decides based on model name."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and organization id exist in environment."""
writer_api_key = get_from_dict_or_env(
values, "writer_api_key", "WRITER_API_KEY"
)
values["writer_api_key"] = writer_api_key
writer_org_id = get_from_dict_or_env(values, "writer_org_id", "WRITER_ORG_ID")
values["writer_org_id"] = writer_org_id
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Writer API."""
return {
"minTokens": self.min_tokens,
"maxTokens": self.max_tokens,
"temperature": self.temperature,
"topP": self.top_p,
"stop": self.stop,
"presencePenalty": self.presence_penalty,
"repetitionPenalty": self.repetition_penalty,
"bestOf": self.best_of,
"logprobs": self.logprobs,
"n": self.n,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id, "writer_org_id": self.writer_org_id},
**self._default_params,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "writer"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Writer's completions endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = Writer("Tell me a joke.")
"""
if self.base_url is not None:
base_url = self.base_url
else:
base_url = (
"https://enterprise-api.writer.com/llm"
f"/organization/{self.writer_org_id}"
f"/model/{self.model_id}/completions"
)
params = {**self._default_params, **kwargs}
response = requests.post(
url=base_url,
headers={
"Authorization": f"{self.writer_api_key}",
"Content-Type": "application/json",
"Accept": "application/json",
},
json={"prompt": prompt, **params},
)
text = response.text
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
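A minimal usage sketch, assuming WRITER_API_KEY and WRITER_ORG_ID are set in the environment; the model id follows the docstring example:

from langchain import Writer

writer = Writer(model_id="palmyra-base", max_tokens=64)
print(writer("Write a tagline for an ice cream shop."))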
Source code for langchain.llms.mosaicml
"""Wrapper around MosaicML APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
INSTRUCTION_KEY = "### Instruction:"
RESPONSE_KEY = "### Response:"
INTRO_BLURB = (
"Below is an instruction that describes a task. "
"Write a response that appropriately completes the request."
)
PROMPT_FOR_GENERATION_FORMAT = """{intro}
{instruction_key}
{instruction}
{response_key}
""".format(
intro=INTRO_BLURB,
instruction_key=INSTRUCTION_KEY,
instruction="{instruction}",
response_key=RESPONSE_KEY,
)
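For clarity, a small sketch of the prompt this template produces when instruction injection is enabled (the instruction text is purely illustrative):

# Illustrative only: what inject_instruction_format=True wraps around a prompt.
example = PROMPT_FOR_GENERATION_FORMAT.format(instruction="Name three colors.")
# example now reads:
#   Below is an instruction that describes a task. Write a response that appropriately completes the request.
#   ### Instruction:
#   Name three colors.
#   ### Response: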
[docs]class MosaicML(LLM):
"""Wrapper around MosaicML's LLM inference service.
To use, you should have the
environment variable ``MOSAICML_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import MosaicML
endpoint_url = (
"https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict"
)
mosaic_llm = MosaicML(
endpoint_url=endpoint_url,
mosaicml_api_token="my-api-key"
)
"""
endpoint_url: str = (
"https://models.hosted-on.mosaicml.hosting/mpt-7b-instruct/v1/predict"
)
"""Endpoint URL to use."""
inject_instruction_format: bool = False
"""Whether to inject the instruction format into the prompt."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
retry_sleep: float = 1.0
"""How long to try sleeping for if a rate limit is encountered"""
mosaicml_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
mosaicml_api_token = get_from_dict_or_env(
values, "mosaicml_api_token", "MOSAICML_API_TOKEN"
)
values["mosaicml_api_token"] = mosaicml_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "mosaicml"
def _transform_prompt(self, prompt: str) -> str:
"""Transform prompt."""
if self.inject_instruction_format:
prompt = PROMPT_FOR_GENERATION_FORMAT.format(
instruction=prompt,
)
return prompt
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
is_retry: bool = False,
**kwargs: Any,
) -> str:
"""Call out to a MosaicML LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = mosaic_llm("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
prompt = self._transform_prompt(prompt)
payload = {"input_strings": [prompt]}
payload.update(_model_kwargs)
payload.update(kwargs)
# HTTP headers for authorization
headers = {
"Authorization": f"{self.mosaicml_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(self.endpoint_url, headers=headers, json=payload)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
try:
parsed_response = response.json()
if "error" in parsed_response:
# if we get rate limited, try sleeping for 1 second
if (
not is_retry
and "rate limit exceeded" in parsed_response["error"].lower()
):
import time
time.sleep(self.retry_sleep)
return self._call(prompt, stop, run_manager, is_retry=True)
raise ValueError(
f"Error raised by inference API: {parsed_response['error']}"
)
# The inference API has changed a couple of times, so we add some handling
# to be robust to multiple response formats.
if isinstance(parsed_response, dict):
if "data" in parsed_response:
output_item = parsed_response["data"]
elif "output" in parsed_response:
output_item = parsed_response["output"]
else:
raise ValueError(
f"No key data or output in response: {parsed_response}"
)
if isinstance(output_item, list):
text = output_item[0]
else:
text = output_item
elif isinstance(parsed_response, list):
first_item = parsed_response[0]
if isinstance(first_item, str):
text = first_item
elif isinstance(first_item, dict):
if "output" in parsed_response:
text = first_item["output"]
else:
raise ValueError(
f"No key data or output in response: {parsed_response}"
)
else:
raise ValueError(f"Unexpected response format: {parsed_response}")
else:
raise ValueError(f"Unexpected response type: {parsed_response}")
text = text[len(prompt) :]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {response.text}"
)
# TODO: replace when MosaicML supports custom stop tokens natively
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
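A minimal usage sketch against the default MPT-7B-Instruct endpoint, assuming MOSAICML_API_TOKEN is set in the environment; whether the endpoint honors extra generation kwargs depends on the deployment:

from langchain.llms import MosaicML

mosaic_llm = MosaicML(inject_instruction_format=True)
print(mosaic_llm("Name three colors."))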
Source code for langchain.llms.replicate
"""Wrapper around Replicate API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class Replicate(LLM):
"""Wrapper around Replicate models.
To use, you should have the ``replicate`` python package installed,
and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
You can find your token here: https://replicate.com/account
The model param is required, but any other model parameters can also
be passed in with the format input={model_param: value, ...}
Example:
.. code-block:: python
from langchain.llms import Replicate
replicate = Replicate(model="stability-ai/stable-diffusion: \
27b93a2413e7f36cd83da926f365628\
0b2931564ff050bf9575f1fdf9bcd7478",
input={"image_dimensions": "512x512"})
"""
model: str
input: Dict[str, Any] = Field(default_factory=dict)
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
replicate_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transfered to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
replicate_api_token = get_from_dict_or_env(
values, "REPLICATE_API_TOKEN", "REPLICATE_API_TOKEN"
)
values["replicate_api_token"] = replicate_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "replicate"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to replicate endpoint."""
try:
import replicate as replicate_python
except ImportError:
raise ImportError(
"Could not import replicate python package. "
"Please install it with `pip install replicate`."
)
# get the model and version
model_str, version_str = self.model.split(":")
model = replicate_python.models.get(model_str)
version = model.versions.get(version_str)
# sort through the openapi schema to get the name of the first input
input_properties = sorted(
version.openapi_schema["components"]["schemas"]["Input"][
"properties"
].items(),
key=lambda item: item[1].get("x-order", 0),
)
first_input_name = input_properties[0][0]
inputs = {first_input_name: prompt, **self.input}
iterator = replicate_python.run(self.model, input={**inputs, **kwargs})
return "".join([output for output in iterator])
Source code for langchain.llms.huggingface_endpoint
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
[docs]class HuggingFaceEndpoint(LLM):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# build the request payload from model kwargs and call-time kwargs
params = {**_model_kwargs, **kwargs}
parameter_payload = {"inputs": prompt, "parameters": params}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
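A minimal usage sketch, assuming HUGGINGFACEHUB_API_TOKEN is set and a text-generation Inference Endpoint is deployed (the URL below is a placeholder):

from langchain.llms import HuggingFaceEndpoint

hf = HuggingFaceEndpoint(
    endpoint_url="https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud",  # placeholder
    task="text-generation",
)
print(hf("Tell me a joke."))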
Source code for langchain.llms.self_hosted_hugging_face
"""Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware."""
import importlib.util
import logging
from typing import Any, Callable, List, Mapping, Optional
from pydantic import Extra
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.self_hosted import SelfHostedPipeline
from langchain.llms.utils import enforce_stop_tokens
DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
logger = logging.getLogger(__name__)
def _generate_text(
pipeline: Any,
prompt: str,
*args: Any,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> str:
"""Inference function to send to the remote hardware.
Accepts a Hugging Face pipeline (or more likely,
a key pointing to such a pipeline on the cluster's object store)
and returns generated text.
"""
response = pipeline(prompt, *args, **kwargs)
if pipeline.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif pipeline.task == "text2text-generation":
text = response[0]["generated_text"]
elif pipeline.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _load_transformer(
model_id: str = DEFAULT_MODEL_ID,
task: str = DEFAULT_TASK,
device: int = 0,
model_kwargs: Optional[dict] = None,
) -> Any:
"""Inference function to send to the remote hardware.
Accepts a huggingface model_id and returns a pipeline for the task.
"""
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
from transformers import pipeline as hf_pipeline
_model_kwargs = model_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
try:
if task == "text-generation":
model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs)
elif task in ("text2text-generation", "summarization"):
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs)
else:
raise ValueError(
f"Got invalid task {task}, "
f"currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ValueError(
f"Could not load the {task} model due to missing dependencies."
) from e
if importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
pipeline = hf_pipeline(
task=task,
model=model,
tokenizer=tokenizer,
device=device,
model_kwargs=_model_kwargs,
)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
return pipeline
[docs]class SelfHostedHuggingFaceLLM(SelfHostedPipeline):
"""Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example using from_model_id:
.. code-block:: python
from langchain.llms import SelfHostedHuggingFaceLLM
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceLLM(
model_id="google/flan-t5-large", task="text2text-generation",
hardware=gpu
)
Example passing a function that generates a pipeline (because the pipeline is not serializable):
.. code-block:: python
from langchain.llms import SelfHostedHuggingFaceLLM
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
def get_pipeline():
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer
)
return pipe
hf = SelfHostedHuggingFaceLLM(
model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu)
"""
model_id: str = DEFAULT_MODEL_ID
"""Hugging Face model_id to load the model."""
task: str = DEFAULT_TASK
"""Hugging Face task ("text-generation", "text2text-generation" or
"summarization")."""
device: int = 0
"""Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
hardware: Any
"""Remote hardware to send the inference function to."""
model_reqs: List[str] = ["./", "transformers", "torch"]
"""Requirements to install on hardware to inference the model."""
model_load_fn: Callable = _load_transformer
"""Function to load the model remotely on the server."""
inference_fn: Callable = _generate_text #: :meta private:
"""Inference function to send to the remote hardware."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def __init__(self, **kwargs: Any):
"""Construct the pipeline remotely using an auxiliary function.
The load function must be importable so that it can be shipped to and
run on the server, i.e. defined in a module and not in a REPL or closure.
Then, initialize the remote inference function.
"""
load_fn_kwargs = {
"model_id": kwargs.get("model_id", DEFAULT_MODEL_ID),
"task": kwargs.get("task", DEFAULT_TASK),
"device": kwargs.get("device", 0),
"model_kwargs": kwargs.get("model_kwargs", None),
}
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
return "selfhosted_huggingface_pipeline"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return self.client(
pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs
)
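A minimal usage sketch mirroring the first docstring example, assuming the runhouse package is installed and the named cluster can be launched (cluster name and instance type are assumptions):

import runhouse as rh
from langchain.llms import SelfHostedHuggingFaceLLM

gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")  # assumed cluster spec
hf = SelfHostedHuggingFaceLLM(
    model_id="google/flan-t5-large", task="text2text-generation", hardware=gpu
)
print(hf("Translate to German: I love programming."))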
Source code for langchain.llms.stochasticai
"""Wrapper around StochasticAI APIs."""
import logging
import time
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class StochasticAI(LLM):
"""Wrapper around StochasticAI large language models.
To use, you should have the environment variable ``STOCHASTICAI_API_KEY``
set with your API key.
Example:
.. code-block:: python
from langchain.llms import StochasticAI
stochasticai = StochasticAI(api_url="")
"""
api_url: str = ""
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
stochasticai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transfered to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
stochasticai_api_key = get_from_dict_or_env(
values, "stochasticai_api_key", "STOCHASTICAI_API_KEY"
)
values["stochasticai_api_key"] = stochasticai_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"endpoint_url": self.api_url},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "stochasticai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to StochasticAI's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = StochasticAI("Tell me a joke.")
"""
params = self.model_kwargs or {}
params = {**params, **kwargs}
response_post = requests.post(
url=self.api_url,
json={"prompt": prompt, "params": params},
headers={
"apiKey": f"{self.stochasticai_api_key}",
"Accept": "application/json",
"Content-Type": "application/json",
},
)
response_post.raise_for_status()
response_post_json = response_post.json()
completed = False
while not completed:
response_get = requests.get(
url=response_post_json["data"]["responseUrl"],
headers={
"apiKey": f"{self.stochasticai_api_key}",
"Accept": "application/json",
"Content-Type": "application/json",
},
)
response_get.raise_for_status()
response_get_json = response_get.json()["data"]
text = response_get_json.get("completion")
completed = text is not None
time.sleep(0.5)
text = text[0]
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
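A minimal usage sketch, assuming STOCHASTICAI_API_KEY is set and api_url points at a deployed model (the URL below is a placeholder). Note that the call blocks, polling the response URL every 0.5 seconds until a completion appears:

from langchain.llms import StochasticAI

# Placeholder submit URL; use the one shown for your deployed model.
llm = StochasticAI(api_url="https://api.stochastic.ai/v1/modelApi/submit/PLACEHOLDER")
print(llm("Tell me a joke."))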
Source code for langchain.llms.beam
"""Wrapper around Beam API."""
import base64
import json
import logging
import subprocess
import textwrap
import time
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
DEFAULT_NUM_TRIES = 10
DEFAULT_SLEEP_TIME = 4
[docs]class Beam(LLM):
"""Wrapper around Beam API for gpt2 large language model.
To use, you should have the ``beam-sdk`` python package installed,
and the environment variable ``BEAM_CLIENT_ID`` set with your client id
and ``BEAM_CLIENT_SECRET`` set with your client secret. Information on how
to get these is available here: https://docs.beam.cloud/account/api-keys.
The wrapper can then be called as follows, where the name, cpu, memory, gpu,
python version, and python packages can be updated accordingly. Once deployed,
the instance can be called.
Example:
.. code-block:: python
llm = Beam(model_name="gpt2",
name="langchain-gpt2",
cpu=8,
memory="32Gi",
gpu="A10G",
python_version="python3.8",
python_packages=[
"diffusers[torch]>=0.10",
"transformers",
"torch",
"pillow",
"accelerate",
"safetensors",
"xformers",],
max_length=50)
llm._deploy()
call_result = llm._call(input)
"""
model_name: str = ""
name: str = ""
cpu: str = ""
memory: str = ""
gpu: str = ""
python_version: str = ""
python_packages: List[str] = []
max_length: str = ""
url: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
beam_client_id: str = ""
beam_client_secret: str = ""
app_id: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transfered to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
beam_client_id = get_from_dict_or_env(
values, "beam_client_id", "BEAM_CLIENT_ID"
)
beam_client_secret = get_from_dict_or_env(
values, "beam_client_secret", "BEAM_CLIENT_SECRET"
)
values["beam_client_id"] = beam_client_id
values["beam_client_secret"] = beam_client_secret
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_name": self.model_name,
"name": self.name,
"cpu": self.cpu,
"memory": self.memory,
"gpu": self.gpu,
"python_version": self.python_version,
"python_packages": self.python_packages,
"max_length": self.max_length,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "beam"
[docs] def app_creation(self) -> None:
"""Creates a Python file which will contain your Beam app definition."""
script = textwrap.dedent(
"""\
import beam
# The environment your code will run on
app = beam.App(
name="{name}",
cpu={cpu},
memory="{memory}",
gpu="{gpu}",
python_version="{python_version}",
python_packages={python_packages},
)
app.Trigger.RestAPI(
inputs={{"prompt": beam.Types.String(), "max_length": beam.Types.String()}},
outputs={{"text": beam.Types.String()}},
handler="run.py:beam_langchain",
)
"""
)
script_name = "app.py"
with open(script_name, "w") as file:
file.write(
script.format(
name=self.name,
cpu=self.cpu,
memory=self.memory,
gpu=self.gpu,
python_version=self.python_version,
python_packages=self.python_packages,
)
)
[docs] def run_creation(self) -> None:
"""Creates a Python file which will be deployed on beam."""
script = textwrap.dedent(
"""
import os
import transformers
from transformers import GPT2LMHeadModel, GPT2Tokenizer
model_name = "{model_name}"
def beam_langchain(**inputs):
prompt = inputs["prompt"]
length = inputs["max_length"]
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
encodedPrompt = tokenizer.encode(prompt, return_tensors='pt')
outputs = model.generate(encodedPrompt, max_length=int(length),
do_sample=True, pad_token_id=tokenizer.eos_token_id)
output = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(output)
return {{"text": output}}
"""
)
script_name = "run.py"
with open(script_name, "w") as file:
file.write(script.format(model_name=self.model_name))
def _deploy(self) -> str:
"""Call to Beam."""
try:
import beam # type: ignore
if beam.__path__ == "":
raise ImportError
except ImportError:
raise ImportError(
"Could not import beam python package. "
"Please install it with `curl "
"https://raw.githubusercontent.com/slai-labs"
"/get-beam/main/get-beam.sh -sSfL | sh`."
)
self.app_creation()
self.run_creation()
process = subprocess.run(
"beam deploy app.py", shell=True, capture_output=True, text=True
)
if process.returncode == 0:
output = process.stdout
logger.info(output)
lines = output.split("\n")
for line in lines:
if line.startswith(" i Send requests to: https://apps.beam.cloud/"):
self.app_id = line.split("/")[-1]
self.url = line.split(":")[1].strip()
return self.app_id
raise ValueError(
f"""Failed to retrieve the appID from the deployment output.
Deployment output: {output}"""
)
else:
raise ValueError(f"Deployment failed. Error: {process.stderr}")
@property
def authorization(self) -> str:
if self.beam_client_id:
credential_str = self.beam_client_id + ":" + self.beam_client_secret
else:
credential_str = self.beam_client_secret
return base64.b64encode(credential_str.encode()).decode()
def _call(
self,
prompt: str,
stop: Optional[list] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Beam."""
url = "https://apps.beam.cloud/" + self.app_id if self.app_id else self.url
payload = {"prompt": prompt, "max_length": self.max_length}
payload.update(kwargs)
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Authorization": "Basic " + self.authorization,
"Connection": "keep-alive",
"Content-Type": "application/json",
}
for _ in range(DEFAULT_NUM_TRIES):
request = requests.post(url, headers=headers, data=json.dumps(payload))
if request.status_code == 200:
return request.json()["text"]
time.sleep(DEFAULT_SLEEP_TIME)
logger.warning("Unable to successfully call model.")
return ""
Source code for langchain.llms.huggingface_hub
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
DEFAULT_REPO_ID = "gpt2"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
[docs]class HuggingFaceHub(LLM):
"""Wrapper around HuggingFaceHub models.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceHub
hf = HuggingFaceHub(repo_id="gpt2", huggingfacehub_api_token="my-api-key")
"""
client: Any #: :meta private:
repo_id: str = DEFAULT_REPO_ID
"""Model name to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.inference_api import InferenceApi
repo_id = values["repo_id"]
client = InferenceApi(
repo_id=repo_id,
token=huggingfacehub_api_token,
task=values.get("task"),
)
if client.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {client.task}, "
f"currently only {VALID_TASKS} are supported"
)
values["client"] = client
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"repo_id": self.repo_id, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_hub"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
params = {**_model_kwargs, **kwargs}
response = self.client(inputs=prompt, params=params)
if "error" in response:
raise ValueError(f"Error raised by inference API: {response['error']}")
if self.client.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif self.client.task == "text2text-generation":
text = response[0]["generated_text"]
elif self.client.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.client.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
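A minimal usage sketch, assuming HUGGINGFACEHUB_API_TOKEN is set in the environment; the repo id and model kwargs are illustrative:

from langchain.llms import HuggingFaceHub

hf = HuggingFaceHub(
    repo_id="google/flan-t5-large",
    model_kwargs={"temperature": 0.5, "max_length": 64},
)
print(hf("Translate to French: I love you."))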
Source code for langchain.llms.baseten
"""Wrapper around Baseten deployed model API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
logger = logging.getLogger(__name__)
[docs]class Baseten(LLM):
"""Use your Baseten models in Langchain
To use, you should have the ``baseten`` python package installed,
and run ``baseten.login()`` with your Baseten API key.
The required ``model`` param can be either a model id or model
version id. Using a model version ID will result in
slightly faster invocation.
Any other model parameters can be passed in with the format
``input={model_param: value, ...}``.
The Baseten model must accept a dictionary of input with the key
"prompt" and return a dictionary with a key "data" that maps
to a list of response strings.
Example:
.. code-block:: python
from langchain.llms import Baseten
my_model = Baseten(model="MODEL_ID")
output = my_model("prompt")
"""
model: str
input: Dict[str, Any] = Field(default_factory=dict)
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "baseten"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Baseten deployed model endpoint."""
try:
import baseten
except ImportError as exc:
raise ValueError(
"Could not import Baseten Python package. "
"Please install it with `pip install baseten`."
) from exc
# Try the identifier as a model version ID first, then fall back to a model ID.
try:
model = baseten.deployed_model_version_id(self.model)
response = model.predict({"prompt": prompt})
except baseten.common.core.ApiError:
model = baseten.deployed_model_id(self.model)
response = model.predict({"prompt": prompt})
return "".join(response)
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Jun 16, 2023.
Source code for langchain.llms.pipelineai
"""Wrapper around Pipeline Cloud API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class PipelineAI(LLM, BaseModel):
"""Wrapper around PipelineAI large language models.
To use, you should have the ``pipeline-ai`` python package installed,
and the environment variable ``PIPELINE_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain import PipelineAI
pipeline = PipelineAI(pipeline_key="")
"""
pipeline_key: str = ""
"""The id or tag of the target pipeline"""
pipeline_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any pipeline parameters valid for `create` call not
explicitly specified."""
pipeline_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("pipeline_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transfered to pipeline_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["pipeline_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
pipeline_api_key = get_from_dict_or_env(
values, "pipeline_api_key", "PIPELINE_API_KEY"
)
values["pipeline_api_key"] = pipeline_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"pipeline_key": self.pipeline_key},
**{"pipeline_kwargs": self.pipeline_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "pipeline_ai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Pipeline Cloud endpoint."""
try:
from pipeline import PipelineCloud
except ImportError:
raise ValueError(
"Could not import pipeline-ai python package. "
"Please install it with `pip install pipeline-ai`."
)
client = PipelineCloud(token=self.pipeline_api_key)
params = self.pipeline_kwargs or {}
params = {**params, **kwargs}
run = client.run_pipeline(self.pipeline_key, [prompt, params])
try:
text = run.result_preview[0][0]
except AttributeError:
raise AttributeError(
f"A pipeline run should have a `result_preview` attribute."
f"Run was: {run}"
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the pipeline parameters
text = enforce_stop_tokens(text, stop)
return text
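Taken together, ``build_extra`` folds unrecognized constructor arguments into ``pipeline_kwargs`` and ``validate_environment`` reads the API key from the environment when it is not passed explicitly. A minimal sketch; the pipeline key is a placeholder and ``temperature`` is only an illustrative extra parameter:
.. code-block:: python
    import os
    from langchain import PipelineAI
    os.environ["PIPELINE_API_KEY"] = "my-api-key"  # or pass pipeline_api_key=...
    # `temperature` is not a declared field, so build_extra moves it into
    # pipeline_kwargs and logs a warning.
    pipeline = PipelineAI(pipeline_key="my-pipeline:latest", temperature=0.7)
    text = pipeline("Once upon a time")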
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Jun 16, 2023.
Source code for langchain.llms.openai
"""Wrapper around OpenAI APIs."""
from __future__ import annotations
import logging
import sys
import warnings
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Dict,
Generator,
List,
Literal,
Mapping,
Optional,
Set,
Tuple,
Union,
)
from pydantic import Extra, Field, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import BaseLLM
from langchain.schema import Generation, LLMResult
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def update_token_usage(
keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
"""Update token usage."""
_keys_to_use = keys.intersection(response["usage"])
for _key in _keys_to_use:
if _key not in token_usage:
token_usage[_key] = response["usage"][_key]
else:
token_usage[_key] += response["usage"][_key]
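# Sketch of how usage accumulates across batched responses (the payloads are
# hypothetical but shaped like OpenAI's "usage" field):
#
#     token_usage: Dict[str, Any] = {}
#     keys = {"prompt_tokens", "completion_tokens", "total_tokens"}
#     update_token_usage(keys, {"usage": {"prompt_tokens": 5, "completion_tokens": 7}}, token_usage)
#     update_token_usage(keys, {"usage": {"prompt_tokens": 3, "completion_tokens": 2}}, token_usage)
#     # token_usage == {"prompt_tokens": 8, "completion_tokens": 9}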
def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:
"""Update response from the stream response."""
response["choices"][0]["text"] += stream_response["choices"][0]["text"]
response["choices"][0]["finish_reason"] = stream_response["choices"][0][
"finish_reason"
]
response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"]
def _streaming_response_template() -> Dict[str, Any]:
return {
"choices": [
{
"text": "",
"finish_reason": None,
"logprobs": None,
}
]
}
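# Sketch of how the two helpers above combine while consuming a stream
# (the chunk dicts are hypothetical stream events):
#
#     response = _streaming_response_template()
#     for chunk in (
#         {"choices": [{"text": "Hel", "finish_reason": None, "logprobs": None}]},
#         {"choices": [{"text": "lo", "finish_reason": "stop", "logprobs": None}]},
#     ):
#         _update_response(response, chunk)
#     # response["choices"][0]["text"] == "Hello"
#     # response["choices"][0]["finish_reason"] == "stop"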
def _create_retry_decorator(llm: Union[BaseOpenAI, OpenAIChat]) -> Callable[[Any], Any]:
import openai
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry, starting at
# 4 seconds and capping at 10 seconds thereafter
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.create(**kwargs)
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any
) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
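# Usage sketch (the llm instance and kwargs are hypothetical): both helpers
# forward kwargs unchanged to the underlying openai client and retry on the
# transient error types listed above with exponential backoff:
#
#     result = completion_with_retry(llm, prompt="Hello", model="text-davinci-003")
#     result = await acompletion_with_retry(llm, prompt="Hello", model="text-davinci-003")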
class BaseOpenAI(BaseLLM):
"""Wrapper around OpenAI large language models."""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"openai_api_key": "OPENAI_API_KEY"}
@property
def lc_serializable(self) -> bool:
return True
client: Any #: :meta private:
model_name: str = Field("text-davinci-003", alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the model's maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
best_of: int = 1
"""Generates best_of completions server-side and returns the "best"."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
openai_api_base: Optional[str] = None
openai_organization: Optional[str] = None
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
batch_size: int = 20
"""Batch size to use when passing multiple documents to generate."""
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
"""Set of special tokens that are allowed."""
disallowed_special: Union[Literal["all"], Collection[str]] = "all"
"""Set of special tokens that are not allowed."""
def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore
"""Initialize the OpenAI object."""
model_name = data.get("model_name", "")
if model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4"):
warnings.warn(
"You are trying to use a chat model. This way of initializing it is "
"no longer supported. Instead, please use: "
"`from langchain.chat_models import ChatOpenAI`"
)
return OpenAIChat(**data)
return super().__new__(cls)
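# For example, OpenAI(model_name="gpt-3.5-turbo") emits the warning above and
# returns an OpenAIChat instance rather than a BaseOpenAI subclass.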
class Config:
"""Configuration for this pydantic object."""
extra = Extra.ignore
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = cls.all_required_field_names()
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
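# For example (illustrative values): OpenAI(logprobs=1) moves `logprobs` into
# model_kwargs with a warning because it is not a declared field, while
# OpenAI(model_kwargs={"temperature": 0}) raises a ValueError because
# `temperature` must be passed explicitly.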
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
values["openai_api_base"] = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
values["openai_organization"] = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
raise ValueError("Cannot stream results when best_of > 1.")
return values
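# A minimal configuration sketch (placeholder values): with the environment
# variable set, no key needs to be passed to the constructor:
#
#     import os
#     os.environ["OPENAI_API_KEY"] = "sk-..."
#     llm = OpenAI()  # validate_environment pulls the key from the env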
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"request_timeout": self.request_timeout,