id: stringlengths 14-16
text: stringlengths 31-2.41k
source: stringlengths 53-121
f50ec90d5116-4
suffix_to_use = suffix if include_df_in_prompt: dfs_head = "\n\n".join([d.head().to_markdown() for d in dfs]) suffix_to_use = suffix_to_use.format( dfs_head=dfs_head, ) elif include_df_in_prompt: dfs_head = "\n\n".join([d.head().to_markdown() for d in dfs]) suffix_to_use = FUNCTIONS_WITH_MULTI_DF.format( dfs_head=dfs_head, ) else: suffix_to_use = "" if prefix is None: prefix = MULTI_DF_PREFIX_FUNCTIONS prefix = prefix.format(num_dfs=str(len(dfs))) df_locals = {} for i, dataframe in enumerate(dfs): df_locals[f"df{i + 1}"] = dataframe tools = [PythonAstREPLTool(locals=df_locals)] system_message = SystemMessage(content=prefix + suffix_to_use) prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message) return prompt, tools def _get_functions_prompt_and_tools( df: Any, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, include_df_in_prompt: Optional[bool] = True, ) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]: try: import pandas as pd except ImportError: raise ValueError( "pandas package not found, please install with `pip install pandas`" ) if input_variables is not None: raise ValueError("`input_variables` is not supported at the moment.") if include_df_in_prompt is not None and suffix is not None:
https://api.python.langchain.com/en/stable/_modules/langchain/agents/agent_toolkits/pandas/base.html
f50ec90d5116-5
if include_df_in_prompt is not None and suffix is not None: raise ValueError("If suffix is specified, include_df_in_prompt should not be.") if isinstance(df, list): for item in df: if not isinstance(item, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_functions_multi_prompt( df, prefix=prefix, suffix=suffix, include_df_in_prompt=include_df_in_prompt, ) else: if not isinstance(df, pd.DataFrame): raise ValueError(f"Expected pandas object, got {type(df)}") return _get_functions_single_prompt( df, prefix=prefix, suffix=suffix, include_df_in_prompt=include_df_in_prompt, ) [docs]def create_pandas_dataframe_agent( llm: BaseLanguageModel, df: Any, agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION, callback_manager: Optional[BaseCallbackManager] = None, prefix: Optional[str] = None, suffix: Optional[str] = None, input_variables: Optional[List[str]] = None, verbose: bool = False, return_intermediate_steps: bool = False, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = "force", agent_executor_kwargs: Optional[Dict[str, Any]] = None, include_df_in_prompt: Optional[bool] = True, **kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct a pandas agent from an LLM and dataframe.""" agent: BaseSingleActionAgent
https://api.python.langchain.com/en/stable/_modules/langchain/agents/agent_toolkits/pandas/base.html
f50ec90d5116-6
agent: BaseSingleActionAgent if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION: prompt, tools = _get_prompt_and_tools( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent( llm_chain=llm_chain, allowed_tools=tool_names, callback_manager=callback_manager, **kwargs, ) elif agent_type == AgentType.OPENAI_FUNCTIONS: _prompt, tools = _get_functions_prompt_and_tools( df, prefix=prefix, suffix=suffix, input_variables=input_variables, include_df_in_prompt=include_df_in_prompt, ) agent = OpenAIFunctionsAgent( llm=llm, prompt=_prompt, tools=tools, callback_manager=callback_manager, **kwargs, ) else: raise ValueError(f"Agent type {agent_type} not supported at the moment.") return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, return_intermediate_steps=return_intermediate_steps, max_iterations=max_iterations, max_execution_time=max_execution_time, early_stopping_method=early_stopping_method, **(agent_executor_kwargs or {}), )
https://api.python.langchain.com/en/stable/_modules/langchain/agents/agent_toolkits/pandas/base.html
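A minimal usage sketch for the pandas agent module above (not part of the scraped source): it assumes `create_pandas_dataframe_agent` and `AgentType` are imported from `langchain.agents`, an OpenAI API key is configured, and the CSV file names are hypothetical placeholders.

.. code-block:: python

    import pandas as pd
    from langchain.agents import AgentType, create_pandas_dataframe_agent
    from langchain.chat_models import ChatOpenAI

    # Hypothetical data files; any pandas DataFrame(s) will do.
    df = pd.read_csv("titanic.csv")
    df_filled = pd.read_csv("titanic_age_fillna.csv")

    # Single dataframe with the default ZERO_SHOT_REACT_DESCRIPTION agent.
    agent = create_pandas_dataframe_agent(ChatOpenAI(temperature=0), df, verbose=True)
    agent.run("How many rows are in the dataframe?")

    # Passing a list of dataframes routes through the multi-df prompt builders above.
    multi_agent = create_pandas_dataframe_agent(
        ChatOpenAI(temperature=0),
        [df, df_filled],
        agent_type=AgentType.OPENAI_FUNCTIONS,
    )
    multi_agent.run("How many rows in the Age column are different between df1 and df2?")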
db60b2750580-0
Source code for langchain.agents.agent_toolkits.json.toolkit """Toolkit for interacting with a JSON spec.""" from __future__ import annotations from typing import List from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools import BaseTool from langchain.tools.json.tool import JsonGetValueTool, JsonListKeysTool, JsonSpec [docs]class JsonToolkit(BaseToolkit): """Toolkit for interacting with a JSON spec.""" spec: JsonSpec [docs] def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" return [ JsonListKeysTool(spec=self.spec), JsonGetValueTool(spec=self.spec), ]
https://api.python.langchain.com/en/stable/_modules/langchain/agents/agent_toolkits/json/toolkit.html
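A short construction sketch for the toolkit above, assuming a local OpenAPI spec file; the file path and the `max_value_length` value are illustrative, not requirements.

.. code-block:: python

    import yaml
    from langchain.agents.agent_toolkits import JsonToolkit
    from langchain.tools.json.tool import JsonSpec

    # Hypothetical spec file; any nested dict can back a JsonSpec.
    with open("openai_openapi.yml") as f:
        data = yaml.safe_load(f)

    json_spec = JsonSpec(dict_=data, max_value_length=4000)
    json_toolkit = JsonToolkit(spec=json_spec)
    # Expect the two tools defined above: a key-listing tool and a value-getting tool.
    print([tool.name for tool in json_toolkit.get_tools()])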
e53502d50397-0
Source code for langchain.agents.agent_toolkits.json.base """Json agent.""" from typing import Any, Dict, List, Optional from langchain.agents.agent import AgentExecutor from langchain.agents.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit from langchain.agents.mrkl.base import ZeroShotAgent from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains.llm import LLMChain [docs]def create_json_agent( llm: BaseLanguageModel, toolkit: JsonToolkit, callback_manager: Optional[BaseCallbackManager] = None, prefix: str = JSON_PREFIX, suffix: str = JSON_SUFFIX, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any], ) -> AgentExecutor: """Construct a json agent from an LLM and tools.""" tools = toolkit.get_tools() prompt = ZeroShotAgent.create_prompt( tools, prefix=prefix, suffix=suffix, format_instructions=format_instructions, input_variables=input_variables, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) return AgentExecutor.from_agent_and_tools(
https://api.python.langchain.com/en/stable/_modules/langchain/agents/agent_toolkits/json/base.html
e53502d50397-1
return AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, **(agent_executor_kwargs or {}), )
https://api.python.langchain.com/en/stable/_modules/langchain/agents/agent_toolkits/json/base.html
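Continuing that sketch, `create_json_agent` wires the toolkit into a zero-shot agent; the question shown is only an example.

.. code-block:: python

    from langchain.agents import create_json_agent
    from langchain.llms import OpenAI

    json_agent_executor = create_json_agent(
        llm=OpenAI(temperature=0),
        toolkit=json_toolkit,  # the JsonToolkit built in the previous sketch
        verbose=True,
    )
    json_agent_executor.run(
        "What are the required parameters in the request body to the /completions endpoint?"
    )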
77749ed9e6f5-0
Source code for langchain.agents.self_ask_with_search.base """Chain that does self ask with search.""" from typing import Any, Sequence, Union from pydantic import Field from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser from langchain.agents.agent_types import AgentType from langchain.agents.self_ask_with_search.output_parser import SelfAskOutputParser from langchain.agents.self_ask_with_search.prompt import PROMPT from langchain.agents.tools import Tool from langchain.agents.utils import validate_tools_single_input from langchain.base_language import BaseLanguageModel from langchain.prompts.base import BasePromptTemplate from langchain.tools.base import BaseTool from langchain.utilities.google_serper import GoogleSerperAPIWrapper from langchain.utilities.serpapi import SerpAPIWrapper class SelfAskWithSearchAgent(Agent): """Agent for the self-ask-with-search paper.""" output_parser: AgentOutputParser = Field(default_factory=SelfAskOutputParser) @classmethod def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: return SelfAskOutputParser() @property def _agent_type(self) -> str: """Return Identifier of agent type.""" return AgentType.SELF_ASK_WITH_SEARCH @classmethod def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: """Prompt does not depend on tools.""" return PROMPT @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: validate_tools_single_input(cls.__name__, tools) super()._validate_tools(tools) if len(tools) != 1: raise ValueError(f"Exactly one tool must be specified, but got {tools}")
https://api.python.langchain.com/en/stable/_modules/langchain/agents/self_ask_with_search/base.html
77749ed9e6f5-1
raise ValueError(f"Exactly one tool must be specified, but got {tools}") tool_names = {tool.name for tool in tools} if tool_names != {"Intermediate Answer"}: raise ValueError( f"Tool name should be Intermediate Answer, got {tool_names}" ) @property def observation_prefix(self) -> str: """Prefix to append the observation with.""" return "Intermediate answer: " @property def llm_prefix(self) -> str: """Prefix to append the LLM call with.""" return "" [docs]class SelfAskWithSearchChain(AgentExecutor): """Chain that does self ask with search. Example: .. code-block:: python from langchain import SelfAskWithSearchChain, OpenAI, GoogleSerperAPIWrapper search_chain = GoogleSerperAPIWrapper() self_ask = SelfAskWithSearchChain(llm=OpenAI(), search_chain=search_chain) """ def __init__( self, llm: BaseLanguageModel, search_chain: Union[GoogleSerperAPIWrapper, SerpAPIWrapper], **kwargs: Any, ): """Initialize with just an LLM and a search chain.""" search_tool = Tool( name="Intermediate Answer", func=search_chain.run, coroutine=search_chain.arun, description="Search", ) agent = SelfAskWithSearchAgent.from_llm_and_tools(llm, [search_tool]) super().__init__(agent=agent, tools=[search_tool], **kwargs)
https://api.python.langchain.com/en/stable/_modules/langchain/agents/self_ask_with_search/base.html
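Besides the `SelfAskWithSearchChain` example in the docstring above, the same agent can be built through `initialize_agent`; note the single tool must be named exactly "Intermediate Answer" to pass the validation shown above. A SerpAPI key in the environment is assumed.

.. code-block:: python

    from langchain.agents import AgentType, Tool, initialize_agent
    from langchain.llms import OpenAI
    from langchain.utilities import SerpAPIWrapper

    search = SerpAPIWrapper()  # requires SERPAPI_API_KEY
    tools = [
        Tool(
            name="Intermediate Answer",  # _validate_tools rejects any other name
            func=search.run,
            description="useful for when you need to ask with search",
        )
    ]

    self_ask_agent = initialize_agent(
        tools, OpenAI(temperature=0), agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True
    )
    self_ask_agent.run("What is the hometown of the reigning men's U.S. Open champion?")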
5e79e95f21e2-0
Source code for langchain.experimental.autonomous_agents.baby_agi.baby_agi """BabyAGI agent.""" from collections import deque from typing import Any, Dict, List, Optional from pydantic import BaseModel, Field from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.experimental.autonomous_agents.baby_agi.task_creation import ( TaskCreationChain, ) from langchain.experimental.autonomous_agents.baby_agi.task_execution import ( TaskExecutionChain, ) from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import ( TaskPrioritizationChain, ) from langchain.vectorstores.base import VectorStore [docs]class BabyAGI(Chain, BaseModel): """Controller model for the BabyAGI agent.""" task_list: deque = Field(default_factory=deque) task_creation_chain: Chain = Field(...) task_prioritization_chain: Chain = Field(...) execution_chain: Chain = Field(...) task_id_counter: int = Field(1) vectorstore: VectorStore = Field(init=False) max_iterations: Optional[int] = None [docs] class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def add_task(self, task: Dict) -> None: self.task_list.append(task) def print_task_list(self) -> None: print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m") for t in self.task_list: print(str(t["task_id"]) + ": " + t["task_name"])
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/autonomous_agents/baby_agi/baby_agi.html
5e79e95f21e2-1
print(str(t["task_id"]) + ": " + t["task_name"]) def print_next_task(self, task: Dict) -> None: print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m") print(str(task["task_id"]) + ": " + task["task_name"]) def print_task_result(self, result: str) -> None: print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m") print(result) @property def input_keys(self) -> List[str]: return ["objective"] @property def output_keys(self) -> List[str]: return [] [docs] def get_next_task( self, result: str, task_description: str, objective: str ) -> List[Dict]: """Get the next task.""" task_names = [t["task_name"] for t in self.task_list] incomplete_tasks = ", ".join(task_names) response = self.task_creation_chain.run( result=result, task_description=task_description, incomplete_tasks=incomplete_tasks, objective=objective, ) new_tasks = response.split("\n") return [ {"task_name": task_name} for task_name in new_tasks if task_name.strip() ] [docs] def prioritize_tasks(self, this_task_id: int, objective: str) -> List[Dict]: """Prioritize tasks.""" task_names = [t["task_name"] for t in list(self.task_list)] next_task_id = int(this_task_id) + 1
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/autonomous_agents/baby_agi/baby_agi.html
5e79e95f21e2-2
next_task_id = int(this_task_id) + 1 response = self.task_prioritization_chain.run( task_names=", ".join(task_names), next_task_id=str(next_task_id), objective=objective, ) new_tasks = response.split("\n") prioritized_task_list = [] for task_string in new_tasks: if not task_string.strip(): continue task_parts = task_string.strip().split(".", 1) if len(task_parts) == 2: task_id = task_parts[0].strip() task_name = task_parts[1].strip() prioritized_task_list.append( {"task_id": task_id, "task_name": task_name} ) return prioritized_task_list def _get_top_tasks(self, query: str, k: int) -> List[str]: """Get the top k tasks based on the query.""" results = self.vectorstore.similarity_search(query, k=k) if not results: return [] return [str(item.metadata["task"]) for item in results] [docs] def execute_task(self, objective: str, task: str, k: int = 5) -> str: """Execute a task.""" context = self._get_top_tasks(query=objective, k=k) return self.execution_chain.run( objective=objective, context="\n".join(context), task=task ) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: """Run the agent.""" objective = inputs["objective"]
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/autonomous_agents/baby_agi/baby_agi.html
5e79e95f21e2-3
"""Run the agent.""" objective = inputs["objective"] first_task = inputs.get("first_task", "Make a todo list") self.add_task({"task_id": 1, "task_name": first_task}) num_iters = 0 while True: if self.task_list: self.print_task_list() # Step 1: Pull the first task task = self.task_list.popleft() self.print_next_task(task) # Step 2: Execute the task result = self.execute_task(objective, task["task_name"]) this_task_id = int(task["task_id"]) self.print_task_result(result) # Step 3: Store the result in Pinecone result_id = f"result_{task['task_id']}" self.vectorstore.add_texts( texts=[result], metadatas=[{"task": task["task_name"]}], ids=[result_id], ) # Step 4: Create new tasks and reprioritize task list new_tasks = self.get_next_task(result, task["task_name"], objective) for new_task in new_tasks: self.task_id_counter += 1 new_task.update({"task_id": self.task_id_counter}) self.add_task(new_task) self.task_list = deque(self.prioritize_tasks(this_task_id, objective)) num_iters += 1 if self.max_iterations is not None and num_iters == self.max_iterations: print( "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m" ) break return {} [docs] @classmethod def from_llm(
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/autonomous_agents/baby_agi/baby_agi.html
5e79e95f21e2-4
return {} [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, vectorstore: VectorStore, verbose: bool = False, task_execution_chain: Optional[Chain] = None, **kwargs: Dict[str, Any], ) -> "BabyAGI": """Initialize the BabyAGI Controller.""" task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose) task_prioritization_chain = TaskPrioritizationChain.from_llm( llm, verbose=verbose ) if task_execution_chain is None: execution_chain: Chain = TaskExecutionChain.from_llm(llm, verbose=verbose) else: execution_chain = task_execution_chain return cls( task_creation_chain=task_creation_chain, task_prioritization_chain=task_prioritization_chain, execution_chain=execution_chain, vectorstore=vectorstore, **kwargs, )
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/autonomous_agents/baby_agi/baby_agi.html
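A minimal sketch of running the controller above, assuming `faiss-cpu` and an OpenAI key are available; the objective string and the index width (1536, the OpenAI embedding size) are illustrative.

.. code-block:: python

    import faiss
    from langchain.docstore import InMemoryDocstore
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.experimental import BabyAGI
    from langchain.llms import OpenAI
    from langchain.vectorstores import FAISS

    # Empty FAISS index used to store task results.
    embeddings = OpenAIEmbeddings()
    index = faiss.IndexFlatL2(1536)
    vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})

    baby_agi = BabyAGI.from_llm(
        llm=OpenAI(temperature=0),
        vectorstore=vectorstore,
        verbose=False,
        max_iterations=3,  # forwarded to the BabyAGI field via **kwargs
    )
    baby_agi({"objective": "Write a weather report for SF today"})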
57796e7ab5f9-0
Source code for langchain.experimental.autonomous_agents.autogpt.agent from __future__ import annotations from typing import List, Optional from pydantic import ValidationError from langchain.chains.llm import LLMChain from langchain.chat_models.base import BaseChatModel from langchain.experimental.autonomous_agents.autogpt.output_parser import ( AutoGPTOutputParser, BaseAutoGPTOutputParser, ) from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt from langchain.experimental.autonomous_agents.autogpt.prompt_generator import ( FINISH_NAME, ) from langchain.memory import ChatMessageHistory from langchain.schema import ( AIMessage, BaseChatMessageHistory, Document, HumanMessage, SystemMessage, ) from langchain.tools.base import BaseTool from langchain.tools.human.tool import HumanInputRun from langchain.vectorstores.base import VectorStoreRetriever [docs]class AutoGPT: """Agent class for interacting with Auto-GPT.""" def __init__( self, ai_name: str, memory: VectorStoreRetriever, chain: LLMChain, output_parser: BaseAutoGPTOutputParser, tools: List[BaseTool], feedback_tool: Optional[HumanInputRun] = None, chat_history_memory: Optional[BaseChatMessageHistory] = None, ): self.ai_name = ai_name self.memory = memory self.next_action_count = 0 self.chain = chain self.output_parser = output_parser self.tools = tools self.feedback_tool = feedback_tool self.chat_history_memory = chat_history_memory or ChatMessageHistory() @classmethod def from_llm_and_tools(
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/autonomous_agents/autogpt/agent.html
57796e7ab5f9-1
@classmethod def from_llm_and_tools( cls, ai_name: str, ai_role: str, memory: VectorStoreRetriever, tools: List[BaseTool], llm: BaseChatModel, human_in_the_loop: bool = False, output_parser: Optional[BaseAutoGPTOutputParser] = None, chat_history_memory: Optional[BaseChatMessageHistory] = None, ) -> AutoGPT: prompt = AutoGPTPrompt( ai_name=ai_name, ai_role=ai_role, tools=tools, input_variables=["memory", "messages", "goals", "user_input"], token_counter=llm.get_num_tokens, ) human_feedback_tool = HumanInputRun() if human_in_the_loop else None chain = LLMChain(llm=llm, prompt=prompt) return cls( ai_name, memory, chain, output_parser or AutoGPTOutputParser(), tools, feedback_tool=human_feedback_tool, chat_history_memory=chat_history_memory, ) def run(self, goals: List[str]) -> str: user_input = ( "Determine which next command to use, " "and respond using the format specified above:" ) # Interaction Loop loop_count = 0 while True: # Discontinue if continuous limit is reached loop_count += 1 # Send message to AI, get response assistant_reply = self.chain.run( goals=goals, messages=self.chat_history_memory.messages, memory=self.memory, user_input=user_input, ) # Print Assistant thoughts
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/autonomous_agents/autogpt/agent.html
57796e7ab5f9-2
user_input=user_input, ) # Print Assistant thoughts print(assistant_reply) self.chat_history_memory.add_message(HumanMessage(content=user_input)) self.chat_history_memory.add_message(AIMessage(content=assistant_reply)) # Get command name and arguments action = self.output_parser.parse(assistant_reply) tools = {t.name: t for t in self.tools} if action.name == FINISH_NAME: return action.args["response"] if action.name in tools: tool = tools[action.name] try: observation = tool.run(action.args) except ValidationError as e: observation = ( f"Validation Error in args: {str(e)}, args: {action.args}" ) except Exception as e: observation = ( f"Error: {str(e)}, {type(e).__name__}, args: {action.args}" ) result = f"Command {tool.name} returned: {observation}" elif action.name == "ERROR": result = f"Error: {action.args}. " else: result = ( f"Unknown command '{action.name}'. " f"Please refer to the 'COMMANDS' list for available " f"commands and only respond in the specified JSON format." ) memory_to_add = ( f"Assistant Reply: {assistant_reply} " f"\nResult: {result} " ) if self.feedback_tool is not None: feedback = f"\n{self.feedback_tool.run('Input: ')}" if feedback in {"q", "stop"}: print("EXITING") return "EXITING" memory_to_add += feedback
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/autonomous_agents/autogpt/agent.html
57796e7ab5f9-3
return "EXITING" memory_to_add += feedback self.memory.add_documents([Document(page_content=memory_to_add)]) self.chat_history_memory.add_message(SystemMessage(content=result))
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/autonomous_agents/autogpt/agent.html
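A rough sketch of wiring up the AutoGPT agent above; the tool list, `ai_name`, and goal are placeholders, and `faiss-cpu` plus OpenAI and SerpAPI keys are assumed.

.. code-block:: python

    import faiss
    from langchain.agents import Tool
    from langchain.chat_models import ChatOpenAI
    from langchain.docstore import InMemoryDocstore
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.experimental import AutoGPT
    from langchain.tools.file_management.read import ReadFileTool
    from langchain.tools.file_management.write import WriteFileTool
    from langchain.utilities import SerpAPIWrapper
    from langchain.vectorstores import FAISS

    search = SerpAPIWrapper()  # requires SERPAPI_API_KEY
    tools = [
        Tool(name="search", func=search.run, description="useful for current events"),
        WriteFileTool(),
        ReadFileTool(),
    ]

    embeddings = OpenAIEmbeddings()
    index = faiss.IndexFlatL2(1536)
    vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})

    autogpt = AutoGPT.from_llm_and_tools(
        ai_name="Tom",
        ai_role="Assistant",
        tools=tools,
        llm=ChatOpenAI(temperature=0),
        memory=vectorstore.as_retriever(),
    )
    autogpt.run(["write a weather report for SF today"])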
cd927b7e5792-0
Source code for langchain.experimental.generative_agents.memory import logging import re from datetime import datetime from typing import Any, Dict, List, Optional from langchain import LLMChain from langchain.base_language import BaseLanguageModel from langchain.prompts import PromptTemplate from langchain.retrievers import TimeWeightedVectorStoreRetriever from langchain.schema import BaseMemory, Document from langchain.utils import mock_now logger = logging.getLogger(__name__) [docs]class GenerativeAgentMemory(BaseMemory): llm: BaseLanguageModel """The core language model.""" memory_retriever: TimeWeightedVectorStoreRetriever """The retriever to fetch related memories.""" verbose: bool = False reflection_threshold: Optional[float] = None """When aggregate_importance exceeds reflection_threshold, stop to reflect.""" current_plan: List[str] = [] """The current plan of the agent.""" # A weight of 0.15 makes this less important than it # would be otherwise, relative to salience and time importance_weight: float = 0.15 """How much weight to assign the memory importance.""" aggregate_importance: float = 0.0 # : :meta private: """Track the sum of the 'importance' of recent memories. Triggers reflection when it reaches reflection_threshold.""" max_tokens_limit: int = 1200 # : :meta private: # input keys queries_key: str = "queries" most_recent_memories_token_key: str = "recent_memories_token" add_memory_key: str = "add_memory" # output keys relevant_memories_key: str = "relevant_memories"
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/memory.html
cd927b7e5792-1
# output keys relevant_memories_key: str = "relevant_memories" relevant_memories_simple_key: str = "relevant_memories_simple" most_recent_memories_key: str = "most_recent_memories" now_key: str = "now" reflecting: bool = False def chain(self, prompt: PromptTemplate) -> LLMChain: return LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose) @staticmethod def _parse_list(text: str) -> List[str]: """Parse a newline-separated string into a list of strings.""" lines = re.split(r"\n", text.strip()) lines = [line for line in lines if line.strip()] # remove empty lines return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines] def _get_topics_of_reflection(self, last_k: int = 50) -> List[str]: """Return the 3 most salient high-level questions about recent observations.""" prompt = PromptTemplate.from_template( "{observations}\n\n" "Given only the information above, what are the 3 most salient " "high-level questions we can answer about the subjects in the statements?\n" "Provide each question on a new line." ) observations = self.memory_retriever.memory_stream[-last_k:] observation_str = "\n".join( [self._format_memory_detail(o) for o in observations] ) result = self.chain(prompt).run(observations=observation_str) return self._parse_list(result) def _get_insights_on_topic( self, topic: str, now: Optional[datetime] = None
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/memory.html
cd927b7e5792-2
self, topic: str, now: Optional[datetime] = None ) -> List[str]: """Generate 'insights' on a topic of reflection, based on pertinent memories.""" prompt = PromptTemplate.from_template( "Statements relevant to: '{topic}'\n" "---\n" "{related_statements}\n" "---\n" "What 5 high-level novel insights can you infer from the above statements " "that are relevant for answering the following question?\n" "Do not include any insights that are not relevant to the question.\n" "Do not repeat any insights that have already been made.\n\n" "Question: {topic}\n\n" "(example format: insight (because of 1, 5, 3))\n" ) related_memories = self.fetch_memories(topic, now=now) related_statements = "\n".join( [ self._format_memory_detail(memory, prefix=f"{i+1}. ") for i, memory in enumerate(related_memories) ] ) result = self.chain(prompt).run( topic=topic, related_statements=related_statements ) # TODO: Parse the connections between memories and insights return self._parse_list(result) [docs] def pause_to_reflect(self, now: Optional[datetime] = None) -> List[str]: """Reflect on recent observations and generate 'insights'.""" if self.verbose: logger.info("Character is reflecting") new_insights = [] topics = self._get_topics_of_reflection() for topic in topics: insights = self._get_insights_on_topic(topic, now=now)
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/memory.html
cd927b7e5792-3
insights = self._get_insights_on_topic(topic, now=now) for insight in insights: self.add_memory(insight, now=now) new_insights.extend(insights) return new_insights def _score_memory_importance(self, memory_content: str) -> float: """Score the absolute importance of the given memory.""" prompt = PromptTemplate.from_template( "On the scale of 1 to 10, where 1 is purely mundane" + " (e.g., brushing teeth, making bed) and 10 is" + " extremely poignant (e.g., a break up, college" + " acceptance), rate the likely poignancy of the" + " following piece of memory. Respond with a single integer." + "\nMemory: {memory_content}" + "\nRating: " ) score = self.chain(prompt).run(memory_content=memory_content).strip() if self.verbose: logger.info(f"Importance score: {score}") match = re.search(r"^\D*(\d+)", score) if match: return (float(match.group(1)) / 10) * self.importance_weight else: return 0.0 def _score_memories_importance(self, memory_content: str) -> List[float]: """Score the absolute importance of the given memory.""" prompt = PromptTemplate.from_template( "On the scale of 1 to 10, where 1 is purely mundane" + " (e.g., brushing teeth, making bed) and 10 is" + " extremely poignant (e.g., a break up, college" + " acceptance), rate the likely poignancy of the"
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/memory.html
cd927b7e5792-4
+ " acceptance), rate the likely poignancy of the" + " following piece of memory. Always answer with only a list of numbers." + " If just given one memory still respond in a list." + " Memories are separated by semi colans (;)" + "\Memories: {memory_content}" + "\nRating: " ) scores = self.chain(prompt).run(memory_content=memory_content).strip() if self.verbose: logger.info(f"Importance scores: {scores}") # Split into list of strings and convert to floats scores_list = [float(x) for x in scores.split(";")] return scores_list [docs] def add_memories( self, memory_content: str, now: Optional[datetime] = None ) -> List[str]: """Add an observations or memories to the agent's memory.""" importance_scores = self._score_memories_importance(memory_content) self.aggregate_importance += max(importance_scores) memory_list = memory_content.split(";") documents = [] for i in range(len(memory_list)): documents.append( Document( page_content=memory_list[i], metadata={"importance": importance_scores[i]}, ) ) result = self.memory_retriever.add_documents(documents, current_time=now) # After an agent has processed a certain amount of memories (as measured by # aggregate importance), it is time to reflect on recent events to add # more synthesized memories to the agent's memory stream. if ( self.reflection_threshold is not None and self.aggregate_importance > self.reflection_threshold and not self.reflecting ): self.reflecting = True
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/memory.html
cd927b7e5792-5
and not self.reflecting ): self.reflecting = True self.pause_to_reflect(now=now) # Hack to clear the importance from reflection self.aggregate_importance = 0.0 self.reflecting = False return result [docs] def add_memory( self, memory_content: str, now: Optional[datetime] = None ) -> List[str]: """Add an observation or memory to the agent's memory.""" importance_score = self._score_memory_importance(memory_content) self.aggregate_importance += importance_score document = Document( page_content=memory_content, metadata={"importance": importance_score} ) result = self.memory_retriever.add_documents([document], current_time=now) # After an agent has processed a certain amount of memories (as measured by # aggregate importance), it is time to reflect on recent events to add # more synthesized memories to the agent's memory stream. if ( self.reflection_threshold is not None and self.aggregate_importance > self.reflection_threshold and not self.reflecting ): self.reflecting = True self.pause_to_reflect(now=now) # Hack to clear the importance from reflection self.aggregate_importance = 0.0 self.reflecting = False return result [docs] def fetch_memories( self, observation: str, now: Optional[datetime] = None ) -> List[Document]: """Fetch related memories.""" if now is not None: with mock_now(now): return self.memory_retriever.get_relevant_documents(observation) else: return self.memory_retriever.get_relevant_documents(observation)
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/memory.html
cd927b7e5792-6
else: return self.memory_retriever.get_relevant_documents(observation) def format_memories_detail(self, relevant_memories: List[Document]) -> str: content = [] for mem in relevant_memories: content.append(self._format_memory_detail(mem, prefix="- ")) return "\n".join([f"{mem}" for mem in content]) def _format_memory_detail(self, memory: Document, prefix: str = "") -> str: created_time = memory.metadata["created_at"].strftime("%B %d, %Y, %I:%M %p") return f"{prefix}[{created_time}] {memory.page_content.strip()}" def format_memories_simple(self, relevant_memories: List[Document]) -> str: return "; ".join([f"{mem.page_content}" for mem in relevant_memories]) def _get_memories_until_limit(self, consumed_tokens: int) -> str: """Reduce the number of tokens in the documents.""" result = [] for doc in self.memory_retriever.memory_stream[::-1]: if consumed_tokens >= self.max_tokens_limit: break consumed_tokens += self.llm.get_num_tokens(doc.page_content) if consumed_tokens < self.max_tokens_limit: result.append(doc) return self.format_memories_simple(result) @property def memory_variables(self) -> List[str]: """Input keys this memory class will load dynamically.""" return [] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Return key-value pairs given the text input to the chain.""" queries = inputs.get(self.queries_key) now = inputs.get(self.now_key) if queries is not None:
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/memory.html
cd927b7e5792-7
now = inputs.get(self.now_key) if queries is not None: relevant_memories = [ mem for query in queries for mem in self.fetch_memories(query, now=now) ] return { self.relevant_memories_key: self.format_memories_detail( relevant_memories ), self.relevant_memories_simple_key: self.format_memories_simple( relevant_memories ), } most_recent_memories_token = inputs.get(self.most_recent_memories_token_key) if most_recent_memories_token is not None: return { self.most_recent_memories_key: self._get_memories_until_limit( most_recent_memories_token ) } return {} [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None: """Save the context of this model run to memory.""" # TODO: fix the save memory key mem = outputs.get(self.add_memory_key) now = outputs.get(self.now_key) if mem: self.add_memory(mem, now=now) [docs] def clear(self) -> None: """Clear memory contents.""" # TODO
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/memory.html
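A rough construction sketch for the memory class above, following the pattern used with generative agents; the relevance function, retriever settings, and sample memory are assumptions, not requirements.

.. code-block:: python

    import math

    import faiss
    from langchain.chat_models import ChatOpenAI
    from langchain.docstore import InMemoryDocstore
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.experimental.generative_agents import GenerativeAgentMemory
    from langchain.retrievers import TimeWeightedVectorStoreRetriever
    from langchain.vectorstores import FAISS

    def relevance_score_fn(score: float) -> float:
        # Map FAISS L2 distance onto a 0..1 relevance score for unit-normed embeddings.
        return 1.0 - score / math.sqrt(2)

    embeddings = OpenAIEmbeddings()
    index = faiss.IndexFlatL2(1536)
    vectorstore = FAISS(
        embeddings.embed_query, index, InMemoryDocstore({}), {},
        relevance_score_fn=relevance_score_fn,
    )
    retriever = TimeWeightedVectorStoreRetriever(
        vectorstore=vectorstore, other_score_keys=["importance"], k=15
    )

    memory = GenerativeAgentMemory(
        llm=ChatOpenAI(max_tokens=1500),
        memory_retriever=retriever,
        reflection_threshold=8,  # reflect once enough importance has accumulated
    )
    memory.add_memory("Tommie remembers his dog, Bruno, from when he was a kid")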
d8e7afffb7d0-0
Source code for langchain.experimental.generative_agents.generative_agent import re from datetime import datetime from typing import Any, Dict, List, Optional, Tuple from pydantic import BaseModel, Field from langchain import LLMChain from langchain.base_language import BaseLanguageModel from langchain.experimental.generative_agents.memory import GenerativeAgentMemory from langchain.prompts import PromptTemplate [docs]class GenerativeAgent(BaseModel): """A character with memory and innate characteristics.""" name: str """The character's name.""" age: Optional[int] = None """The optional age of the character.""" traits: str = "N/A" """Permanent traits to ascribe to the character.""" status: str """The traits of the character you wish not to change.""" memory: GenerativeAgentMemory """The memory object that combines relevance, recency, and 'importance'.""" llm: BaseLanguageModel """The underlying language model.""" verbose: bool = False summary: str = "" #: :meta private: """Stateful self-summary generated via reflection on the character's memory.""" summary_refresh_seconds: int = 3600 #: :meta private: """How frequently to re-generate the summary.""" last_refreshed: datetime = Field(default_factory=datetime.now) # : :meta private: """The last time the character's summary was regenerated.""" daily_summaries: List[str] = Field(default_factory=list) # : :meta private: """Summary of the events in the plan that the agent took.""" [docs] class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True # LLM-related methods @staticmethod
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/generative_agent.html
d8e7afffb7d0-1
arbitrary_types_allowed = True # LLM-related methods @staticmethod def _parse_list(text: str) -> List[str]: """Parse a newline-separated string into a list of strings.""" lines = re.split(r"\n", text.strip()) return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines] def chain(self, prompt: PromptTemplate) -> LLMChain: return LLMChain( llm=self.llm, prompt=prompt, verbose=self.verbose, memory=self.memory ) def _get_entity_from_observation(self, observation: str) -> str: prompt = PromptTemplate.from_template( "What is the observed entity in the following observation? {observation}" + "\nEntity=" ) return self.chain(prompt).run(observation=observation).strip() def _get_entity_action(self, observation: str, entity_name: str) -> str: prompt = PromptTemplate.from_template( "What is the {entity} doing in the following observation? {observation}" + "\nThe {entity} is" ) return ( self.chain(prompt).run(entity=entity_name, observation=observation).strip() ) [docs] def summarize_related_memories(self, observation: str) -> str: """Summarize memories that are most relevant to an observation.""" prompt = PromptTemplate.from_template( """ {q1}? Context from memory: {relevant_memories} Relevant context: """ ) entity_name = self._get_entity_from_observation(observation) entity_action = self._get_entity_action(observation, entity_name)
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/generative_agent.html
d8e7afffb7d0-2
entity_action = self._get_entity_action(observation, entity_name) q1 = f"What is the relationship between {self.name} and {entity_name}" q2 = f"{entity_name} is {entity_action}" return self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip() def _generate_reaction( self, observation: str, suffix: str, now: Optional[datetime] = None ) -> str: """React to a given observation or dialogue act.""" prompt = PromptTemplate.from_template( "{agent_summary_description}" + "\nIt is {current_time}." + "\n{agent_name}'s status: {agent_status}" + "\nSummary of relevant context from {agent_name}'s memory:" + "\n{relevant_memories}" + "\nMost recent observations: {most_recent_memories}" + "\nObservation: {observation}" + "\n\n" + suffix ) agent_summary_description = self.get_summary(now=now) relevant_memories_str = self.summarize_related_memories(observation) current_time_str = ( datetime.now().strftime("%B %d, %Y, %I:%M %p") if now is None else now.strftime("%B %d, %Y, %I:%M %p") ) kwargs: Dict[str, Any] = dict( agent_summary_description=agent_summary_description, current_time=current_time_str, relevant_memories=relevant_memories_str, agent_name=self.name, observation=observation, agent_status=self.status, ) consumed_tokens = self.llm.get_num_tokens(
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/generative_agent.html
d8e7afffb7d0-3
) consumed_tokens = self.llm.get_num_tokens( prompt.format(most_recent_memories="", **kwargs) ) kwargs[self.memory.most_recent_memories_token_key] = consumed_tokens return self.chain(prompt=prompt).run(**kwargs).strip() def _clean_response(self, text: str) -> str: return re.sub(f"^{self.name} ", "", text.strip()).strip() [docs] def generate_reaction( self, observation: str, now: Optional[datetime] = None ) -> Tuple[bool, str]: """React to a given observation.""" call_to_action_template = ( "Should {agent_name} react to the observation, and if so," + " what would be an appropriate reaction? Respond in one line." + ' If the action is to engage in dialogue, write:\nSAY: "what to say"' + "\notherwise, write:\nREACT: {agent_name}'s reaction (if anything)." + "\nEither do nothing, react, or say something but not both.\n\n" ) full_result = self._generate_reaction( observation, call_to_action_template, now=now ) result = full_result.strip().split("\n")[0] # AAA self.memory.save_context( {}, { self.memory.add_memory_key: f"{self.name} observed " f"{observation} and reacted by {result}", self.memory.now_key: now, }, ) if "REACT:" in result: reaction = self._clean_response(result.split("REACT:")[-1]) return False, f"{self.name} {reaction}" if "SAY:" in result:
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/generative_agent.html
d8e7afffb7d0-4
if "SAY:" in result: said_value = self._clean_response(result.split("SAY:")[-1]) return True, f"{self.name} said {said_value}" else: return False, result [docs] def generate_dialogue_response( self, observation: str, now: Optional[datetime] = None ) -> Tuple[bool, str]: """React to a given observation.""" call_to_action_template = ( "What would {agent_name} say? To end the conversation, write:" ' GOODBYE: "what to say". Otherwise to continue the conversation,' ' write: SAY: "what to say next"\n\n' ) full_result = self._generate_reaction( observation, call_to_action_template, now=now ) result = full_result.strip().split("\n")[0] if "GOODBYE:" in result: farewell = self._clean_response(result.split("GOODBYE:")[-1]) self.memory.save_context( {}, { self.memory.add_memory_key: f"{self.name} observed " f"{observation} and said {farewell}", self.memory.now_key: now, }, ) return False, f"{self.name} said {farewell}" if "SAY:" in result: response_text = self._clean_response(result.split("SAY:")[-1]) self.memory.save_context( {}, { self.memory.add_memory_key: f"{self.name} observed " f"{observation} and said {response_text}", self.memory.now_key: now, }, ) return True, f"{self.name} said {response_text}"
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/generative_agent.html
d8e7afffb7d0-5
) return True, f"{self.name} said {response_text}" else: return False, result ###################################################### # Agent stateful' summary methods. # # Each dialog or response prompt includes a header # # summarizing the agent's self-description. This is # # updated periodically through probing its memories # ###################################################### def _compute_agent_summary(self) -> str: """""" prompt = PromptTemplate.from_template( "How would you summarize {name}'s core characteristics given the" + " following statements:\n" + "{relevant_memories}" + "Do not embellish." + "\n\nSummary: " ) # The agent seeks to think about their core characteristics. return ( self.chain(prompt) .run(name=self.name, queries=[f"{self.name}'s core characteristics"]) .strip() ) [docs] def get_summary( self, force_refresh: bool = False, now: Optional[datetime] = None ) -> str: """Return a descriptive summary of the agent.""" current_time = datetime.now() if now is None else now since_refresh = (current_time - self.last_refreshed).seconds if ( not self.summary or since_refresh >= self.summary_refresh_seconds or force_refresh ): self.summary = self._compute_agent_summary() self.last_refreshed = current_time age = self.age if self.age is not None else "N/A" return ( f"Name: {self.name} (age: {age})" + f"\nInnate traits: {self.traits}"
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/generative_agent.html
d8e7afffb7d0-6
+ f"\nInnate traits: {self.traits}" + f"\n{self.summary}" ) [docs] def get_full_header( self, force_refresh: bool = False, now: Optional[datetime] = None ) -> str: """Return a full header of the agent's status, summary, and current time.""" now = datetime.now() if now is None else now summary = self.get_summary(force_refresh=force_refresh, now=now) current_time_str = now.strftime("%B %d, %Y, %I:%M %p") return ( f"{summary}\nIt is {current_time_str}.\n{self.name}'s status: {self.status}" )
https://api.python.langchain.com/en/stable/_modules/langchain/experimental/generative_agents/generative_agent.html
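Building on the memory sketch above, a character can then be instantiated and probed; the name, traits, and observation are placeholders.

.. code-block:: python

    from langchain.chat_models import ChatOpenAI
    from langchain.experimental.generative_agents import GenerativeAgent

    tommie = GenerativeAgent(
        name="Tommie",
        age=25,
        traits="anxious, likes design, talkative",
        status="looking for a job",
        memory=memory,  # the GenerativeAgentMemory built in the previous sketch
        llm=ChatOpenAI(max_tokens=1500),
    )

    print(tommie.get_summary(force_refresh=True))
    _, reaction = tommie.generate_reaction("Tommie sees a line of people waiting for coffee")
    print(reaction)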
c0953299026d-0
Source code for langchain.vectorstores.typesense """Wrapper around Typesense vector search""" from __future__ import annotations import uuid from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import get_from_env from langchain.vectorstores.base import VectorStore if TYPE_CHECKING: from typesense.client import Client from typesense.collection import Collection [docs]class Typesense(VectorStore): """Wrapper around Typesense vector search. To use, you should have the ``typesense`` python package installed. Example: .. code-block:: python from langchain.embedding.openai import OpenAIEmbeddings from langchain.vectorstores import Typesense import typesense node = { "host": "localhost", # For Typesense Cloud use xxx.a1.typesense.net "port": "8108", # For Typesense Cloud use 443 "protocol": "http" # For Typesense Cloud use https } typesense_client = typesense.Client( { "nodes": [node], "api_key": "<API_KEY>", "connection_timeout_seconds": 2 } ) typesense_collection_name = "langchain-memory" embedding = OpenAIEmbeddings() vectorstore = Typesense( typesense_client=typesense_client, embedding=embedding, typesense_collection_name=typesense_collection_name, text_key="text", ) """ def __init__( self, typesense_client: Client, embedding: Embeddings, *,
https://api.python.langchain.com/en/stable/_modules/langchain/vectorstores/typesense.html
c0953299026d-1
typesense_client: Client, embedding: Embeddings, *, typesense_collection_name: Optional[str] = None, text_key: str = "text", ): """Initialize with Typesense client.""" try: from typesense import Client except ImportError: raise ValueError( "Could not import typesense python package. " "Please install it with `pip install typesense`." ) if not isinstance(typesense_client, Client): raise ValueError( f"typesense_client should be an instance of typesense.Client, " f"got {type(typesense_client)}" ) self._typesense_client = typesense_client self._embedding = embedding self._typesense_collection_name = ( typesense_collection_name or f"langchain-{str(uuid.uuid4())}" ) self._text_key = text_key @property def _collection(self) -> Collection: return self._typesense_client.collections[self._typesense_collection_name] def _prep_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]], ids: Optional[List[str]], ) -> List[dict]: """Embed and create the documents""" _ids = ids or (str(uuid.uuid4()) for _ in texts) _metadatas: Iterable[dict] = metadatas or ({} for _ in texts) embedded_texts = self._embedding.embed_documents(list(texts)) return [ {"id": _id, "vec": vec, f"{self._text_key}": text, "metadata": metadata}
https://api.python.langchain.com/en/stable/_modules/langchain/vectorstores/typesense.html
c0953299026d-2
for _id, vec, text, metadata in zip(_ids, embedded_texts, texts, _metadatas) ] def _create_collection(self, num_dim: int) -> None: fields = [ {"name": "vec", "type": "float[]", "num_dim": num_dim}, {"name": f"{self._text_key}", "type": "string"}, {"name": ".*", "type": "auto"}, ] self._typesense_client.collections.create( {"name": self._typesense_collection_name, "fields": fields} ) [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embedding and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. Returns: List of ids from adding the texts into the vectorstore. """ from typesense.exceptions import ObjectNotFound docs = self._prep_texts(texts, metadatas, ids) try: self._collection.documents.import_(docs, {"action": "upsert"}) except ObjectNotFound: # Create the collection if it doesn't already exist self._create_collection(len(docs[0]["vec"])) self._collection.documents.import_(docs, {"action": "upsert"}) return [doc["id"] for doc in docs]
https://api.python.langchain.com/en/stable/_modules/langchain/vectorstores/typesense.html
c0953299026d-3
return [doc["id"] for doc in docs] [docs] def similarity_search_with_score( self, query: str, k: int = 10, filter: Optional[str] = "", ) -> List[Tuple[Document, float]]: """Return typesense documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 10. Minimum 10 results would be returned. filter: typesense filter_by expression to filter documents on Returns: List of Documents most similar to the query and score for each """ embedded_query = [str(x) for x in self._embedding.embed_query(query)] query_obj = { "q": "*", "vector_query": f'vec:([{",".join(embedded_query)}], k:{k})', "filter_by": filter, "collection": self._typesense_collection_name, } docs = [] response = self._typesense_client.multi_search.perform( {"searches": [query_obj]}, {} ) for hit in response["results"][0]["hits"]: document = hit["document"] metadata = document["metadata"] text = document[self._text_key] score = hit["vector_distance"] docs.append((Document(page_content=text, metadata=metadata), score)) return docs [docs] def similarity_search( self, query: str, k: int = 10, filter: Optional[str] = "", **kwargs: Any, ) -> List[Document]: """Return typesense documents most similar to query.
https://api.python.langchain.com/en/stable/_modules/langchain/vectorstores/typesense.html
c0953299026d-4
) -> List[Document]: """Return typesense documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 10. Minimum 10 results would be returned. filter: typesense filter_by expression to filter documents on Returns: List of Documents most similar to the query and score for each """ docs_and_score = self.similarity_search_with_score(query, k=k, filter=filter) return [doc for doc, _ in docs_and_score] [docs] @classmethod def from_client_params( cls, embedding: Embeddings, *, host: str = "localhost", port: Union[str, int] = "8108", protocol: str = "http", typesense_api_key: Optional[str] = None, connection_timeout_seconds: int = 2, **kwargs: Any, ) -> Typesense: """Initialize Typesense directly from client parameters. Example: .. code-block:: python from langchain.embedding.openai import OpenAIEmbeddings from langchain.vectorstores import Typesense # Pass in typesense_api_key as kwarg or set env var "TYPESENSE_API_KEY". vectorstore = Typesense( OpenAIEmbeddings(), host="localhost", port="8108", protocol="http", typesense_collection_name="langchain-memory", ) """ try: from typesense import Client except ImportError: raise ValueError( "Could not import typesense python package. " "Please install it with `pip install typesense`." )
https://api.python.langchain.com/en/stable/_modules/langchain/vectorstores/typesense.html
c0953299026d-5
"Please install it with `pip install typesense`." ) node = { "host": host, "port": str(port), "protocol": protocol, } typesense_api_key = typesense_api_key or get_from_env( "typesense_api_key", "TYPESENSE_API_KEY" ) client_config = { "nodes": [node], "api_key": typesense_api_key, "connection_timeout_seconds": connection_timeout_seconds, } return cls(Client(client_config), embedding, **kwargs) [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, typesense_client: Optional[Client] = None, typesense_client_params: Optional[dict] = None, typesense_collection_name: Optional[str] = None, text_key: str = "text", **kwargs: Any, ) -> Typesense: """Construct Typesense wrapper from raw text.""" if typesense_client: vectorstore = cls(typesense_client, embedding, **kwargs) elif typesense_client_params: vectorstore = cls.from_client_params( embedding, **typesense_client_params, **kwargs ) else: raise ValueError( "Must specify one of typesense_client or typesense_client_params." ) vectorstore.add_texts(texts, metadatas=metadatas, ids=ids) return vectorstore
https://api.python.langchain.com/en/stable/_modules/langchain/vectorstores/typesense.html
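A small end-to-end sketch for the wrapper above; the node settings, API key, collection name, and sample text are placeholders, and the collection name is passed inside `typesense_client_params` so it reaches the constructor via `from_client_params`.

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Typesense

    docsearch = Typesense.from_texts(
        ["Typesense is a fast, typo-tolerant search engine."],
        OpenAIEmbeddings(),
        typesense_client_params={
            "host": "localhost",   # xxx.a1.typesense.net for Typesense Cloud
            "port": "8108",        # 443 for Typesense Cloud
            "protocol": "http",    # https for Typesense Cloud
            "typesense_api_key": "xyz",
            "typesense_collection_name": "langchain-demo",
        },
    )
    docs = docsearch.similarity_search("What is Typesense?", k=10)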
ee538ed776f5-0
Source code for langchain.vectorstores.supabase from __future__ import annotations import uuid from itertools import repeat from typing import ( TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type, Union, ) import numpy as np from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore from langchain.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: import supabase [docs]class SupabaseVectorStore(VectorStore): """VectorStore for a Supabase postgres database. Assumes you have the `pgvector` extension installed and a `match_documents` (or similar) function. For more details: https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase You can implement your own `match_documents` function in order to limit the search space to a subset of documents based on your own authorization or business logic. Note that the Supabase Python client does not yet support async operations. If you'd like to use `max_marginal_relevance_search`, please review the instructions below on modifying the `match_documents` function to return matched embeddings. """ _client: supabase.client.Client # This is the embedding function. Don't confuse with the embedding vectors. # We should perhaps rename the underlying Embedding base class to EmbeddingFunction # or something _embedding: Embeddings table_name: str query_name: str def __init__( self, client: supabase.client.Client, embedding: Embeddings, table_name: str,
https://api.python.langchain.com/en/stable/_modules/langchain/vectorstores/supabase.html
ee538ed776f5-1
embedding: Embeddings, table_name: str, query_name: Union[str, None] = None, ) -> None: """Initialize with supabase client.""" try: import supabase # noqa: F401 except ImportError: raise ValueError( "Could not import supabase python package. " "Please install it with `pip install supabase`." ) self._client = client self._embedding: Embeddings = embedding self.table_name = table_name or "documents" self.query_name = query_name or "match_documents" [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict[Any, Any]]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: ids = ids or [str(uuid.uuid4()) for _ in texts] docs = self._texts_to_documents(texts, metadatas) vectors = self._embedding.embed_documents(list(texts)) return self.add_vectors(vectors, docs, ids) [docs] @classmethod def from_texts( cls: Type["SupabaseVectorStore"], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Optional[supabase.client.Client] = None, table_name: Optional[str] = "documents", query_name: Union[str, None] = "match_documents", ids: Optional[List[str]] = None, **kwargs: Any, ) -> "SupabaseVectorStore": """Return VectorStore initialized from texts and embeddings."""
https://api.python.langchain.com/en/stable/_modules/langchain/vectorstores/supabase.html
ee538ed776f5-2
"""Return VectorStore initialized from texts and embeddings.""" if not client: raise ValueError("Supabase client is required.") if not table_name: raise ValueError("Supabase document table_name is required.") embeddings = embedding.embed_documents(texts) ids = [str(uuid.uuid4()) for _ in texts] docs = cls._texts_to_documents(texts, metadatas) _ids = cls._add_vectors(client, table_name, embeddings, docs, ids) return cls( client=client, embedding=embedding, table_name=table_name, query_name=query_name, ) [docs] def add_vectors( self, vectors: List[List[float]], documents: List[Document], ids: List[str], ) -> List[str]: return self._add_vectors(self._client, self.table_name, vectors, documents, ids) [docs] def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: vectors = self._embedding.embed_documents([query]) return self.similarity_search_by_vector(vectors[0], k) [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: result = self.similarity_search_by_vector_with_relevance_scores(embedding, k) documents = [doc for doc, _ in result] return documents [docs] def similarity_search_with_relevance_scores( self, query: str, k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]:
https://api.python.langchain.com/en/stable/_modules/langchain/vectorstores/supabase.html
ee538ed776f5-3
) -> List[Tuple[Document, float]]: vectors = self._embedding.embed_documents([query]) return self.similarity_search_by_vector_with_relevance_scores(vectors[0], k) [docs] def similarity_search_by_vector_with_relevance_scores( self, query: List[float], k: int ) -> List[Tuple[Document, float]]: match_documents_params = dict(query_embedding=query, match_count=k) res = self._client.rpc(self.query_name, match_documents_params).execute() match_result = [ ( Document( metadata=search.get("metadata", {}), # type: ignore page_content=search.get("content", ""), ), search.get("similarity", 0.0), ) for search in res.data if search.get("content") ] return match_result [docs] def similarity_search_by_vector_returning_embeddings( self, query: List[float], k: int ) -> List[Tuple[Document, float, np.ndarray[np.float32, Any]]]: match_documents_params = dict(query_embedding=query, match_count=k) res = self._client.rpc(self.query_name, match_documents_params).execute() match_result = [ ( Document( metadata=search.get("metadata", {}), # type: ignore page_content=search.get("content", ""), ), search.get("similarity", 0.0), # Supabase returns a vector type as its string representation (!). # This is a hack to convert the string to a numpy array. np.fromstring( search.get("embedding", "").strip("[]"), np.float32, sep="," ), )
https://api.python.langchain.com/en/stable/_modules/langchain/vectorstores/supabase.html
ee538ed776f5-4
), ) for search in res.data if search.get("content") ] return match_result @staticmethod def _texts_to_documents( texts: Iterable[str], metadatas: Optional[Iterable[dict[Any, Any]]] = None, ) -> List[Document]: """Return list of Documents from list of texts and metadatas.""" if metadatas is None: metadatas = repeat({}) docs = [ Document(page_content=text, metadata=metadata) for text, metadata in zip(texts, metadatas) ] return docs @staticmethod def _add_vectors( client: supabase.client.Client, table_name: str, vectors: List[List[float]], documents: List[Document], ids: List[str], ) -> List[str]: """Add vectors to Supabase table.""" rows: List[dict[str, Any]] = [ { "id": ids[idx], "content": documents[idx].page_content, "embedding": embedding, "metadata": documents[idx].metadata, # type: ignore } for idx, embedding in enumerate(vectors) ] # According to the SupabaseVectorStore JS implementation, the best chunk size # is 500 chunk_size = 500 id_list: List[str] = [] for i in range(0, len(rows), chunk_size): chunk = rows[i : i + chunk_size] result = client.from_(table_name).upsert(chunk).execute() # type: ignore if len(result.data) == 0:
if len(result.data) == 0: raise Exception("Error inserting: No rows added") # VectorStore.add_vectors returns ids as strings ids = [str(i.get("id")) for i in result.data if i.get("id")] id_list.extend(ids) return id_list [docs] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ result = self.similarity_search_by_vector_returning_embeddings( embedding, fetch_k ) matched_documents = [doc_tuple[0] for doc_tuple in result] matched_embeddings = [doc_tuple[2] for doc_tuple in result] mmr_selected = maximal_marginal_relevance( np.array([embedding], dtype=np.float32), matched_embeddings, k=k, lambda_mult=lambda_mult, )
matched_embeddings, k=k, lambda_mult=lambda_mult, ) filtered_documents = [matched_documents[i] for i in mmr_selected] return filtered_documents [docs] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. `max_marginal_relevance_search` requires that `query_name` returns matched embeddings alongside the match documents. The following function demonstrates how to do this: ```sql CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536), match_count int) RETURNS TABLE( id bigint, content text, metadata jsonb, embedding vector(1536), similarity float) LANGUAGE plpgsql AS $$ # variable_conflict use_column BEGIN RETURN query SELECT id, content, metadata, embedding,
SELECT id, content, metadata, embedding, 1 -(docstore.embedding <=> query_embedding) AS similarity FROM docstore ORDER BY docstore.embedding <=> query_embedding LIMIT match_count; END; $$; ``` """ embedding = self._embedding.embed_documents([query]) docs = self.max_marginal_relevance_search_by_vector( embedding[0], k, fetch_k, lambda_mult=lambda_mult ) return docs [docs] def delete(self, ids: List[str]) -> None: """Delete by vector IDs. Args: ids: List of ids to delete. """ rows: List[dict[str, Any]] = [ { "id": id, } for id in ids ] # TODO: Check if this can be done in bulk for row in rows: self._client.from_(self.table_name).delete().eq("id", row["id"]).execute()
Source code for langchain.vectorstores.cassandra """Wrapper around Cassandra vector-store capabilities, based on cassIO.""" from __future__ import annotations import hashlib import typing from typing import Any, Iterable, List, Optional, Tuple, Type, TypeVar import numpy as np if typing.TYPE_CHECKING: from cassandra.cluster import Session from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore from langchain.vectorstores.utils import maximal_marginal_relevance CVST = TypeVar("CVST", bound="Cassandra") # a positive number of seconds to expire entries, or None for no expiration. CASSANDRA_VECTORSTORE_DEFAULT_TTL_SECONDS = None def _hash(_input: str) -> str: """Use a deterministic hashing approach.""" return hashlib.md5(_input.encode()).hexdigest() [docs]class Cassandra(VectorStore): """Wrapper around Cassandra embeddings platform. There is no notion of a default table name, since each embedding function implies its own vector dimension, which is part of the schema. Example: .. code-block:: python from langchain.vectorstores import Cassandra from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() session = ... keyspace = 'my_keyspace' vectorstore = Cassandra(embeddings, session, keyspace, 'my_doc_archive') """ _embedding_dimension: int | None def _getEmbeddingDimension(self) -> int: if self._embedding_dimension is None: self._embedding_dimension = len( self.embedding.embed_query("This is a sample sentence.") ) return self._embedding_dimension def __init__( self,
) return self._embedding_dimension def __init__( self, embedding: Embeddings, session: Session, keyspace: str, table_name: str, ttl_seconds: int | None = CASSANDRA_VECTORSTORE_DEFAULT_TTL_SECONDS, ) -> None: try: from cassio.vector import VectorTable except (ImportError, ModuleNotFoundError): raise ImportError( "Could not import cassio python package. " "Please install it with `pip install cassio`." ) """Create a vector table.""" self.embedding = embedding self.session = session self.keyspace = keyspace self.table_name = table_name self.ttl_seconds = ttl_seconds # self._embedding_dimension = None # self.table = VectorTable( session=session, keyspace=keyspace, table=table_name, embedding_dimension=self._getEmbeddingDimension(), auto_id=False, # the `add_texts` contract admits user-provided ids ) [docs] def delete_collection(self) -> None: """ Just an alias for `clear` (to better align with other VectorStore implementations). """ self.clear() [docs] def clear(self) -> None: """Empty the collection.""" self.table.clear() [docs] def delete_by_document_id(self, document_id: str) -> None: return self.table.delete(document_id) [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any,
ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. Returns: List[str]: List of IDs of the added texts. """ _texts = list(texts) # lest it be a generator or something if ids is None: # unless otherwise specified, we have deterministic IDs: # re-inserting an existing document will not create a duplicate. # (and effectively update the metadata) ids = [_hash(text) for text in _texts] if metadatas is None: metadatas = [{} for _ in _texts] # ttl_seconds = kwargs.get("ttl_seconds", self.ttl_seconds) # embedding_vectors = self.embedding.embed_documents(_texts) for text, embedding_vector, text_id, metadata in zip( _texts, embedding_vectors, ids, metadatas ): self.table.put( document=text, embedding_vector=embedding_vector, document_id=text_id, metadata=metadata, ttl_seconds=ttl_seconds, ) # return ids # id-returning search facilities [docs] def similarity_search_with_score_id_by_vector( self, embedding: List[float], k: int = 4, ) -> List[Tuple[Document, float, str]]: """Return docs most similar to embedding vector.
"""Return docs most similar to embedding vector. No support for `filter` query (on metadata) along with vector search. Args: embedding (str): Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. Returns: List of (Document, score, id), the most similar to the query vector. """ hits = self.table.search( embedding_vector=embedding, top_k=k, metric="cos", metric_threshold=None, ) # We stick to 'cos' distance as it can be normalized on a 0-1 axis # (1=most relevant), as required by this class' contract. return [ ( Document( page_content=hit["document"], metadata=hit["metadata"], ), 0.5 + 0.5 * hit["distance"], hit["document_id"], ) for hit in hits ] [docs] def similarity_search_with_score_id( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float, str]]: embedding_vector = self.embedding.embed_query(query) return self.similarity_search_with_score_id_by_vector( embedding=embedding_vector, k=k, ) # id-unaware search facilities [docs] def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, ) -> List[Tuple[Document, float]]: """Return docs most similar to embedding vector.
"""Return docs most similar to embedding vector. No support for `filter` query (on metadata) along with vector search. Args: embedding (str): Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. Returns: List of (Document, score), the most similar to the query vector. """ return [ (doc, score) for (doc, score, docId) in self.similarity_search_with_score_id_by_vector( embedding=embedding, k=k, ) ] [docs] def similarity_search( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Document]: # embedding_vector = self.embedding.embed_query(query) return self.similarity_search_by_vector( embedding_vector, k, **kwargs, ) [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any, ) -> List[Document]: return [ doc for doc, _ in self.similarity_search_with_score_by_vector( embedding, k, ) ] [docs] def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: embedding_vector = self.embedding.embed_query(query) return self.similarity_search_with_score_by_vector( embedding_vector, k, )
embedding_vector, k, ) # Even though this is a `_`-method, # it is apparently used by VectorSearch parent class # in an exposed method (`similarity_search_with_relevance_scores`). # So we implement it (hmm). def _similarity_search_with_relevance_scores( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: return self.similarity_search_with_score( query, k, **kwargs, ) [docs] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Returns: List of Documents selected by maximal marginal relevance. """ prefetchHits = self.table.search( embedding_vector=embedding, top_k=fetch_k, metric="cos", metric_threshold=None, )
metric="cos", metric_threshold=None, ) # let the mmr utility pick the *indices* in the above array mmrChosenIndices = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), [pfHit["embedding_vector"] for pfHit in prefetchHits], k=k, lambda_mult=lambda_mult, ) mmrHits = [ pfHit for pfIndex, pfHit in enumerate(prefetchHits) if pfIndex in mmrChosenIndices ] return [ Document( page_content=hit["document"], metadata=hit["metadata"], ) for hit in mmrHits ] [docs] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Optional. Returns: List of Documents selected by maximal marginal relevance. """ embedding_vector = self.embedding.embed_query(query) return self.max_marginal_relevance_search_by_vector(
return self.max_marginal_relevance_search_by_vector( embedding_vector, k, fetch_k, lambda_mult=lambda_mult, ) [docs] @classmethod def from_texts( cls: Type[CVST], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> CVST: """Create a Cassandra vectorstore from raw texts. No support for specifying text IDs Returns: a Cassandra vectorstore. """ session: Session = kwargs["session"] keyspace: str = kwargs["keyspace"] table_name: str = kwargs["table_name"] cassandraStore = cls( embedding=embedding, session=session, keyspace=keyspace, table_name=table_name, ) cassandraStore.add_texts(texts=texts, metadatas=metadatas) return cassandraStore [docs] @classmethod def from_documents( cls: Type[CVST], documents: List[Document], embedding: Embeddings, **kwargs: Any, ) -> CVST: """Create a Cassandra vectorstore from a document list. No support for specifying text IDs Returns: a Cassandra vectorstore. """ texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] session: Session = kwargs["session"] keyspace: str = kwargs["keyspace"] table_name: str = kwargs["table_name"] return cls.from_texts( texts=texts,
return cls.from_texts( texts=texts, metadatas=metadatas, embedding=embedding, session=session, keyspace=keyspace, table_name=table_name, )
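As a quick orientation to the class above, the following illustrative sketch indexes a few texts against a Cassandra cluster and runs a scored search. The contact point, keyspace, and table name are placeholders; the keyspace must already exist (cassIO creates the vector table itself), and `OPENAI_API_KEY` must be set for the embeddings.

```python
from cassandra.cluster import Cluster

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.cassandra import Cassandra

# Placeholder contact point; point this at a reachable Cassandra cluster.
session = Cluster(["127.0.0.1"]).connect()

# from_texts requires session, keyspace, and table_name as keyword arguments.
store = Cassandra.from_texts(
    [
        "Cassandra rows are keyed by an md5 hash of the text by default.",
        "MMR re-ranks the fetched hits for diversity.",
    ],
    OpenAIEmbeddings(),
    session=session,
    keyspace="demo_keyspace",  # placeholder keyspace (must exist)
    table_name="demo_docs",    # placeholder table name
)

for doc, score in store.similarity_search_with_score("How are rows keyed?", k=1):
    print(round(score, 3), doc.page_content)
```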
Source code for langchain.vectorstores.alibabacloud_opensearch

import json
import logging
import numbers
from hashlib import sha1
from typing import Any, Dict, Iterable, List, Optional, Tuple

from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores.base import VectorStore

logger = logging.getLogger()


[docs]class AlibabaCloudOpenSearchSettings:
    """Alibaba Cloud OpenSearch client configuration.

    Attribute:
        endpoint (str) : The endpoint of the OpenSearch instance. You can find it
            in the console of Alibaba Cloud OpenSearch.
        instance_id (str) : The identifier of the OpenSearch instance. You can find
            it in the console of Alibaba Cloud OpenSearch.
        datasource_name (str): The name of the data source specified when creating it.
        username (str) : The username specified when purchasing the instance.
        password (str) : The password specified when purchasing the instance.
        embedding_index_name (str) : The name of the vector attribute specified when
            configuring the instance attributes.
        field_name_mapping (Dict) : The field name mapping between the OpenSearch
            vector store and the field names configured on the OpenSearch instance,
            for example:

            {
                'id': 'The id field name map of index document.',
                'document': 'The text field name map of index document.',
                'embedding': 'In the embedding field of the opensearch instance,
                    the values must be in float16 multivalue type and separated
                    by commas.',
                'metadata_field_x': 'Metadata field mapping includes the mapped
                    field name and operator in the mapping value, separated by a
                    comma between the mapped field name and the operator.',
            }
    """

    endpoint: str
instance_id: str username: str password: str datasource_name: str embedding_index_name: str field_name_mapping: Dict[str, str] = { "id": "id", "document": "document", "embedding": "embedding", "metadata_field_x": "metadata_field_x,operator", } def __init__( self, endpoint: str, instance_id: str, username: str, password: str, datasource_name: str, embedding_index_name: str, field_name_mapping: Dict[str, str], ) -> None: self.endpoint = endpoint self.instance_id = instance_id self.username = username self.password = password self.datasource_name = datasource_name self.embedding_index_name = embedding_index_name self.field_name_mapping = field_name_mapping def __getitem__(self, item: str) -> Any: return getattr(self, item) def create_metadata(fields: Dict[str, Any]) -> Dict[str, Any]: """Create metadata from fields. Args: fields: The fields of the document. The fields must be a dict. Returns: metadata: The metadata of the document. The metadata must be a dict. """ metadata: Dict[str, Any] = {} for key, value in fields.items(): if key == "id" or key == "document" or key == "embedding": continue metadata[key] = value return metadata [docs]class AlibabaCloudOpenSearch(VectorStore): """Alibaba Cloud OpenSearch Vector Store""" def __init__( self, embedding: Embeddings,
def __init__( self, embedding: Embeddings, config: AlibabaCloudOpenSearchSettings, **kwargs: Any, ) -> None: try: from alibabacloud_ha3engine import client, models from alibabacloud_tea_util import models as util_models except ImportError: raise ValueError( "Could not import alibaba cloud opensearch python package. " "Please install it with `pip install alibabacloud-ha3engine`." ) self.config = config self.embedding = embedding self.runtime = util_models.RuntimeOptions( connect_timeout=5000, read_timeout=10000, autoretry=False, ignore_ssl=False, max_idle_conns=50, ) self.ha3EngineClient = client.Client( models.Config( endpoint=config.endpoint, instance_id=config.instance_id, protocol="http", access_user_name=config.username, access_pass_word=config.password, ) ) self.options_headers: Dict[str, str] = {} [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: def _upsert(push_doc_list: List[Dict]) -> List[str]: if push_doc_list is None or len(push_doc_list) == 0: return [] try: push_request = models.PushDocumentsRequestModel( self.options_headers, push_doc_list ) push_response = self.ha3EngineClient.push_documents( self.config.datasource_name, field_name_map["id"], push_request
self.config.datasource_name, field_name_map["id"], push_request ) json_response = json.loads(push_response.body) if json_response["status"] == "OK": return [ push_doc["fields"][field_name_map["id"]] for push_doc in push_doc_list ] return [] except Exception as e: logger.error( f"add doc to endpoint:{self.config.endpoint} " f"instance_id:{self.config.instance_id} failed.", e, ) raise e from alibabacloud_ha3engine import models ids = [sha1(t.encode("utf-8")).hexdigest() for t in texts] embeddings = self.embedding.embed_documents(list(texts)) metadatas = metadatas or [{} for _ in texts] field_name_map = self.config.field_name_mapping add_doc_list = [] text_list = list(texts) for idx, doc_id in enumerate(ids): embedding = embeddings[idx] if idx < len(embeddings) else None metadata = metadatas[idx] if idx < len(metadatas) else None text = text_list[idx] if idx < len(text_list) else None add_doc: Dict[str, Any] = dict() add_doc_fields: Dict[str, Any] = dict() add_doc_fields.__setitem__(field_name_map["id"], doc_id) add_doc_fields.__setitem__(field_name_map["document"], text) if embedding is not None: add_doc_fields.__setitem__( field_name_map["embedding"], ",".join(str(unit) for unit in embedding), ) if metadata is not None:
) if metadata is not None: for md_key, md_value in metadata.items(): add_doc_fields.__setitem__( field_name_map[md_key].split(",")[0], md_value ) add_doc.__setitem__("fields", add_doc_fields) add_doc.__setitem__("cmd", "add") add_doc_list.append(add_doc) return _upsert(add_doc_list) [docs] def similarity_search( self, query: str, k: int = 4, search_filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: embedding = self.embedding.embed_query(query) return self.create_results( self.inner_embedding_query( embedding=embedding, search_filter=search_filter, k=k ) ) [docs] def similarity_search_with_relevance_scores( self, query: str, k: int = 4, search_filter: Optional[dict] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: embedding: List[float] = self.embedding.embed_query(query) return self.create_results_with_score( self.inner_embedding_query( embedding=embedding, search_filter=search_filter, k=k ) ) [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, search_filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: return self.create_results( self.inner_embedding_query( embedding=embedding, search_filter=search_filter, k=k )
embedding=embedding, search_filter=search_filter, k=k ) ) [docs] def inner_embedding_query( self, embedding: List[float], search_filter: Optional[Dict[str, Any]] = None, k: int = 4, ) -> Dict[str, Any]: def generate_embedding_query() -> str: tmp_search_config_str = ( f"config=start:0,hit:{k},format:json&&cluster=general&&kvpairs=" f"first_formula:proxima_score({self.config.embedding_index_name})&&sort=+RANK" ) tmp_query_str = ( f"&&query={self.config.embedding_index_name}:" + "'" + ",".join(str(x) for x in embedding) + "'" ) if search_filter is not None: filter_clause = "&&filter=" + " AND ".join( [ create_filter(md_key, md_value) for md_key, md_value in search_filter.items() ] ) tmp_query_str += filter_clause return tmp_search_config_str + tmp_query_str def create_filter(md_key: str, md_value: Any) -> str: md_filter_expr = self.config.field_name_mapping[md_key] if md_filter_expr is None: return "" expr = md_filter_expr.split(",") if len(expr) != 2: logger.error( f"filter {md_filter_expr} express is not correct, " f"must contain mapping field and operator." ) return "" md_filter_key = expr[0].strip() md_filter_operator = expr[1].strip()
md_filter_operator = expr[1].strip() if isinstance(md_value, numbers.Number): return f"{md_filter_key} {md_filter_operator} {md_value}" return f'{md_filter_key}{md_filter_operator}"{md_value}"' def search_data(single_query_str: str) -> Dict[str, Any]: search_query = models.SearchQuery(query=single_query_str) search_request = models.SearchRequestModel( self.options_headers, search_query ) return json.loads(self.ha3EngineClient.search(search_request).body) from alibabacloud_ha3engine import models try: query_str = generate_embedding_query() json_response = search_data(query_str) if len(json_response["errors"]) != 0: logger.error( f"query {self.config.endpoint} {self.config.instance_id} " f"errors:{json_response['errors']} failed." ) else: return json_response except Exception as e: logger.error( f"query instance endpoint:{self.config.endpoint} " f"instance_id:{self.config.instance_id} failed.", e, ) return {} [docs] def create_results(self, json_result: Dict[str, Any]) -> List[Document]: items = json_result["result"]["items"] query_result_list: List[Document] = [] for item in items: fields = item["fields"] query_result_list.append( Document( page_content=fields[self.config.field_name_mapping["document"]], metadata=create_metadata(fields), ) ) return query_result_list [docs] def create_results_with_score( self, json_result: Dict[str, Any]
self, json_result: Dict[str, Any] ) -> List[Tuple[Document, float]]: items = json_result["result"]["items"] query_result_list: List[Tuple[Document, float]] = [] for item in items: fields = item["fields"] query_result_list.append( ( Document( page_content=fields[self.config.field_name_mapping["document"]], metadata=create_metadata(fields), ), float(item["sortExprValues"][0]), ) ) return query_result_list [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, config: Optional[AlibabaCloudOpenSearchSettings] = None, **kwargs: Any, ) -> "AlibabaCloudOpenSearch": if config is None: raise Exception("config can't be none") ctx = cls(embedding, config, **kwargs) ctx.add_texts(texts=texts, metadatas=metadatas) return ctx [docs] @classmethod def from_documents( cls, documents: List[Document], embedding: Embeddings, ids: Optional[List[str]] = None, config: Optional[AlibabaCloudOpenSearchSettings] = None, **kwargs: Any, ) -> "AlibabaCloudOpenSearch": if config is None: raise Exception("config can't be none") texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] return cls.from_texts( texts=texts,
return cls.from_texts( texts=texts, embedding=embedding, metadatas=metadatas, config=config, **kwargs, )
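For orientation, here is an illustrative sketch of wiring the settings and the store together. Every value in the settings object is a placeholder: the endpoint, instance id, credentials, data source, vector index name, and field mapping must match how the OpenSearch instance was actually configured in the Alibaba Cloud console, and `OPENAI_API_KEY` must be set for the embeddings.

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.alibabacloud_opensearch import (
    AlibabaCloudOpenSearch,
    AlibabaCloudOpenSearchSettings,
)

settings = AlibabaCloudOpenSearchSettings(
    endpoint="ha-cn-xxxxxxxx.public.ha.aliyuncs.com",  # placeholder endpoint
    instance_id="ha-cn-xxxxxxxx",                      # placeholder instance id
    username="opensearch_user",
    password="opensearch_password",
    datasource_name="my_datasource",
    embedding_index_name="embedding_index",
    field_name_mapping={
        "id": "id",
        "document": "document",
        "embedding": "embedding",
        # Metadata fields map to "<instance_field>,<operator>" pairs.
        "source": "source,=",
    },
)

store = AlibabaCloudOpenSearch.from_texts(
    ["Vectors are stored as comma-separated float strings."],
    OpenAIEmbeddings(),
    metadatas=[{"source": "notes"}],
    config=settings,
)

print(store.similarity_search("How are vectors stored?", k=1))
```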
Source code for langchain.vectorstores.starrocks """Wrapper around open source StarRocks VectorSearch capability.""" from __future__ import annotations import json import logging from hashlib import sha1 from threading import Thread from typing import Any, Dict, Iterable, List, Optional, Tuple from pydantic import BaseSettings from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore logger = logging.getLogger() DEBUG = False def has_mul_sub_str(s: str, *args: Any) -> bool: """ Check if a string has multiple substrings. Args: s: The string to check *args: The substrings to check for in the string Returns: bool: True if all substrings are present in the string, False otherwise """ for a in args: if a not in s: return False return True def debug_output(s: Any) -> None: """ Print a debug message if DEBUG is True. Args: s: The message to print """ if DEBUG: print(s) def get_named_result(connection: Any, query: str) -> List[dict[str, Any]]: """ Get a named result from a query. Args: connection: The connection to the database query: The query to execute Returns: List[dict[str, Any]]: The result of the query """ cursor = connection.cursor() cursor.execute(query) columns = cursor.description result = [] for value in cursor.fetchall(): r = {} for idx, datum in enumerate(value): k = columns[idx][0]
            r[k] = datum
        result.append(r)
    debug_output(result)
    cursor.close()
    return result


class StarRocksSettings(BaseSettings):
    """StarRocks Client Configuration

    Attribute:
        StarRocks_host (str) : The host to connect to the StarRocks backend.
                             Defaults to 'localhost'.
        StarRocks_port (int) : The query port of the StarRocks frontend
                             (MySQL protocol). Defaults to 9030.
        username (str) : Username to login. Defaults to 'root'.
        password (str) : Password to login. Defaults to '' (empty).
        database (str) : Database name to find the table. Defaults to 'default'.
        table (str) : Table name to operate on. Defaults to 'langchain'.

        column_map (Dict) : Column type map to project column name onto langchain
                            semantics. Must have keys: `id`, `document`, `embedding`,
                            `metadata`, and must be the same size as the number of
                            columns. For example:
                            .. code-block:: python

                                {
                                    'id': 'text_id',
                                    'embedding': 'text_embedding',
                                    'document': 'text_plain',
                                    'metadata': 'metadata_dictionary_in_json',
                                }

                            Defaults to identity map.
    """

    host: str = "localhost"
    port: int = 9030
    username: str = "root"
    password: str = ""
    column_map: Dict[str, str] = {
        "id": "id",
        "document": "document",
        "embedding": "embedding",
        "metadata": "metadata",
    }
database: str = "default" table: str = "langchain" def __getitem__(self, item: str) -> Any: return getattr(self, item) class Config: env_file = ".env" env_prefix = "starrocks_" env_file_encoding = "utf-8" [docs]class StarRocks(VectorStore): """Wrapper around StarRocks vector database You need a `pymysql` python package, and a valid account to connect to StarRocks. Right now StarRocks has only implemented `cosine_similarity` function to compute distance between two vectors. And there is no vector inside right now, so we have to iterate all vectors and compute spatial distance. For more information, please visit [StarRocks official site](https://www.starrocks.io/) [StarRocks github](https://github.com/StarRocks/starrocks) """ def __init__( self, embedding: Embeddings, config: Optional[StarRocksSettings] = None, **kwargs: Any, ) -> None: """StarRocks Wrapper to LangChain embedding_function (Embeddings): config (StarRocksSettings): Configuration to StarRocks Client """ try: import pymysql # type: ignore[import] except ImportError: raise ImportError( "Could not import pymysql python package. " "Please install it with `pip install pymysql`." ) try: from tqdm import tqdm self.pgbar = tqdm except ImportError: # Just in case if tqdm is not installed self.pgbar = lambda x, **kwargs: x
self.pgbar = lambda x, **kwargs: x super().__init__() if config is not None: self.config = config else: self.config = StarRocksSettings() assert self.config assert self.config.host and self.config.port assert self.config.column_map and self.config.database and self.config.table for k in ["id", "embedding", "document", "metadata"]: assert k in self.config.column_map # initialize the schema dim = len(embedding.embed_query("test")) self.schema = f"""\ CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}( {self.config.column_map['id']} string, {self.config.column_map['document']} string, {self.config.column_map['embedding']} array<float>, {self.config.column_map['metadata']} string ) ENGINE = OLAP PRIMARY KEY(id) DISTRIBUTED BY HASH(id) \ PROPERTIES ("replication_num" = "1")\ """ self.dim = dim self.BS = "\\" self.must_escape = ("\\", "'") self.embedding_function = embedding self.dist_order = "DESC" debug_output(self.config) # Create a connection to StarRocks self.connection = pymysql.connect( host=self.config.host, port=self.config.port, user=self.config.username, password=self.config.password, database=self.config.database, **kwargs, ) debug_output(self.schema) get_named_result(self.connection, self.schema) [docs] def escape_str(self, value: str) -> str: return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value)
def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> str: ks = ",".join(column_names) embed_tuple_index = tuple(column_names).index( self.config.column_map["embedding"] ) _data = [] for n in transac: n = ",".join( [ f"'{self.escape_str(str(_n))}'" if idx != embed_tuple_index else f"array<float>{str(_n)}" for (idx, _n) in enumerate(n) ] ) _data.append(f"({n})") i_str = f""" INSERT INTO {self.config.database}.{self.config.table}({ks}) VALUES {','.join(_data)} """ return i_str def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None: _insert_query = self._build_insert_sql(transac, column_names) debug_output(_insert_query) get_named_result(self.connection, _insert_query) [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, batch_size: int = 32, ids: Optional[Iterable[str]] = None, **kwargs: Any, ) -> List[str]: """Insert more texts through the embeddings and add to the VectorStore. Args: texts: Iterable of strings to add to the VectorStore. ids: Optional list of ids to associate with the texts. batch_size: Batch size of insertion metadata: Optional column data to be inserted Returns:
metadata: Optional column data to be inserted Returns: List of ids from adding the texts into the VectorStore. """ # Embed and create the documents ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts] colmap_ = self.config.column_map transac = [] column_names = { colmap_["id"]: ids, colmap_["document"]: texts, colmap_["embedding"]: self.embedding_function.embed_documents(list(texts)), } metadatas = metadatas or [{} for _ in texts] column_names[colmap_["metadata"]] = map(json.dumps, metadatas) assert len(set(colmap_) - set(column_names)) >= 0 keys, values = zip(*column_names.items()) try: t = None for v in self.pgbar( zip(*values), desc="Inserting data...", total=len(metadatas) ): assert ( len(v[keys.index(self.config.column_map["embedding"])]) == self.dim ) transac.append(v) if len(transac) == batch_size: if t: t.join() t = Thread(target=self._insert, args=[transac, keys]) t.start() transac = [] if len(transac) > 0: if t: t.join() self._insert(transac, keys) return [i for i in ids] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return []
return [] [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[Dict[Any, Any]]] = None, config: Optional[StarRocksSettings] = None, text_ids: Optional[Iterable[str]] = None, batch_size: int = 32, **kwargs: Any, ) -> StarRocks: """Create StarRocks wrapper with existing texts Args: embedding_function (Embeddings): Function to extract text embedding texts (Iterable[str]): List or tuple of strings to be added config (StarRocksSettings, Optional): StarRocks configuration text_ids (Optional[Iterable], optional): IDs for the texts. Defaults to None. batch_size (int, optional): Batchsize when transmitting data to StarRocks. Defaults to 32. metadata (List[dict], optional): metadata to texts. Defaults to None. Returns: StarRocks Index """ ctx = cls(embedding, config, **kwargs) ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas) return ctx def __repr__(self) -> str: """Text representation for StarRocks Vector Store, prints backends, username and schemas. Easy to use with `str(StarRocks())` Returns: repr: string to show connection info and data schema """ _repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ " _repr += f"{self.config.host}:{self.config.port}\033[0m\n\n"
_repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n" width = 25 fields = 3 _repr += "-" * (width * fields + 1) + "\n" columns = ["name", "type", "key"] _repr += f"|\033[94m{columns[0]:24s}\033[0m|\033[96m{columns[1]:24s}" _repr += f"\033[0m|\033[96m{columns[2]:24s}\033[0m|\n" _repr += "-" * (width * fields + 1) + "\n" q_str = f"DESC {self.config.database}.{self.config.table}" debug_output(q_str) rs = get_named_result(self.connection, q_str) for r in rs: _repr += f"|\033[94m{r['Field']:24s}\033[0m|\033[96m{r['Type']:24s}" _repr += f"\033[0m|\033[96m{r['Key']:24s}\033[0m|\n" _repr += "-" * (width * fields + 1) + "\n" return _repr def _build_query_sql( self, q_emb: List[float], topk: int, where_str: Optional[str] = None ) -> str: q_emb_str = ",".join(map(str, q_emb)) if where_str: where_str = f"WHERE {where_str}" else: where_str = "" q_str = f""" SELECT {self.config.column_map['document']},
q_str = f""" SELECT {self.config.column_map['document']}, {self.config.column_map['metadata']}, cosine_similarity_norm(array<float>[{q_emb_str}], {self.config.column_map['embedding']}) as dist FROM {self.config.database}.{self.config.table} {where_str} ORDER BY dist {self.dist_order} LIMIT {topk} """ debug_output(q_str) return q_str [docs] def similarity_search( self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any ) -> List[Document]: """Perform a similarity search with StarRocks Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of Documents """ return self.similarity_search_by_vector( self.embedding_function.embed_query(query), k, where_str, **kwargs ) [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Perform a similarity search with StarRocks by vectors Args:
"""Perform a similarity search with StarRocks by vectors Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of (Document, similarity) """ q_str = self._build_query_sql(embedding, k, where_str) try: return [ Document( page_content=r[self.config.column_map["document"]], metadata=json.loads(r[self.config.column_map["metadata"]]), ) for r in get_named_result(self.connection, q_str) ] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] [docs] def similarity_search_with_relevance_scores( self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Perform a similarity search with StarRocks Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None.
where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of documents """ q_str = self._build_query_sql( self.embedding_function.embed_query(query), k, where_str ) try: return [ ( Document( page_content=r[self.config.column_map["document"]], metadata=json.loads(r[self.config.column_map["metadata"]]), ), r["dist"], ) for r in get_named_result(self.connection, q_str) ] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] [docs] def drop(self) -> None: """ Helper function: Drop data """ get_named_result( self.connection, f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}", ) @property def metadata_column(self) -> str: return self.config.column_map["metadata"]
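A short, illustrative usage sketch for the class above: the connection details are placeholders, the StarRocks cluster must be reachable over the MySQL protocol via `pymysql`, the account needs DDL rights (the constructor issues `CREATE TABLE IF NOT EXISTS`), and `OPENAI_API_KEY` must be set for the embeddings.

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.starrocks import StarRocks, StarRocksSettings

# Placeholder connection settings; adjust to a real StarRocks deployment.
settings = StarRocksSettings(
    host="127.0.0.1",
    port=9030,
    username="root",
    password="",
    database="demo",
    table="langchain_docs",
)

# Constructing the store creates the table (if missing) and then inserts the rows.
store = StarRocks.from_texts(
    ["StarRocks ranks rows with cosine_similarity_norm over all stored vectors."],
    OpenAIEmbeddings(),
    config=settings,
)

for doc, score in store.similarity_search_with_relevance_scores(
    "How is similarity computed?", k=1
):
    print(round(score, 3), doc.page_content)
```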
Source code for langchain.vectorstores.awadb """Wrapper around AwaDB for embedding vectors""" from __future__ import annotations import logging import uuid from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore # from pydantic import BaseModel, Field, root_validator if TYPE_CHECKING: import awadb logger = logging.getLogger() DEFAULT_TOPN = 4 [docs]class AwaDB(VectorStore): """Interface implemented by AwaDB vector stores.""" _DEFAULT_TABLE_NAME = "langchain_awadb" def __init__( self, table_name: str = _DEFAULT_TABLE_NAME, embedding: Optional[Embeddings] = None, log_and_data_dir: Optional[str] = None, client: Optional[awadb.Client] = None, ) -> None: """Initialize with AwaDB client.""" try: import awadb except ImportError: raise ValueError( "Could not import awadb python package. " "Please install it with `pip install awadb`." ) if client is not None: self.awadb_client = client else: if log_and_data_dir is not None: self.awadb_client = awadb.Client(log_and_data_dir) else: self.awadb_client = awadb.Client() if table_name == self._DEFAULT_TABLE_NAME: table_name += "_" table_name += str(uuid.uuid4()).split("-")[-1] self.awadb_client.Create(table_name) self.table2embeddings: dict[str, Embeddings] = {}
self.table2embeddings: dict[str, Embeddings] = {} if embedding is not None: self.table2embeddings[table_name] = embedding self.using_table_name = table_name [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, is_duplicate_texts: Optional[bool] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. is_duplicate_texts: Optional whether to duplicate texts. kwargs: vectorstore specific parameters. Returns: List of ids from adding the texts into the vectorstore. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embeddings = None if self.using_table_name in self.table2embeddings: embeddings = self.table2embeddings[self.using_table_name].embed_documents( list(texts) ) return self.awadb_client.AddTexts( "embedding_text", "text_embedding", texts, embeddings, metadatas, is_duplicate_texts, ) [docs] def load_local( self, table_name: str, **kwargs: Any, ) -> bool: if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") return self.awadb_client.Load(table_name) [docs] def similarity_search( self,
[docs] def similarity_search( self, query: str, k: int = DEFAULT_TOPN, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query.""" if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embedding = None if self.using_table_name in self.table2embeddings: embedding = self.table2embeddings[self.using_table_name].embed_query(query) else: from awadb import llm_embedding llm = llm_embedding.LLMEmbedding() embedding = llm.Embedding(query) return self.similarity_search_by_vector(embedding, k) [docs] def similarity_search_with_score( self, query: str, k: int = DEFAULT_TOPN, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and relevance scores, normalized on a scale from 0 to 1. 0 is dissimilar, 1 is most similar. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embedding = None if self.using_table_name in self.table2embeddings: embedding = self.table2embeddings[self.using_table_name].embed_query(query) else: from awadb import llm_embedding llm = llm_embedding.LLMEmbedding() embedding = llm.Embedding(query) results: List[Tuple[Document, float]] = [] scores: List[float] = [] retrieval_docs = self.similarity_search_by_vector(embedding, k, scores)
retrieval_docs = self.similarity_search_by_vector(embedding, k, scores) L2_Norm = 0.0 for score in scores: L2_Norm = L2_Norm + score * score L2_Norm = pow(L2_Norm, 0.5) doc_no = 0 for doc in retrieval_docs: doc_tuple = (doc, 1 - (scores[doc_no] / L2_Norm)) results.append(doc_tuple) doc_no = doc_no + 1 return results [docs] def similarity_search_with_relevance_scores( self, query: str, k: int = DEFAULT_TOPN, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and relevance scores, normalized on a scale from 0 to 1. 0 is dissimilar, 1 is most similar. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embedding = None if self.using_table_name in self.table2embeddings: embedding = self.table2embeddings[self.using_table_name].embed_query(query) show_results = self.awadb_client.Search(embedding, k) results: List[Tuple[Document, float]] = [] if show_results.__len__() == 0: return results scores: List[float] = [] retrieval_docs = self.similarity_search_by_vector(embedding, k, scores) L2_Norm = 0.0 for score in scores: L2_Norm = L2_Norm + score * score
L2_Norm = L2_Norm + score * score L2_Norm = pow(L2_Norm, 0.5) doc_no = 0 for doc in retrieval_docs: doc_tuple = (doc, 1 - scores[doc_no] / L2_Norm) results.append(doc_tuple) doc_no = doc_no + 1 return results [docs] def similarity_search_by_vector( self, embedding: Optional[List[float]] = None, k: int = DEFAULT_TOPN, scores: Optional[list] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") results: List[Document] = [] if embedding is None: return results show_results = self.awadb_client.Search(embedding, k) if show_results.__len__() == 0: return results for item_detail in show_results[0]["ResultItems"]: content = "" meta_data = {} for item_key in item_detail: if ( item_key == "Field@0" and self.using_table_name in self.table2embeddings ): # text for the document content = item_detail[item_key] elif item_key == "embedding_text": content = item_detail[item_key] elif (
content = item_detail[item_key] elif ( item_key == "Field@1" or item_key == "text_embedding" ): # embedding field for the document continue elif item_key == "score": # L2 distance if scores is not None: score = item_detail[item_key] scores.append(score) else: meta_data[item_key] = item_detail[item_key] results.append(Document(page_content=content, metadata=meta_data)) return results [docs] def create_table( self, table_name: str, **kwargs: Any, ) -> bool: """Create a new table.""" if self.awadb_client is None: return False ret = self.awadb_client.Create(table_name) if ret: self.using_table_name = table_name return ret [docs] def use( self, table_name: str, **kwargs: Any, ) -> bool: """Use the specified table. Don't know the tables, please invoke list_tables.""" if self.awadb_client is None: return False ret = self.awadb_client.Use(table_name) if ret: self.using_table_name = table_name return ret [docs] def list_tables( self, **kwargs: Any, ) -> List[str]: """List all the tables created by the client.""" if self.awadb_client is None: return [] return self.awadb_client.ListAllTables() [docs] def get_current_table( self, **kwargs: Any, ) -> str: """Get the current table."""
        return self.using_table_name

[docs]    @classmethod
    def from_texts(
        cls: Type[AwaDB],
        texts: List[str],
        embedding: Optional[Embeddings] = None,
        metadatas: Optional[List[dict]] = None,
        table_name: str = _DEFAULT_TABLE_NAME,
        log_and_data_dir: Optional[str] = None,
        client: Optional[awadb.Client] = None,
        **kwargs: Any,
    ) -> AwaDB:
        """Create an AwaDB vectorstore from raw texts.

        Args:
            texts (List[str]): List of texts to add to the table.
            embedding (Optional[Embeddings]): Embedding function. Defaults to None.
            metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
            table_name (str): Name of the table to create.
            log_and_data_dir (Optional[str]): Directory of logging and persistence.
            client (Optional[awadb.Client]): AwaDB client.

        Returns:
            AwaDB: AwaDB vectorstore.
        """
        awadb_client = cls(
            table_name=table_name,
            embedding=embedding,
            log_and_data_dir=log_and_data_dir,
            client=client,
        )
        awadb_client.add_texts(texts=texts, metadatas=metadatas)
        return awadb_client

[docs]    @classmethod
    def from_documents(
        cls: Type[AwaDB],
        documents: List[Document],
        embedding: Optional[Embeddings] = None,
table_name: str = _DEFAULT_TABLE_NAME, log_and_data_dir: Optional[str] = None, client: Optional[awadb.Client] = None, **kwargs: Any, ) -> AwaDB: """Create an AwaDB vectorstore from a list of documents. If a log_and_data_dir specified, the table will be persisted there. Args: documents (List[Document]): List of documents to add to the vectorstore. embedding (Optional[Embeddings]): Embedding function. Defaults to None. table_name (str): Name of the table to create. log_and_data_dir (Optional[str]): Directory to persist the table. client (Optional[awadb.Client]): AwaDB client Returns: AwaDB: AwaDB vectorstore. """ texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts( texts=texts, embedding=embedding, metadatas=metadatas, table_name=table_name, log_and_data_dir=log_and_data_dir, client=client, )
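To round out the listing, here is an illustrative sketch of building a table-backed store and querying it. The table name and data directory are placeholders. If no `embedding` is supplied, the store falls back to AwaDB's built-in embedding model; this sketch passes OpenAI embeddings explicitly, so `OPENAI_API_KEY` must be set.

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.awadb import AwaDB

# Placeholder table name and persistence directory.
store = AwaDB.from_texts(
    texts=["AwaDB keeps one embedding function per table."],
    embedding=OpenAIEmbeddings(),
    table_name="langchain_demo",
    log_and_data_dir="./awadb_data",
)

docs = store.similarity_search("How many embedding functions per table?", k=1)
print(docs[0].page_content)
```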
Source code for langchain.vectorstores.weaviate """Wrapper around weaviate vector database.""" from __future__ import annotations import datetime from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type from uuid import uuid4 import numpy as np from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env from langchain.vectorstores.base import VectorStore from langchain.vectorstores.utils import maximal_marginal_relevance def _default_schema(index_name: str) -> Dict: return { "class": index_name, "properties": [ { "name": "text", "dataType": ["text"], } ], } def _create_weaviate_client(**kwargs: Any) -> Any: client = kwargs.get("client") if client is not None: return client weaviate_url = get_from_dict_or_env(kwargs, "weaviate_url", "WEAVIATE_URL") try: # the weaviate api key param should not be mandatory weaviate_api_key = get_from_dict_or_env( kwargs, "weaviate_api_key", "WEAVIATE_API_KEY", None ) except ValueError: weaviate_api_key = None try: import weaviate except ImportError: raise ValueError( "Could not import weaviate python package. " "Please install it with `pip install weaviate-client`" ) auth = ( weaviate.auth.AuthApiKey(api_key=weaviate_api_key) if weaviate_api_key is not None else None )
if weaviate_api_key is not None else None ) client = weaviate.Client(weaviate_url, auth_client_secret=auth) return client def _default_score_normalizer(val: float) -> float: return 1 - 1 / (1 + np.exp(val)) def _json_serializable(value: Any) -> Any: if isinstance(value, datetime.datetime): return value.isoformat() return value [docs]class Weaviate(VectorStore): """Wrapper around Weaviate vector database. To use, you should have the ``weaviate-client`` python package installed. Example: .. code-block:: python import weaviate from langchain.vectorstores import Weaviate client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...) weaviate = Weaviate(client, index_name, text_key) """ def __init__( self, client: Any, index_name: str, text_key: str, embedding: Optional[Embeddings] = None, attributes: Optional[List[str]] = None, relevance_score_fn: Optional[ Callable[[float], float] ] = _default_score_normalizer, by_text: bool = True, ): """Initialize with Weaviate client.""" try: import weaviate except ImportError: raise ValueError( "Could not import weaviate python package. " "Please install it with `pip install weaviate-client`." ) if not isinstance(client, weaviate.Client): raise ValueError(
                f"client should be an instance of weaviate.Client, got {type(client)}"
            )
        self._client = client
        self._index_name = index_name
        self._embedding = embedding
        self._text_key = text_key
        self._query_attrs = [self._text_key]
        self._relevance_score_fn = relevance_score_fn
        self._by_text = by_text
        if attributes is not None:
            self._query_attrs.extend(attributes)

[docs]    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Upload texts with metadata (properties) to Weaviate."""
        from weaviate.util import get_valid_uuid

        ids = []
        with self._client.batch as batch:
            for i, text in enumerate(texts):
                data_properties = {self._text_key: text}
                if metadatas is not None:
                    for key, val in metadatas[i].items():
                        data_properties[key] = _json_serializable(val)

                # Allow for ids (consistent w/ other methods)
                # or uuids (backwards compatible w/ existing arg).
                # If the UUID of one of the objects already exists,
                # the existing object will be replaced by the new object.
                _id = get_valid_uuid(uuid4())
                if "uuids" in kwargs:
                    _id = kwargs["uuids"][i]
                elif "ids" in kwargs:
                    _id = kwargs["ids"][i]
if self._embedding is not None: vector = self._embedding.embed_documents([text])[0] else: vector = None batch.add_data_object( data_object=data_properties, class_name=self._index_name, uuid=_id, vector=vector, ) ids.append(_id) return ids [docs] def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ if self._by_text: return self.similarity_search_by_text(query, k, **kwargs) else: if self._embedding is None: raise ValueError( "_embedding cannot be None for similarity_search when " "_by_text=False" ) embedding = self._embedding.embed_query(query) return self.similarity_search_by_vector(embedding, k, **kwargs) [docs] def similarity_search_by_text( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ content: Dict[str, Any] = {"concepts": [query]} if kwargs.get("search_distance"):
if kwargs.get("search_distance"): content["certainty"] = kwargs.get("search_distance") query_obj = self._client.query.get(self._index_name, self._query_attrs) if kwargs.get("where_filter"): query_obj = query_obj.with_where(kwargs.get("where_filter")) if kwargs.get("additional"): query_obj = query_obj.with_additional(kwargs.get("additional")) result = query_obj.with_near_text(content).with_limit(k).do() if "errors" in result: raise ValueError(f"Error during query: {result['errors']}") docs = [] for res in result["data"]["Get"][self._index_name]: text = res.pop(self._text_key) docs.append(Document(page_content=text, metadata=res)) return docs [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Look up similar documents by embedding vector in Weaviate.""" vector = {"vector": embedding} query_obj = self._client.query.get(self._index_name, self._query_attrs) if kwargs.get("where_filter"): query_obj = query_obj.with_where(kwargs.get("where_filter")) if kwargs.get("additional"): query_obj = query_obj.with_additional(kwargs.get("additional")) result = query_obj.with_near_vector(vector).with_limit(k).do() if "errors" in result: raise ValueError(f"Error during query: {result['errors']}") docs = [] for res in result["data"]["Get"][self._index_name]: text = res.pop(self._text_key) docs.append(Document(page_content=text, metadata=res)) return docs
    [docs] def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        if self._embedding is not None:
            embedding = self._embedding.embed_query(query)
        else:
            raise ValueError(
                "max_marginal_relevance_search requires a suitable Embeddings object"
            )
        return self.max_marginal_relevance_search_by_vector(
            embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs
        )

    [docs] def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        vector = {"vector": embedding}
        query_obj = self._client.query.get(self._index_name, self._query_attrs)
        if kwargs.get("where_filter"):
            query_obj = query_obj.with_where(kwargs.get("where_filter"))
        results = (
            query_obj.with_additional("vector")
            .with_near_vector(vector)
            .with_limit(fetch_k)
            .do()
        )
        payload = results["data"]["Get"][self._index_name]
        embeddings = [result["_additional"]["vector"] for result in payload]
        mmr_selected = maximal_marginal_relevance(
            np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult
        )
        docs = []
        for idx in mmr_selected:
            text = payload[idx].pop(self._text_key)
            payload[idx].pop("_additional")
            meta = payload[idx]
            docs.append(Document(page_content=text, metadata=meta))
        return docs
    [docs] def similarity_search_with_score(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        """Return documents most similar to the query text, each paired with a
        raw similarity score (the dot product of the stored vector and the
        query embedding). Higher scores indicate greater similarity.
        """
        if self._embedding is None:
            raise ValueError(
                "_embedding cannot be None for similarity_search_with_score"
            )
        content: Dict[str, Any] = {"concepts": [query]}
        if kwargs.get("search_distance"):
            content["certainty"] = kwargs.get("search_distance")
        query_obj = self._client.query.get(self._index_name, self._query_attrs)
        if not self._by_text:
            embedding = self._embedding.embed_query(query)
            vector = {"vector": embedding}
            result = (
                query_obj.with_near_vector(vector)
                .with_limit(k)
                .with_additional("vector")
                .do()
            )
        else:
            result = (
                query_obj.with_near_text(content)
                .with_limit(k)
                .with_additional("vector")
                .do()
            )
        if "errors" in result:
            raise ValueError(f"Error during query: {result['errors']}")
        docs_and_scores = []
        for res in result["data"]["Get"][self._index_name]:
            text = res.pop(self._text_key)
            score = np.dot(
                res["_additional"]["vector"], self._embedding.embed_query(query)
            )
            docs_and_scores.append((Document(page_content=text, metadata=res), score))
        return docs_and_scores
    def _similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs and relevance scores, normalized on a scale from 0 to 1.

        0 is dissimilar, 1 is most similar.
        """
        if self._relevance_score_fn is None:
            raise ValueError(
                "relevance_score_fn must be provided to"
                " Weaviate constructor to normalize scores"
            )
        docs_and_scores = self.similarity_search_with_score(query, k=k, **kwargs)
        return [
            (doc, self._relevance_score_fn(score)) for doc, score in docs_and_scores
        ]

    [docs] @classmethod
    def from_texts(
        cls: Type[Weaviate],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> Weaviate:
        """Construct Weaviate wrapper from raw documents.

        This is a user-friendly interface that:
            1. Embeds documents.
            2. Creates a new index for the embeddings in the Weaviate instance.
            3. Adds the documents to the newly created Weaviate index.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain.vectorstores.weaviate import Weaviate
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                weaviate = Weaviate.from_texts(
                    texts,
                    embeddings,
                    weaviate_url="http://localhost:8080"
                )
        """
        client = _create_weaviate_client(**kwargs)

        from weaviate.util import get_valid_uuid

        index_name = kwargs.get("index_name", f"LangChain_{uuid4().hex}")
        embeddings = embedding.embed_documents(texts) if embedding else None
        text_key = "text"
        schema = _default_schema(index_name)
        attributes = list(metadatas[0].keys()) if metadatas else None

        # check whether the index already exists
        if not client.schema.contains(schema):
            client.schema.create_class(schema)

        with client.batch as batch:
            for i, text in enumerate(texts):
                data_properties = {
                    text_key: text,
                }
                if metadatas is not None:
                    for key in metadatas[i].keys():
                        data_properties[key] = metadatas[i][key]

                # If the UUID of one of the objects already exists,
                # the existing object will be replaced by the new object.
                if "uuids" in kwargs:
                    _id = kwargs["uuids"][i]
                else:
                    _id = get_valid_uuid(uuid4())

                # If an embedding strategy is not provided, we let Weaviate
                # create the embedding. Note that this will only work if
                # Weaviate has been installed with a vectorizer module such
                # as text2vec-contextionary.
                params = {
                    "uuid": _id,
                    "data_object": data_properties,
                    "class_name": index_name,
                }
"class_name": index_name, } if embeddings is not None: params["vector"] = embeddings[i] batch.add_data_object(**params) batch.flush() relevance_score_fn = kwargs.get("relevance_score_fn") by_text: bool = kwargs.get("by_text", False) return cls( client, index_name, text_key, embedding=embedding, attributes=attributes, relevance_score_fn=relevance_score_fn, by_text=by_text, ) [docs] def delete(self, ids: List[str]) -> None: """Delete by vector IDs. Args: ids: List of ids to delete. """ # TODO: Check if this can be done in bulk for id in ids: self._client.data_object.delete(uuid=id)
Source code for langchain.vectorstores.rocksetdb

"""Wrapper around Rockset vector database."""
from __future__ import annotations

import logging
from enum import Enum
from typing import Any, Iterable, List, Optional, Tuple

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore

logger = logging.getLogger(__name__)

[docs]class Rockset(VectorStore):
    """Wrapper around Rockset vector database.

    To use, you should have the `rockset` python package installed. Note that to
    use this, the collection being used must already exist in your Rockset
    instance. You must also ensure you use a Rockset ingest transformation to
    apply `VECTOR_ENFORCE` on the column being used to store `embedding_key` in
    the collection.
    See https://rockset.com/blog/introducing-vector-search-on-rockset/ for more
    details.

    Everything below assumes `commons` Rockset workspace.
    TODO: Add support for workspace args.

    Example:
        .. code-block:: python

            from langchain.vectorstores import Rockset
            from langchain.embeddings.openai import OpenAIEmbeddings
            import rockset

            # Make sure you use the right host (region) for your Rockset instance
            # and APIKEY has both read-write access to your collection.
            rs = rockset.RocksetClient(host=rockset.Regions.use1a1, api_key="***")
            collection_name = "langchain_demo"
            embeddings = OpenAIEmbeddings()
            vectorstore = Rockset(
                rs, embeddings, collection_name, "description", "description_embedding"
            )
    """

    def __init__(
        self,
        client: Any,
        embeddings: Embeddings,
        collection_name: str,
        text_key: str,
        embedding_key: str,
    ):
        """Initialize with Rockset client.

        Args:
            client: Rockset client object.
            collection_name: Rockset collection to insert docs / query.
            embeddings: LangChain Embeddings object used to generate embeddings
                for the given text.
            text_key: column in the Rockset collection used to store the text.
            embedding_key: column in the Rockset collection used to store the
                embedding. Note: We must apply `VECTOR_ENFORCE()` on this column
                via a Rockset ingest transformation.
        """
        try:
            from rockset import RocksetClient
        except ImportError:
            raise ImportError(
                "Could not import rockset client python package. "
                "Please install it with `pip install rockset`."
            )

        if not isinstance(client, RocksetClient):
            raise ValueError(
                f"client should be an instance of rockset.RocksetClient, "
                f"got {type(client)}"
            )

        # TODO: check that `collection_name` exists in rockset. Create if not.
        self._client = client
        self._collection_name = collection_name
        self._embeddings = embeddings
        self._text_key = text_key
        self._embedding_key = embedding_key

    [docs] def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        batch_size: int = 32,
        **kwargs: Any,
    ) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. batch_size: Send documents in batches to rockset. Returns: List of ids from adding the texts into the vectorstore. """ batch: list[dict] = [] stored_ids = [] for i, text in enumerate(texts): if len(batch) == batch_size: stored_ids += self._write_documents_to_rockset(batch) batch = [] doc = {} if metadatas and len(metadatas) > i: doc = metadatas[i] if ids and len(ids) > i: doc["_id"] = ids[i] doc[self._text_key] = text doc[self._embedding_key] = self._embeddings.embed_query(text) batch.append(doc) if len(batch) > 0: stored_ids += self._write_documents_to_rockset(batch) batch = [] return stored_ids [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Any = None, collection_name: str = "", text_key: str = "", embedding_key: str = "", ids: Optional[List[str]] = None, batch_size: int = 32, **kwargs: Any, ) -> Rockset: """Create Rockset wrapper with existing texts.
    ) -> Rockset:
        """Create Rockset wrapper with existing texts.

        This is intended as a quicker way to get started.
        """
        # Sanitize inputs
        assert client is not None, "Rockset Client cannot be None"
        assert collection_name, "Collection name cannot be empty"
        assert text_key, "Text key name cannot be empty"
        assert embedding_key, "Embedding key cannot be empty"

        rockset = cls(client, embedding, collection_name, text_key, embedding_key)
        rockset.add_texts(texts, metadatas, ids, batch_size)
        return rockset

    # Rockset supports these vector distance functions.
    [docs] class DistanceFunction(Enum):
        COSINE_SIM = "COSINE_SIM"
        EUCLIDEAN_DIST = "EUCLIDEAN_DIST"
        DOT_PRODUCT = "DOT_PRODUCT"

        # how to sort results for "similarity"
        [docs] def order_by(self) -> str:
            if self.value == "EUCLIDEAN_DIST":
                return "ASC"
            return "DESC"

    [docs] def similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        distance_func: DistanceFunction = DistanceFunction.COSINE_SIM,
        where_str: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Perform a similarity search with Rockset.

        Args:
            query (str): Text to look up documents similar to.
            distance_func (DistanceFunction): how to compute distance between two
                vectors in Rockset.
            k (int, optional): Top K neighbors to retrieve. Defaults to 4.
            where_str (Optional[str], optional): Metadata filters supplied as a
                SQL `where` condition string. Defaults to None.
                E.g. "price<=70.0 AND brand='Nintendo'".
                NOTE: Do not let end users fill this in directly, and always be
                mindful of SQL injection.

        Returns:
            List[Tuple[Document, float]]: List of documents with their relevance
                score.
        """
        return self.similarity_search_by_vector_with_relevance_scores(
            self._embeddings.embed_query(query),
            k,
            distance_func,
            where_str,
            **kwargs,
        )

    [docs] def similarity_search(
        self,
        query: str,
        k: int = 4,
        distance_func: DistanceFunction = DistanceFunction.COSINE_SIM,
        where_str: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Same as `similarity_search_with_relevance_scores` but doesn't return
        the scores.
        """
        return self.similarity_search_by_vector(
            self._embeddings.embed_query(query),
            k,
            distance_func,
            where_str,
            **kwargs,
        )
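    # Illustrative sketch (not part of the library source): assuming
    # `vectorstore` is the Rockset instance from the class docstring example
    # above and the collection has a numeric `price` field, with the query text
    # and filter below being made-up examples, a caller might write:
    #
    #     results = vectorstore.similarity_search_with_relevance_scores(
    #         "comfortable handheld console",
    #         k=3,
    #         distance_func=Rockset.DistanceFunction.COSINE_SIM,
    #         where_str="price <= 70.0",
    #     )
    #     for doc, score in results:
    #         print(score, doc.page_content)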
"""Accepts a query_embedding (vector), and returns documents with similar embeddings.""" docs_and_scores = self.similarity_search_by_vector_with_relevance_scores( embedding, k, distance_func, where_str, **kwargs ) return [doc for doc, _ in docs_and_scores] [docs] def similarity_search_by_vector_with_relevance_scores( self, embedding: List[float], k: int = 4, distance_func: DistanceFunction = DistanceFunction.COSINE_SIM, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Accepts a query_embedding (vector), and returns documents with similar embeddings along with their relevance scores.""" q_str = self._build_query_sql(embedding, distance_func, k, where_str) try: query_response = self._client.Queries.query(sql={"query": q_str}) except Exception as e: logger.error("Exception when querying Rockset: %s\n", e) return [] finalResult: list[Tuple[Document, float]] = [] for document in query_response.results: metadata = {} assert isinstance( document, dict ), "document should be of type `dict[str,Any]`. But found: `{}`".format( type(document) ) for k, v in document.items(): if k == self._text_key: assert isinstance( v, str ), "page content stored in column `{}` must be of type `str`. \ But found: `{}`".format( self._text_key, type(v) ) page_content = v