Source code for langchain.callbacks.streamlit

from __future__ import annotations

from typing import TYPE_CHECKING, Optional

from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.streamlit.streamlit_callback_handler import (
    LLMThoughtLabeler as LLMThoughtLabeler,
)
from langchain.callbacks.streamlit.streamlit_callback_handler import (
    StreamlitCallbackHandler as _InternalStreamlitCallbackHandler,
)

if TYPE_CHECKING:
    from streamlit.delta_generator import DeltaGenerator


def StreamlitCallbackHandler(
    parent_container: DeltaGenerator,
    *,
    max_thought_containers: int = 4,
    expand_new_thoughts: bool = True,
    collapse_completed_thoughts: bool = True,
    thought_labeler: Optional[LLMThoughtLabeler] = None,
) -> BaseCallbackHandler:
    """Construct a new StreamlitCallbackHandler.

    This CallbackHandler is geared towards use with a LangChain Agent; it displays
    the Agent's LLM and tool-usage "thoughts" inside a series of Streamlit expanders.

    Parameters
    ----------
    parent_container
        The `st.container` that will contain all the Streamlit elements that the
        Handler creates.
    max_thought_containers
        The max number of completed LLM thought containers to show at once. When
        this threshold is reached, a new thought will cause the oldest thoughts to
        be collapsed into a "History" expander. Defaults to 4.
    expand_new_thoughts
        Each LLM "thought" gets its own `st.expander`. This param controls whether
        that expander is expanded by default. Defaults to True.
    collapse_completed_thoughts
        If True, LLM thought expanders will be collapsed when completed.
        Defaults to True.
    thought_labeler
        An optional custom LLMThoughtLabeler instance. If unspecified, the handler
        will use the default thought labeling logic. Defaults to None.

    Returns
    -------
    A new StreamlitCallbackHandler instance.

    Note that this is an "auto-updating" API: if the installed version of Streamlit
    has a more recent StreamlitCallbackHandler implementation, an instance of that
    class will be used.
    """
    # If we're using a version of Streamlit that implements StreamlitCallbackHandler,
    # delegate to it instead of using our built-in handler. The official handler is
    # guaranteed to support the same set of kwargs.
    try:
        from streamlit.external.langchain import (
            StreamlitCallbackHandler as OfficialStreamlitCallbackHandler,  # type: ignore # noqa: 501
        )

        return OfficialStreamlitCallbackHandler(
            parent_container,
            max_thought_containers=max_thought_containers,
            expand_new_thoughts=expand_new_thoughts,
            collapse_completed_thoughts=collapse_completed_thoughts,
            thought_labeler=thought_labeler,
        )
    except ImportError:
        return _InternalStreamlitCallbackHandler(
            parent_container,
            max_thought_containers=max_thought_containers,
            expand_new_thoughts=expand_new_thoughts,
            collapse_completed_thoughts=collapse_completed_thoughts,
            thought_labeler=thought_labeler,
        )

https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streamlit.html
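Usage sketch (illustrative, not part of the module above): it assumes a Streamlit app, a previously constructed LangChain agent named `agent`, and a recent Streamlit version that provides `st.chat_input`. The handler is attached per-run through the `callbacks` argument.

import streamlit as st
from langchain.callbacks import StreamlitCallbackHandler

# `agent` is assumed to be an already-initialized LangChain agent.
prompt = st.chat_input("Ask the agent something")
if prompt:
    # Render the agent's intermediate "thoughts" into this container.
    st_callback = StreamlitCallbackHandler(st.container())
    response = agent.run(prompt, callbacks=[st_callback])
    st.write(response)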
Source code for langchain.callbacks.stdout

"""Callback Handler that prints to std out."""
from typing import Any, Dict, List, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.input import print_text
from langchain.schema import AgentAction, AgentFinish, LLMResult


class StdOutCallbackHandler(BaseCallbackHandler):
    """Callback Handler that prints to std out."""

    def __init__(self, color: Optional[str] = None) -> None:
        """Initialize callback handler."""
        self.color = color

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Print out the prompts."""
        pass

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Do nothing."""
        pass

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Do nothing."""
        pass

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Print out that we are entering a chain."""
        class_name = serialized.get("name", "")
        print(f"\n\n\033[1m> Entering new {class_name} chain...\033[0m")

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Print out that we finished a chain."""
        print("\n\033[1m> Finished chain.\033[0m")

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        **kwargs: Any,
    ) -> None:
        """Do nothing."""
        pass

    def on_agent_action(
        self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
    ) -> Any:
        """Run on agent action."""
        print_text(action.log, color=color if color else self.color)

    def on_tool_end(
        self,
        output: str,
        color: Optional[str] = None,
        observation_prefix: Optional[str] = None,
        llm_prefix: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """If not the final action, print out observation."""
        if observation_prefix is not None:
            print_text(f"\n{observation_prefix}")
        print_text(output, color=color if color else self.color)
        if llm_prefix is not None:
            print_text(f"\n{llm_prefix}")

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    def on_text(
        self,
        text: str,
        color: Optional[str] = None,
        end: str = "",
        **kwargs: Any,
    ) -> None:
        """Run when agent ends."""
        print_text(text, color=color if color else self.color, end=end)

    def on_agent_finish(
        self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
    ) -> None:
        """Run on agent end."""
        print_text(finish.log, color=color if color else self.color, end="\n")

https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/stdout.html
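Usage sketch (illustrative, not part of the module above): attaching the handler to a chain so the "Entering new ... chain" / "Finished chain" banners are printed. Assumes an `OPENAI_API_KEY` in the environment.

from langchain.callbacks import StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

handler = StdOutCallbackHandler()
llm = OpenAI()  # requires OPENAI_API_KEY in the environment
prompt = PromptTemplate.from_template("1 + {number} = ")

# Constructor callbacks fire for every invocation of this chain.
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[handler])
chain.run(number=2)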
Source code for langchain.callbacks.wandb_callback

import json
import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
    BaseMetadataCallbackHandler,
    flatten_dict,
    hash_string,
    import_pandas,
    import_spacy,
    import_textstat,
)
from langchain.schema import AgentAction, AgentFinish, LLMResult


def import_wandb() -> Any:
    """Import the wandb python package and raise an error if it is not installed."""
    try:
        import wandb  # noqa: F401
    except ImportError:
        raise ImportError(
            "To use the wandb callback manager you need to have the `wandb` python "
            "package installed. Please install it with `pip install wandb`"
        )
    return wandb


def load_json_to_dict(json_path: Union[str, Path]) -> dict:
    """Load json file to a dictionary.

    Parameters:
        json_path (str): The path to the json file.

    Returns:
        (dict): The dictionary representation of the json file.
    """
    with open(json_path, "r") as f:
        data = json.load(f)
    return data


def analyze_text(
    text: str,
    complexity_metrics: bool = True,
    visualize: bool = True,
    nlp: Any = None,
    output_dir: Optional[Union[str, Path]] = None,
) -> dict:
    """Analyze text using textstat and spacy.

    Parameters:
        text (str): The text to analyze.
        complexity_metrics (bool): Whether to compute complexity metrics.
        visualize (bool): Whether to visualize the text.
        nlp (spacy.lang): The spacy language model to use for visualization.
        output_dir (str): The directory to save the visualization files to.

    Returns:
        (dict): A dictionary containing the complexity metrics and visualization
            files serialized in a wandb.Html element.
    """
    resp = {}
    textstat = import_textstat()
    wandb = import_wandb()
    spacy = import_spacy()
    if complexity_metrics:
        text_complexity_metrics = {
            "flesch_reading_ease": textstat.flesch_reading_ease(text),
            "flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
            "smog_index": textstat.smog_index(text),
            "coleman_liau_index": textstat.coleman_liau_index(text),
            "automated_readability_index": textstat.automated_readability_index(text),
            "dale_chall_readability_score": textstat.dale_chall_readability_score(text),
            "difficult_words": textstat.difficult_words(text),
            "linsear_write_formula": textstat.linsear_write_formula(text),
            "gunning_fog": textstat.gunning_fog(text),
            "text_standard": textstat.text_standard(text),
            "fernandez_huerta": textstat.fernandez_huerta(text),
            "szigriszt_pazos": textstat.szigriszt_pazos(text),
            "gutierrez_polini": textstat.gutierrez_polini(text),
            "crawford": textstat.crawford(text),
            "gulpease_index": textstat.gulpease_index(text),
            "osman": textstat.osman(text),
        }
        resp.update(text_complexity_metrics)
    if visualize and nlp and output_dir is not None:
        doc = nlp(text)

        dep_out = spacy.displacy.render(  # type: ignore
            doc, style="dep", jupyter=False, page=True
        )
        dep_output_path = Path(output_dir, hash_string(f"dep-{text}") + ".html")
        dep_output_path.open("w", encoding="utf-8").write(dep_out)

        ent_out = spacy.displacy.render(  # type: ignore
            doc, style="ent", jupyter=False, page=True
        )
        ent_output_path = Path(output_dir, hash_string(f"ent-{text}") + ".html")
        ent_output_path.open("w", encoding="utf-8").write(ent_out)

        text_visualizations = {
            "dependency_tree": wandb.Html(str(dep_output_path)),
            "entities": wandb.Html(str(ent_output_path)),
        }
        resp.update(text_visualizations)
    return resp


def construct_html_from_prompt_and_generation(prompt: str, generation: str) -> Any:
    """Construct an html element from a prompt and a generation.

    Parameters:
        prompt (str): The prompt.
        generation (str): The generation.

    Returns:
        (wandb.Html): The html element.
    """
    wandb = import_wandb()
    formatted_prompt = prompt.replace("\n", "<br>")
    formatted_generation = generation.replace("\n", "<br>")
    return wandb.Html(
        f"""
        <p style="color:black;">{formatted_prompt}:</p>
        <blockquote>
          <p style="color:green;">
            {formatted_generation}
          </p>
        </blockquote>
        """,
        inject=False,
    )


class WandbCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
    """Callback Handler that logs to Weights and Biases.

    Parameters:
        job_type (str): The type of job.
        project (str): The project to log to.
        entity (str): The entity to log to.
        tags (list): The tags to log.
        group (str): The group to log to.
        name (str): The name of the run.
        notes (str): The notes to log.
        visualize (bool): Whether to visualize the run.
        complexity_metrics (bool): Whether to log complexity metrics.
        stream_logs (bool): Whether to stream callback actions to W&B.

    This handler utilizes the associated callback method, formats the input of each
    callback function with metadata regarding the state of the LLM run, and adds the
    response to the list of records for both the {method}_records and action. It then
    logs the response using the run.log() method to Weights and Biases.
    """

    def __init__(
        self,
        job_type: Optional[str] = None,
        project: Optional[str] = "langchain_callback_demo",
        entity: Optional[str] = None,
        tags: Optional[Sequence] = None,
        group: Optional[str] = None,
        name: Optional[str] = None,
        notes: Optional[str] = None,
        visualize: bool = False,
        complexity_metrics: bool = False,
        stream_logs: bool = False,
    ) -> None:
        """Initialize callback handler."""
        wandb = import_wandb()
        import_pandas()
        import_textstat()
        spacy = import_spacy()
        super().__init__()
        self.job_type = job_type
        self.project = project
        self.entity = entity
        self.tags = tags
        self.group = group
        self.name = name
        self.notes = notes
        self.visualize = visualize
        self.complexity_metrics = complexity_metrics
        self.stream_logs = stream_logs
        self.temp_dir = tempfile.TemporaryDirectory()
        self.run: wandb.sdk.wandb_run.Run = wandb.init(  # type: ignore
            job_type=self.job_type,
            project=self.project,
            entity=self.entity,
            tags=self.tags,
            group=self.group,
            name=self.name,
            notes=self.notes,
        )
        warning = (
            "DEPRECATION: The `WandbCallbackHandler` will soon be deprecated in favor "
            "of the `WandbTracer`. Please update your code to use the `WandbTracer` "
            "instead."
        )
        wandb.termwarn(
            warning,
            repeat=False,
        )
        self.callback_columns: list = []
        self.action_records: list = []
        self.complexity_metrics = complexity_metrics
        self.visualize = visualize
        self.nlp = spacy.load("en_core_web_sm")

    def _init_resp(self) -> Dict:
        return {k: None for k in self.callback_columns}

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts."""
        self.step += 1
        self.llm_starts += 1
        self.starts += 1
        resp = self._init_resp()
        resp.update({"action": "on_llm_start"})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())
        for prompt in prompts:
            prompt_resp = deepcopy(resp)
            prompt_resp["prompts"] = prompt
            self.on_llm_start_records.append(prompt_resp)
            self.action_records.append(prompt_resp)
            if self.stream_logs:
                self.run.log(prompt_resp)

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run when LLM generates a new token."""
        self.step += 1
        self.llm_streams += 1
        resp = self._init_resp()
        resp.update({"action": "on_llm_new_token", "token": token})
        resp.update(self.get_custom_callback_meta())
        self.on_llm_token_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running."""
        self.step += 1
        self.llm_ends += 1
        self.ends += 1
        resp = self._init_resp()
        resp.update({"action": "on_llm_end"})
        resp.update(flatten_dict(response.llm_output or {}))
        resp.update(self.get_custom_callback_meta())
        for generations in response.generations:
            for generation in generations:
                generation_resp = deepcopy(resp)
                generation_resp.update(flatten_dict(generation.dict()))
                generation_resp.update(
                    analyze_text(
                        generation.text,
                        complexity_metrics=self.complexity_metrics,
                        visualize=self.visualize,
                        nlp=self.nlp,
                        output_dir=self.temp_dir.name,
                    )
                )
                self.on_llm_end_records.append(generation_resp)
                self.action_records.append(generation_resp)
                if self.stream_logs:
                    self.run.log(generation_resp)

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        self.step += 1
        self.errors += 1

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Run when chain starts running."""
        self.step += 1
        self.chain_starts += 1
        self.starts += 1
        resp = self._init_resp()
        resp.update({"action": "on_chain_start"})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())
        chain_input = inputs["input"]
        if isinstance(chain_input, str):
            input_resp = deepcopy(resp)
            input_resp["input"] = chain_input
            self.on_chain_start_records.append(input_resp)
            self.action_records.append(input_resp)
            if self.stream_logs:
                self.run.log(input_resp)
        elif isinstance(chain_input, list):
            for inp in chain_input:
                input_resp = deepcopy(resp)
                input_resp.update(inp)
                self.on_chain_start_records.append(input_resp)
                self.action_records.append(input_resp)
                if self.stream_logs:
                    self.run.log(input_resp)
        else:
            raise ValueError("Unexpected data format provided!")

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running."""
        self.step += 1
        self.chain_ends += 1
        self.ends += 1
        resp = self._init_resp()
        resp.update({"action": "on_chain_end", "outputs": outputs["output"]})
        resp.update(self.get_custom_callback_meta())
        self.on_chain_end_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when chain errors."""
        self.step += 1
        self.errors += 1

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> None:
        """Run when tool starts running."""
        self.step += 1
        self.tool_starts += 1
        self.starts += 1
        resp = self._init_resp()
        resp.update({"action": "on_tool_start", "input_str": input_str})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())
        self.on_tool_start_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Run when tool ends running."""
        self.step += 1
        self.tool_ends += 1
        self.ends += 1
        resp = self._init_resp()
        resp.update({"action": "on_tool_end", "output": output})
        resp.update(self.get_custom_callback_meta())
        self.on_tool_end_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when tool errors."""
        self.step += 1
        self.errors += 1

    def on_text(self, text: str, **kwargs: Any) -> None:
        """Run when agent is ending."""
        self.step += 1
        self.text_ctr += 1
        resp = self._init_resp()
        resp.update({"action": "on_text", "text": text})
        resp.update(self.get_custom_callback_meta())
        self.on_text_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Run when agent ends running."""
        self.step += 1
        self.agent_ends += 1
        self.ends += 1
        resp = self._init_resp()
        resp.update(
            {
                "action": "on_agent_finish",
                "output": finish.return_values["output"],
                "log": finish.log,
            }
        )
        resp.update(self.get_custom_callback_meta())
        self.on_agent_finish_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run on agent action."""
        self.step += 1
        self.tool_starts += 1
        self.starts += 1
        resp = self._init_resp()
        resp.update(
            {
                "action": "on_agent_action",
                "tool": action.tool,
                "tool_input": action.tool_input,
                "log": action.log,
            }
        )
        resp.update(self.get_custom_callback_meta())
        self.on_agent_action_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.run.log(resp)

    def _create_session_analysis_df(self) -> Any:
        """Create a dataframe with all the information from the session."""
        pd = import_pandas()
        on_llm_start_records_df = pd.DataFrame(self.on_llm_start_records)
        on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records)

        llm_input_prompts_df = (
            on_llm_start_records_df[["step", "prompts", "name"]]
            .dropna(axis=1)
            .rename({"step": "prompt_step"}, axis=1)
        )
        complexity_metrics_columns = []
        visualizations_columns = []

        if self.complexity_metrics:
            complexity_metrics_columns = [
                "flesch_reading_ease",
                "flesch_kincaid_grade",
                "smog_index",
                "coleman_liau_index",
                "automated_readability_index",
                "dale_chall_readability_score",
                "difficult_words",
                "linsear_write_formula",
                "gunning_fog",
                "text_standard",
                "fernandez_huerta",
                "szigriszt_pazos",
                "gutierrez_polini",
                "crawford",
                "gulpease_index",
                "osman",
            ]

        if self.visualize:
            visualizations_columns = ["dependency_tree", "entities"]

        llm_outputs_df = (
            on_llm_end_records_df[
                [
                    "step",
                    "text",
                    "token_usage_total_tokens",
                    "token_usage_prompt_tokens",
                    "token_usage_completion_tokens",
                ]
                + complexity_metrics_columns
                + visualizations_columns
            ]
            .dropna(axis=1)
            .rename({"step": "output_step", "text": "output"}, axis=1)
        )
        session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1)
        session_analysis_df["chat_html"] = session_analysis_df[
            ["prompts", "output"]
        ].apply(
            lambda row: construct_html_from_prompt_and_generation(
                row["prompts"], row["output"]
            ),
            axis=1,
        )
        return session_analysis_df

    def flush_tracker(
        self,
        langchain_asset: Any = None,
        reset: bool = True,
        finish: bool = False,
        job_type: Optional[str] = None,
        project: Optional[str] = None,
        entity: Optional[str] = None,
        tags: Optional[Sequence] = None,
        group: Optional[str] = None,
        name: Optional[str] = None,
        notes: Optional[str] = None,
        visualize: Optional[bool] = None,
        complexity_metrics: Optional[bool] = None,
    ) -> None:
        """Flush the tracker and reset the session.

        Args:
            langchain_asset: The langchain asset to save.
            reset: Whether to reset the session.
            finish: Whether to finish the run.
            job_type: The job type.
            project: The project.
            entity: The entity.
            tags: The tags.
            group: The group.
            name: The name.
            notes: The notes.
            visualize: Whether to visualize.
            complexity_metrics: Whether to compute complexity metrics.

        Returns:
            None
        """
        pd = import_pandas()
        wandb = import_wandb()
        action_records_table = wandb.Table(dataframe=pd.DataFrame(self.action_records))
        session_analysis_table = wandb.Table(
            dataframe=self._create_session_analysis_df()
        )
        self.run.log(
            {
                "action_records": action_records_table,
                "session_analysis": session_analysis_table,
            }
        )

        if langchain_asset:
            langchain_asset_path = Path(self.temp_dir.name, "model.json")
            model_artifact = wandb.Artifact(name="model", type="model")
            model_artifact.add(action_records_table, name="action_records")
            model_artifact.add(session_analysis_table, name="session_analysis")
            try:
                langchain_asset.save(langchain_asset_path)
                model_artifact.add_file(str(langchain_asset_path))
                model_artifact.metadata = load_json_to_dict(langchain_asset_path)
            except ValueError:
                langchain_asset.save_agent(langchain_asset_path)
                model_artifact.add_file(str(langchain_asset_path))
                model_artifact.metadata = load_json_to_dict(langchain_asset_path)
            except NotImplementedError as e:
                print("Could not save model.")
                print(repr(e))
                pass
            self.run.log_artifact(model_artifact)

        if finish or reset:
            self.run.finish()
            self.temp_dir.cleanup()
            self.reset_callback_meta()
        if reset:
            self.__init__(  # type: ignore
                job_type=job_type if job_type else self.job_type,
                project=project if project else self.project,
                entity=entity if entity else self.entity,
                tags=tags if tags else self.tags,
                group=group if group else self.group,
                name=name if name else self.name,
                notes=notes if notes else self.notes,
                visualize=visualize if visualize else self.visualize,
                complexity_metrics=complexity_metrics
                if complexity_metrics
                else self.complexity_metrics,
            )

https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/wandb_callback.html
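Usage sketch (illustrative, not part of the module above): assumes a configured `wandb` login and an `OPENAI_API_KEY`. The handler accumulates records during calls; `flush_tracker` uploads them and, with `finish=True`, ends the W&B run.

from langchain.callbacks import StdOutCallbackHandler, WandbCallbackHandler
from langchain.llms import OpenAI

wandb_callback = WandbCallbackHandler(
    job_type="inference",
    project="langchain_callback_demo",
    name="llm",
    tags=["test"],
)
callbacks = [StdOutCallbackHandler(), wandb_callback]
llm = OpenAI(temperature=0, callbacks=callbacks)  # requires OPENAI_API_KEY

llm("Tell me a joke")
# Log the accumulated records/session analysis and close out the run.
wandb_callback.flush_tracker(llm, finish=True)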
Source code for langchain.callbacks.openai_info

"""Callback Handler that tracks OpenAI token usage and cost."""
from typing import Any, Dict, List

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult

MODEL_COST_PER_1K_TOKENS = {
    # GPT-4 input
    "gpt-4": 0.03,
    "gpt-4-0314": 0.03,
    "gpt-4-0613": 0.03,
    "gpt-4-32k": 0.06,
    "gpt-4-32k-0314": 0.06,
    "gpt-4-32k-0613": 0.06,
    # GPT-4 output
    "gpt-4-completion": 0.06,
    "gpt-4-0314-completion": 0.06,
    "gpt-4-0613-completion": 0.06,
    "gpt-4-32k-completion": 0.12,
    "gpt-4-32k-0314-completion": 0.12,
    "gpt-4-32k-0613-completion": 0.12,
    # GPT-3.5 input
    "gpt-3.5-turbo": 0.0015,
    "gpt-3.5-turbo-0301": 0.0015,
    "gpt-3.5-turbo-0613": 0.0015,
    "gpt-3.5-turbo-16k": 0.003,
    "gpt-3.5-turbo-16k-0613": 0.003,
    # GPT-3.5 output
    "gpt-3.5-turbo-completion": 0.002,
    "gpt-3.5-turbo-0301-completion": 0.002,
    "gpt-3.5-turbo-0613-completion": 0.002,
    "gpt-3.5-turbo-16k-completion": 0.004,
    "gpt-3.5-turbo-16k-0613-completion": 0.004,
    # Others
    "gpt-35-turbo": 0.002,  # Azure OpenAI version of ChatGPT
    "text-ada-001": 0.0004,
    "ada": 0.0004,
    "text-babbage-001": 0.0005,
    "babbage": 0.0005,
    "text-curie-001": 0.002,
    "curie": 0.002,
    "text-davinci-003": 0.02,
    "text-davinci-002": 0.02,
    "code-davinci-002": 0.02,
    "ada-finetuned": 0.0016,
    "babbage-finetuned": 0.0024,
    "curie-finetuned": 0.012,
    "davinci-finetuned": 0.12,
}


def standardize_model_name(
    model_name: str,
    is_completion: bool = False,
) -> str:
    """
    Standardize the model name to a format that can be used in the OpenAI API.

    Args:
        model_name: Model name to standardize.
        is_completion: Whether the model is used for completion or not.
            Defaults to False.

    Returns:
        Standardized model name.
    """
    model_name = model_name.lower()
    if "ft-" in model_name:
        return model_name.split(":")[0] + "-finetuned"
    elif is_completion and (
        model_name.startswith("gpt-4") or model_name.startswith("gpt-3.5")
    ):
        return model_name + "-completion"
    else:
        return model_name


def get_openai_token_cost_for_model(
    model_name: str, num_tokens: int, is_completion: bool = False
) -> float:
    """
    Get the cost in USD for a given model and number of tokens.

    Args:
        model_name: Name of the model
        num_tokens: Number of tokens.
        is_completion: Whether the model is used for completion or not.
            Defaults to False.

    Returns:
        Cost in USD.
    """
    model_name = standardize_model_name(model_name, is_completion=is_completion)
    if model_name not in MODEL_COST_PER_1K_TOKENS:
        raise ValueError(
            f"Unknown model: {model_name}. Please provide a valid OpenAI model name. "
            "Known models are: " + ", ".join(MODEL_COST_PER_1K_TOKENS.keys())
        )
    return MODEL_COST_PER_1K_TOKENS[model_name] * (num_tokens / 1000)


class OpenAICallbackHandler(BaseCallbackHandler):
    """Callback Handler that tracks OpenAI info."""

    total_tokens: int = 0
    prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0
    total_cost: float = 0.0

    def __repr__(self) -> str:
        return (
            f"Tokens Used: {self.total_tokens}\n"
            f"\tPrompt Tokens: {self.prompt_tokens}\n"
            f"\tCompletion Tokens: {self.completion_tokens}\n"
            f"Successful Requests: {self.successful_requests}\n"
            f"Total Cost (USD): ${self.total_cost}"
        )

    @property
    def always_verbose(self) -> bool:
        """Whether to call verbose callbacks even if verbose is False."""
        return True

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Print out the prompts."""
        pass

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Print out the token."""
        pass

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Collect token usage."""
        if response.llm_output is None:
            return None
        self.successful_requests += 1
        if "token_usage" not in response.llm_output:
            return None
        token_usage = response.llm_output["token_usage"]
        completion_tokens = token_usage.get("completion_tokens", 0)
        prompt_tokens = token_usage.get("prompt_tokens", 0)
        model_name = standardize_model_name(response.llm_output.get("model_name", ""))
        if model_name in MODEL_COST_PER_1K_TOKENS:
            completion_cost = get_openai_token_cost_for_model(
                model_name, completion_tokens, is_completion=True
            )
            prompt_cost = get_openai_token_cost_for_model(model_name, prompt_tokens)
            self.total_cost += prompt_cost + completion_cost
        self.total_tokens += token_usage.get("total_tokens", 0)
        self.prompt_tokens += prompt_tokens
        self.completion_tokens += completion_tokens

    def __copy__(self) -> "OpenAICallbackHandler":
        """Return a copy of the callback handler."""
        return self

    def __deepcopy__(self, memo: Any) -> "OpenAICallbackHandler":
        """Return a deep copy of the callback handler."""
        return self

https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/openai_info.html
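Usage sketch (illustrative, not part of the module above): this handler is normally attached through the `get_openai_callback` context manager exported by `langchain.callbacks`, which tracks usage for everything run inside the block. Assumes an `OPENAI_API_KEY`.

from langchain.callbacks import get_openai_callback
from langchain.llms import OpenAI

llm = OpenAI(model_name="text-davinci-003")  # requires OPENAI_API_KEY

with get_openai_callback() as cb:
    llm("Tell me a joke")
    # cb accumulates token counts and cost across all calls in this block.
    print(cb.total_tokens, cb.prompt_tokens, cb.completion_tokens)
    print(f"Total Cost (USD): ${cb.total_cost}")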
Source code for langchain.callbacks.whylabs_callback

from __future__ import annotations

import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, Generation, LLMResult
from langchain.utils import get_from_env

if TYPE_CHECKING:
    from whylogs.api.logger.logger import Logger

diagnostic_logger = logging.getLogger(__name__)


def import_langkit(
    sentiment: bool = False,
    toxicity: bool = False,
    themes: bool = False,
) -> Any:
    """Import the langkit python package and raise an error if it is not installed.

    Args:
        sentiment: Whether to import the langkit.sentiment module. Defaults to False.
        toxicity: Whether to import the langkit.toxicity module. Defaults to False.
        themes: Whether to import the langkit.themes module. Defaults to False.

    Returns:
        The imported langkit module.
    """
    try:
        import langkit  # noqa: F401
        import langkit.regexes  # noqa: F401
        import langkit.textstat  # noqa: F401

        if sentiment:
            import langkit.sentiment  # noqa: F401
        if toxicity:
            import langkit.toxicity  # noqa: F401
        if themes:
            import langkit.themes  # noqa: F401
    except ImportError:
        raise ImportError(
            "To use the whylabs callback manager you need to have the `langkit` python "
            "package installed. Please install it with `pip install langkit`."
        )
    return langkit


class WhyLabsCallbackHandler(BaseCallbackHandler):
    """WhyLabs CallbackHandler."""

    def __init__(self, logger: Logger):
        """Initiate the rolling logger."""
        super().__init__()
        self.logger = logger
        diagnostic_logger.info(
            "Initialized WhyLabs callback handler with configured whylogs Logger."
        )

    def _profile_generations(self, generations: List[Generation]) -> None:
        for gen in generations:
            self.logger.log({"response": gen.text})

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Pass the input prompts to the logger."""
        for prompt in prompts:
            self.logger.log({"prompt": prompt})

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Pass the generated response to the logger."""
        for generations in response.generations:
            self._profile_generations(generations)

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Do nothing."""
        pass

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Do nothing."""

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Do nothing."""

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        **kwargs: Any,
    ) -> None:
        """Do nothing."""

    def on_agent_action(
        self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
    ) -> Any:
        """Do nothing."""

    def on_tool_end(
        self,
        output: str,
        color: Optional[str] = None,
        observation_prefix: Optional[str] = None,
        llm_prefix: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Do nothing."""

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    def on_text(self, text: str, **kwargs: Any) -> None:
        """Do nothing."""

    def on_agent_finish(
        self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
    ) -> None:
        """Run on agent end."""
        pass

    def flush(self) -> None:
        self.logger._do_rollover()
        diagnostic_logger.info("Flushing WhyLabs logger, writing profile...")

    def close(self) -> None:
        self.logger.close()
        diagnostic_logger.info("Closing WhyLabs logger, see you next time!")

    def __enter__(self) -> WhyLabsCallbackHandler:
        return self

    def __exit__(
        self, exception_type: Any, exception_value: Any, traceback: Any
    ) -> None:
        self.close()

    @classmethod
    def from_params(
        cls,
        *,
        api_key: Optional[str] = None,
        org_id: Optional[str] = None,
        dataset_id: Optional[str] = None,
        sentiment: bool = False,
        toxicity: bool = False,
        themes: bool = False,
    ) -> WhyLabsCallbackHandler:
        """Instantiate whylogs Logger from params.

        Args:
            api_key (Optional[str]): WhyLabs API key. Optional because the preferred
                way to specify the API key is with environment variable
                WHYLABS_API_KEY.
            org_id (Optional[str]): WhyLabs organization id to write profiles to.
                If not set must be specified in environment variable
                WHYLABS_DEFAULT_ORG_ID.
            dataset_id (Optional[str]): The model or dataset this callback is
                gathering telemetry for. If not set must be specified in environment
                variable WHYLABS_DEFAULT_DATASET_ID.
            sentiment (bool): If True will initialize a model to perform sentiment
                analysis compound score. Defaults to False and will not gather this
                metric.
            toxicity (bool): If True will initialize a model to score toxicity.
                Defaults to False and will not gather this metric.
            themes (bool): If True will initialize a model to calculate distance to
                configured themes. Defaults to False and will not gather this metric.
        """
        # langkit library will import necessary whylogs libraries
        import_langkit(sentiment=sentiment, toxicity=toxicity, themes=themes)

        import whylogs as why
        from whylogs.api.writer.whylabs import WhyLabsWriter
        from whylogs.core.schema import DeclarativeSchema
        from whylogs.experimental.core.metrics.udf_metric import generate_udf_schema

        api_key = api_key or get_from_env("api_key", "WHYLABS_API_KEY")
        org_id = org_id or get_from_env("org_id", "WHYLABS_DEFAULT_ORG_ID")
        dataset_id = dataset_id or get_from_env(
            "dataset_id", "WHYLABS_DEFAULT_DATASET_ID"
        )
        whylabs_writer = WhyLabsWriter(
            api_key=api_key, org_id=org_id, dataset_id=dataset_id
        )

        langkit_schema = DeclarativeSchema(generate_udf_schema())
        whylabs_logger = why.logger(
            mode="rolling", interval=5, when="M", schema=langkit_schema
        )

        whylabs_logger.append_writer(writer=whylabs_writer)
        diagnostic_logger.info(
            "Started whylogs Logger with WhyLabsWriter and initialized LangKit. 📝"
        )
        return cls(whylabs_logger)

https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/whylabs_callback.html
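Usage sketch (illustrative, not part of the module above): `from_params()` reads credentials from the WHYLABS_* environment variables shown below, which are placeholders here; an `OPENAI_API_KEY` is also assumed.

import os

from langchain.callbacks import WhyLabsCallbackHandler
from langchain.llms import OpenAI

# Placeholder credentials; from_params() reads these from the environment.
os.environ["WHYLABS_API_KEY"] = "<your-api-key>"
os.environ["WHYLABS_DEFAULT_ORG_ID"] = "<your-org-id>"
os.environ["WHYLABS_DEFAULT_DATASET_ID"] = "<your-model-id>"

whylabs = WhyLabsCallbackHandler.from_params()
llm = OpenAI(temperature=0, callbacks=[whylabs])

llm("Tell me a joke")
whylabs.close()  # write any remaining profiles and shut down the logger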
Source code for langchain.callbacks.streaming_aiter

from __future__ import annotations

import asyncio
from typing import Any, AsyncIterator, Dict, List, Literal, Union, cast

from langchain.callbacks.base import AsyncCallbackHandler
from langchain.schema import LLMResult


# TODO If used by two LLM runs in parallel this won't work as expected
class AsyncIteratorCallbackHandler(AsyncCallbackHandler):
    """Callback handler that returns an async iterator."""

    queue: asyncio.Queue[str]
    done: asyncio.Event

    @property
    def always_verbose(self) -> bool:
        return True

    def __init__(self) -> None:
        self.queue = asyncio.Queue()
        self.done = asyncio.Event()

    async def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        # If two calls are made in a row, this resets the state
        self.done.clear()

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        self.queue.put_nowait(token)

    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        self.done.set()

    async def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        self.done.set()

    # TODO implement the other methods

    async def aiter(self) -> AsyncIterator[str]:
        while not self.queue.empty() or not self.done.is_set():
            # Wait for the next token in the queue,
            # but stop waiting if the done event is set
            done, other = await asyncio.wait(
                [
                    # NOTE: If you add other tasks here, update the code below,
                    # which assumes each set has exactly one task each
                    asyncio.ensure_future(self.queue.get()),
                    asyncio.ensure_future(self.done.wait()),
                ],
                return_when=asyncio.FIRST_COMPLETED,
            )

            # Cancel the other task
            if other:
                other.pop().cancel()

            # Extract the value of the first completed task
            token_or_done = cast(Union[str, Literal[True]], done.pop().result())

            # If the extracted value is the boolean True, the done event was set
            if token_or_done is True:
                break

            # Otherwise, the extracted value is a token, which we yield
            yield token_or_done

https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streaming_aiter.html
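Usage sketch (illustrative, not part of the module above): the generation is started as a background task while the iterator is consumed, since `aiter()` only ends once `on_llm_end` sets the done event. Assumes a streaming-capable model wrapper such as `ChatOpenAI` and an `OPENAI_API_KEY`.

import asyncio

from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
from langchain.chat_models import ChatOpenAI


async def main() -> None:
    handler = AsyncIteratorCallbackHandler()
    llm = ChatOpenAI(streaming=True, callbacks=[handler])  # requires OPENAI_API_KEY

    # Start the generation concurrently, then consume tokens as they arrive.
    task = asyncio.create_task(llm.apredict("Tell me a joke"))
    async for token in handler.aiter():
        print(token, end="", flush=True)
    await task


asyncio.run(main())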
Source code for langchain.callbacks.streaming_stdout_final_only

"""Callback Handler streams to stdout on new llm token."""
import sys
from typing import Any, Dict, List, Optional

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]


class FinalStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
    """Callback handler for streaming in agents.

    Only works with agents using LLMs that support streaming.
    Only the final output of the agent will be streamed.
    """

    def append_to_last_tokens(self, token: str) -> None:
        self.last_tokens.append(token)
        self.last_tokens_stripped.append(token.strip())
        if len(self.last_tokens) > len(self.answer_prefix_tokens):
            self.last_tokens.pop(0)
            self.last_tokens_stripped.pop(0)

    def check_if_answer_reached(self) -> bool:
        if self.strip_tokens:
            return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
        else:
            return self.last_tokens == self.answer_prefix_tokens

    def __init__(
        self,
        *,
        answer_prefix_tokens: Optional[List[str]] = None,
        strip_tokens: bool = True,
        stream_prefix: bool = False
    ) -> None:
        """Instantiate FinalStreamingStdOutCallbackHandler.

        Args:
            answer_prefix_tokens: Token sequence that prefixes the answer.
                Default is ["Final", "Answer", ":"]
            strip_tokens: Ignore white spaces and new lines when comparing
                answer_prefix_tokens to last tokens? (to determine if answer has
                been reached)
            stream_prefix: Should answer prefix itself also be streamed?
        """
        super().__init__()
        if answer_prefix_tokens is None:
            self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
        else:
            self.answer_prefix_tokens = answer_prefix_tokens
        if strip_tokens:
            self.answer_prefix_tokens_stripped = [
                token.strip() for token in self.answer_prefix_tokens
            ]
        else:
            self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
        self.last_tokens = [""] * len(self.answer_prefix_tokens)
        self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
        self.strip_tokens = strip_tokens
        self.stream_prefix = stream_prefix
        self.answer_reached = False

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts running."""
        self.answer_reached = False

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run on new LLM token. Only available when streaming is enabled."""
        # Remember the last n tokens, where n = len(answer_prefix_tokens)
        self.append_to_last_tokens(token)

        # Check if the last n tokens match the answer_prefix_tokens list ...
        if self.check_if_answer_reached():
            self.answer_reached = True
            if self.stream_prefix:
                for t in self.last_tokens:
                    sys.stdout.write(t)
                sys.stdout.flush()
            return

        # ... if yes, then print tokens from now on
        if self.answer_reached:
            sys.stdout.write(token)
            sys.stdout.flush()

https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streaming_stdout_final_only.html
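Usage sketch (illustrative, not part of the module above): attaching the handler to a streaming LLM inside an agent, so that only tokens after the "Final Answer:" prefix reach stdout. Assumes an `OPENAI_API_KEY` and the `llm-math` tool.

from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks.streaming_stdout_final_only import (
    FinalStreamingStdOutCallbackHandler,
)
from langchain.llms import OpenAI

llm = OpenAI(
    streaming=True,
    callbacks=[FinalStreamingStdOutCallbackHandler()],
    temperature=0,
)  # requires OPENAI_API_KEY
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)

# Only the tokens following "Final Answer:" are written to stdout.
agent.run("What is 2 to the power of 10?")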
Source code for langchain.callbacks.arize_callback

from datetime import datetime
from typing import Any, Dict, List, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import import_pandas
from langchain.schema import AgentAction, AgentFinish, LLMResult


class ArizeCallbackHandler(BaseCallbackHandler):
    """Callback Handler that logs to Arize."""

    def __init__(
        self,
        model_id: Optional[str] = None,
        model_version: Optional[str] = None,
        SPACE_KEY: Optional[str] = None,
        API_KEY: Optional[str] = None,
    ) -> None:
        """Initialize callback handler."""
        super().__init__()
        self.model_id = model_id
        self.model_version = model_version
        self.space_key = SPACE_KEY
        self.api_key = API_KEY
        self.prompt_records: List[str] = []
        self.response_records: List[str] = []
        self.prediction_ids: List[str] = []
        self.pred_timestamps: List[int] = []
        self.response_embeddings: List[float] = []
        self.prompt_embeddings: List[float] = []
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.total_tokens = 0
        self.step = 0

        from arize.pandas.embeddings import EmbeddingGenerator, UseCases
        from arize.pandas.logger import Client

        self.generator = EmbeddingGenerator.from_use_case(
            use_case=UseCases.NLP.SEQUENCE_CLASSIFICATION,
            model_name="distilbert-base-uncased",
            tokenizer_max_length=512,
            batch_size=256,
        )
        self.arize_client = Client(space_key=SPACE_KEY, api_key=API_KEY)
        if SPACE_KEY == "SPACE_KEY" or API_KEY == "API_KEY":
            raise ValueError("❌ CHANGE SPACE AND API KEYS")
        else:
            print("✅ Arize client setup done! Now you can start using Arize!")

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        for prompt in prompts:
            self.prompt_records.append(prompt.replace("\n", ""))

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Do nothing."""
        pass

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        pd = import_pandas()
        from arize.utils.types import (
            EmbeddingColumnNames,
            Environments,
            ModelTypes,
            Schema,
        )

        # Safely check that 'llm_output' and 'token_usage' exist before reading them
        if response.llm_output and "token_usage" in response.llm_output:
            self.prompt_tokens = response.llm_output["token_usage"].get(
                "prompt_tokens", 0
            )
            self.total_tokens = response.llm_output["token_usage"].get(
                "total_tokens", 0
            )
            self.completion_tokens = response.llm_output["token_usage"].get(
                "completion_tokens", 0
            )
        else:
            self.prompt_tokens = (
                self.total_tokens
            ) = self.completion_tokens = 0  # assign default value

        for generations in response.generations:
            for generation in generations:
                prompt = self.prompt_records[self.step]
                self.step = self.step + 1
                prompt_embedding = pd.Series(
                    self.generator.generate_embeddings(
                        text_col=pd.Series(prompt.replace("\n", " "))
                    ).reset_index(drop=True)
                )

                # Assigning text to response_text instead of response
                response_text = generation.text.replace("\n", " ")
                response_embedding = pd.Series(
                    self.generator.generate_embeddings(
                        text_col=pd.Series(generation.text.replace("\n", " "))
                    ).reset_index(drop=True)
                )
                pred_timestamp = datetime.now().timestamp()

                # Define the columns and data
                columns = [
                    "prediction_ts",
                    "response",
                    "prompt",
                    "response_vector",
                    "prompt_vector",
                    "prompt_token",
                    "completion_token",
                    "total_token",
                ]
                data = [
                    [
                        pred_timestamp,
                        response_text,
                        prompt,
                        response_embedding[0],
                        prompt_embedding[0],
                        self.prompt_tokens,
                        self.total_tokens,
                        self.completion_tokens,
                    ]
                ]

                # Create the DataFrame
                df = pd.DataFrame(data, columns=columns)

                # Declare prompt and response columns
                prompt_columns = EmbeddingColumnNames(
                    vector_column_name="prompt_vector", data_column_name="prompt"
                )
                response_columns = EmbeddingColumnNames(
                    vector_column_name="response_vector", data_column_name="response"
                )

                schema = Schema(
                    timestamp_column_name="prediction_ts",
                    tag_column_names=[
                        "prompt_token",
                        "completion_token",
                        "total_token",
                    ],
                    prompt_column_names=prompt_columns,
                    response_column_names=response_columns,
                )

                response_from_arize = self.arize_client.log(
                    dataframe=df,
                    schema=schema,
                    model_id=self.model_id,
                    model_version=self.model_version,
                    model_type=ModelTypes.GENERATIVE_LLM,
                    environment=Environments.PRODUCTION,
                )
                if response_from_arize.status_code == 200:
                    print("✅ Successfully logged data to Arize!")
                else:
                    print(f'❌ Logging failed "{response_from_arize.text}"')

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        pass

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Do nothing."""
        pass

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        **kwargs: Any,
    ) -> None:
        pass

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Do nothing."""
        pass

    def on_tool_end(
        self,
        output: str,
        observation_prefix: Optional[str] = None,
        llm_prefix: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        pass

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        pass

    def on_text(self, text: str, **kwargs: Any) -> None:
        pass

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        pass

https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/arize_callback.html
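Usage sketch (illustrative, not part of the module above): the space/API key values are placeholders, and an `OPENAI_API_KEY` plus the `arize` package are assumed. Each generation is embedded and logged to Arize from `on_llm_end`.

from langchain.callbacks import ArizeCallbackHandler, StdOutCallbackHandler
from langchain.llms import OpenAI

arize_callback = ArizeCallbackHandler(
    model_id="llm-langchain-demo",  # hypothetical model id
    model_version="1.0",
    SPACE_KEY="<your-space-key>",
    API_KEY="<your-api-key>",
)
llm = OpenAI(temperature=0, callbacks=[StdOutCallbackHandler(), arize_callback])

llm.generate(["Tell me an interesting fact about pandas."])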
Source code for langchain.callbacks.clearml_callback

import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
    BaseMetadataCallbackHandler,
    flatten_dict,
    hash_string,
    import_pandas,
    import_spacy,
    import_textstat,
    load_json,
)
from langchain.schema import AgentAction, AgentFinish, LLMResult


def import_clearml() -> Any:
    """Import the clearml python package and raise an error if it is not installed."""
    try:
        import clearml  # noqa: F401
    except ImportError:
        raise ImportError(
            "To use the clearml callback manager you need to have the `clearml` python "
            "package installed. Please install it with `pip install clearml`"
        )
    return clearml


class ClearMLCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
    """Callback Handler that logs to ClearML.

    Parameters:
        task_type (str): The type of clearml task such as "inference", "testing"
            or "qc"
        project_name (str): The clearml project name
        tags (list): Tags to add to the task
        task_name (str): Name of the clearml task
        visualize (bool): Whether to visualize the run.
        complexity_metrics (bool): Whether to log complexity metrics
        stream_logs (bool): Whether to stream callback actions to ClearML

    This handler utilizes the associated callback method, formats the input of each
    callback function with metadata regarding the state of the LLM run, and adds the
    response to the list of records for both the {method}_records and action. It then
    logs the response to the ClearML console.
    """

    def __init__(
        self,
        task_type: Optional[str] = "inference",
        project_name: Optional[str] = "langchain_callback_demo",
        tags: Optional[Sequence] = None,
        task_name: Optional[str] = None,
        visualize: bool = False,
        complexity_metrics: bool = False,
        stream_logs: bool = False,
    ) -> None:
        """Initialize callback handler."""
        clearml = import_clearml()
        spacy = import_spacy()
        super().__init__()
        self.task_type = task_type
        self.project_name = project_name
        self.tags = tags
        self.task_name = task_name
        self.visualize = visualize
        self.complexity_metrics = complexity_metrics
        self.stream_logs = stream_logs
        self.temp_dir = tempfile.TemporaryDirectory()

        # Check if ClearML task already exists (e.g. in pipeline)
        if clearml.Task.current_task():
            self.task = clearml.Task.current_task()
        else:
            self.task = clearml.Task.init(  # type: ignore
                task_type=self.task_type,
                project_name=self.project_name,
                tags=self.tags,
                task_name=self.task_name,
                output_uri=True,
            )
        self.logger = self.task.get_logger()
        warning = (
            "The clearml callback is currently in beta and is subject to change "
            "based on updates to `langchain`. Please report any issues to "
            "https://github.com/allegroai/clearml/issues with the tag `langchain`."
        )
        self.logger.report_text(warning, level=30, print_console=True)
        self.callback_columns: list = []
        self.action_records: list = []
        self.complexity_metrics = complexity_metrics
        self.visualize = visualize
        self.nlp = spacy.load("en_core_web_sm")

    def _init_resp(self) -> Dict:
        return {k: None for k in self.callback_columns}

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts."""
        self.step += 1
        self.llm_starts += 1
        self.starts += 1
        resp = self._init_resp()
        resp.update({"action": "on_llm_start"})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())
        for prompt in prompts:
            prompt_resp = deepcopy(resp)
            prompt_resp["prompts"] = prompt
            self.on_llm_start_records.append(prompt_resp)
            self.action_records.append(prompt_resp)
            if self.stream_logs:
                self.logger.report_text(prompt_resp)

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run when LLM generates a new token."""
        self.step += 1
        self.llm_streams += 1
        resp = self._init_resp()
        resp.update({"action": "on_llm_new_token", "token": token})
        resp.update(self.get_custom_callback_meta())
        self.on_llm_token_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.logger.report_text(resp)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running."""
        self.step += 1
        self.llm_ends += 1
        self.ends += 1
        resp = self._init_resp()
        resp.update({"action": "on_llm_end"})
        resp.update(flatten_dict(response.llm_output or {}))
        resp.update(self.get_custom_callback_meta())
        for generations in response.generations:
            for generation in generations:
                generation_resp = deepcopy(resp)
                generation_resp.update(flatten_dict(generation.dict()))
                generation_resp.update(self.analyze_text(generation.text))
                self.on_llm_end_records.append(generation_resp)
                self.action_records.append(generation_resp)
                if self.stream_logs:
                    self.logger.report_text(generation_resp)

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        self.step += 1
        self.errors += 1

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Run when chain starts running."""
        self.step += 1
        self.chain_starts += 1
        self.starts += 1
        resp = self._init_resp()
        resp.update({"action": "on_chain_start"})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())
        chain_input = inputs["input"]
        if isinstance(chain_input, str):
            input_resp = deepcopy(resp)
            input_resp["input"] = chain_input
            self.on_chain_start_records.append(input_resp)
            self.action_records.append(input_resp)
            if self.stream_logs:
                self.logger.report_text(input_resp)
        elif isinstance(chain_input, list):
            for inp in chain_input:
                input_resp = deepcopy(resp)
                input_resp.update(inp)
                self.on_chain_start_records.append(input_resp)
                self.action_records.append(input_resp)
                if self.stream_logs:
                    self.logger.report_text(input_resp)
        else:
            raise ValueError("Unexpected data format provided!")

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running."""
        self.step += 1
        self.chain_ends += 1
        self.ends += 1
        resp = self._init_resp()
        resp.update({"action": "on_chain_end", "outputs": outputs["output"]})
        resp.update(self.get_custom_callback_meta())
        self.on_chain_end_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.logger.report_text(resp)

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when chain errors."""
        self.step += 1
        self.errors += 1

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> None:
        """Run when tool starts running."""
        self.step += 1
        self.tool_starts += 1
        self.starts += 1
        resp = self._init_resp()
        resp.update({"action": "on_tool_start", "input_str": input_str})
        resp.update(flatten_dict(serialized))
        resp.update(self.get_custom_callback_meta())
        self.on_tool_start_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.logger.report_text(resp)

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Run when tool ends running."""
        self.step += 1
        self.tool_ends += 1
        self.ends += 1
        resp = self._init_resp()
        resp.update({"action": "on_tool_end", "output": output})
        resp.update(self.get_custom_callback_meta())
        self.on_tool_end_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.logger.report_text(resp)

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when tool errors."""
        self.step += 1
        self.errors += 1

    def on_text(self, text: str, **kwargs: Any) -> None:
        """Run when agent is ending."""
        self.step += 1
        self.text_ctr += 1
        resp = self._init_resp()
        resp.update({"action": "on_text", "text": text})
        resp.update(self.get_custom_callback_meta())
        self.on_text_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.logger.report_text(resp)

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Run when agent ends running."""
        self.step += 1
        self.agent_ends += 1
        self.ends += 1
        resp = self._init_resp()
        resp.update(
            {
                "action": "on_agent_finish",
                "output": finish.return_values["output"],
                "log": finish.log,
            }
        )
        resp.update(self.get_custom_callback_meta())
        self.on_agent_finish_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.logger.report_text(resp)

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run on agent action."""
        self.step += 1
        self.tool_starts += 1
        self.starts += 1
        resp = self._init_resp()
        resp.update(
            {
                "action": "on_agent_action",
                "tool": action.tool,
                "tool_input": action.tool_input,
                "log": action.log,
            }
        )
        resp.update(self.get_custom_callback_meta())
        self.on_agent_action_records.append(resp)
        self.action_records.append(resp)
        if self.stream_logs:
            self.logger.report_text(resp)

    def analyze_text(self, text: str) -> dict:
        """Analyze text using textstat and spacy.

        Parameters:
            text (str): The text to analyze.

        Returns:
            (dict): A dictionary containing the complexity metrics.
        """
        resp = {}
        textstat = import_textstat()
        spacy = import_spacy()
        if self.complexity_metrics:
            text_complexity_metrics = {
                "flesch_reading_ease": textstat.flesch_reading_ease(text),
                "flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
                "smog_index": textstat.smog_index(text),
                "coleman_liau_index": textstat.coleman_liau_index(text),
                "automated_readability_index": textstat.automated_readability_index(
                    text
                ),
                "dale_chall_readability_score": textstat.dale_chall_readability_score(
                    text
                ),
                "difficult_words": textstat.difficult_words(text),
                "linsear_write_formula": textstat.linsear_write_formula(text),
                "gunning_fog": textstat.gunning_fog(text),
                "text_standard": textstat.text_standard(text),
                "fernandez_huerta": textstat.fernandez_huerta(text),
                "szigriszt_pazos": textstat.szigriszt_pazos(text),
                "gutierrez_polini": textstat.gutierrez_polini(text),
                "crawford": textstat.crawford(text),
                "gulpease_index": textstat.gulpease_index(text),
                "osman": textstat.osman(text),
            }
            resp.update(text_complexity_metrics)
        if self.visualize and self.nlp and self.temp_dir.name is not None:
            doc = self.nlp(text)

            dep_out = spacy.displacy.render(  # type: ignore
                doc, style="dep", jupyter=False, page=True
            )
            dep_output_path = Path(
                self.temp_dir.name, hash_string(f"dep-{text}") + ".html"
            )
            dep_output_path.open("w", encoding="utf-8").write(dep_out)

            ent_out = spacy.displacy.render(  # type: ignore
                doc, style="ent", jupyter=False, page=True
            )
            ent_output_path = Path(
                self.temp_dir.name, hash_string(f"ent-{text}") + ".html"
            )
            ent_output_path.open("w", encoding="utf-8").write(ent_out)

            self.logger.report_media(
                "Dependencies Plot", text, local_path=dep_output_path
            )
            self.logger.report_media("Entities Plot", text, local_path=ent_output_path)
        return resp

    def _create_session_analysis_df(self) -> Any:
        """Create a dataframe with all the information from the session."""
        pd = import_pandas()
        on_llm_start_records_df = pd.DataFrame(self.on_llm_start_records)
        on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records)

        llm_input_prompts_df = (
            on_llm_start_records_df[["step", "prompts", "name"]]
            .dropna(axis=1)
            .rename({"step": "prompt_step"}, axis=1)
        )
        complexity_metrics_columns = []
        visualizations_columns: List = []

        if self.complexity_metrics:
            complexity_metrics_columns = [
                "flesch_reading_ease",
                "flesch_kincaid_grade",
                "smog_index",
                "coleman_liau_index",
                "automated_readability_index",
                "dale_chall_readability_score",
                "difficult_words",
                "linsear_write_formula",
                "gunning_fog",
                "text_standard",
                "fernandez_huerta",
                "szigriszt_pazos",
                "gutierrez_polini",
                "crawford",
                "gulpease_index",
                "osman",
            ]

        llm_outputs_df = (
            on_llm_end_records_df[
                [
                    "step",
                    "text",
                    "token_usage_total_tokens",
                    "token_usage_prompt_tokens",
                    "token_usage_completion_tokens",
                ]
                + complexity_metrics_columns
                + visualizations_columns
            ]
            .dropna(axis=1)
            .rename({"step": "output_step", "text": "output"}, axis=1)
        )
        session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1)
        # session_analysis_df["chat_html"] = session_analysis_df[
        #     ["prompts", "output"]
        # ].apply(
        #     lambda row: construct_html_from_prompt_and_generation(
        #         row["prompts"], row["output"]
        #     ),
        #     axis=1,
        # )
        return session_analysis_df

    def flush_tracker(
        self,
        name: Optional[str] = None,
        langchain_asset: Any = None,
        finish: bool = False,
    ) -> None:

https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html
9abc2199714e-10
finish: bool = False, ) -> None: """Flush the tracker and set up the session. Everything after this will be a new table. Args: name: Name of the performed session so far, so it is identifiable langchain_asset: The langchain asset to save. finish: Whether to finish the run. Returns: None """ pd = import_pandas() clearml = import_clearml() # Log the action records self.logger.report_table( "Action Records", name, table_plot=pd.DataFrame(self.action_records) ) # Session analysis session_analysis_df = self._create_session_analysis_df() self.logger.report_table( "Session Analysis", name, table_plot=session_analysis_df ) if self.stream_logs: self.logger.report_text( { "action_records": pd.DataFrame(self.action_records), "session_analysis": session_analysis_df, } ) if langchain_asset: langchain_asset_path = Path(self.temp_dir.name, "model.json") try: langchain_asset.save(langchain_asset_path) # Create output model and connect it to the task output_model = clearml.OutputModel( task=self.task, config_text=load_json(langchain_asset_path) ) output_model.update_weights( weights_filename=str(langchain_asset_path), auto_delete_file=False, target_filename=name, ) except ValueError: langchain_asset.save_agent(langchain_asset_path) output_model = clearml.OutputModel( task=self.task, config_text=load_json(langchain_asset_path) ) output_model.update_weights(
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html
9abc2199714e-11
) output_model.update_weights( weights_filename=str(langchain_asset_path), auto_delete_file=False, target_filename=name, ) except NotImplementedError as e: print("Could not save model.") print(repr(e)) pass # Cleanup after adding everything to ClearML self.task.flush(wait_for_uploads=True) self.temp_dir.cleanup() self.temp_dir = tempfile.TemporaryDirectory() self.reset_callback_meta() if finish: self.task.close()
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/clearml_callback.html
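A minimal usage sketch for the ClearML handler above. The constructor arguments shown here (task_type, project_name, complexity_metrics, stream_logs) are assumptions inferred from the attributes this handler references; only flush_tracker's signature appears verbatim above, and the export name ClearMLCallbackHandler, the OpenAI LLM, and the prompt are illustrative.

from langchain.callbacks import ClearMLCallbackHandler
from langchain.llms import OpenAI

# Assumed constructor parameters, implied by the handler attributes used above.
clearml_callback = ClearMLCallbackHandler(
    task_type="inference",
    project_name="langchain_demo",
    complexity_metrics=True,  # enables the textstat metrics computed in analyze_text
    stream_logs=True,         # mirrors the `if self.stream_logs` branches above
)
llm = OpenAI(temperature=0, callbacks=[clearml_callback])
llm("Tell me a joke")
# Logs the "Action Records" and "Session Analysis" tables, then resets the session.
clearml_callback.flush_tracker(name="joke_run", langchain_asset=llm, finish=True)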
2537e6d04782-0
Source code for langchain.callbacks.mlflow_callback import random import string import tempfile import traceback from copy import deepcopy from pathlib import Path from typing import Any, Dict, List, Optional, Union from langchain.callbacks.base import BaseCallbackHandler from langchain.callbacks.utils import ( BaseMetadataCallbackHandler, flatten_dict, hash_string, import_pandas, import_spacy, import_textstat, ) from langchain.schema import AgentAction, AgentFinish, LLMResult from langchain.utils import get_from_dict_or_env def import_mlflow() -> Any: """Import the mlflow python package and raise an error if it is not installed.""" try: import mlflow except ImportError: raise ImportError( "To use the mlflow callback manager you need to have the `mlflow` python " "package installed. Please install it with `pip install mlflow>=2.3.0`" ) return mlflow def analyze_text( text: str, nlp: Any = None, ) -> dict: """Analyze text using textstat and spacy. Parameters: text (str): The text to analyze. nlp (spacy.lang): The spacy language model to use for visualization. Returns: (dict): A dictionary containing the complexity metrics and visualization files serialized to HTML string. """ resp: Dict[str, Any] = {} textstat = import_textstat() spacy = import_spacy() text_complexity_metrics = { "flesch_reading_ease": textstat.flesch_reading_ease(text),
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-1
"flesch_reading_ease": textstat.flesch_reading_ease(text), "flesch_kincaid_grade": textstat.flesch_kincaid_grade(text), "smog_index": textstat.smog_index(text), "coleman_liau_index": textstat.coleman_liau_index(text), "automated_readability_index": textstat.automated_readability_index(text), "dale_chall_readability_score": textstat.dale_chall_readability_score(text), "difficult_words": textstat.difficult_words(text), "linsear_write_formula": textstat.linsear_write_formula(text), "gunning_fog": textstat.gunning_fog(text), # "text_standard": textstat.text_standard(text), "fernandez_huerta": textstat.fernandez_huerta(text), "szigriszt_pazos": textstat.szigriszt_pazos(text), "gutierrez_polini": textstat.gutierrez_polini(text), "crawford": textstat.crawford(text), "gulpease_index": textstat.gulpease_index(text), "osman": textstat.osman(text), } resp.update({"text_complexity_metrics": text_complexity_metrics}) resp.update(text_complexity_metrics) if nlp is not None: doc = nlp(text) dep_out = spacy.displacy.render( # type: ignore doc, style="dep", jupyter=False, page=True ) ent_out = spacy.displacy.render( # type: ignore doc, style="ent", jupyter=False, page=True )
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-2
doc, style="ent", jupyter=False, page=True ) text_visualizations = { "dependency_tree": dep_out, "entities": ent_out, } resp.update(text_visualizations) return resp def construct_html_from_prompt_and_generation(prompt: str, generation: str) -> Any: """Construct an html element from a prompt and a generation. Parameters: prompt (str): The prompt. generation (str): The generation. Returns: (str): The html string.""" formatted_prompt = prompt.replace("\n", "<br>") formatted_generation = generation.replace("\n", "<br>") return f""" <p style="color:black;">{formatted_prompt}:</p> <blockquote> <p style="color:green;"> {formatted_generation} </p> </blockquote> """ class MlflowLogger: """Callback Handler that logs metrics and artifacts to mlflow server. Parameters: name (str): Name of the run. experiment (str): Name of the experiment. tags (dict): Tags to be attached for the run. tracking_uri (str): MLflow tracking server uri. This handler implements the helper functions to initialize, log metrics and artifacts to the mlflow server. """ def __init__(self, **kwargs: Any): self.mlflow = import_mlflow() tracking_uri = get_from_dict_or_env( kwargs, "tracking_uri", "MLFLOW_TRACKING_URI", "" ) self.mlflow.set_tracking_uri(tracking_uri) # User can set other env variables described here
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-3
# User can set other env variables described here # > https://www.mlflow.org/docs/latest/tracking.html#logging-to-a-tracking-server experiment_name = get_from_dict_or_env( kwargs, "experiment_name", "MLFLOW_EXPERIMENT_NAME" ) self.mlf_exp = self.mlflow.get_experiment_by_name(experiment_name) if self.mlf_exp is not None: self.mlf_expid = self.mlf_exp.experiment_id else: self.mlf_expid = self.mlflow.create_experiment(experiment_name) self.start_run(kwargs["run_name"], kwargs["run_tags"]) def start_run(self, name: str, tags: Dict[str, str]) -> None: """To start a new run; auto-generates a random suffix for the name when it ends with "-%".""" if name.endswith("-%"): rname = "".join(random.choices(string.ascii_uppercase + string.digits, k=7)) name = name.replace("%", rname) self.run = self.mlflow.MlflowClient().create_run( self.mlf_expid, run_name=name, tags=tags ) def finish_run(self) -> None: """To finish the run.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.end_run() def metric(self, key: str, value: float) -> None: """To log metric to mlflow server.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.log_metric(key, value) def metrics(
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-4
): self.mlflow.log_metric(key, value) def metrics( self, data: Union[Dict[str, float], Dict[str, int]], step: Optional[int] = 0 ) -> None: """To log all metrics in the input dict.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.log_metrics(data, step=step) def jsonf(self, data: Dict[str, Any], filename: str) -> None: """To log the input data as json file artifact.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.log_dict(data, f"{filename}.json") def table(self, name: str, dataframe) -> None: # type: ignore """To log the input pandas dataframe as an HTML table""" self.html(dataframe.to_html(), f"table_{name}") def html(self, html: str, filename: str) -> None: """To log the input html string as html file artifact.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.log_text(html, f"{filename}.html") def text(self, text: str, filename: str) -> None: """To log the input text as text file artifact.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.log_text(text, f"{filename}.txt") def artifact(self, path: str) -> None:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-5
def artifact(self, path: str) -> None: """To upload the file from the given path as an artifact.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.log_artifact(path) def langchain_artifact(self, chain: Any) -> None: """To log the input chain as an mlflow langchain model artifact.""" with self.mlflow.start_run( run_id=self.run.info.run_id, experiment_id=self.mlf_expid ): self.mlflow.langchain.log_model(chain, "langchain-model") [docs]class MlflowCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): """Callback Handler that logs metrics and artifacts to mlflow server. Parameters: name (str): Name of the run. experiment (str): Name of the experiment. tags (dict): Tags to be attached for the run. tracking_uri (str): MLflow tracking server uri. This handler utilizes the associated callback method, formats the input of each callback function with metadata regarding the state of the LLM run, and adds the response to the list of records for both the {method}_records and the action records. It then logs the response to the mlflow server. """ def __init__( self, name: Optional[str] = "langchainrun-%", experiment: Optional[str] = "langchain", tags: Optional[Dict] = {}, tracking_uri: Optional[str] = None, ) -> None: """Initialize callback handler.""" import_pandas() import_textstat() import_mlflow() spacy = import_spacy() super().__init__() self.name = name self.experiment = experiment
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-6
super().__init__() self.name = name self.experiment = experiment self.tags = tags self.tracking_uri = tracking_uri self.temp_dir = tempfile.TemporaryDirectory() self.mlflg = MlflowLogger( tracking_uri=self.tracking_uri, experiment_name=self.experiment, run_name=self.name, run_tags=self.tags, ) self.action_records: list = [] self.nlp = spacy.load("en_core_web_sm") self.metrics = { "step": 0, "starts": 0, "ends": 0, "errors": 0, "text_ctr": 0, "chain_starts": 0, "chain_ends": 0, "llm_starts": 0, "llm_ends": 0, "llm_streams": 0, "tool_starts": 0, "tool_ends": 0, "agent_ends": 0, } self.records: Dict[str, Any] = { "on_llm_start_records": [], "on_llm_token_records": [], "on_llm_end_records": [], "on_chain_start_records": [], "on_chain_end_records": [], "on_tool_start_records": [], "on_tool_end_records": [], "on_text_records": [], "on_agent_finish_records": [], "on_agent_action_records": [], "action_records": [], } def _reset(self) -> None: for k, v in self.metrics.items(): self.metrics[k] = 0 for k, v in self.records.items():
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-7
self.metrics[k] = 0 for k, v in self.records.items(): self.records[k] = [] [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Run when LLM starts.""" self.metrics["step"] += 1 self.metrics["llm_starts"] += 1 self.metrics["starts"] += 1 llm_starts = self.metrics["llm_starts"] resp: Dict[str, Any] = {} resp.update({"action": "on_llm_start"}) resp.update(flatten_dict(serialized)) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) for idx, prompt in enumerate(prompts): prompt_resp = deepcopy(resp) prompt_resp["prompt"] = prompt self.records["on_llm_start_records"].append(prompt_resp) self.records["action_records"].append(prompt_resp) self.mlflg.jsonf(prompt_resp, f"llm_start_{llm_starts}_prompt_{idx}") [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Run when LLM generates a new token.""" self.metrics["step"] += 1 self.metrics["llm_streams"] += 1 llm_streams = self.metrics["llm_streams"] resp: Dict[str, Any] = {} resp.update({"action": "on_llm_new_token", "token": token}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_llm_token_records"].append(resp)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-8
self.records["on_llm_token_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"llm_new_tokens_{llm_streams}") [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" self.metrics["step"] += 1 self.metrics["llm_ends"] += 1 self.metrics["ends"] += 1 llm_ends = self.metrics["llm_ends"] resp: Dict[str, Any] = {} resp.update({"action": "on_llm_end"}) resp.update(flatten_dict(response.llm_output or {})) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) for generations in response.generations: for idx, generation in enumerate(generations): generation_resp = deepcopy(resp) generation_resp.update(flatten_dict(generation.dict())) generation_resp.update( analyze_text( generation.text, nlp=self.nlp, ) ) complexity_metrics: Dict[str, float] = generation_resp.pop("text_complexity_metrics") # type: ignore # noqa: E501 self.mlflg.metrics( complexity_metrics, step=self.metrics["step"], ) self.records["on_llm_end_records"].append(generation_resp) self.records["action_records"].append(generation_resp) self.mlflg.jsonf(resp, f"llm_end_{llm_ends}_generation_{idx}") dependency_tree = generation_resp["dependency_tree"] entities = generation_resp["entities"]
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-9
dependency_tree = generation_resp["dependency_tree"] entities = generation_resp["entities"] self.mlflg.html(dependency_tree, "dep-" + hash_string(generation.text)) self.mlflg.html(entities, "ent-" + hash_string(generation.text)) [docs] def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when LLM errors.""" self.metrics["step"] += 1 self.metrics["errors"] += 1 [docs] def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Run when chain starts running.""" self.metrics["step"] += 1 self.metrics["chain_starts"] += 1 self.metrics["starts"] += 1 chain_starts = self.metrics["chain_starts"] resp: Dict[str, Any] = {} resp.update({"action": "on_chain_start"}) resp.update(flatten_dict(serialized)) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) chain_input = ",".join([f"{k}={v}" for k, v in inputs.items()]) input_resp = deepcopy(resp) input_resp["inputs"] = chain_input self.records["on_chain_start_records"].append(input_resp) self.records["action_records"].append(input_resp) self.mlflg.jsonf(input_resp, f"chain_start_{chain_starts}") [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running.""" self.metrics["step"] += 1
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-10
"""Run when chain ends running.""" self.metrics["step"] += 1 self.metrics["chain_ends"] += 1 self.metrics["ends"] += 1 chain_ends = self.metrics["chain_ends"] resp: Dict[str, Any] = {} chain_output = ",".join([f"{k}={v}" for k, v in outputs.items()]) resp.update({"action": "on_chain_end", "outputs": chain_output}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_chain_end_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"chain_end_{chain_ends}") [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when chain errors.""" self.metrics["step"] += 1 self.metrics["errors"] += 1 [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: """Run when tool starts running.""" self.metrics["step"] += 1 self.metrics["tool_starts"] += 1 self.metrics["starts"] += 1 tool_starts = self.metrics["tool_starts"] resp: Dict[str, Any] = {} resp.update({"action": "on_tool_start", "input_str": input_str}) resp.update(flatten_dict(serialized)) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_tool_start_records"].append(resp)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-11
self.records["on_tool_start_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"tool_start_{tool_starts}") [docs] def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running.""" self.metrics["step"] += 1 self.metrics["tool_ends"] += 1 self.metrics["ends"] += 1 tool_ends = self.metrics["tool_ends"] resp: Dict[str, Any] = {} resp.update({"action": "on_tool_end", "output": output}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_tool_end_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"tool_end_{tool_ends}") [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when tool errors.""" self.metrics["step"] += 1 self.metrics["errors"] += 1 [docs] def on_text(self, text: str, **kwargs: Any) -> None: """ Run when agent is ending. """ self.metrics["step"] += 1 self.metrics["text_ctr"] += 1 text_ctr = self.metrics["text_ctr"] resp: Dict[str, Any] = {} resp.update({"action": "on_text", "text": text}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_text_records"].append(resp)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-12
self.records["on_text_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"on_text_{text_ctr}") [docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Run when agent ends running.""" self.metrics["step"] += 1 self.metrics["agent_ends"] += 1 self.metrics["ends"] += 1 agent_ends = self.metrics["agent_ends"] resp: Dict[str, Any] = {} resp.update( { "action": "on_agent_finish", "output": finish.return_values["output"], "log": finish.log, } ) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_agent_finish_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"agent_finish_{agent_ends}") [docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run on agent action.""" self.metrics["step"] += 1 self.metrics["tool_starts"] += 1 self.metrics["starts"] += 1 tool_starts = self.metrics["tool_starts"] resp: Dict[str, Any] = {} resp.update( { "action": "on_agent_action", "tool": action.tool, "tool_input": action.tool_input, "log": action.log, } ) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics["step"])
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-13
self.mlflg.metrics(self.metrics, step=self.metrics["step"]) self.records["on_agent_action_records"].append(resp) self.records["action_records"].append(resp) self.mlflg.jsonf(resp, f"agent_action_{tool_starts}") def _create_session_analysis_df(self) -> Any: """Create a dataframe with all the information from the session.""" pd = import_pandas() on_llm_start_records_df = pd.DataFrame(self.records["on_llm_start_records"]) on_llm_end_records_df = pd.DataFrame(self.records["on_llm_end_records"]) llm_input_prompts_df = ( on_llm_start_records_df[["step", "prompt", "name"]] .dropna(axis=1) .rename({"step": "prompt_step"}, axis=1) ) complexity_metrics_columns = [] visualizations_columns = [] complexity_metrics_columns = [ "flesch_reading_ease", "flesch_kincaid_grade", "smog_index", "coleman_liau_index", "automated_readability_index", "dale_chall_readability_score", "difficult_words", "linsear_write_formula", "gunning_fog", # "text_standard", "fernandez_huerta", "szigriszt_pazos", "gutierrez_polini", "crawford", "gulpease_index", "osman", ] visualizations_columns = ["dependency_tree", "entities"] llm_outputs_df = ( on_llm_end_records_df[ [ "step", "text",
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-14
[ "step", "text", "token_usage_total_tokens", "token_usage_prompt_tokens", "token_usage_completion_tokens", ] + complexity_metrics_columns + visualizations_columns ] .dropna(axis=1) .rename({"step": "output_step", "text": "output"}, axis=1) ) session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1) session_analysis_df["chat_html"] = session_analysis_df[ ["prompt", "output"] ].apply( lambda row: construct_html_from_prompt_and_generation( row["prompt"], row["output"] ), axis=1, ) return session_analysis_df [docs] def flush_tracker(self, langchain_asset: Any = None, finish: bool = False) -> None: pd = import_pandas() self.mlflg.table("action_records", pd.DataFrame(self.records["action_records"])) session_analysis_df = self._create_session_analysis_df() chat_html = session_analysis_df.pop("chat_html") chat_html = chat_html.replace("\n", "", regex=True) self.mlflg.table("session_analysis", pd.DataFrame(session_analysis_df)) self.mlflg.html("".join(chat_html.tolist()), "chat_html") if langchain_asset: # To avoid circular import error # mlflow only supports LLMChain asset if "langchain.chains.llm.LLMChain" in str(type(langchain_asset)): self.mlflg.langchain_artifact(langchain_asset) else: langchain_asset_path = str(Path(self.temp_dir.name, "model.json")) try:
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
2537e6d04782-15
try: langchain_asset.save(langchain_asset_path) self.mlflg.artifact(langchain_asset_path) except ValueError: try: langchain_asset.save_agent(langchain_asset_path) self.mlflg.artifact(langchain_asset_path) except AttributeError: print("Could not save model.") traceback.print_exc() pass except NotImplementedError: print("Could not save model.") traceback.print_exc() pass except NotImplementedError: print("Could not save model.") traceback.print_exc() pass if finish: self.mlflg.finish_run() self._reset()
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/mlflow_callback.html
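A minimal usage sketch for the handler above. The constructor arguments match the __init__ signature shown (name, experiment, tags, tracking_uri) and flush_tracker's signature; the OpenAI LLM, the tag value, and the local tracking URI are illustrative assumptions.

from langchain.callbacks import MlflowCallbackHandler
from langchain.llms import OpenAI

# The "-%" suffix triggers MlflowLogger.start_run's random 7-character run-name suffix.
mlflow_callback = MlflowCallbackHandler(
    name="langchainrun-%",
    experiment="langchain",
    tags={"team": "demo"},                 # illustrative tag
    tracking_uri="http://localhost:5000",  # assumption; MLFLOW_TRACKING_URI also works
)
llm = OpenAI(temperature=0, callbacks=[mlflow_callback])
llm("Tell me a joke")
# Logs the action-records and session-analysis tables plus the chat HTML, then resets.
mlflow_callback.flush_tracker(llm, finish=True)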
7c11b81bf6c5-0
Source code for langchain.callbacks.infino_callback import time from typing import Any, Dict, List, Optional, Union from langchain.callbacks.base import BaseCallbackHandler from langchain.schema import AgentAction, AgentFinish, LLMResult def import_infino() -> Any: """Import the infinopy client and raise an error if it is not installed.""" try: from infinopy import InfinoClient except ImportError: raise ImportError( "To use the Infino callback manager you need to have the" " `infinopy` python package installed. " "Please install it with `pip install infinopy`" ) return InfinoClient() [docs]class InfinoCallbackHandler(BaseCallbackHandler): """Callback Handler that logs to Infino.""" def __init__( self, model_id: Optional[str] = None, model_version: Optional[str] = None, verbose: bool = False, ) -> None: # Set Infino client self.client = import_infino() self.model_id = model_id self.model_version = model_version self.verbose = verbose def _send_to_infino( self, key: str, value: Any, is_ts: bool = True, ) -> None: """Send the key-value to Infino. Parameters: key (str): the key to send to Infino. value (Any): the value to send to Infino. is_ts (bool): if True, the value is part of a time series, else it is sent as a log message. """ payload = { "date": int(time.time()), key: value, "labels": { "model_id": self.model_id,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/infino_callback.html
7c11b81bf6c5-1
"labels": { "model_id": self.model_id, "model_version": self.model_version, }, } if self.verbose: print(f"Tracking {key} with Infino: {payload}") # Append to Infino time series only if is_ts is True, otherwise # append to Infino log. if is_ts: self.client.append_ts(payload) else: self.client.append_log(payload) [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any, ) -> None: """Log the prompts to Infino, and set start time and error flag.""" for prompt in prompts: self._send_to_infino("prompt", prompt, is_ts=False) # Set the error flag to indicate no error (this will get overridden # in on_llm_error if an error occurs). self.error = 0 # Set the start time (so that we can calculate the request # duration in on_llm_end). self.start_time = time.time() [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Do nothing when a new token is generated.""" pass [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Log the latency, error, token usage, and response to Infino.""" # Calculate and track the request latency. self.end_time = time.time() duration = self.end_time - self.start_time self._send_to_infino("latency", duration) # Track success or error flag.
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/infino_callback.html
7c11b81bf6c5-2
# Track success or error flag. self._send_to_infino("error", self.error) # Track token usage. if (response.llm_output is not None) and isinstance(response.llm_output, Dict): token_usage = response.llm_output["token_usage"] if token_usage is not None: prompt_tokens = token_usage["prompt_tokens"] total_tokens = token_usage["total_tokens"] completion_tokens = token_usage["completion_tokens"] self._send_to_infino("prompt_tokens", prompt_tokens) self._send_to_infino("total_tokens", total_tokens) self._send_to_infino("completion_tokens", completion_tokens) # Track prompt response. for generations in response.generations: for generation in generations: self._send_to_infino("prompt_response", generation.text, is_ts=False) [docs] def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Set the error flag.""" self.error = 1 [docs] def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Do nothing when LLM chain starts.""" pass [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Do nothing when LLM chain ends.""" pass [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Do nothing when LLM chain errors. (TODO: log the error.)""" pass [docs] def on_tool_start( self, serialized: Dict[str, Any],
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/infino_callback.html
7c11b81bf6c5-3
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any, ) -> None: """Do nothing when tool starts.""" pass [docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Do nothing when agent takes a specific action.""" pass [docs] def on_tool_end( self, output: str, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: """Do nothing when tool ends.""" pass [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Do nothing when tool outputs an error.""" pass [docs] def on_text(self, text: str, **kwargs: Any) -> None: """Do nothing.""" pass [docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Do nothing.""" pass
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/infino_callback.html
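A minimal usage sketch for the handler above, using the model_id, model_version, and verbose parameters from its __init__; the OpenAI LLM and the model identifiers are illustrative, and a reachable Infino server is assumed.

from langchain.callbacks import InfinoCallbackHandler
from langchain.llms import OpenAI

# model_id and model_version become the "labels" attached to every Infino payload.
infino_callback = InfinoCallbackHandler(
    model_id="text-davinci-003", model_version="0.1", verbose=True
)
llm = OpenAI(temperature=0, callbacks=[infino_callback])
# One call logs the prompt, latency, error flag, token usage, and the response text.
llm("Tell me a joke")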
9dc06427bf9c-0
Source code for langchain.callbacks.comet_ml_callback import tempfile from copy import deepcopy from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Sequence, Union import langchain from langchain.callbacks.base import BaseCallbackHandler from langchain.callbacks.utils import ( BaseMetadataCallbackHandler, flatten_dict, import_pandas, import_spacy, import_textstat, ) from langchain.schema import AgentAction, AgentFinish, Generation, LLMResult LANGCHAIN_MODEL_NAME = "langchain-model" def import_comet_ml() -> Any: try: import comet_ml # noqa: F401 except ImportError: raise ImportError( "To use the comet_ml callback manager you need to have the " "`comet_ml` python package installed. Please install it with" " `pip install comet_ml`" ) return comet_ml def _get_experiment( workspace: Optional[str] = None, project_name: Optional[str] = None ) -> Any: comet_ml = import_comet_ml() experiment = comet_ml.Experiment( # type: ignore workspace=workspace, project_name=project_name, ) return experiment def _fetch_text_complexity_metrics(text: str) -> dict: textstat = import_textstat() text_complexity_metrics = { "flesch_reading_ease": textstat.flesch_reading_ease(text), "flesch_kincaid_grade": textstat.flesch_kincaid_grade(text), "smog_index": textstat.smog_index(text), "coleman_liau_index": textstat.coleman_liau_index(text),
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
9dc06427bf9c-1
"automated_readability_index": textstat.automated_readability_index(text), "dale_chall_readability_score": textstat.dale_chall_readability_score(text), "difficult_words": textstat.difficult_words(text), "linsear_write_formula": textstat.linsear_write_formula(text), "gunning_fog": textstat.gunning_fog(text), "text_standard": textstat.text_standard(text), "fernandez_huerta": textstat.fernandez_huerta(text), "szigriszt_pazos": textstat.szigriszt_pazos(text), "gutierrez_polini": textstat.gutierrez_polini(text), "crawford": textstat.crawford(text), "gulpease_index": textstat.gulpease_index(text), "osman": textstat.osman(text), } return text_complexity_metrics def _summarize_metrics_for_generated_outputs(metrics: Sequence) -> dict: pd = import_pandas() metrics_df = pd.DataFrame(metrics) metrics_summary = metrics_df.describe() return metrics_summary.to_dict() [docs]class CometCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): """Callback Handler that logs to Comet. Parameters: job_type (str): The type of comet_ml task such as "inference", "testing" or "qc" project_name (str): The comet_ml project name tags (list): Tags to add to the task task_name (str): Name of the comet_ml task visualize (bool): Whether to visualize the run. complexity_metrics (bool): Whether to log complexity metrics stream_logs (bool): Whether to stream callback actions to Comet
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
9dc06427bf9c-2
stream_logs (bool): Whether to stream callback actions to Comet This handler utilizes the associated callback method, formats the input of each callback function with metadata regarding the state of the LLM run, and adds the response to the list of records for both the {method}_records and the action records. It then logs the response to Comet. """ def __init__( self, task_type: Optional[str] = "inference", workspace: Optional[str] = None, project_name: Optional[str] = None, tags: Optional[Sequence] = None, name: Optional[str] = None, visualizations: Optional[List[str]] = None, complexity_metrics: bool = False, custom_metrics: Optional[Callable] = None, stream_logs: bool = True, ) -> None: """Initialize callback handler.""" self.comet_ml = import_comet_ml() super().__init__() self.task_type = task_type self.workspace = workspace self.project_name = project_name self.tags = tags self.visualizations = visualizations self.complexity_metrics = complexity_metrics self.custom_metrics = custom_metrics self.stream_logs = stream_logs self.temp_dir = tempfile.TemporaryDirectory() self.experiment = _get_experiment(workspace, project_name) self.experiment.log_other("Created from", "langchain") if tags: self.experiment.add_tags(tags) self.name = name if self.name: self.experiment.set_name(self.name) warning = ( "The comet_ml callback is currently in beta and is subject to change " "based on updates to `langchain`. Please report any issues to "
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
9dc06427bf9c-3
"based on updates to `langchain`. Please report any issues to " "https://github.com/comet-ml/issue-tracking/issues with the tag " "`langchain`." ) self.comet_ml.LOGGER.warning(warning) self.callback_columns: list = [] self.action_records: list = [] self.complexity_metrics = complexity_metrics if self.visualizations: spacy = import_spacy() self.nlp = spacy.load("en_core_web_sm") else: self.nlp = None def _init_resp(self) -> Dict: return {k: None for k in self.callback_columns} [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Run when LLM starts.""" self.step += 1 self.llm_starts += 1 self.starts += 1 metadata = self._init_resp() metadata.update({"action": "on_llm_start"}) metadata.update(flatten_dict(serialized)) metadata.update(self.get_custom_callback_meta()) for prompt in prompts: prompt_resp = deepcopy(metadata) prompt_resp["prompts"] = prompt self.on_llm_start_records.append(prompt_resp) self.action_records.append(prompt_resp) if self.stream_logs: self._log_stream(prompt, metadata, self.step) [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Run when LLM generates a new token.""" self.step += 1 self.llm_streams += 1 resp = self._init_resp()
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
9dc06427bf9c-4
self.llm_streams += 1 resp = self._init_resp() resp.update({"action": "on_llm_new_token", "token": token}) resp.update(self.get_custom_callback_meta()) self.action_records.append(resp) [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" self.step += 1 self.llm_ends += 1 self.ends += 1 metadata = self._init_resp() metadata.update({"action": "on_llm_end"}) metadata.update(flatten_dict(response.llm_output or {})) metadata.update(self.get_custom_callback_meta()) output_complexity_metrics = [] output_custom_metrics = [] for prompt_idx, generations in enumerate(response.generations): for gen_idx, generation in enumerate(generations): text = generation.text generation_resp = deepcopy(metadata) generation_resp.update(flatten_dict(generation.dict())) complexity_metrics = self._get_complexity_metrics(text) if complexity_metrics: output_complexity_metrics.append(complexity_metrics) generation_resp.update(complexity_metrics) custom_metrics = self._get_custom_metrics( generation, prompt_idx, gen_idx ) if custom_metrics: output_custom_metrics.append(custom_metrics) generation_resp.update(custom_metrics) if self.stream_logs: self._log_stream(text, metadata, self.step) self.action_records.append(generation_resp) self.on_llm_end_records.append(generation_resp) self._log_text_metrics(output_complexity_metrics, step=self.step) self._log_text_metrics(output_custom_metrics, step=self.step) [docs] def on_llm_error(
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
9dc06427bf9c-5
[docs] def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when LLM errors.""" self.step += 1 self.errors += 1 [docs] def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Run when chain starts running.""" self.step += 1 self.chain_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({"action": "on_chain_start"}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) for chain_input_key, chain_input_val in inputs.items(): if isinstance(chain_input_val, str): input_resp = deepcopy(resp) if self.stream_logs: self._log_stream(chain_input_val, resp, self.step) input_resp.update({chain_input_key: chain_input_val}) self.action_records.append(input_resp) else: self.comet_ml.LOGGER.warning( f"Unexpected data format provided! " f"Input Value for {chain_input_key} will not be logged" ) [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running.""" self.step += 1 self.chain_ends += 1 self.ends += 1 resp = self._init_resp() resp.update({"action": "on_chain_end"}) resp.update(self.get_custom_callback_meta()) for chain_output_key, chain_output_val in outputs.items(): if isinstance(chain_output_val, str):
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
9dc06427bf9c-6
if isinstance(chain_output_val, str): output_resp = deepcopy(resp) if self.stream_logs: self._log_stream(chain_output_val, resp, self.step) output_resp.update({chain_output_key: chain_output_val}) self.action_records.append(output_resp) else: self.comet_ml.LOGGER.warning( f"Unexpected data format provided! " f"Output Value for {chain_output_key} will not be logged" ) [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when chain errors.""" self.step += 1 self.errors += 1 [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: """Run when tool starts running.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({"action": "on_tool_start"}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) if self.stream_logs: self._log_stream(input_str, resp, self.step) resp.update({"input_str": input_str}) self.action_records.append(resp) [docs] def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running.""" self.step += 1 self.tool_ends += 1 self.ends += 1 resp = self._init_resp() resp.update({"action": "on_tool_end"}) resp.update(self.get_custom_callback_meta())
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
9dc06427bf9c-7
resp.update(self.get_custom_callback_meta()) if self.stream_logs: self._log_stream(output, resp, self.step) resp.update({"output": output}) self.action_records.append(resp) [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when tool errors.""" self.step += 1 self.errors += 1 [docs] def on_text(self, text: str, **kwargs: Any) -> None: """Run on arbitrary text.""" self.step += 1 self.text_ctr += 1 resp = self._init_resp() resp.update({"action": "on_text"}) resp.update(self.get_custom_callback_meta()) if self.stream_logs: self._log_stream(text, resp, self.step) resp.update({"text": text}) self.action_records.append(resp) [docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Run when agent ends running.""" self.step += 1 self.agent_ends += 1 self.ends += 1 resp = self._init_resp() output = finish.return_values["output"] log = finish.log resp.update({"action": "on_agent_finish", "log": log}) resp.update(self.get_custom_callback_meta()) if self.stream_logs: self._log_stream(output, resp, self.step) resp.update({"output": output}) self.action_records.append(resp) [docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run on agent action."""
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
9dc06427bf9c-8
"""Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 tool = action.tool tool_input = str(action.tool_input) log = action.log resp = self._init_resp() resp.update({"action": "on_agent_action", "log": log, "tool": tool}) resp.update(self.get_custom_callback_meta()) if self.stream_logs: self._log_stream(tool_input, resp, self.step) resp.update({"tool_input": tool_input}) self.action_records.append(resp) def _get_complexity_metrics(self, text: str) -> dict: """Compute text complexity metrics using textstat. Parameters: text (str): The text to analyze. Returns: (dict): A dictionary containing the complexity metrics. """ resp = {} if self.complexity_metrics: text_complexity_metrics = _fetch_text_complexity_metrics(text) resp.update(text_complexity_metrics) return resp def _get_custom_metrics( self, generation: Generation, prompt_idx: int, gen_idx: int ) -> dict: """Compute Custom Metrics for an LLM Generated Output Args: generation (LLMResult): Output generation from an LLM prompt_idx (int): List index of the input prompt gen_idx (int): List index of the generated output Returns: dict: A dictionary containing the custom metrics. """ resp = {} if self.custom_metrics: custom_metrics = self.custom_metrics(generation, prompt_idx, gen_idx) resp.update(custom_metrics) return resp [docs] def flush_tracker( self,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
9dc06427bf9c-9
return resp [docs] def flush_tracker( self, langchain_asset: Any = None, task_type: Optional[str] = "inference", workspace: Optional[str] = None, project_name: Optional[str] = "comet-langchain-demo", tags: Optional[Sequence] = None, name: Optional[str] = None, visualizations: Optional[List[str]] = None, complexity_metrics: bool = False, custom_metrics: Optional[Callable] = None, finish: bool = False, reset: bool = False, ) -> None: """Flush the tracker and set up the session. Everything after this will be a new table. Args: name: Name of the performed session so far, so it is identifiable langchain_asset: The langchain asset to save. finish: Whether to finish the run. Returns: None """ self._log_session(langchain_asset) if langchain_asset: try: self._log_model(langchain_asset) except Exception: self.comet_ml.LOGGER.error( "Failed to export agent or LLM to Comet", exc_info=True, extra={"show_traceback": True}, ) if finish: self.experiment.end() if reset: self._reset( task_type, workspace, project_name, tags, name, visualizations, complexity_metrics, custom_metrics, ) def _log_stream(self, prompt: str, metadata: dict, step: int) -> None: self.experiment.log_text(prompt, metadata=metadata, step=step)
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
9dc06427bf9c-10
self.experiment.log_text(prompt, metadata=metadata, step=step) def _log_model(self, langchain_asset: Any) -> None: model_parameters = self._get_llm_parameters(langchain_asset) self.experiment.log_parameters(model_parameters, prefix="model") langchain_asset_path = Path(self.temp_dir.name, "model.json") model_name = self.name if self.name else LANGCHAIN_MODEL_NAME try: if hasattr(langchain_asset, "save"): langchain_asset.save(langchain_asset_path) self.experiment.log_model(model_name, str(langchain_asset_path)) except (ValueError, AttributeError, NotImplementedError) as e: if hasattr(langchain_asset, "save_agent"): langchain_asset.save_agent(langchain_asset_path) self.experiment.log_model(model_name, str(langchain_asset_path)) else: self.comet_ml.LOGGER.error( f"{e}" " Could not save Langchain Asset " f"for {langchain_asset.__class__.__name__}" ) def _log_session(self, langchain_asset: Optional[Any] = None) -> None: try: llm_session_df = self._create_session_analysis_dataframe(langchain_asset) # Log the cleaned dataframe as a table self.experiment.log_table("langchain-llm-session.csv", llm_session_df) except Exception: self.comet_ml.LOGGER.warning( "Failed to log session data to Comet", exc_info=True, extra={"show_traceback": True}, ) try: metadata = {"langchain_version": str(langchain.__version__)} # Log the langchain low-level records as a JSON file directly
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
9dc06427bf9c-11
# Log the langchain low-level records as a JSON file directly self.experiment.log_asset_data( self.action_records, "langchain-action_records.json", metadata=metadata ) except Exception: self.comet_ml.LOGGER.warning( "Failed to log session data to Comet", exc_info=True, extra={"show_traceback": True}, ) try: self._log_visualizations(llm_session_df) except Exception: self.comet_ml.LOGGER.warning( "Failed to log visualizations to Comet", exc_info=True, extra={"show_traceback": True}, ) def _log_text_metrics(self, metrics: Sequence[dict], step: int) -> None: if not metrics: return metrics_summary = _summarize_metrics_for_generated_outputs(metrics) for key, value in metrics_summary.items(): self.experiment.log_metrics(value, prefix=key, step=step) def _log_visualizations(self, session_df: Any) -> None: if not (self.visualizations and self.nlp): return spacy = import_spacy() prompts = session_df["prompts"].tolist() outputs = session_df["text"].tolist() for idx, (prompt, output) in enumerate(zip(prompts, outputs)): doc = self.nlp(output) sentence_spans = list(doc.sents) for visualization in self.visualizations: try: html = spacy.displacy.render( sentence_spans, style=visualization, options={"compact": True}, jupyter=False, page=True, ) self.experiment.log_asset_data( html,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
9dc06427bf9c-12
) self.experiment.log_asset_data( html, name=f"langchain-viz-{visualization}-{idx}.html", metadata={"prompt": prompt}, step=idx, ) except Exception as e: self.comet_ml.LOGGER.warning( e, exc_info=True, extra={"show_traceback": True} ) return def _reset( self, task_type: Optional[str] = None, workspace: Optional[str] = None, project_name: Optional[str] = None, tags: Optional[Sequence] = None, name: Optional[str] = None, visualizations: Optional[List[str]] = None, complexity_metrics: bool = False, custom_metrics: Optional[Callable] = None, ) -> None: _task_type = task_type if task_type else self.task_type _workspace = workspace if workspace else self.workspace _project_name = project_name if project_name else self.project_name _tags = tags if tags else self.tags _name = name if name else self.name _visualizations = visualizations if visualizations else self.visualizations _complexity_metrics = ( complexity_metrics if complexity_metrics else self.complexity_metrics ) _custom_metrics = custom_metrics if custom_metrics else self.custom_metrics self.__init__( # type: ignore task_type=_task_type, workspace=_workspace, project_name=_project_name, tags=_tags, name=_name, visualizations=_visualizations, complexity_metrics=_complexity_metrics, custom_metrics=_custom_metrics, ) self.reset_callback_meta() self.temp_dir = tempfile.TemporaryDirectory()
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
9dc06427bf9c-13
self.reset_callback_meta() self.temp_dir = tempfile.TemporaryDirectory() def _create_session_analysis_dataframe(self, langchain_asset: Any = None) -> Any: """Create a session analysis dataframe by merging the LLM start and end records.""" pd = import_pandas() llm_parameters = self._get_llm_parameters(langchain_asset) num_generations_per_prompt = llm_parameters.get("n", 1) llm_start_records_df = pd.DataFrame(self.on_llm_start_records) # Repeat each input row based on the number of outputs generated per prompt llm_start_records_df = llm_start_records_df.loc[ llm_start_records_df.index.repeat(num_generations_per_prompt) ].reset_index(drop=True) llm_end_records_df = pd.DataFrame(self.on_llm_end_records) llm_session_df = pd.merge( llm_start_records_df, llm_end_records_df, left_index=True, right_index=True, suffixes=["_llm_start", "_llm_end"], ) return llm_session_df def _get_llm_parameters(self, langchain_asset: Any = None) -> dict: if not langchain_asset: return {} try: if hasattr(langchain_asset, "agent"): llm_parameters = langchain_asset.agent.llm_chain.llm.dict() elif hasattr(langchain_asset, "llm_chain"): llm_parameters = langchain_asset.llm_chain.llm.dict() elif hasattr(langchain_asset, "llm"): llm_parameters = langchain_asset.llm.dict() else: llm_parameters = langchain_asset.dict() except Exception: return {} return llm_parameters
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/comet_ml_callback.html
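A minimal usage sketch for the handler above, using constructor parameters from the __init__ signature shown; the OpenAI LLM, the project name, and the tag are illustrative, and configured Comet credentials are assumed.

from langchain.callbacks import CometCallbackHandler
from langchain.llms import OpenAI

comet_callback = CometCallbackHandler(
    project_name="comet-example",  # illustrative project name
    complexity_metrics=True,       # logs the textstat metrics summarized above
    stream_logs=True,
    tags=["llm"],
)
llm = OpenAI(temperature=0.9, callbacks=[comet_callback])
llm("Tell me a joke")
# Logs the session table and action records, optionally the model, then can reset.
comet_callback.flush_tracker(llm, finish=True)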
4f6878992750-0
Source code for langchain.callbacks.streaming_stdout """Callback Handler streams to stdout on new llm token.""" import sys from typing import Any, Dict, List, Union from langchain.callbacks.base import BaseCallbackHandler from langchain.schema import AgentAction, AgentFinish, LLMResult [docs]class StreamingStdOutCallbackHandler(BaseCallbackHandler): """Callback handler for streaming. Only works with LLMs that support streaming.""" [docs] def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any ) -> None: """Run when LLM starts running.""" [docs] def on_llm_new_token(self, token: str, **kwargs: Any) -> None: """Run on new LLM token. Only available when streaming is enabled.""" sys.stdout.write(token) sys.stdout.flush() [docs] def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running.""" [docs] def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when LLM errors.""" [docs] def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Run when chain starts running.""" [docs] def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running.""" [docs] def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when chain errors."""
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streaming_stdout.html
4f6878992750-1
) -> None: """Run when chain errors.""" [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, **kwargs: Any ) -> None: """Run when tool starts running.""" [docs] def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run on agent action.""" pass [docs] def on_tool_end(self, output: str, **kwargs: Any) -> None: """Run when tool ends running.""" [docs] def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: """Run when tool errors.""" [docs] def on_text(self, text: str, **kwargs: Any) -> None: """Run on arbitrary text.""" [docs] def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: """Run on agent end."""
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/streaming_stdout.html
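A minimal usage sketch for the streaming handler above; the OpenAI LLM and the prompt are illustrative, and a model that supports streaming is assumed.

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import OpenAI

# streaming=True makes the LLM emit tokens through on_llm_new_token as they arrive.
llm = OpenAI(
    streaming=True, callbacks=[StreamingStdOutCallbackHandler()], temperature=0
)
llm("Write me a haiku about mountains.")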
8962eee068ca-0
Source code for langchain.callbacks.human from typing import Any, Callable, Dict, Optional from uuid import UUID from langchain.callbacks.base import BaseCallbackHandler def _default_approve(_input: str) -> bool: msg = ( "Do you approve of the following input? " "Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no." ) msg += "\n\n" + _input + "\n" resp = input(msg) return resp.lower() in ("yes", "y") def _default_true(_: Dict[str, Any]) -> bool: return True class HumanRejectedException(Exception): """Exception to raise when a person manually reviews and rejects a value.""" [docs]class HumanApprovalCallbackHandler(BaseCallbackHandler): """Callback for manually validating values.""" raise_error: bool = True def __init__( self, approve: Callable[[Any], bool] = _default_approve, should_check: Callable[[Dict[str, Any]], bool] = _default_true, ): self._approve = approve self._should_check = should_check [docs] def on_tool_start( self, serialized: Dict[str, Any], input_str: str, *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: if self._should_check(serialized) and not self._approve(input_str): raise HumanRejectedException( f"Inputs {input_str} to tool {serialized} were rejected." )
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/human.html
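A minimal usage sketch for the approval handler above. The _should_check predicate and the terminal tool are illustrative assumptions; load_tools comes from langchain.agents and is not part of this module.

from langchain.agents import load_tools
from langchain.callbacks import HumanApprovalCallbackHandler

def _should_check(serialized_obj: dict) -> bool:
    # Only gate the terminal tool; every other tool runs without a prompt.
    return serialized_obj.get("name") == "terminal"

callbacks = [HumanApprovalCallbackHandler(should_check=_should_check)]
tool = load_tools(["terminal"])[0]
# Prompts on stdin; raises HumanRejectedException unless the answer is 'y'/'yes'.
print(tool.run("echo hello", callbacks=callbacks))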
0e790c0dcca7-0
Source code for langchain.callbacks.manager from __future__ import annotations import asyncio import functools import logging import os import warnings from contextlib import asynccontextmanager, contextmanager from contextvars import ContextVar from typing import ( Any, AsyncGenerator, Dict, Generator, List, Optional, Type, TypeVar, Union, cast, ) from uuid import UUID, uuid4 import langchain from langchain.callbacks.base import ( BaseCallbackHandler, BaseCallbackManager, ChainManagerMixin, LLMManagerMixin, RunManagerMixin, ToolManagerMixin, ) from langchain.callbacks.openai_info import OpenAICallbackHandler from langchain.callbacks.stdout import StdOutCallbackHandler from langchain.callbacks.tracers.langchain import LangChainTracer from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1 from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler from langchain.callbacks.tracers.wandb import WandbTracer from langchain.schema import ( AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string, ) logger = logging.getLogger(__name__) Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar( "openai_callback", default=None ) tracing_callback_var: ContextVar[ Optional[LangChainTracerV1] ] = ContextVar( # noqa: E501 "tracing_callback", default=None ) wandb_tracing_callback_var: ContextVar[ Optional[WandbTracer]
wandb_tracing_callback_var: ContextVar[
    Optional[WandbTracer]
] = ContextVar(  # noqa: E501
    "tracing_wandb_callback", default=None
)
tracing_v2_callback_var: ContextVar[
    Optional[LangChainTracer]
] = ContextVar(  # noqa: E501
    "tracing_callback_v2", default=None
)


def _get_debug() -> bool:
    return langchain.debug


@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
    """Get the OpenAI callback handler in a context manager,
    which conveniently exposes token and cost information.

    Returns:
        OpenAICallbackHandler: The OpenAI callback handler.

    Example:
        >>> with get_openai_callback() as cb:
        ...     # Use the OpenAI callback handler
    """
    cb = OpenAICallbackHandler()
    openai_callback_var.set(cb)
    yield cb
    openai_callback_var.set(None)
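A fuller usage sketch for get_openai_callback (not part of the module source; assumes an OpenAI API key in the environment). The handler aggregates token counts and cost across every OpenAI call made inside the block:

from langchain.callbacks import get_openai_callback
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
with get_openai_callback() as cb:
    llm("What is the capital of France?")
print(f"total tokens: {cb.total_tokens}")
print(f"prompt tokens: {cb.prompt_tokens}")
print(f"completion tokens: {cb.completion_tokens}")
print(f"total cost (USD): {cb.total_cost}")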
@contextmanager
def tracing_enabled(
    session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
    """Get the deprecated LangChainTracer in a context manager.

    Args:
        session_name (str, optional): The name of the session.
            Defaults to "default".

    Returns:
        TracerSessionV1: The LangChainTracer session.

    Example:
        >>> with tracing_enabled() as session:
        ...     # Use the LangChainTracer session
    """
    cb = LangChainTracerV1()
    session = cast(TracerSessionV1, cb.load_session(session_name))
    tracing_callback_var.set(cb)
    yield session
    tracing_callback_var.set(None)


@contextmanager
def wandb_tracing_enabled(
    session_name: str = "default",
) -> Generator[None, None, None]:
    """Get the WandbTracer in a context manager.

    Args:
        session_name (str, optional): The name of the session.
            Defaults to "default".

    Returns:
        None

    Example:
        >>> with wandb_tracing_enabled() as session:
        ...     # Use the WandbTracer session
    """
    cb = WandbTracer()
    wandb_tracing_callback_var.set(cb)
    yield None
    wandb_tracing_callback_var.set(None)
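A sketch of wandb_tracing_enabled in use (not part of the module source; assumes the wandb package is installed and a Weights & Biases account is configured). Only runs inside the block are traced:

from langchain.callbacks.manager import wandb_tracing_enabled
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
with wandb_tracing_enabled():
    llm("What is 2 + 2?")  # traced to W&B
llm("What is 3 + 3?")  # not traced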
@contextmanager
def tracing_v2_enabled(
    project_name: Optional[str] = None,
    *,
    example_id: Optional[Union[str, UUID]] = None,
) -> Generator[None, None, None]:
    """Instruct LangChain to log all runs in context to LangSmith.

    Args:
        project_name (str, optional): The name of the project.
            Defaults to "default".
        example_id (str or UUID, optional): The ID of the example.
            Defaults to None.

    Returns:
        None

    Example:
        >>> with tracing_v2_enabled():
        ...     # LangChain code will automatically be traced
    """
    # Issue a warning that this is experimental
    warnings.warn(
        "The tracing v2 API is in development. "
        "This is not yet stable and may change in the future."
    )
    if isinstance(example_id, str):
        example_id = UUID(example_id)
    cb = LangChainTracer(
        example_id=example_id,
        project_name=project_name,
    )
    tracing_v2_callback_var.set(cb)
    yield
    tracing_v2_callback_var.set(None)
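A usage sketch (not part of the module source; assumes LangSmith credentials such as the LANGCHAIN_API_KEY environment variable are configured, and note the experimental warning above):

from langchain.callbacks.manager import tracing_v2_enabled
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
with tracing_v2_enabled(project_name="my-project"):
    llm("Hello!")  # logged to the "my-project" LangSmith project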
@contextmanager
def trace_as_chain_group(
    group_name: str,
    *,
    project_name: Optional[str] = None,
    example_id: Optional[Union[str, UUID]] = None,
    tags: Optional[List[str]] = None,
) -> Generator[CallbackManager, None, None]:
    """Get a callback manager for a chain group in a context manager.

    Useful for grouping different calls together as a single run even if
    they aren't composed in a single chain.

    Args:
        group_name (str): The name of the chain group.
        project_name (str, optional): The name of the project.
            Defaults to None.
        example_id (str or UUID, optional): The ID of the example.
            Defaults to None.
        tags (List[str], optional): The inheritable tags to apply to all runs.
            Defaults to None.

    Returns:
        CallbackManager: The callback manager for the chain group.

    Example:
        >>> with trace_as_chain_group("group_name") as manager:
        ...     # Use the callback manager for the chain group
        ...     llm.predict("Foo", callbacks=manager)
    """
    cb = LangChainTracer(
        project_name=project_name,
        example_id=example_id,
    )
    cm = CallbackManager.configure(
        inheritable_callbacks=[cb],
        inheritable_tags=tags,
    )

    run_manager = cm.on_chain_start({"name": group_name}, {})
    yield run_manager.get_child()
    run_manager.on_chain_end({})
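Expanding the docstring example into a sketch (not part of the module source; the same LangSmith setup assumptions as for tracing_v2_enabled apply). Two independent LLM calls are grouped under one traced run:

from langchain.callbacks.manager import trace_as_chain_group
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
with trace_as_chain_group("qa_session", tags=["demo"]) as manager:
    # Both calls appear as children of a single "qa_session" run.
    llm.predict("What is 2 + 2?", callbacks=manager)
    llm.predict("And that number squared?", callbacks=manager)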
@asynccontextmanager
async def atrace_as_chain_group(
    group_name: str,
    *,
    project_name: Optional[str] = None,
    example_id: Optional[Union[str, UUID]] = None,
    tags: Optional[List[str]] = None,
) -> AsyncGenerator[AsyncCallbackManager, None]:
    """Get an async callback manager for a chain group in a context manager.

    Useful for grouping different async calls together as a single run even if
    they aren't composed in a single chain.

    Args:
        group_name (str): The name of the chain group.
        project_name (str, optional): The name of the project.
            Defaults to None.
        example_id (str or UUID, optional): The ID of the example.
            Defaults to None.
        tags (List[str], optional): The inheritable tags to apply to all runs.
            Defaults to None.

    Returns:
        AsyncCallbackManager: The async callback manager for the chain group.

    Example:
        >>> async with atrace_as_chain_group("group_name") as manager:
        ...     # Use the async callback manager for the chain group
        ...     await llm.apredict("Foo", callbacks=manager)
    """
    cb = LangChainTracer(
        project_name=project_name,
        example_id=example_id,
    )
    cm = AsyncCallbackManager.configure(
        inheritable_callbacks=[cb], inheritable_tags=tags
    )

    run_manager = await cm.on_chain_start({"name": group_name}, {})
    try:
        yield run_manager.get_child()
    finally:
        await run_manager.on_chain_end({})


def _handle_event(
    handlers: List[BaseCallbackHandler],
    event_name: str,
    ignore_condition_name: Optional[str],
    *args: Any,
    **kwargs: Any,
) -> None:
    """Generic event handler for CallbackManager."""
    message_strings: Optional[List[str]] = None
    for handler in handlers:
        try:
            if ignore_condition_name is None or not getattr(
                handler, ignore_condition_name
            ):
                getattr(handler, event_name)(*args, **kwargs)
        except NotImplementedError as e:
            if event_name == "on_chat_model_start":
                if message_strings is None:
                    message_strings = [get_buffer_string(m) for m in args[1]]
                _handle_event(
                    [handler],
                    "on_llm_start",
                    "ignore_llm",
                    args[0],
                    message_strings,
                    *args[2:],
                    **kwargs,
                )
            else:
                logger.warning(
                    f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
                )
        except Exception as e:
            logger.warning(
                f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
            )
            if handler.raise_error:
                raise e
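The on_chat_model_start fallback in _handle_event means a handler that only implements on_llm_start still observes chat-model runs: the message lists are flattened into prompt strings via get_buffer_string and re-dispatched. A minimal sketch of such a handler (not part of the module source; the class name is illustrative):

from typing import Any, Dict, List

from langchain.callbacks.base import BaseCallbackHandler


class PromptLoggingHandler(BaseCallbackHandler):
    """Logs prompts only; chat-model starts are routed here by the
    on_llm_start fallback in _handle_event."""

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        for prompt in prompts:
            print(f"Prompt: {prompt!r}")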
async def _ahandle_event_for_handler(
    handler: BaseCallbackHandler,
    event_name: str,
    ignore_condition_name: Optional[str],
    *args: Any,
    **kwargs: Any,
) -> None:
    try:
        if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
            event = getattr(handler, event_name)
            if asyncio.iscoroutinefunction(event):
                await event(*args, **kwargs)
            else:
                if handler.run_inline:
                    event(*args, **kwargs)
                else:
                    await asyncio.get_event_loop().run_in_executor(
                        None, functools.partial(event, *args, **kwargs)
                    )
    except NotImplementedError as e:
        if event_name == "on_chat_model_start":
            message_strings = [get_buffer_string(m) for m in args[1]]
            await _ahandle_event_for_handler(
                handler,
                "on_llm_start",
                "ignore_llm",
                args[0],
                message_strings,
                *args[2:],
                **kwargs,
            )
        else:
            logger.warning(
                f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
            )
    except Exception as e:
        logger.warning(
            f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
        )
        if handler.raise_error:
            raise e
async def _ahandle_event(
    handlers: List[BaseCallbackHandler],
    event_name: str,
    ignore_condition_name: Optional[str],
    *args: Any,
    **kwargs: Any,
) -> None:
    """Generic event handler for AsyncCallbackManager."""
    for handler in [h for h in handlers if h.run_inline]:
        await _ahandle_event_for_handler(
            handler, event_name, ignore_condition_name, *args, **kwargs
        )
    await asyncio.gather(
        *(
            _ahandle_event_for_handler(
                handler, event_name, ignore_condition_name, *args, **kwargs
            )
            for handler in handlers
            if not handler.run_inline
        )
    )


BRM = TypeVar("BRM", bound="BaseRunManager")


class BaseRunManager(RunManagerMixin):
    """Base class for run manager (a bound callback manager)."""

    def __init__(
        self,
        *,
        run_id: UUID,
        handlers: List[BaseCallbackHandler],
        inheritable_handlers: List[BaseCallbackHandler],
        parent_run_id: Optional[UUID] = None,
        tags: List[str],
        inheritable_tags: List[str],
    ) -> None:
        """Initialize the run manager.

        Args:
            run_id (UUID): The ID of the run.
            handlers (List[BaseCallbackHandler]): The list of handlers.
            inheritable_handlers (List[BaseCallbackHandler]):
                The list of inheritable handlers.
            parent_run_id (UUID, optional): The ID of the parent run.
                Defaults to None.
            tags (List[str]): The list of tags.
            inheritable_tags (List[str]): The list of inheritable tags.
        """
        self.run_id = run_id
        self.handlers = handlers
        self.inheritable_handlers = inheritable_handlers
        self.tags = tags
        self.inheritable_tags = inheritable_tags
        self.parent_run_id = parent_run_id
    @classmethod
    def get_noop_manager(cls: Type[BRM]) -> BRM:
        """Return a manager that doesn't perform any operations.

        Returns:
            BaseRunManager: The noop manager.
        """
        return cls(
            run_id=uuid4(),
            handlers=[],
            inheritable_handlers=[],
            tags=[],
            inheritable_tags=[],
        )


class RunManager(BaseRunManager):
    """Sync Run Manager."""

    def on_text(
        self,
        text: str,
        **kwargs: Any,
    ) -> Any:
        """Run when text is received.

        Args:
            text (str): The received text.

        Returns:
            Any: The result of the callback.
        """
        _handle_event(
            self.handlers,
            "on_text",
            None,
            text,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )


class AsyncRunManager(BaseRunManager):
    """Async Run Manager."""

    async def on_text(
        self,
        text: str,
        **kwargs: Any,
    ) -> Any:
        """Run when text is received.

        Args:
            text (str): The received text.

        Returns:
            Any: The result of the callback.
        """
        await _ahandle_event(
            self.handlers,
            "on_text",
            None,
            text,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )
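One practical consequence of get_noop_manager: any parameter typed as a run manager can be satisfied in tests without wiring up handlers. A small sketch (not part of the module source):

from langchain.callbacks.manager import CallbackManagerForLLMRun

# A manager with a fresh run_id and an empty handler list:
# every callback it dispatches is a no-op.
noop = CallbackManagerForLLMRun.get_noop_manager()
noop.on_text("nothing listens to this")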
) -> None: """Run when LLM generates a new token. Args: token (str): The new token. """ _handle_event( self.handlers, "on_llm_new_token", "ignore_llm", token=token, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running. Args: response (LLMResult): The LLM result. """ _handle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when LLM errors. Args: error (Exception or KeyboardInterrupt): The error. """ _handle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin): """Async callback manager for LLM run.""" async def on_llm_new_token( self, token: str, **kwargs: Any, ) -> None: """Run when LLM generates a new token. Args:
"""Run when LLM generates a new token. Args: token (str): The new token. """ await _ahandle_event( self.handlers, "on_llm_new_token", "ignore_llm", token, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: """Run when LLM ends running. Args: response (LLMResult): The LLM result. """ await _ahandle_event( self.handlers, "on_llm_end", "ignore_llm", response, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when LLM errors. Args: error (Exception or KeyboardInterrupt): The error. """ await _ahandle_event( self.handlers, "on_llm_error", "ignore_llm", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class CallbackManagerForChainRun(RunManager, ChainManagerMixin): """Callback manager for chain run.""" def get_child(self, tag: Optional[str] = None) -> CallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None.
class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
    """Callback manager for chain run."""

    def get_child(self, tag: Optional[str] = None) -> CallbackManager:
        """Get a child callback manager.

        Args:
            tag (str, optional): The tag for the child callback manager.
                Defaults to None.

        Returns:
            CallbackManager: The child callback manager.
        """
        manager = CallbackManager(handlers=[], parent_run_id=self.run_id)
        manager.set_handlers(self.inheritable_handlers)
        manager.add_tags(self.inheritable_tags)
        if tag is not None:
            manager.add_tags([tag], False)
        return manager

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running.

        Args:
            outputs (Dict[str, Any]): The outputs of the chain.
        """
        _handle_event(
            self.handlers,
            "on_chain_end",
            "ignore_chain",
            outputs,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_chain_error(
        self,
        error: Union[Exception, KeyboardInterrupt],
        **kwargs: Any,
    ) -> None:
        """Run when chain errors.

        Args:
            error (Exception or KeyboardInterrupt): The error.
        """
        _handle_event(
            self.handlers,
            "on_chain_error",
            "ignore_chain",
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run when agent action is received.

        Args:
            action (AgentAction): The agent action.

        Returns:
            Any: The result of the callback.
        """
        _handle_event(
            self.handlers,
            "on_agent_action",
            "ignore_agent",
            action,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
        """Run when agent finish is received.

        Args:
            finish (AgentFinish): The agent finish.

        Returns:
            Any: The result of the callback.
        """
        _handle_event(
            self.handlers,
            "on_agent_finish",
            "ignore_agent",
            finish,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )
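How get_child is typically used inside a chain: the child manager inherits the inheritable handlers and tags, and parents its runs under the current run_id. A hypothetical _call body as a sketch (not part of the module source; the tag name and sub-chain are illustrative):

from typing import Any, Dict

from langchain.callbacks.manager import CallbackManagerForChainRun


def _call(
    inputs: Dict[str, Any], run_manager: CallbackManagerForChainRun
) -> Dict[str, Any]:
    child = run_manager.get_child(tag="step_1")
    # Nested components receive `child` as their callbacks, e.g.:
    #   sub_chain.run(inputs["question"], callbacks=child)
    run_manager.on_text("step_1 finished")
    return {"output": "..."}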
"on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received. Args: finish (AgentFinish): The agent finish. Returns: Any: The result of the callback. """ _handle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin): """Async callback manager for chain run.""" def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager: """Get a child callback manager. Args: tag (str, optional): The tag for the child callback manager. Defaults to None. Returns: AsyncCallbackManager: The child callback manager. """ manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id) manager.set_handlers(self.inheritable_handlers) manager.add_tags(self.inheritable_tags) if tag is not None: manager.add_tags([tag], False) return manager async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Run when chain ends running. Args: outputs (Dict[str, Any]): The outputs of the chain. """ await _ahandle_event( self.handlers, "on_chain_end", "ignore_chain",
self.handlers, "on_chain_end", "ignore_chain", outputs, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any, ) -> None: """Run when chain errors. Args: error (Exception or KeyboardInterrupt): The error. """ await _ahandle_event( self.handlers, "on_chain_error", "ignore_chain", error, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: """Run when agent action is received. Args: action (AgentAction): The agent action. Returns: Any: The result of the callback. """ await _ahandle_event( self.handlers, "on_agent_action", "ignore_agent", action, run_id=self.run_id, parent_run_id=self.parent_run_id, **kwargs, ) async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: """Run when agent finish is received. Args: finish (AgentFinish): The agent finish. Returns: Any: The result of the callback. """ await _ahandle_event( self.handlers, "on_agent_finish", "ignore_agent", finish, run_id=self.run_id, parent_run_id=self.parent_run_id,
https://api.python.langchain.com/en/latest/_modules/langchain/callbacks/manager.html