"""
Implementations of different persona reasoning types.
"""
from typing import Any, Dict, List, Optional

from langchain_core.documents import Document
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

from .base import PersonaReasoning


class LLMPersonaReasoning(PersonaReasoning):
    """Base implementation that uses an LLM to generate responses."""

    def __init__(self, config: Dict[str, Any], llm=None):
        super().__init__(config)
        # Use the shared LLM instance if provided, otherwise create one
        self.llm = llm or ChatOpenAI(model="gpt-4o-mini", temperature=0.4)

    def generate_perspective(self, query: str, context: Optional[List[Document]] = None) -> str:
        """Generate a perspective using the LLM and this persona's system prompt."""
        # Fold any retrieved documents into the prompt
        context_text = ""
        if context:
            context_text = "\n\nRelevant information:\n" + "\n".join(
                doc.page_content for doc in context
            )

        # Build the messages and get a response from the LLM
        messages = [
            SystemMessage(content=self.system_prompt),
            HumanMessage(
                content=f"Query: {query}{context_text}\n\n"
                "Please provide your perspective on this query based on your unique approach."
            ),
        ]
        response = self.llm.invoke(messages)
        return response.content
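
# Example usage (a hedged sketch, not part of this module): it assumes the
# PersonaReasoning base class exposes the persona prompt as `self.system_prompt`
# (as generate_perspective above relies on), that the config dict uses a
# hypothetical "system_prompt" key, and that OPENAI_API_KEY is set for ChatOpenAI.
#
#     reasoning = LLMPersonaReasoning({"system_prompt": "You are a careful analyst."})
#     print(reasoning.generate_perspective("What drives urban heat islands?"))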


# Specialized implementations for each persona type
class AnalyticalReasoning(LLMPersonaReasoning):
    def generate_perspective(self, query: str, context: Optional[List[Document]] = None) -> str:
        """Generate perspective using analytical reasoning approach"""
        # For MVP, we'll use the base implementation
        # In a full implementation, add analytical-specific enhancements
        return super().generate_perspective(query, context)
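
# A hedged sketch of what an analytical-specific enhancement might look like
# beyond the MVP: augment the query with an analytical framing before
# delegating to the base class. The class name and framing text here are
# hypothetical, not part of this module.
#
#     class AnalyticalReasoningV2(LLMPersonaReasoning):
#         def generate_perspective(self, query, context=None):
#             framing = ("Decompose the problem, weigh the evidence for each "
#                        "part, and state your confidence in the conclusion.")
#             return super().generate_perspective(f"{query}\n\n{framing}", context)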


class ScientificReasoning(LLMPersonaReasoning):
    def generate_perspective(self, query: str, context: Optional[List[Document]] = None) -> str:
        """Generate perspective using scientific reasoning approach"""
        # For MVP, we'll use the base implementation
        # In a full implementation, add scientific-specific enhancements
        return super().generate_perspective(query, context)


class PhilosophicalReasoning(LLMPersonaReasoning):
    def generate_perspective(self, query: str, context: Optional[List[Document]] = None) -> str:
        """Generate perspective using philosophical reasoning approach"""
        # For MVP, we'll use the base implementation
        # In a full implementation, add philosophical-specific enhancements
        return super().generate_perspective(query, context)


class FactualReasoning(LLMPersonaReasoning):
    def generate_perspective(self, query: str, context: Optional[List[Document]] = None) -> str:
        """Generate perspective using factual reasoning approach"""
        # For MVP, we'll use the base implementation
        # In a full implementation, add factual-specific enhancements
        return super().generate_perspective(query, context)


class MetaphoricalReasoning(LLMPersonaReasoning):
    def generate_perspective(self, query: str, context: Optional[List[Document]] = None) -> str:
        """Generate perspective using metaphorical reasoning approach"""
        # For MVP, we'll use the base implementation
        # In a full implementation, add metaphorical-specific enhancements
        return super().generate_perspective(query, context)


class FuturisticReasoning(LLMPersonaReasoning):
    def generate_perspective(self, query: str, context: Optional[List[Document]] = None) -> str:
        """Generate perspective using futuristic reasoning approach"""
        # For MVP, we'll use the base implementation
        # In a full implementation, add futuristic-specific enhancements
        return super().generate_perspective(query, context)


# Personality implementations (second tier of two-tier system)
class HolmesReasoning(LLMPersonaReasoning):
    """Sherlock Holmes personality implementation"""

    def __init__(self, config: Dict[str, Any], parent_config: Dict[str, Any], llm=None):
        super().__init__(config, llm)
        self.parent_config = parent_config

    def generate_perspective(self, query: str, context: Optional[List[Document]] = None) -> str:
        """Generate perspective in Sherlock Holmes' style"""
        # For MVP, we'll use the base implementation with Holmes' system prompt
        # In a full implementation, add Holmes-specific reasoning patterns
        return super().generate_perspective(query, context)
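
# A hedged sketch of how the stored parent_config might be used beyond the MVP:
# blend the tier-one reasoning prompt with the tier-two personality prompt.
# The "system_prompt" key is an assumed config schema and _blended_prompt is a
# hypothetical helper, not an existing method.
#
#     def _blended_prompt(self) -> str:
#         parent = self.parent_config.get("system_prompt", "")
#         return f"{parent}\n\n{self.system_prompt}" if parent else self.system_prompt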


class FeynmanReasoning(LLMPersonaReasoning):
    """Richard Feynman personality implementation"""

    def __init__(self, config: Dict[str, Any], parent_config: Dict[str, Any], llm=None):
        super().__init__(config, llm)
        self.parent_config = parent_config

    def generate_perspective(self, query: str, context: Optional[List[Document]] = None) -> str:
        """Generate perspective in Richard Feynman's style"""
        # For MVP, we'll use the base implementation with Feynman's system prompt
        # In a full implementation, add Feynman-specific reasoning patterns
        return super().generate_perspective(query, context)


class FryReasoning(LLMPersonaReasoning):
    """Hannah Fry personality implementation"""

    def __init__(self, config: Dict[str, Any], parent_config: Dict[str, Any], llm=None):
        super().__init__(config, llm)
        self.parent_config = parent_config

    def generate_perspective(self, query: str, context: Optional[List[Document]] = None) -> str:
        """Generate perspective in Hannah Fry's style"""
        # For MVP, we'll use the base implementation with Fry's system prompt
        # In a full implementation, add Fry-specific reasoning patterns
        return super().generate_perspective(query, context)
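

# Minimal smoke test (an illustrative sketch): exercises one persona from each
# tier. Assumes OPENAI_API_KEY is set in the environment and that
# PersonaReasoning accepts a config dict with a "system_prompt" key; both
# config dicts below are hypothetical placeholders, not a confirmed schema.
if __name__ == "__main__":
    shared_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.4)
    scientific = ScientificReasoning({"system_prompt": "Reason like a scientist."}, llm=shared_llm)
    holmes = HolmesReasoning(
        {"system_prompt": "Answer in the voice of Sherlock Holmes."},
        parent_config={"system_prompt": "Reason deductively from observed details."},
        llm=shared_llm,
    )
    question = "Why does bread go stale faster in the refrigerator?"
    print(scientific.generate_perspective(question))
    print(holmes.generate_perspective(question))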