Spaces:
Running
Running
import os | |
import uvicorn | |
from fastapi import FastAPI, HTTPException | |
from fastapi.middleware.cors import CORSMiddleware | |
from fastapi.responses import HTMLResponse, FileResponse | |
from fastapi.staticfiles import StaticFiles | |
from pydantic import BaseModel | |
from transformers import pipeline, AutoTokenizer, AutoModel, set_seed | |
import torch | |
from typing import Optional, Dict, List | |
import asyncio | |
import time | |
import gc | |
import re | |
import random | |
import json | |
# FastAPI application initialisation
app = FastAPI(title="Character AI Chat - CPU Optimized Backend")
# CORS middleware for a separately hosted frontend
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, replace with specific domains
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Serve static files
# NOTE(review): no @app.get(...) route decorator is visible here — it was
# presumably lost during extraction; confirm how this handler is registered.
async def get_avatar():
    """Return the character avatar image file from the working directory."""
    return FileResponse("avatar.png")
# NOTE(review): route decorator not visible — presumably lost in extraction.
async def get_background():
    """Return the chat background image file from the working directory."""
    return FileResponse("background.png")
# Seed transformers' RNGs so sampled generations are reproducible
set_seed(42)
# Enhanced Roleplay Systems
class ConversationMemory:
    """Per-session rolling chat history plus a 0-100 relationship score."""

    def __init__(self):
        self.history = []
        self.character_state = {}
        self.relationship_level = 0
        # Cap stored turns to keep per-session memory small on CPU hosts.
        self.max_history = 10

    def add_interaction(self, user_input: str, character_response: str, emotion: str, topic: str):
        """Record one exchange and nudge the relationship score by emotion."""
        self.history.append({
            "timestamp": time.time(),
            "user": user_input,
            "character": character_response,
            "emotion": emotion,
            "topic": topic,
        })
        # Discard turns older than the cap (no-op while under the limit).
        del self.history[:-self.max_history]
        # Positive turns build rapport fastest; negative ones erode it slowly.
        if emotion == "positive":
            delta = 2
        elif emotion == "negative":
            delta = -1
        else:
            delta = 1
        self.relationship_level = max(0, min(100, self.relationship_level + delta))

    def get_recent_context(self, turns: int = 3) -> List[Dict]:
        """Return up to the last *turns* interactions (empty list when none)."""
        return self.history[-turns:]

    def get_relationship_status(self) -> str:
        """Map the numeric score onto a coarse closeness label."""
        tiers = ((80, "very_close"), (60, "close"), (40, "friendly"), (20, "acquainted"))
        for floor, label in tiers:
            if self.relationship_level >= floor:
                return label
        return "stranger"
class CharacterPersonality:
    """Fixed Big-Five-style trait profile used to flavour generated replies."""

    def __init__(self, char_name: str):
        self.name = char_name
        self.traits = {
            "extraversion": 0.7,
            "agreeableness": 0.8,
            "conscientiousness": 0.6,
            "neuroticism": 0.3,
            "openness": 0.7
        }
        self.interests = ["musik", "buku", "film", "travel", "game", "olahraga"]
        self.speaking_style = "casual_friendly"
        self.emotional_state = "neutral"

    def get_personality_modifier(self, base_response: str, user_emotion: str = "neutral") -> str:
        """Wrap the base text with a trait-driven flourish; first match wins."""
        t = self.traits
        if user_emotion == "positive" and t["extraversion"] > 0.7:
            return f"{base_response} πβ¨"
        if user_emotion == "negative" and t["agreeableness"] > 0.7:
            return f"*dengan pengertian* {base_response}"
        if t["neuroticism"] > 0.6:
            return f"*dengan hati-hati* {base_response}"
        if t["openness"] > 0.7:
            return f"{base_response} *penasaran*"
        return base_response
class EmotionalIntelligence:
    """Keyword-lexicon emotion detection plus tiered empathetic phrasing."""

    def __init__(self):
        self.current_emotion = "neutral"
        self.emotion_history = []
        self.empathy_level = 0.8

    def analyze_user_emotion(self, user_input: str) -> str:
        """Return the emotion whose Indonesian/English keywords match most."""
        emotions = {
            "happy": ["senang", "bahagia", "gembira", "suka", "love", "cinta", "sayang", "excited", "wow", "keren", "bagus"],
            "sad": ["sedih", "kecewa", "down", "galau", "hancur", "menangis", "bete", "capek"],
            "angry": ["marah", "kesel", "bete", "jengkel", "sebel", "dongkol", "emosi"],
            "excited": ["excited", "semangat", "antusias", "wow", "asik", "mantap", "keren"],
            "worried": ["khawatir", "cemas", "takut", "nervous", "was-was", "deg-degan"],
            "romantic": ["romantis", "cinta", "sayang", "rindu", "kangen", "mesra"],
            "grateful": ["terima kasih", "thanks", "makasih", "berterima kasih", "syukur"],
            "confused": ["bingung", "ga ngerti", "tidak paham", "gimana", "kok bisa"]
        }
        text = user_input.lower()
        # Track the best-scoring emotion; ties resolve to the earliest entry.
        best_emotion, best_score = "neutral", 0
        for emotion, keywords in emotions.items():
            hits = sum(keyword in text for keyword in keywords)
            if hits > best_score:
                best_emotion, best_score = emotion, hits
        return best_emotion

    def generate_empathetic_response(self, user_emotion: str, base_response: str, relationship_level: int = 50) -> str:
        """Reframe the reply with empathy scaled to relationship closeness.

        Each emotion maps to (high, medium, low) intimacy variants; emotions
        without an entry pass the base response through unchanged.
        """
        variants_by_emotion = {
            "sad": (
                f"*memeluk erat* {base_response} Aku selalu di sini untukmu sayang.",
                f"*memeluk* {base_response} Aku di sini untuk kamu.",
                f"{base_response} Semoga kamu baik-baik saja ya.",
            ),
            "angry": (
                f"*dengan pengertian* {base_response} Cerita sama aku ya, apa yang bikin kamu kesel?",
                f"*dengan sabar* {base_response} Mau cerita kenapa?",
                f"{base_response} Ada yang bisa aku bantu?",
            ),
            "excited": (
                f"*ikut excited banget* {base_response} Aku juga senang banget!",
                f"*ikut semangat* {base_response} Aku juga senang!",
                f"{base_response} Senang deh lihat kamu excited!",
            ),
            "worried": (
                f"*menenangkan dengan lembut* {base_response} Everything will be okay sayang, aku di sini.",
                f"*menenangkan* {base_response} Everything will be okay.",
                f"{base_response} Jangan terlalu khawatir ya.",
            ),
            "romantic": (
                f"*dengan mata berbinar* {base_response} *blush*",
                f"*tersenyum malu* {base_response}",
                f"{base_response} *tersenyum*",
            ),
            "grateful": (
                f"*peluk erat* {base_response} Sama-sama sayang!",
                f"*tersenyum hangat* {base_response} Sama-sama!",
                f"{base_response} Sama-sama ya!",
            ),
        }
        variants = variants_by_emotion.get(user_emotion)
        if variants is None:
            return base_response
        if relationship_level >= 70:
            return variants[0]
        if relationship_level >= 40:
            return variants[1]
        return variants[2]
class CharacterDevelopment:
    """Accumulates per-session experience points and learned topic preferences."""

    def __init__(self):
        self.experience_points = 0
        self.learned_preferences = {}
        self.conversation_style_evolution = "beginner"
        self.topics_discussed = set()

    def learn_from_interaction(self, user_input: str, user_emotion: str = "neutral"):
        """Update preferences, experience, and conversation style from one message.

        Bug fix: the style-evolution check was ordered `> 50` before `> 100`,
        which made the "expert" branch unreachable; the higher threshold is
        now tested first. Also hoists the repeated extract_topic() call.
        """
        input_lower = user_input.lower()
        topic = self.extract_topic(user_input)  # hoisted: was computed up to 3x
        # Remember whether the user liked or disliked the topic they mention.
        if any(word in input_lower for word in ["suka", "love", "senang", "bagus", "keren"]):
            self.learned_preferences[topic] = "positive"
        elif any(word in input_lower for word in ["bosan", "tidak suka", "ga suka", "jelek"]):
            self.learned_preferences[topic] = "negative"
        self.experience_points += 1
        self.topics_discussed.add(topic)
        # Evolution of conversation style — highest threshold first.
        if self.experience_points > 100:
            self.conversation_style_evolution = "expert"
        elif self.experience_points > 50:
            self.conversation_style_evolution = "experienced"

    def extract_topic(self, text: str) -> str:
        """Map a message to a coarse topic via Indonesian/English keywords."""
        topics = {
            "musik": ["musik", "lagu", "song", "band", "singer", "nyanyi"],
            "film": ["film", "movie", "cinema", "bioskop", "actor", "actress"],
            "buku": ["buku", "book", "novel", "cerita", "bacaan", "baca"],
            "game": ["game", "gaming", "main", "bermain", "play"],
            "olahraga": ["olahraga", "sport", "gym", "fitness", "lari", "futsal"],
            "makanan": ["makanan", "makan", "food", "masak", "kuliner", "resep"],
            "travel": ["travel", "jalan-jalan", "liburan", "wisata", "vacation"],
            "study": ["belajar", "study", "sekolah", "kuliah", "ujian", "tugas"],
            "work": ["kerja", "work", "job", "kantor", "meeting", "project"]
        }
        text_lower = text.lower()
        for topic, keywords in topics.items():
            if any(keyword in text_lower for keyword in keywords):
                return topic
        return "general"

    def get_conversation_enhancement(self, base_response: str) -> str:
        """Append a flourish reflecting how evolved the conversation style is."""
        if self.conversation_style_evolution == "expert":
            return f"{base_response} *dengan pengalaman yang dalam*"
        elif self.conversation_style_evolution == "experienced":
            return f"{base_response} *dengan pemahaman yang baik*"
        return base_response
class RoleplayActions:
    """Prefixes replies with a roleplay action cue chosen by emotion/closeness."""

    def __init__(self):
        self.actions = {
            "physical": ["*memeluk*", "*mengelus kepala*", "*memegang tangan*", "*tersenyum lembut*", "*membelai pipi*"],
            "emotional": ["*dengan lembut*", "*penuh perhatian*", "*dengan hangat*", "*dengan cinta*", "*tulus*"],
            "environmental": ["*melihat sekeliling*", "*menunjuk ke arah*", "*duduk lebih dekat*", "*bersandar*"],
            "playful": ["*tersenyum jahil*", "*menggoda*", "*mata berbinar*", "*tertawa kecil*", "*wink*"],
            "caring": ["*dengan perhatian*", "*mengkhawatirkan*", "*protective*", "*menenangkan*"]
        }

    def add_action_to_response(self, response: str, emotion: str, relationship_level: int) -> str:
        """Prepend a random action cue matching the emotion, gated by rapport."""
        # Expressive cues are withheld until some rapport exists.
        if relationship_level < 30:
            return response
        if emotion == "romantic":
            # Physical gestures additionally require a close relationship.
            if relationship_level < 60:
                return response
            pool = self.actions["physical"]
        elif emotion == "caring":
            pool = self.actions["caring"]
        elif emotion in ("happy", "excited"):
            pool = self.actions["playful"]
        elif emotion in ("sad", "worried"):
            pool = self.actions["emotional"]
        else:
            return response
        return f"{random.choice(pool)} {response}"
# Advanced Scenarios System
# Scenario presets: each entry bundles candidate locations/moods/activities
# plus mood-keyed "{response}" templates for decorating replies.
# NOTE(review): not referenced by any code visible in this file — confirm it
# is consumed elsewhere (e.g. by the frontend or another module).
ADVANCED_SCENARIOS = {
    "dating": {
        "locations": ["cafΓ©", "taman", "bioskop", "restoran", "mall"],
        "moods": ["nervous", "excited", "romantic", "playful"],
        "activities": ["ngobrol", "makan", "jalan-jalan", "nonton film"],
        "response_modifiers": {
            "nervous": "*agak gugup* {response}",
            "excited": "*mata berbinar* {response}",
            "romantic": "*dengan lembut* {response}",
            "playful": "*tersenyum jahil* {response}"
        }
    },
    "friendship": {
        "locations": ["rumah", "sekolah", "mall", "taman", "cafΓ©"],
        "moods": ["happy", "supportive", "worried", "excited"],
        "activities": ["belajar", "main game", "gosip", "planning"],
        "response_modifiers": {
            "supportive": "*dengan tulus* {response}",
            "worried": "*dengan perhatian* {response}",
            "happy": "*dengan ceria* {response}",
            "excited": "*antusias* {response}"
        }
    },
    "romantic": {
        "locations": ["taman", "cafΓ©", "rumah", "pantai", "rooftop"],
        "moods": ["intimate", "loving", "tender", "passionate"],
        "activities": ["mengobrol intim", "berpelukan", "melihat sunset", "mendengar musik"],
        "response_modifiers": {
            "intimate": "*berbisik lembut* {response}",
            "loving": "*dengan penuh cinta* {response}",
            "tender": "*sangat lembut* {response}",
            "passionate": "*dengan intens* {response}"
        }
    }
}
# CPU-Optimized 11 models configuration.
# Keys are the ids accepted by ChatRequest.model; each entry carries the HF
# hub path, the transformers pipeline task, a generation token budget
# (0 for classification models, which generate nothing), and a load priority
# (1 = lightest/fastest on CPU, 3 = heaviest).
MODELS = {
    "distil-gpt-2": {
        "name": "DistilGPT-2 β‘",
        "model_path": "Lyon28/Distil_GPT-2",
        "task": "text-generation",
        "max_tokens": 35,
        "priority": 1
    },
    "gpt-2-tinny": {
        "name": "GPT-2 Tinny β‘",
        "model_path": "Lyon28/GPT-2-Tinny",
        "task": "text-generation",
        "max_tokens": 30,
        "priority": 1
    },
    "bert-tinny": {
        "name": "BERT Tinny π",
        "model_path": "Lyon28/Bert-Tinny",
        "task": "text-classification",
        "max_tokens": 0,
        "priority": 1
    },
    "distilbert-base-uncased": {
        "name": "DistilBERT π",
        "model_path": "Lyon28/Distilbert-Base-Uncased",
        "task": "text-classification",
        "max_tokens": 0,
        "priority": 1
    },
    "albert-base-v2": {
        "name": "ALBERT Base π",
        "model_path": "Lyon28/Albert-Base-V2",
        "task": "text-classification",
        "max_tokens": 0,
        "priority": 2
    },
    "electra-small": {
        "name": "ELECTRA Small π",
        "model_path": "Lyon28/Electra-Small",
        "task": "text-classification",
        "max_tokens": 0,
        "priority": 2
    },
    "t5-small": {
        "name": "T5 Small π",
        "model_path": "Lyon28/T5-Small",
        "task": "text2text-generation",
        "max_tokens": 40,
        "priority": 2
    },
    "gpt-2": {
        "name": "GPT-2 Standard",
        "model_path": "Lyon28/GPT-2",
        "task": "text-generation",
        "max_tokens": 45,
        "priority": 2
    },
    "tinny-llama": {
        "name": "Tinny Llama",
        "model_path": "Lyon28/Tinny-Llama",
        "task": "text-generation",
        "max_tokens": 50,
        "priority": 3
    },
    "pythia": {
        "name": "Pythia",
        "model_path": "Lyon28/Pythia",
        "task": "text-generation",
        "max_tokens": 50,
        "priority": 3
    },
    "gpt-neo": {
        "name": "GPT-Neo",
        "model_path": "Lyon28/GPT-Neo",
        "task": "text-generation",
        "max_tokens": 55,
        "priority": 3
    }
}
class ChatRequest(BaseModel):
    """Request payload for the chat endpoint."""
    message: str
    model: Optional[str] = "distil-gpt-2"  # key into MODELS; unknown ids fall back to the default
    situation: Optional[str] = "Santai"    # scene mood, e.g. "Santai" / "Romantis"
    location: Optional[str] = "Ruang tamu"
    char_name: Optional[str] = "Sayang"
    user_name: Optional[str] = "Kamu"
    max_length: Optional[int] = 150        # upper bound fed into generation length
    session_id: Optional[str] = "default"  # selects the per-session memory/personality state
# Global in-memory stores for the enhanced roleplay systems, keyed by
# session_id (state is lost on process restart).
conversation_memories = {}
character_personalities = {}
character_developments = {}
emotional_systems = {}
# Stateless helper shared by all sessions.
roleplay_actions = RoleplayActions()
# Character AI Response Templates — canned reply patterns per speaking style;
# "{context}" is the slot for situational text. The keys are exposed via the
# configuration endpoint (get_config).
CHARACTER_TEMPLATES = {
    "romantic": [
        "iya sayang, {context}. Apakah kamu merasa nyaman di sini?",
        "tentu saja, {context}. Aku senang bisa bersama kamu seperti ini.",
        "benar sekali, {context}. Rasanya damai ya berada di sini bersama.",
        "hmm iya, {context}. Kamu selalu membuatku merasa bahagia.",
        "ya sayang, {context}. Momen seperti ini sangat berharga untukku."
    ],
    "casual": [
        "iya, {context}. Suasananya memang enak banget.",
        "betul juga, {context}. Aku juga merasa santai di sini.",
        "ya ampun, {context}. Seneng deh bisa kayak gini.",
        "hmm iya, {context}. Bikin pikiran jadi tenang.",
        "benar banget, {context}. Cocok buat santai-santai."
    ],
    "caring": [
        "iya, {context}. Kamu baik-baik saja kan?",
        "ya, {context}. Semoga kamu merasa nyaman.",
        "betul, {context}. Aku harap kamu senang.",
        "hmm, {context}. Apakah kamu butuh sesuatu?",
        "iya sayang, {context}. Jangan sungkan bilang kalau butuh apa-apa."
    ],
    "friendly": [
        "wah iya, {context}. Keren banget ya!",
        "bener tuh, {context}. Asik banget suasananya.",
        "iya dong, {context}. Mantep deh!",
        "setuju banget, {context}. Bikin happy.",
        "ya ampun, {context}. Seru banget ini!"
    ]
}
def create_character_prompt(user_input: str, situation: str, location: str, char_name: str, user_name: str) -> str:
    """Build the roleplay prompt fed to a generation model.

    Replaces the "{{User}}"/"{{Char}}" template placeholders with the
    configured names and ends the prompt with the character's turn marker so
    the model continues in character.
    """
    sanitized = user_input.replace("{{User}}", user_name)
    sanitized = sanitized.replace("{{Char}}", char_name)
    return f"""Kamu adalah {char_name}, karakter AI yang sedang ngobrol dengan {user_name}.
Konteks:
- Situasi: {situation}
- Lokasi: {location}
- Gaya bicara: Casual, natural, seperti teman dekat
- Gunakan bahasa Indonesia yang santai dan natural
Percakapan:
{user_name}: {sanitized}
{char_name}:"""
def analyze_user_intent(user_input: str) -> dict:
    """Classify a message into a coarse intent / emotion / topic triple.

    Checks run in a fixed order and later matches deliberately override
    earlier ones (e.g. a greeting beats a plain question).
    """
    text = user_input.lower()

    def mentions(words):
        return any(w in text for w in words)

    intent, emotion, topic = "general", "neutral", "general"
    # Question detection (question word or explicit "?")
    if "?" in user_input or mentions(["apa", "siapa", "kapan", "dimana", "mengapa", "kenapa", "bagaimana", "gimana"]):
        intent = "question"
    # Greeting detection
    if mentions(["halo", "hai", "selamat", "apa kabar", "gimana", "bagaimana kabar"]):
        intent, topic = "greeting", "greeting"
    # Compliment detection (also implies positive emotion)
    if mentions(["cantik", "bagus", "keren", "indah", "hebat", "pintar", "baik"]):
        intent, emotion, topic = "compliment", "positive", "compliment"
    # "What are you doing"-style activity questions
    if mentions(["lagi ngapain", "sedang apa", "aktivitas", "kegiatan"]):
        intent, topic = "question", "activity"
    # Emotion keywords override whatever the intent checks set
    if mentions(["senang", "bahagia", "suka", "cinta", "sayang", "happy"]):
        emotion = "positive"
    elif mentions(["sedih", "marah", "kesal", "bosan", "lelah"]):
        emotion = "negative"
    return {
        "intent": intent,
        "emotion": emotion,
        "topic": topic,
        "has_question": intent == "question",
    }
def generate_contextual_response(user_input: str, char_name: str, user_name: str, situation: str, location: str) -> str:
    """Pick a canned, situation-aware reply for the analysed user intent.

    Used as the fallback when model output is missing or low quality; one
    line is sampled at random from the pool of the first matching branch.
    """
    analysis = analyze_user_intent(user_input)
    scene = situation.lower()
    is_romantic = "romantis" in scene

    if analysis["intent"] == "greeting":
        if is_romantic:
            pool = [
                f"Hai sayang {user_name}! Senang sekali kamu di sini.",
                f"Halo {user_name}, sudah lama aku menunggu kamu.",
                f"Hai {user_name}, suasana jadi lebih hangat dengan kehadiranmu."
            ]
        else:
            pool = [
                f"Hai {user_name}! Gimana kabarnya hari ini?",
                f"Halo {user_name}! Senang banget ketemu kamu.",
                f"Hai {user_name}! Apa kabar? Semoga baik-baik saja ya."
            ]
        return random.choice(pool)

    if analysis["intent"] == "compliment":
        return random.choice([
            f"Wah, makasih {user_name}! Kamu juga luar biasa kok.",
            f"Hihi, {user_name} baik banget sih! Kamu yang lebih keren.",
            f"Terima kasih {user_name}, kata-katamu bikin aku senang."
        ])

    if analysis["topic"] == "activity":
        if is_romantic:
            pool = [
                f"Lagi menikmati momen indah ini bersama {user_name}.",
                f"Sedang merasakan kehangatan di {location.lower()} ini, apalagi ada {user_name}.",
                f"Lagi menikmati suasana romantis di sini, jadi lebih spesial karena ada kamu."
            ]
        else:
            pool = [
                f"Lagi santai-santai aja {user_name}, sambil ngobrol sama kamu.",
                f"Sedang menikmati suasana {situation.lower()} di {location.lower()} ini.",
                f"Ga ngapa-ngapain khusus, cuma senang bisa ngobrol sama {user_name}."
            ]
        return random.choice(pool)

    if analysis["emotion"] == "positive":
        if is_romantic:
            pool = [
                f"Aku juga merasakan hal yang sama {user_name}. Momen ini sangat berharga.",
                f"Iya sayang, perasaan bahagia ini terasa nyata bersamamu.",
                f"Betul {user_name}, suasana seperti ini membuatku sangat senang."
            ]
        else:
            pool = [
                f"Aku juga senang {user_name}! Energi positifmu menular ke aku.",
                f"Wah iya {user_name}, mood kamu bikin aku ikut happy!",
                f"Setuju banget {user_name}! Suasana jadi lebih ceria."
            ]
        return random.choice(pool)

    if analysis["emotion"] == "negative":
        return random.choice([
            f"Hey {user_name}, aku di sini untuk kamu. Mau cerita?",
            f"Aku bisa merasakan perasaanmu {user_name}. Semoga aku bisa membantu.",
            f"Tenang {user_name}, everything will be okay. Aku akan menemanimu."
        ])

    if analysis["has_question"]:
        # Generic answer openers for questions with no special handling.
        return random.choice([
            f"Hmm, pertanyaan menarik {user_name}. Menurut aku...",
            f"Wah {user_name}, kamu selalu punya pertanyaan yang bagus.",
            f"Itu pertanyaan yang bagus {user_name}. Aku pikir..."
        ])

    # Default replies keyed on the scene description.
    if is_romantic:
        pool = [
            f"Iya sayang {user_name}, aku merasakan hal yang sama.",
            f"Betul {user_name}, momen di {location.lower()} ini sangat spesial.",
            f"Hmm {user_name}, suasana romantis seperti ini memang luar biasa."
        ]
    elif "santai" in scene:
        pool = [
            f"Iya {user_name}, suasana santai di {location.lower()} ini enak banget.",
            f"Betul {user_name}, rasanya rileks banget di sini.",
            f"Setuju {user_name}, perfect untuk bersantai."
        ]
    else:
        pool = [
            f"Iya {user_name}, setuju banget dengan kamu.",
            f"Betul {user_name}, pemikiranmu menarik.",
            f"Hmm {user_name}, kamu selalu punya perspektif yang bagus."
        ]
    return random.choice(pool)
def enhance_character_response(response: str, char_name: str, user_name: str, situation: str, user_input: str, location: str = "ruang tamu") -> str:
    """Clean raw model output and enforce basic character-reply quality.

    Strips echoed prompt fragments, falls back to generate_contextual_response
    when the text is empty or junk, inserts the user's name when missing,
    trims over-long replies, and guarantees terminal punctuation.
    """
    if not response:
        response = ""
    response = response.strip()
    # Strip unwanted speaker/label prefixes echoed from the prompt.
    # NOTE(review): char_name/user_name are interpolated into the regex
    # unescaped — names containing regex metacharacters would misbehave;
    # confirm callers only pass plain words.
    response = re.sub(f'^{char_name}[:.]?\\s*', '', response, flags=re.IGNORECASE)
    response = re.sub(f'^{user_name}[:.]?\\s*', '', response, flags=re.IGNORECASE)
    response = re.sub(r'^(Situasi|Latar|Konteks)[:.]?.*?\n', '', response, flags=re.MULTILINE | re.IGNORECASE)
    response = re.sub(r'Percakapan:.*?\n.*?:', '', response, flags=re.DOTALL | re.IGNORECASE)
    # Collapse newlines and extra whitespace into single spaces.
    response = re.sub(r'\n+', ' ', response)
    response = re.sub(r'\s+', ' ', response)
    response = response.strip()
    # If the response is empty or too short, use the contextual generator.
    if not response or len(response.strip()) < 5:
        response = generate_contextual_response(user_input, char_name, user_name, situation, location)
    else:
        # Clean up and repair the existing response.
        # Drop stray leading symbols.
        response = re.sub(r'^[^\w\s]+', '', response)
        # Ensure the reply starts with a capital letter.
        if response and response[0].islower():
            response = response[0].upper() + response[1:]
        # Add a personal touch when the user's name is missing and the reply is short.
        if user_name.lower() not in response.lower() and len(response) < 60:
            # Insert the name naturally after a common opening word.
            if response.startswith(("Iya", "Ya", "Benar", "Betul")):
                response = response.replace("Iya", f"Iya {user_name}", 1)
                response = response.replace("Ya", f"Ya {user_name}", 1)
                response = response.replace("Benar", f"Benar {user_name}", 1)
                response = response.replace("Betul", f"Betul {user_name}", 1)
            elif len(response.split()) < 8:
                response = f"{response} {user_name}."
    # Quality gate: replace degenerate output with a contextual reply.
    bad_patterns = [
        r'^[^a-zA-Z]*$',  # symbols only
        r'^(.)\1{4,}',  # one character repeated 5+ times
        r'lorem ipsum',  # placeholder text
        r'^[0-9\s\.\,\!\?\-]+$'  # only digits and punctuation
    ]
    for pattern in bad_patterns:
        if re.search(pattern, response, re.IGNORECASE):
            response = generate_contextual_response(user_input, char_name, user_name, situation, location)
            break
    # Keep the reply short: first sentence, or at most 15 words.
    if len(response) > 120:
        sentences = response.split('.')
        if len(sentences) > 1:
            response = sentences[0] + '.'
        else:
            words = response.split()
            if len(words) > 15:
                response = ' '.join(words[:15]) + '.'
    # Guarantee terminal punctuation matched to the user's intent/emotion.
    if response and not any(punct in response[-1] for punct in ['.', '!', '?']):
        analysis = analyze_user_intent(user_input)
        if analysis["has_question"]:
            response += "?"
        elif analysis["emotion"] == "positive":
            response += "!"
        else:
            response += "."
    return response
# CPU-Optimized startup
# NOTE(review): no @app.on_event("startup") decorator is visible here —
# presumably lost in extraction; confirm this runs at application startup.
async def load_models():
    """Prepare empty pipeline/tokenizer registries and tune CPU settings.

    Models themselves are lazy-loaded on first request (see enhanced_chat);
    this only caps thread counts and points the HF cache at /tmp.
    """
    app.state.pipelines = {}
    app.state.tokenizers = {}
    # Limit math-library threading — small models gain little from more threads.
    torch.set_num_threads(2)
    os.environ['OMP_NUM_THREADS'] = '2'
    os.environ['MKL_NUM_THREADS'] = '2'
    os.environ['NUMEXPR_NUM_THREADS'] = '2'
    # Redirect the Hugging Face cache to a writable tmp location.
    os.environ['HF_HOME'] = '/tmp/.cache/huggingface'
    os.environ['TRANSFORMERS_CACHE'] = '/tmp/.cache/huggingface'
    os.makedirs(os.environ['HF_HOME'], exist_ok=True)
    print("π Character AI Backend - CPU Optimized Ready!")
# Enhanced Chat API for Character AI with Advanced Roleplay
# NOTE(review): no @app.post(...) decorator is visible here — presumably lost
# in extraction; confirm how this handler is registered as an endpoint.
async def enhanced_chat(request: ChatRequest):
    """Generate one roleplay reply for a chat turn.

    Lazily loads the requested model pipeline, builds a context-aware prompt
    from the session's memory, post-processes the raw output through the
    personality/empathy/action layers, records the interaction, and returns
    a JSON-serializable dict. On any failure, returns a personality-aware
    fallback with status "error" instead of raising.

    Bug fixes vs. original: the two bare `except:` clauses (which also
    swallowed SystemExit/KeyboardInterrupt) now catch Exception only, and
    `result` is initialised so an unmatched task cannot raise NameError.
    """
    start_time = time.time()
    try:
        # Initialize or fetch the enhanced systems for this session.
        session_id = request.session_id
        if session_id not in conversation_memories:
            conversation_memories[session_id] = ConversationMemory()
            character_personalities[session_id] = CharacterPersonality(request.char_name)
            character_developments[session_id] = CharacterDevelopment()
            emotional_systems[session_id] = EmotionalIntelligence()
        memory = conversation_memories[session_id]
        personality = character_personalities[session_id]
        character_dev = character_developments[session_id]
        emotional_ai = emotional_systems[session_id]
        # Analyze user emotion and gather conversational context.
        user_emotion = emotional_ai.analyze_user_emotion(request.message)
        recent_context = memory.get_recent_context(turns=3)
        relationship_status = memory.get_relationship_status()
        # Resolve the model id, falling back to the lightest default.
        model_id = request.model.lower()
        if model_id not in MODELS:
            model_id = "distil-gpt-2"
        model_config = MODELS[model_id]
        # Lazy-load the pipeline on first use (CPU-only, fp32).
        if model_id not in app.state.pipelines:
            print(f"π Loading Character Model {model_config['name']}...")
            pipeline_kwargs = {
                "task": model_config["task"],
                "model": model_config["model_path"],
                "device": -1,
                "torch_dtype": torch.float32,
                "model_kwargs": {
                    "torchscript": False,
                    "low_cpu_mem_usage": True
                }
            }
            app.state.pipelines[model_id] = pipeline(**pipeline_kwargs)
            gc.collect()
        pipe = app.state.pipelines[model_id]
        # Build the enhanced character prompt with memory-derived context.
        context_info = ""
        if recent_context:
            context_info = f"\nPercakapan sebelumnya: {recent_context[-1]['user']} -> {recent_context[-1]['character']}"
        relationship_info = f"\nHubungan: {relationship_status} (level: {memory.relationship_level})"
        emotion_info = f"\nEmosi user: {user_emotion}"
        char_prompt = f"""Kamu adalah {request.char_name}, karakter AI yang sedang ngobrol dengan {request.user_name}.
Konteks:
- Situasi: {request.situation}
- Lokasi: {request.location}
- Gaya bicara: Casual, natural, seperti teman dekat{relationship_info}{emotion_info}{context_info}
- Pengalaman bersama: {character_dev.experience_points} interaksi
- Minat yang diketahui: {list(character_dev.learned_preferences.keys())}
Respon sebagai {request.char_name} yang memahami konteks dan emosi {request.user_name}:
{request.user_name}: {request.message}
{request.char_name}:"""
        result = ""  # robustness: defined even if no task branch matches
        if model_config["task"] == "text-generation":
            # Sampled generation tuned for short, in-character replies.
            result = pipe(
                char_prompt,
                max_length=min(len(char_prompt.split()) + model_config["max_tokens"], request.max_length // 2),
                temperature=0.7,
                do_sample=True,
                top_p=0.8,
                top_k=40,
                repetition_penalty=1.2,
                pad_token_id=pipe.tokenizer.eos_token_id,
                num_return_sequences=1,
                early_stopping=True,
                no_repeat_ngram_size=3
            )[0]['generated_text']
            # Strip the echoed prompt so only the character's reply remains.
            if char_prompt in result:
                result = result[len(char_prompt):].strip()
            # Clean and enhance the reply through the layered systems:
            # cleanup -> personality -> empathy -> roleplay action -> development.
            base_clean = enhance_character_response(result, request.char_name, request.user_name, request.situation, request.message, request.location)
            personality_enhanced = personality.get_personality_modifier(base_clean, user_emotion)
            empathy_enhanced = emotional_ai.generate_empathetic_response(user_emotion, personality_enhanced, memory.relationship_level)
            action_enhanced = roleplay_actions.add_action_to_response(empathy_enhanced, user_emotion, memory.relationship_level)
            result = character_dev.get_conversation_enhancement(action_enhanced)
        elif model_config["task"] == "text-classification":
            # Classification models cannot generate; map their confidence
            # score onto canned emotion-based replies instead.
            try:
                output = pipe(request.message, truncation=True, max_length=128)[0]
                emotion_score = output['score']
                if emotion_score > 0.8:
                    emotion_responses = [
                        f"iya {request.user_name}, aku merasakan energi positif dari kata-katamu!",
                        f"wah, {request.user_name} terlihat sangat antusias ya!",
                        f"senang banget deh lihat {request.user_name} kayak gini!"
                    ]
                elif emotion_score > 0.6:
                    emotion_responses = [
                        f"hmm, aku bisa merasakan perasaan {request.user_name} nih.",
                        f"ya {request.user_name}, suasana hatimu cukup bagus ya.",
                        f"oke {request.user_name}, kayaknya kamu dalam mood yang baik."
                    ]
                else:
                    emotion_responses = [
                        f"iya {request.user_name}, aku di sini untuk kamu.",
                        f"hmm {request.user_name}, mau cerita lebih lanjut?",
                        f"baiklah {request.user_name}, aku mendengarkan."
                    ]
                result = random.choice(emotion_responses)
            except Exception:  # was a bare `except:` — do not trap SystemExit/KeyboardInterrupt
                result = generate_contextual_response(request.message, request.char_name, request.user_name, request.situation, request.location)
        elif model_config["task"] == "text2text-generation":
            # T5-style models take an instruction-formatted input.
            try:
                t5_input = f"respond as {request.char_name} in {request.situation}: {request.message}"
                result = pipe(
                    t5_input,
                    max_length=model_config["max_tokens"],
                    temperature=0.7,
                    early_stopping=True
                )[0]['generated_text']
                result = enhance_character_response(result, request.char_name, request.user_name, request.situation, request.message, request.location)
            except Exception:  # was a bare `except:`
                result = generate_contextual_response(request.message, request.char_name, request.user_name, request.situation, request.location)
        # Final validation: fall back to a contextual reply, still run through
        # the personality/empathy/action layers.
        if not result or len(result.strip()) < 3:
            base_fallback = generate_contextual_response(request.message, request.char_name, request.user_name, request.situation, request.location)
            personality_fallback = personality.get_personality_modifier(base_fallback, user_emotion)
            empathy_fallback = emotional_ai.generate_empathetic_response(user_emotion, personality_fallback, memory.relationship_level)
            result = roleplay_actions.add_action_to_response(empathy_fallback, user_emotion, memory.relationship_level)
        # Learn from this interaction and persist it in session memory.
        character_dev.learn_from_interaction(request.message, user_emotion)
        topic = character_dev.extract_topic(request.message)
        memory.add_interaction(request.message, result, user_emotion, topic)
        processing_time = round((time.time() - start_time) * 1000)
        return {
            "response": result,
            "model": model_config["name"],
            "status": "success",
            "processing_time": f"{processing_time}ms",
            "character": request.char_name,
            "situation": request.situation,
            "location": request.location,
            "enhanced_features": {
                "user_emotion": user_emotion,
                "relationship_level": memory.relationship_level,
                "relationship_status": relationship_status,
                "experience_points": character_dev.experience_points,
                "conversation_style": character_dev.conversation_style_evolution,
                "learned_preferences": character_dev.learned_preferences
            }
        }
    except Exception as e:
        print(f"β Character AI Error: {e}")
        processing_time = round((time.time() - start_time) * 1000)
        # Error fallback, still personality/emotion aware when the session
        # systems already exist.
        session_id = request.session_id
        if session_id in character_personalities:
            personality = character_personalities[session_id]
            emotional_ai = emotional_systems[session_id]
            memory = conversation_memories[session_id]
            user_emotion = emotional_ai.analyze_user_emotion(request.message)
            base_fallbacks = [
                f"maaf {request.user_name}, aku sedang bingung. Bisa ulangi lagi?",
                f"hmm {request.user_name}, kayaknya aku butuh waktu sebentar untuk berpikir.",
                f"ya {request.user_name}, coba pakai kata yang lebih sederhana?",
                f"iya {request.user_name}, aku masih belajar nih. Sabar ya."
            ]
            base_fallback = random.choice(base_fallbacks)
            personality_fallback = personality.get_personality_modifier(base_fallback, user_emotion)
            fallback = emotional_ai.generate_empathetic_response(user_emotion, personality_fallback, memory.relationship_level)
        else:
            fallback = f"maaf {request.user_name}, aku sedang bingung. Bisa ulangi lagi?"
        return {
            "response": fallback,
            "status": "error",
            "processing_time": f"{processing_time}ms",
            "character": request.char_name
        }
# Health check endpoint
async def health():
    """Report backend liveness and how many model pipelines are resident."""
    loaded = 0
    if hasattr(app.state, 'pipelines'):
        loaded = len(app.state.pipelines)
    return {
        "status": "healthy",
        "platform": "CPU",
        "loaded_models": loaded,
        "total_models": len(MODELS),
        "optimization": "Character AI CPU-Tuned",
        "backend_version": "1.0.0"
    }
# Model info endpoint
async def get_models():
    """List every configured model together with roleplay/analysis recommendations."""
    catalog = []
    for model_id, cfg in MODELS.items():
        catalog.append({
            "id": model_id,
            "name": cfg["name"],
            "task": cfg["task"],
            "max_tokens": cfg["max_tokens"],
            "priority": cfg["priority"],
            "cpu_optimized": True,
            "character_ai_ready": True,
        })
    return {
        "models": catalog,
        "platform": "CPU",
        "recommended_for_roleplay": ["distil-gpt-2", "gpt-2", "gpt-neo", "tinny-llama"],
        "recommended_for_analysis": ["bert-tinny", "distilbert-base-uncased", "albert-base-v2"],
    }
# Configuration endpoint
async def get_config():
    """Expose the frontend defaults and available character templates."""
    frontend_defaults = {
        "default_situation": "Santai",
        "default_location": "Ruang tamu",
        "default_char_name": "Sayang",
        "default_user_name": "Kamu",
        "max_response_length": 300,
        "min_response_length": 50,
        "supported_languages": ["id", "en"],
        "character_templates": list(CHARACTER_TEMPLATES.keys()),
    }
    return frontend_defaults
# Compatibility inference endpoint
async def inference(request: dict):
    """CPU-optimized inference endpoint kept for backwards compatibility.

    Accepts a loose dict payload (``message``, ``model`` as a HF-style
    "Owner/Model_Name" path, plus optional scenario fields), normalizes the
    model path to an internal id, and delegates to the main ``chat`` handler.
    Returns a legacy-shaped response with ``result``/``status`` keys.
    """
    try:
        message = request.get("message", "")
        model_path = request.get("model", "Lyon28/Distil_GPT-2")
        # Normalize "Owner/Model_Name" -> "model-name" to match internal ids.
        model_key = model_path.split("/")[-1].lower().replace("_", "-")
        # The original code used an identity dict here (every key mapped to
        # itself); a set membership check expresses the same fallback intent.
        known_models = {
            "distil-gpt-2", "gpt-2-tinny", "bert-tinny",
            "distilbert-base-uncased", "albert-base-v2", "electra-small",
            "t5-small", "gpt-2", "tinny-llama", "pythia", "gpt-neo",
        }
        # Unknown models fall back to the lightest generator.
        internal_model = model_key if model_key in known_models else "distil-gpt-2"
        # Build the typed request expected by the main chat handler.
        chat_request = ChatRequest(
            message=message,
            model=internal_model,
            situation=request.get("situation", "Santai"),
            location=request.get("location", "Ruang tamu"),
            char_name=request.get("char_name", "Sayang"),
            user_name=request.get("user_name", "Kamu")
        )
        result = await chat(chat_request)
        return {
            "result": result["response"],
            "status": "success",
            "model_used": result["model"],
            "processing_time": result.get("processing_time", "0ms"),
            "character_info": {
                "name": result.get("character", "Character"),
                "situation": result.get("situation", "Unknown"),
                "location": result.get("location", "Unknown")
            }
        }
    except Exception as e:
        print(f"β Inference Error: {e}")
        return {
            "result": "π Character sedang bersiap, coba lagi sebentar...",
            "status": "error"
        }
# Serve HTML frontend
async def serve_frontend():
    """Serve index.html, or a minimal 404 page when the file is missing."""
    try:
        with open("index.html", "r", encoding="utf-8") as fh:
            page = fh.read()
    except FileNotFoundError:
        return HTMLResponse(content="<h1>Frontend not found</h1>", status_code=404)
    return HTMLResponse(content=page, status_code=200)
# Enhanced features endpoints
async def get_conversation_memory(session_id: str):
    """Get conversation memory for a session"""
    # Guard clause: unknown sessions produce an error payload, not a raise.
    memory = conversation_memories.get(session_id)
    if memory is None:
        return {"error": "Session not found"}
    return {
        "session_id": session_id,
        "relationship_level": memory.relationship_level,
        "relationship_status": memory.get_relationship_status(),
        "conversation_count": len(memory.history),
        "recent_interactions": memory.get_recent_context(5),
    }
async def get_character_personality(session_id: str):
    """Return the stored personality and development profile for a session."""
    # Guard clause: unknown sessions produce an error payload, not a raise.
    personality = character_personalities.get(session_id)
    if personality is None:
        return {"error": "Session not found"}
    character_dev = character_developments[session_id]
    profile = {
        "session_id": session_id,
        "character_name": personality.name,
        "personality_traits": personality.traits,
        "interests": personality.interests,
        "speaking_style": personality.speaking_style,
        "experience_points": character_dev.experience_points,
        "conversation_style": character_dev.conversation_style_evolution,
        "learned_preferences": character_dev.learned_preferences,
        "topics_discussed": list(character_dev.topics_discussed),
    }
    return profile
async def reset_session(session_id: str):
    """Reset all data for a session.

    Drops the session from every per-session registry that contains it and
    reports which subsystems were actually cleared. Replaces four
    copy-pasted ``if/del/append`` blocks with one data-driven loop.
    """
    # Each per-session registry paired with the label reported to the client.
    registries = [
        (conversation_memories, "memory"),
        (character_personalities, "personality"),
        (character_developments, "development"),
        (emotional_systems, "emotional"),
    ]
    removed_systems = []
    for store, label in registries:
        if session_id in store:
            del store[session_id]
            removed_systems.append(label)
    return {
        "message": f"Session {session_id} reset successfully",
        "removed_systems": removed_systems
    }
# Model loading verification
async def verify_all_models():
    """Load and smoke-test every configured model.

    Lazily builds any pipeline not already cached on ``app.state``, runs a
    minimal task-specific smoke test, and returns a per-model report plus
    aggregate stats. Fix: models whose task has no dedicated smoke test
    previously incremented ``successful_loads`` but were silently omitted
    from ``verification_results``; they are now recorded explicitly so the
    report always covers every entry in MODELS.
    """
    verification_results = {}
    total_models = len(MODELS)
    successful_loads = 0
    for model_id, model_config in MODELS.items():
        try:
            print(f"π Verifying {model_config['name']}...")
            # Build the pipeline only if it is not already cached.
            if model_id not in app.state.pipelines:
                pipeline_kwargs = {
                    "task": model_config["task"],
                    "model": model_config["model_path"],
                    "device": -1,  # -1 forces CPU execution
                    "torch_dtype": torch.float32,
                    "model_kwargs": {
                        "torchscript": False,
                        "low_cpu_mem_usage": True
                    }
                }
                app.state.pipelines[model_id] = pipeline(**pipeline_kwargs)
                gc.collect()  # reclaim transient load-time memory
            # Smoke-test with a minimal, task-appropriate input.
            if model_config["task"] == "text-generation":
                test_result = app.state.pipelines[model_id](
                    "Hello",
                    max_length=10,
                    do_sample=False,
                    pad_token_id=app.state.pipelines[model_id].tokenizer.eos_token_id
                )
                verification_results[model_id] = {
                    "status": "β SUCCESS",
                    "name": model_config["name"],
                    "task": model_config["task"],
                    "test_output_length": len(test_result[0]['generated_text'])
                }
            elif model_config["task"] == "text-classification":
                test_result = app.state.pipelines[model_id]("Hello test", truncation=True)
                verification_results[model_id] = {
                    "status": "β SUCCESS",
                    "name": model_config["name"],
                    "task": model_config["task"],
                    "test_score": test_result[0]['score']
                }
            elif model_config["task"] == "text2text-generation":
                test_result = app.state.pipelines[model_id]("translate: Hello", max_length=10)
                verification_results[model_id] = {
                    "status": "β SUCCESS",
                    "name": model_config["name"],
                    "task": model_config["task"],
                    "test_output": test_result[0]['generated_text']
                }
            else:
                # Task types without a dedicated smoke test still loaded
                # successfully; record them so the report is complete.
                verification_results[model_id] = {
                    "status": "β SUCCESS",
                    "name": model_config["name"],
                    "task": model_config["task"],
                    "note": "loaded without smoke test"
                }
            successful_loads += 1
            print(f"β {model_config['name']} verified successfully")
        except Exception as e:
            verification_results[model_id] = {
                "status": "β FAILED",
                "name": model_config["name"],
                "task": model_config["task"],
                "error": str(e)
            }
            print(f"β {model_config['name']} failed: {e}")
    return {
        "total_models": total_models,
        "successful_loads": successful_loads,
        "success_rate": f"{(successful_loads/total_models)*100:.1f}%",
        "results": verification_results,
        "memory_usage": f"{torch.cuda.memory_allocated() / 1024**2:.1f}MB" if torch.cuda.is_available() else "CPU Mode",
        "loaded_pipelines": len(app.state.pipelines)
    }
# API info endpoint
async def api_info():
    """Describe the backend: version, platform, routes, and enhanced features."""
    endpoint_map = {
        "chat": "/chat",
        "models": "/models",
        "health": "/health",
        "config": "/config",
        "inference": "/inference",
        "memory": "/memory/{session_id}",
        "personality": "/personality/{session_id}",
        "reset_session": "/session/{session_id}",
    }
    feature_list = [
        "Conversation Memory",
        "Dynamic Personality",
        "Emotional Intelligence",
        "Character Development",
        "Roleplay Actions",
        "Advanced Scenarios",
        "Relationship Tracking",
    ]
    return {
        "message": "Enhanced Character AI Backend Ready",
        "version": "2.0.0",
        "platform": "CPU Optimized with Advanced Roleplay",
        "endpoints": endpoint_map,
        "enhanced_features": feature_list,
        "frontend_url": "/",
    }
# Entry point: run uvicorn with CPU-friendly server settings.
if __name__ == "__main__":
    server_port = int(os.environ.get("PORT", 7860))
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=server_port,
        workers=1,  # single worker keeps one copy of the models in RAM
        timeout_keep_alive=30,
        access_log=False,
    )