|
import os |
|
import json |
|
import logging |
|
import requests |
|
from uuid import uuid4 |
|
from flask import Flask, request, Response, jsonify, send_from_directory |
|
from webscout.Provider.Deepinfra import DeepInfra |
|
|
|
|
|
# Flask application hosting the chat-streaming and audio-serving endpoints.
app = Flask(__name__)

# Directory where generated TTS MP3 files are written and later served from.
AUDIO_DIR = "static/audio"
os.makedirs(AUDIO_DIR, exist_ok=True)

# Console-only logging; the format mirrors "time | level | logger | message".
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    handlers=[
        logging.StreamHandler()
    ]
)
logger = logging.getLogger("TrueSyncAI")

# System prompt for the LLM and the voice-style prompt for TTS, both taken
# from the environment. NOTE(review): either may be None if the env var is
# unset — confirm the deployment always provides them.
SYSTEM_PROMPT = os.getenv("SYSTEM_PROMPT")
VOICE = os.getenv("VOICE")

# Shared LLM client reused across requests; is_conversation=False presumably
# disables per-client chat history — verify against the DeepInfra provider docs.
BASE_MODEL = DeepInfra(is_conversation=False, update_file=False, system_prompt=SYSTEM_PROMPT)
|
|
|
|
|
|
|
def generate_tts(text):
    """Synthesize speech for *text* via the openai.fm TTS endpoint.

    The resulting MP3 is saved under ``AUDIO_DIR`` with a random name.

    Args:
        text: The text to synthesize.

    Returns:
        The relative URL path (``"static/audio/<name>.mp3"``) of the saved
        file, or ``None`` if the request failed or raised.
    """
    try:
        headers = {
            'sec-ch-ua-platform': '"Windows"',
            'Referer': 'https://www.openai.fm/',
            'sec-ch-ua': '"Microsoft Edge";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'User-Agent': 'Mozilla/5.0',
            'DNT': '1',
            'Range': 'bytes=0-',
        }
        params = {
            'input': text,
            'prompt': VOICE,
            'voice': 'alloy',
            # Fresh UUID per call — presumably a cache-buster for the endpoint.
            'generation': str(uuid4()),
        }

        # timeout added: without it a stalled upstream would hang the worker
        # (and the SSE stream that awaits this call) indefinitely.
        response = requests.get(
            'https://www.openai.fm/api/generate',
            params=params,
            headers=headers,
            timeout=60,
        )
        if response.status_code == 200:
            filename = f"{uuid4().hex}.mp3"
            filepath = os.path.join(AUDIO_DIR, filename)
            with open(filepath, 'wb') as f:
                f.write(response.content)
            # Bug fix: original logged/returned the literal "(unknown)"
            # instead of the actual filename, so callers got a dead URL.
            logger.info(f"TTS audio generated: {filename}")
            return f"static/audio/{filename}"
        logger.warning(f"TTS failed with status {response.status_code}: {response.text}")
    except Exception:
        logger.exception("TTS generation error")
    return None
|
|
|
|
|
|
|
@app.route("/chat", methods=["POST"])
def chat():
    """Stream an LLM reply as Server-Sent Events.

    Expects a JSON body ``{"prompt": "..."}``. Each generated chunk is sent
    as its own ``data:`` SSE event; a final event carries ``done``, the full
    response text, and the URL of the synthesized audio (or None).
    """
    try:
        payload = request.get_json(force=True)
        prompt = str(payload.get("prompt", "")).strip()
        if not prompt:
            logger.warning("Missing prompt in request")
            return jsonify({"error": "Missing prompt"}), 400

        logger.info(f"Received prompt: {prompt[:60]}...")

        def generate():
            # Accumulate the streamed pieces so TTS can run on the whole reply.
            full_text = ""
            try:
                for piece in BASE_MODEL.chat(prompt=prompt, stream=True):
                    full_text += piece
                    yield f"data: {json.dumps({'response': piece})}\n\n"

                audio_url = generate_tts(full_text)
                yield f"data: {json.dumps({'done': True, 'full_response': full_text, 'audio_url': audio_url})}\n\n"
            except Exception:
                logger.exception("Error during streaming chat response")
                yield f"data: {json.dumps({'error': 'Streaming error occurred'})}\n\n"

        return Response(generate(), mimetype='text/event-stream')

    except Exception as e:
        logger.exception("Chat endpoint failed")
        return jsonify({"error": str(e)}), 500
|
|
|
|
|
|
|
@app.route("/static/audio/<filename>")
def serve_audio(filename):
    """Serve a previously generated TTS audio file from ``AUDIO_DIR``.

    Args:
        filename: Name of the file inside ``AUDIO_DIR``. ``send_from_directory``
            rejects paths that would escape the directory, so traversal is blocked.

    Returns:
        The file response with a one-hour cache header, or a JSON 404 body
        if the file cannot be served.
    """
    try:
        response = send_from_directory(AUDIO_DIR, filename)
        # Generated files never change once written, so clients may cache them.
        response.headers['Cache-Control'] = 'public, max-age=3600'
        # Bug fix: original logged the literal "(unknown)" instead of the filename.
        logger.info(f"Serving audio file: {filename}")
        return response
    except Exception:
        logger.exception(f"Failed to serve audio: {filename}")
        return jsonify({"error": "Audio file not found"}), 404
|
|
|
|
|
|
|
@app.route("/")
def index():
    """Root endpoint: a plain-text liveness message."""
    banner = "π TrueSyncAI Streaming API is live!"
    return banner
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Development server, reachable on all interfaces; port 7860 is commonly
    # the Hugging Face Spaces default — confirm for this deployment.
    app.run(host="0.0.0.0", port=7860)