sujalrajpoot committed on
Commit
a9df902
·
verified ·
1 Parent(s): 0f218bf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +95 -25
app.py CHANGED
@@ -1,49 +1,119 @@
1
- from flask import Flask, request, jsonify, Response
2
- from webscout.Provider.Deepinfra import DeepInfra
3
  import os
4
  import json
 
 
 
 
 
5
 
6
- # Initialize Flask app
7
  app = Flask(__name__)
 
 
8
 
9
- # Initialize System Prompt
10
- SYSTEM_PROMPT = os.getenv("SYSTEM_PROMPT")
 
 
 
 
 
 
 
11
 
12
- # Instantiate the DeepInfra model
 
 
13
  BASE_MODEL = DeepInfra(is_conversation=False, update_file=False, system_prompt=SYSTEM_PROMPT)
14
 
15
- @app.route("/")
16
- def index():
17
- return "🚀 TrueSyncAI Customer Support API is running!"
18
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  @app.route("/chat", methods=["POST"])
20
  def chat():
21
  try:
22
- data = request.get_json()
23
- prompt = data.get("prompt", "")
24
- stream = data.get("stream", False)
25
-
26
  if not prompt:
 
27
  return jsonify({"error": "Missing prompt"}), 400
28
 
29
- # If streaming is enabled, return a streaming response
30
- if stream:
31
- def generate():
 
 
32
  for chunk in BASE_MODEL.chat(prompt=prompt, stream=True):
 
33
  yield f"data: {json.dumps({'response': chunk})}\n\n"
34
 
35
- return Response(generate(), mimetype="text/event-stream")
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
- # Non-streaming response
38
- response = BASE_MODEL.chat(prompt=prompt, stream=False)
39
- return jsonify({
40
- "prompt": prompt,
41
- "response": response
42
- })
43
 
 
 
 
 
 
 
 
 
44
  except Exception as e:
45
- return jsonify({"error": str(e)}), 200
46
- # return jsonify({"error": str(e)}), 500
 
 
 
 
 
 
 
47
 
 
48
  if __name__ == "__main__":
49
  app.run(host="0.0.0.0", port=7860)
 
 
 
import os
import json
import logging
import requests
from uuid import uuid4
from flask import Flask, request, Response, jsonify, send_from_directory
from webscout.Provider.Deepinfra import DeepInfra

# -------------------- Flask & Config Setup --------------------
app = Flask(__name__)
AUDIO_DIR = "static/audio"
os.makedirs(AUDIO_DIR, exist_ok=True)  # make sure the TTS output directory exists

# -------------------- Logging Setup --------------------
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    handlers=[logging.StreamHandler()],
)
logger = logging.getLogger("TrueSyncAI")

# -------------------- AI Model Setup --------------------
# Both values come from the environment; either may be None if unset.
SYSTEM_PROMPT = os.getenv("SYSTEM_PROMPT")  # system prompt for the chat model
VOICE = os.getenv("VOICE")                  # voice-style prompt used by the TTS call
BASE_MODEL = DeepInfra(is_conversation=False, update_file=False, system_prompt=SYSTEM_PROMPT)
28
 
 
 
 
29
 
30
# -------------------- TTS Generator --------------------
def generate_tts(text):
    """Synthesize *text* to an MP3 via the openai.fm TTS endpoint.

    On success the audio is written under AUDIO_DIR and the relative URL
    path ("static/audio/<name>.mp3") is returned; on any failure (HTTP
    error, network error) the function logs and returns None so callers
    can degrade gracefully.
    """
    try:
        headers = {
            'sec-ch-ua-platform': '"Windows"',
            'Referer': 'https://www.openai.fm/',
            'sec-ch-ua': '"Microsoft Edge";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'User-Agent': 'Mozilla/5.0',
            'DNT': '1',
            'Range': 'bytes=0-',
        }
        params = {
            'input': text,
            'prompt': VOICE,             # voice-style prompt from the environment
            'voice': 'alloy',
            'generation': str(uuid4()),  # fresh id per request to avoid cached audio
        }

        # Fix: a timeout so a stalled TTS service cannot hang the request
        # thread indefinitely (requests has no default timeout).
        response = requests.get(
            'https://www.openai.fm/api/generate',
            params=params, headers=headers, timeout=30,
        )
        if response.status_code == 200:
            filename = f"{uuid4().hex}.mp3"
            filepath = os.path.join(AUDIO_DIR, filename)
            with open(filepath, 'wb') as f:
                f.write(response.content)
            # Fix: log and return the actual generated filename (was the
            # literal text "(unknown)", so the returned URL never matched
            # the file written to disk).
            logger.info("TTS audio generated: %s", filename)
            return f"static/audio/{filename}"
        else:
            logger.warning("TTS failed with status %s: %s",
                           response.status_code, response.text)
    except Exception:
        logger.exception("TTS generation error")
    return None
62
+
63
+
64
# -------------------- Chat Route --------------------
@app.route("/chat", methods=["POST"])
def chat():
    """Stream an AI chat reply as Server-Sent Events (SSE).

    Expects JSON {"prompt": "..."}. Streams each model text chunk as a
    `data:` event, then emits a final event carrying the full response
    and a TTS audio URL (null when TTS generation failed).

    Returns 400 on a missing/empty prompt or malformed body, 500 on an
    unexpected server error.
    """
    try:
        # Fix: silent=True + `or {}` turns a malformed or non-JSON body
        # into an empty dict, so the client gets a 400 "Missing prompt"
        # instead of a 500 from `None.get(...)`.
        data = request.get_json(force=True, silent=True) or {}
        prompt = str(data.get("prompt", "")).strip()

        if not prompt:
            logger.warning("Missing prompt in request")
            return jsonify({"error": "Missing prompt"}), 400

        # Lazy %-formatting: args are only rendered if the record is emitted.
        logger.info("Received prompt: %.60s...", prompt)

        def generate():
            # Accumulate the full reply so TTS can run on the complete text.
            collected_response = ""
            try:
                for chunk in BASE_MODEL.chat(prompt=prompt, stream=True):
                    collected_response += chunk
                    yield f"data: {json.dumps({'response': chunk})}\n\n"

                # After all chunks are sent, attach the synthesized audio.
                audio_url = generate_tts(collected_response)
                yield f"data: {json.dumps({'done': True, 'full_response': collected_response, 'audio_url': audio_url})}\n\n"

            except Exception:
                logger.exception("Error during streaming chat response")
                # Surface a generic error event; details stay in the server log.
                yield f"data: {json.dumps({'error': 'Streaming error occurred'})}\n\n"

        return Response(generate(), mimetype='text/event-stream')

    except Exception as e:
        logger.exception("Chat endpoint failed")
        return jsonify({"error": str(e)}), 500
96
 
 
 
 
 
 
 
97
 
98
# -------------------- Serve Audio --------------------
@app.route("/static/audio/<filename>")
def serve_audio(filename):
    """Serve a generated TTS audio file with a one-hour client cache.

    Returns 404 (JSON error) when the file is missing or unreadable.
    """
    try:
        # send_from_directory rejects path-traversal attempts in *filename*.
        response = send_from_directory(AUDIO_DIR, filename)
        response.headers['Cache-Control'] = 'public, max-age=3600'
        # Fix: log the requested filename (was the literal text "(unknown)").
        logger.info("Serving audio file: %s", filename)
        return response
    except Exception:
        logger.exception("Failed to serve audio: %s", filename)
        return jsonify({"error": "Audio file not found"}), 404
109
+
110
+
111
# -------------------- Health Check --------------------
@app.route("/")
def index():
    """Health-check endpoint confirming the API process is up."""
    return "🚀 TrueSyncAI Streaming API is live!"
115
+
116
 
117
# -------------------- Run Server --------------------
if __name__ == "__main__":
    # Bind on all interfaces; port 7860 is presumably chosen as the
    # Hugging Face Spaces default — confirm against the deployment config.
    app.run(host="0.0.0.0", port=7860)