Update app.py
app.py CHANGED
@@ -14,6 +14,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
 import PyPDF2
 import traceback
 import os
+pip install spaces
+from spaces import GPU

 import shutil
 from pathlib import Path
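A note on this hunk: "pip install spaces" is a shell command, not Python, so as committed line 17 raises a SyntaxError as soon as app.py is imported; on Hugging Face Spaces the dependency belongs in requirements.txt. The import on line 18 also binds only the name GPU, while the decorators added below are written as @spaces.GPU, which needs the module itself in scope. A minimal sketch of a corrected import block:

# `spaces` goes in requirements.txt; nothing is "pip install"ed from Python code
import PyPDF2
import traceback
import os
import spaces  # provides the @spaces.GPU decorator used below

import shutil
from pathlib import Path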
@@ -50,6 +52,7 @@ model = None
 tokenizer = None
 generation_config = None

+@spaces.GPU
 def test_llm_generation():
     try:
         test_prompt = "Hello, how are you today?"
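For context on what these additions do: on a ZeroGPU Space, @spaces.GPU requests a GPU just before the decorated function runs and releases it when the call returns; code outside decorated functions runs on CPU. A minimal sketch, assuming a ZeroGPU Space with spaces in requirements.txt (the function body is illustrative, not from this app):

import spaces
import torch

@spaces.GPU  # a GPU is attached only while this call is running
def check_cuda() -> str:
    # inside the decorated call, CUDA is available
    return torch.cuda.get_device_name(0)

Longer jobs can request a bigger time slice, e.g. @spaces.GPU(duration=120).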
@@ -67,7 +70,7 @@ def test_llm_generation():
     except Exception as e:
         add_log(f"❌ LLM quick test failed: {e}")

-
+@spaces.GPU
 def initialize_model():
     global model, tokenizer, generation_config

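One thing to double-check here: decorating initialize_model means a GPU allocation is held for the entire weight download and load. The pattern shown in the ZeroGPU docs is to load the model at module import, where .to("cuda") is intercepted and deferred until a GPU is actually attached, and to reserve @spaces.GPU for inference. A hedged sketch of that split (the model id is a placeholder, not the one this Space uses):

import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "gpt2"  # placeholder model id
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.float16)
model.to("cuda")  # deferred on ZeroGPU until a GPU is attached

@spaces.GPU
def generate(prompt: str, max_new_tokens: int = 64) -> str:
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)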
@@ -167,7 +170,7 @@ class PodcastGenerator:
             error_msg = f"❌ PDF extraction failed: {str(e)}"
             add_log(error_msg)
             raise Exception(error_msg)
-
+    @spaces.GPU
     async def postprocess_conversation(self, raw_text: str) -> str:
         """Run LLM again to enforce strict Speaker 1/2 format"""
         prompt = f"""
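It is also worth verifying that @spaces.GPU behaves as expected on async def methods; the documented ZeroGPU examples decorate plain synchronous functions (and generators). If async decoration turns out to be a problem, one option is to keep the GPU-bound work in a small synchronous helper and await it from the async method. A sketch with illustrative names, not taken from the original app:

import asyncio
import spaces

@spaces.GPU
def _format_conversation(raw_text: str) -> str:
    # the synchronous model.generate(...) call would live here
    ...

async def postprocess_conversation(raw_text: str) -> str:
    # run the GPU-decorated sync helper without blocking the event loop
    return await asyncio.to_thread(_format_conversation, raw_text)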
@@ -345,7 +348,8 @@ Now format the following:

         add_log(f"✅ Fallback podcast created with {len(podcast_lines)} lines")
         return result
-
+
+    @spaces.GPU
     async def generate_script(self, prompt: str, language: str, file_obj=None, progress=None) -> Dict:
         """Improved script generation with better error handling"""
         if not model_loaded or not self.model or not self.tokenizer: