yellowcandle committed on
Commit
a28c209
·
unverified ·
1 Parent(s): 7f92c21

try to fix error

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -8,7 +8,7 @@ from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline, Aut
8
  transcribe_model = None
9
  proofread_model = None
10
 
11
- @spaces.GPU(duration=60)
12
  def transcribe_audio(audio):
13
  global transcribe_model
14
  if audio is None:
@@ -16,7 +16,7 @@ def transcribe_audio(audio):
16
  if transcribe_model is None:
17
  return "Please select a model."
18
 
19
- device = "cuda:0" if torch.cuda.is_available() else "mps" if torch.mps.is_available() else "cpu"
20
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
21
 
22
  processor = AutoProcessor.from_pretrained(transcribe_model)
@@ -36,13 +36,13 @@ def transcribe_audio(audio):
36
  result = pipe(audio)
37
  return result["text"]
38
 
39
- @spaces.GPU(duration=120)
40
  def proofread(text):
41
  global proofread_model
42
  if text is None:
43
  return "Please provide the transcribed text for proofreading."
44
 
45
- device = "cuda:0" if torch.cuda.is_available() else "mps" if torch.mps.is_available() else "cpu"
46
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
47
 
48
  messages = [
@@ -63,7 +63,7 @@ def proofread(text):
63
 
64
  def load_models(transcribe_model_id, proofread_model_id):
65
  global transcribe_model, proofread_model
66
- device = "cuda:0" if torch.cuda.is_available() else "mps" if torch.mps.is_available() else "cpu"
67
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
68
 
69
  transcribe_model = AutoModelForSpeechSeq2Seq.from_pretrained(
@@ -84,7 +84,7 @@ with gr.Blocks() as demo:
84
  """)
85
 
86
  with gr.Row():
87
- transcribe_model_dropdown = gr.Dropdown(choices=["openai/whisper-large-v3", "alvanlii/whisper-small-cantonese"], value="alvanlii/whisper-small-cantonese", label="Select Transcription Model")
88
  proofread_model_dropdown = gr.Dropdown(choices=["hfl/llama-3-chinese-8b-instruct-v3"], value="hfl/llama-3-chinese-8b-instruct-v3", label="Select Proofreading Model")
89
  load_button = gr.Button("Load Models")
90
 
 
8
  transcribe_model = None
9
  proofread_model = None
10
 
11
+ @spaces.gpu(duration=60)
12
  def transcribe_audio(audio):
13
  global transcribe_model
14
  if audio is None:
 
16
  if transcribe_model is None:
17
  return "Please select a model."
18
 
19
+ device = "cuda:0" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
20
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
21
 
22
  processor = AutoProcessor.from_pretrained(transcribe_model)
 
36
  result = pipe(audio)
37
  return result["text"]
38
 
39
+ @spaces.gpu(duration=120)
40
  def proofread(text):
41
  global proofread_model
42
  if text is None:
43
  return "Please provide the transcribed text for proofreading."
44
 
45
+ device = "cuda:0" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
46
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
47
 
48
  messages = [
 
63
 
64
  def load_models(transcribe_model_id, proofread_model_id):
65
  global transcribe_model, proofread_model
66
+ device = "cuda:0" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
67
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
68
 
69
  transcribe_model = AutoModelForSpeechSeq2Seq.from_pretrained(
 
84
  """)
85
 
86
  with gr.Row():
87
+ transcribe_model_dropdown = gr.Dropdown(choices=["openai/whisper-large-v2", "alvanlii/whisper-small-cantonese"], value="alvanlii/whisper-small-cantonese", label="Select Transcription Model")
88
  proofread_model_dropdown = gr.Dropdown(choices=["hfl/llama-3-chinese-8b-instruct-v3"], value="hfl/llama-3-chinese-8b-instruct-v3", label="Select Proofreading Model")
89
  load_button = gr.Button("Load Models")
90