cheesecz committed
Commit 63183b6 · verified · 1 parent: cdf1d82

Update app.py

Files changed (1):
  1. app.py: +6 -7
app.py CHANGED
@@ -2,13 +2,16 @@ import os
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
 import torch
+import warnings
+
+# Suppress NVML warning
+warnings.filterwarnings("ignore", message="Can't initialize NVML")
 
 os.environ["TRANSFORMERS_CACHE"] = "/tmp"
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 if torch.cuda.is_available():
     os.environ["CUDA_VISIBLE_DEVICES"] = "0"
-    torch.cuda.init()
 
 MODEL_NAME = "s-nlp/roberta-base-formality-ranker"
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
@@ -17,9 +20,7 @@ model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
 model = model.to(device)
 
 def calculate_formality_percentages(score):
-    # Convert score to grayscale percentage (0-100)
     grayscale = int(score * 100)
-    # Use grayscale to determine formal/informal percentages
     formal_percent = grayscale
     informal_percent = 100 - grayscale
     return formal_percent, informal_percent
@@ -37,7 +38,6 @@ def predict_formality(text):
     # Calculate percentages using grayscale
     formal_percent, informal_percent = calculate_formality_percentages(score)
 
-    # Create response in the new format
     response = {
         "formality_score": round(score, 3),
         "formal_percent": formal_percent,
@@ -47,7 +47,6 @@ def predict_formality(text):
 
     return response
 
-# Create Gradio interface
 demo = gr.Interface(
     fn=predict_formality,
     inputs=gr.Textbox(label="Enter your text", lines=3),
@@ -63,9 +62,9 @@ demo = gr.Interface(
 
 # Launch the app
 if __name__ == "__main__":
+    demo.queue()
     demo.launch(
         server_name="0.0.0.0",
         server_port=7860,
-        share=True,
-        enable_queue=True
+        share=True
     )
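
A note on the queueing change: recent Gradio releases no longer accept enable_queue as a launch() argument; queueing is switched on by calling queue() on the app before launch(), which is what this commit does. A minimal standalone sketch of that pattern, assuming Gradio 4.x (the echo handler below is a placeholder, not part of this Space):

    import gradio as gr

    def echo(text):
        # Placeholder handler, used only to show the queue/launch pattern.
        return text

    demo = gr.Interface(fn=echo, inputs=gr.Textbox(lines=3), outputs="text")

    if __name__ == "__main__":
        demo.queue()  # replaces the removed enable_queue=True
        demo.launch(server_name="0.0.0.0", server_port=7860, share=True)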
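
The diff hunks skip the body of predict_formality, so for orientation only, here is one way a formality probability could be computed with s-nlp/roberta-base-formality-ranker. This is a hedged sketch, not the app's actual implementation, and the assumption that class index 1 is the "formal" label should be checked against model.config.id2label:

    import torch
    from transformers import AutoTokenizer, AutoModelForSequenceClassification

    MODEL_NAME = "s-nlp/roberta-base-formality-ranker"
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)

    text = "Could you kindly review the attached report?"
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    # Assumed label order: index 1 = "formal"; verify via model.config.id2label.
    score = torch.softmax(logits, dim=-1)[0, 1].item()
    print(round(score, 3))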