narainkumbari committed
Commit dd07134 · 1 Parent(s): 8bb7e48

Initial Gradio version with GPU

Files changed (2):
  1. app.py +25 -8
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,4 +1,4 @@
-import streamlit as st
+import gradio as gr
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import re
@@ -9,14 +9,31 @@ import io
 
 # Load model and tokenizer from local fine-tuned directory
 # Define base and adapter model paths
-BASE_MODEL = "stanford-crfm/BioMedLM"  # or the path you used originally
-ADAPTER_PATH = "Tufan1/BioMedLM-Cardio-Fold4-CPU"
-tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
-
+#BASE_MODEL = "stanford-crfm/BioMedLM"  # or the path you used originally
+#ADAPTER_PATH = "Tufan1/BioMedLM-Cardio-Fold4-CPU"
+#tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
+MODEL_PATH = "Tufan1/BioMedLM-Cardio-Fold4-CPU"
 # Force CPU-safe model loading
-base_model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, torch_dtype=torch.float32)
-model = PeftModel.from_pretrained(base_model, ADAPTER_PATH, device_map=None).to("cpu")
-
+#base_model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, torch_dtype=torch.float32)
+#model = PeftModel.from_pretrained(base_model, ADAPTER_PATH, device_map=None).to("cpu")
+
+tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
+model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, torch_dtype=torch.float16).to("cuda")
+
+def predict_disease(prompt):
+    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
+    outputs = model.generate(**inputs, max_new_tokens=100)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+iface = gr.Interface(
+    fn=predict_disease,
+    inputs=gr.Textbox(label="Enter Symptoms"),
+    outputs=gr.Textbox(label="Predicted Diagnosis"),
+    title="Cardiovascular Disease Predictor",
+    description="Enter patient symptoms to receive diagnosis based on BioMedLM."
+)
+
+iface.launch()
 
 # Dictionaries to decode user inputs
 gender_map = {1: "Female", 2: "Male"}
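
Note: the new loading block hard-codes CUDA (and leaves the "# Force CPU-safe model loading" comment stale), so the app will raise on CPU-only hardware. A minimal device-agnostic sketch, not part of this commit, that keeps the GPU/float16 path but falls back to CPU/float32 when CUDA is unavailable:

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_PATH = "Tufan1/BioMedLM-Cardio-Fold4-CPU"

# Pick the device at runtime; float16 only makes sense on GPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, torch_dtype=dtype).to(device)

def predict_disease(prompt):
    # Move the tokenized prompt to the same device as the model.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)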
requirements.txt CHANGED
@@ -11,7 +11,8 @@ huggingface-hub==0.30.1
 datasets==3.4.1
 
 # Web App
-streamlit==1.33.0
+# streamlit==1.33.0
+gradio==5.24.0
 
 # Audio Processing
 pydub==0.25.1
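
With gradio pinned, the interface can also be exercised programmatically once the app is running. A possible smoke test (a sketch, not part of this commit; assumes the app is running locally on Gradio's default port, and uses gradio_client, which is installed alongside gradio):

from gradio_client import Client

# Connect to the locally running app (Gradio's default port).
client = Client("http://127.0.0.1:7860")

# gr.Interface exposes its function under the default endpoint "/predict".
result = client.predict("chest pain and shortness of breath", api_name="/predict")
print(result)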