narainkumbari committed on
Commit
d692a69
·
1 Parent(s): c1375d5

Fix: Load CPU-safe model for HF Space

Browse files
Files changed (1) hide show
  1. app.py +9 -19
app.py CHANGED
@@ -6,26 +6,16 @@ from pydub import AudioSegment
6
  import speech_recognition as sr
7
  import io
8
 
9
- # NEW: Update model path (make sure it's uploaded to HF Hub or copied to Space)
10
- MODEL_PATH = "Tufan1/BioMedLM-Cardio-Fold2-CPU"
11
-
12
- # ✅ NEW: Load model and tokenizer safely with CPU fallback
13
- @st.cache_resource
14
- def load_model():
15
- tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
16
- model = AutoModelForCausalLM.from_pretrained(
17
- MODEL_PATH,
18
- device_map="auto", # Will load on CPU in HF Space
19
- trust_remote_code=True
20
- )
21
- return tokenizer, model
22
-
23
- tokenizer, model = load_model()
24
-
25
 
26
  # Dictionaries to decode user inputs
27
  gender_map = {1: "Female", 2: "Male"}
28
- cholesterol_map = {1: "Normal", 2: "High", 3: "Extreme"}
29
  glucose_map = {1: "Normal", 2: "High", 3: "Extreme"}
30
  binary_map = {0: "No", 1: "Yes"}
31
 
@@ -47,7 +37,7 @@ def get_prediction(age, gender, height, weight, ap_hi, ap_lo,
47
 
48
  Diagnosis:"""
49
 
50
- inputs = tokenizer(input_text, return_tensors="pt").to(device)
51
  model.eval()
52
  with torch.no_grad():
53
  outputs = model.generate(**inputs, max_new_tokens=4)
@@ -84,7 +74,7 @@ if input_mode == "Manual Input":
84
  weight = st.number_input("Weight (kg)", min_value=10, max_value=200)
85
  ap_hi = st.number_input("Systolic BP", min_value=80, max_value=250)
86
  ap_lo = st.number_input("Diastolic BP", min_value=40, max_value=150)
87
- cholesterol = st.selectbox("Cholesterol", [("Normal", 1), ("Peak", 2), ("Elevated", 3)], format_func=lambda x: x[0])[1]
88
  glucose = st.selectbox("Glucose", [("Normal", 1), ("High", 2), ("Extreme", 3)], format_func=lambda x: x[0])[1]
89
  smoke = st.radio("Smoker?", [("No", 0), ("Yes", 1)], format_func=lambda x: x[0])[1]
90
  alco = st.radio("Alcohol Intake?", [("No", 0), ("Yes", 1)], format_func=lambda x: x[0])[1]
 
6
  import speech_recognition as sr
7
  import io
8
 
9
+ # Load model and tokenizer from local fine-tuned directory
10
+ MODEL_PATH = "Tufan1/BioMedLM-Cardio-Fold1-CPU"
11
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
12
+ model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")
13
+ #device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
14
+ #model.to(device)
 
 
 
 
 
 
 
 
 
 
15
 
16
  # Dictionaries to decode user inputs
17
  gender_map = {1: "Female", 2: "Male"}
18
+ cholesterol_map = {1: "Normal", 2: "Elevated", 3: "Peak"}
19
  glucose_map = {1: "Normal", 2: "High", 3: "Extreme"}
20
  binary_map = {0: "No", 1: "Yes"}
21
 
 
37
 
38
  Diagnosis:"""
39
 
40
+ inputs = tokenizer(input_text, return_tensors="pt")#.to(device)
41
  model.eval()
42
  with torch.no_grad():
43
  outputs = model.generate(**inputs, max_new_tokens=4)
 
74
  weight = st.number_input("Weight (kg)", min_value=10, max_value=200)
75
  ap_hi = st.number_input("Systolic BP", min_value=80, max_value=250)
76
  ap_lo = st.number_input("Diastolic BP", min_value=40, max_value=150)
77
+ cholesterol = st.selectbox("Cholesterol", [("Normal", 1), ("High", 2), ("Extreme", 3)], format_func=lambda x: x[0])[1]
78
  glucose = st.selectbox("Glucose", [("Normal", 1), ("High", 2), ("Extreme", 3)], format_func=lambda x: x[0])[1]
79
  smoke = st.radio("Smoker?", [("No", 0), ("Yes", 1)], format_func=lambda x: x[0])[1]
80
  alco = st.radio("Alcohol Intake?", [("No", 0), ("Yes", 1)], format_func=lambda x: x[0])[1]