Hugging Face Spaces — app.py (Space status: Sleeping)
Commit: "Upload app.py" · Browse files
File: app.py — CHANGED (diff below)
@@ -17,7 +17,7 @@ import torch
|
|
17 |
import faiss
|
18 |
import numpy as np
|
19 |
import gradio as gr
|
20 |
-
from google.colab import drive
|
21 |
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
|
22 |
from sentence_transformers import SentenceTransformer
|
23 |
from peft import PeftModel
|
@@ -54,9 +54,9 @@ peft_model_path = "Jaamie/gemma-mental-health-qlora"
|
|
54 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
55 |
|
56 |
embedding_model_bge = "BAAI/bge-base-en-v1.5"
|
57 |
-
save_path_bge = "<value truncated in page extraction — removed line; original was a saved-model path>
|
58 |
-
faiss_index_path = "<value truncated in page extraction — removed line; replaced by "./qa_faiss_embedding.index" below>
|
59 |
-
chunked_text_path = "<value truncated in page extraction — removed line; replaced by "./chunked_text_RAG_text.txt" below>
|
60 |
READER_MODEL_NAME = "google/gemma-2-9b-it"
|
61 |
log_file_path = "./diagnosis_logs.csv"
|
62 |
feedback_file_path = "./feedback_logs.csv"
|
@@ -89,15 +89,18 @@ os.makedirs(save_path_bge, exist_ok=True)
|
|
89 |
# -------------------------------
|
90 |
|
91 |
# Load Sentence Transformer Model
|
92 |
-
if not os.path.exists(os.path.join(save_path_bge, "config.json")):
|
93 |
-
print("Saving model to Google Drive...")
|
94 |
-
embedding_model = SentenceTransformer(embedding_model_bge)
|
95 |
-
embedding_model.save(save_path_bge)
|
96 |
-
print("Model saved successfully!")
|
97 |
-
else:
|
98 |
-
print("Loading model from Google Drive...")
|
99 |
-
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
100 |
-
embedding_model = SentenceTransformer(save_path_bge, device=device)
|
|
|
|
|
|
101 |
|
102 |
# Load FAISS Index
|
103 |
faiss_index = faiss.read_index(faiss_index_path)
|
|
|
17 |
import faiss
|
18 |
import numpy as np
|
19 |
import gradio as gr
|
20 |
+
# from google.colab import drive
|
21 |
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
|
22 |
from sentence_transformers import SentenceTransformer
|
23 |
from peft import PeftModel
|
|
|
54 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
55 |
|
56 |
embedding_model_bge = "BAAI/bge-base-en-v1.5"
|
57 |
+
#save_path_bge = "./models/bge-base-en-v1.5"
|
58 |
+
faiss_index_path = "./qa_faiss_embedding.index"
|
59 |
+
chunked_text_path = "./chunked_text_RAG_text.txt"
|
60 |
READER_MODEL_NAME = "google/gemma-2-9b-it"
|
61 |
log_file_path = "./diagnosis_logs.csv"
|
62 |
feedback_file_path = "./feedback_logs.csv"
|
|
|
89 |
# -------------------------------
|
90 |
|
91 |
# Load Sentence Transformer Model
|
92 |
+
# if not os.path.exists(os.path.join(save_path_bge, "config.json")):
|
93 |
+
# print("Saving model to Google Drive...")
|
94 |
+
# embedding_model = SentenceTransformer(embedding_model_bge)
|
95 |
+
# embedding_model.save(save_path_bge)
|
96 |
+
# print("Model saved successfully!")
|
97 |
+
# else:
|
98 |
+
# print("Loading model from Google Drive...")
|
99 |
+
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
100 |
+
# embedding_model = SentenceTransformer(save_path_bge, device=device)
|
101 |
+
|
102 |
+
embedding_model = SentenceTransformer(embedding_model_bge, device=device)
|
103 |
+
print("✅ BGE Embedding model loaded from Hugging Face.")
|
104 |
|
105 |
# Load FAISS Index
|
106 |
faiss_index = faiss.read_index(faiss_index_path)
|