Commit · d89669b · 1 Parent(s): 18c34ed

deepseek R1

- app.py (+54 -11)
- requirements.txt (+4 -5)
app.py CHANGED
@@ -1,28 +1,71 @@
 import streamlit as st
 import os
-from
+from dotenv import load_dotenv
+from transformers import (
+    pipeline,
+    AutoConfig,
+    AutoTokenizer,
+    AutoModelForCausalLM
+)
+
+# Load environment variables from .env (if you’re using one)
+load_dotenv()
 
 st.set_page_config(page_title="Educational Chatbot")
 st.title("🎓 Educational Chatbot")
 
-
+@st.cache_resource(show_spinner=False)
+def load_model():
+    # 1. Load the remote config (with trust_remote_code)
+    config = AutoConfig.from_pretrained(
+        "deepseek-ai/DeepSeek-R1",
+        trust_remote_code=True
+    )
+    # 2. Remove unsupported fp8 quantization
+    if hasattr(config, "quantization_config"):
+        config.quantization_config = None
+
+    # 3. Load tokenizer and model with patched config
+    tokenizer = AutoTokenizer.from_pretrained(
+        "deepseek-ai/DeepSeek-R1",
+        trust_remote_code=True
+    )
+    model = AutoModelForCausalLM.from_pretrained(
+        "deepseek-ai/DeepSeek-R1",
+        trust_remote_code=True,
+        config=config
+    )
+
+    # 4. Build the text-generation pipeline
+    gen = pipeline(
+        "text-generation",
+        model=model,
+        tokenizer=tokenizer,
+        trust_remote_code=True,
+        device_map="auto"  # or remove for CPU-only
+    )
+    return gen
 
+# Load the model once
+generator = load_model()
+
+# Initialize chat history
 if "history" not in st.session_state:
     st.session_state.history = []
 
+# User input box
+user_input = st.text_input("Ask me anything:")
+
+# When user enters a question
 if user_input:
     try:
-
-
-            huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
-            temperature=0.5,
-            max_new_tokens=100,
-        )
-        response = llm.invoke(user_input)
+        outputs = generator(user_input, return_full_text=False)
+        reply = outputs[0]["generated_text"].strip()
         st.session_state.history.append(("You", user_input))
-        st.session_state.history.append(("Bot",
+        st.session_state.history.append(("Bot", reply))
     except Exception as e:
-        st.
+        st.session_state.history.append(("Bot", f"⚠️ Error: {e}"))
 
+# Display chat history
 for sender, msg in reversed(st.session_state.history):
     st.markdown(f"**{sender}:** {msg}")
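Note (not part of the commit): the removed LangChain call capped output at max_new_tokens=100 with temperature=0.5, while the new pipeline call passes no generation settings. If that behaviour is still wanted, the transformers text-generation pipeline accepts the same options as call-time keyword arguments; a minimal sketch using the generator and user_input already defined in app.py:

outputs = generator(
    user_input,
    max_new_tokens=100,    # cap carried over from the removed LangChain call
    do_sample=True,        # sampling must be enabled for temperature to take effect
    temperature=0.5,       # matches the removed temperature=0.5 setting
    return_full_text=False,
)
reply = outputs[0]["generated_text"].strip()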
requirements.txt CHANGED
@@ -1,6 +1,5 @@
-streamlit
-langchain
-langchain-community
-langchain-huggingface
-huggingface_hub
+streamlit>=1.20.0
 python-dotenv
+transformers>=4.40.0
+torch>=2.1.0
+accelerate
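A quick way to sanity-check the environment these pins define (a sketch, not part of the commit; "sshleifer/tiny-gpt2" is an arbitrary small test model, not anything the Space uses):

from transformers import pipeline

# Load a very small text-generation model just to confirm that the
# pinned transformers/torch stack installs and runs end to end.
gen = pipeline("text-generation", model="sshleifer/tiny-gpt2")
print(gen("Hello", max_new_tokens=5)[0]["generated_text"])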