Commit · 92cf3e7
1 Parent(s): 88c84bb
deepseek R1

Files changed:
- app.py +22 -51
- requirements.txt +2 -0
app.py
CHANGED
@@ -1,70 +1,41 @@
-import streamlit as st
 import os
+import streamlit as st
+from huggingface_hub import InferenceClient
 from dotenv import load_dotenv
-from transformers import (
-    pipeline,
-    AutoConfig,
-    AutoTokenizer,
-    AutoModelForCausalLM
-)
 
-# Load
+# 1. Load .env (if using)
 load_dotenv()
 
+# 2. Instantiate the InferenceClient once
+client = InferenceClient(
+    provider="auto",
+    api_key=os.environ["HUGGINGFACEHUB_API_TOKEN"]
+)
+
 st.set_page_config(page_title="Educational Chatbot")
 st.title("🎓 Educational Chatbot")
 
-@st.cache_resource(show_spinner=False)
-def load_model():
-    # 1. Load the remote config (with trust_remote_code)
-    config = AutoConfig.from_pretrained(
-        "deepseek-ai/DeepSeek-R1",
-        trust_remote_code=True
-    )
-    if hasattr(config, "quantization_config"):
-        config.quantization_config = {}  # ← use {} instead of None
-
-    tokenizer = AutoTokenizer.from_pretrained(
-        "deepseek-ai/DeepSeek-R1",
-        trust_remote_code=True
-    )
-    model = AutoModelForCausalLM.from_pretrained(
-        "deepseek-ai/DeepSeek-R1",
-        trust_remote_code=True,
-        config=config
-    )
-
-
-    # 4. Build the text-generation pipeline
-    gen = pipeline(
-        "text-generation",
-        model=model,
-        tokenizer=tokenizer,
-        trust_remote_code=True,
-        device_map="auto"  # or remove for CPU-only
-    )
-    return gen
-
-# Load the model once
-generator = load_model()
-
-# Initialize chat history
 if "history" not in st.session_state:
     st.session_state.history = []
 
-#
+# 3. Chat input
 user_input = st.text_input("Ask me anything:")
 
-# When user enters a question
 if user_input:
     try:
-
-
-
-
+        # 4. Make the chat-completion API call
+        completion = client.chat.completions.create(
+            model="deepseek-ai/DeepSeek-R1",
+            messages=[{"role": "user", "content": user_input}],
+        )
+        reply = completion.choices[0].message["content"]
     except Exception as e:
-
+        reply = f"❌ API Error: {e}"
+
+    # 5. Append to session history
+    st.session_state.history.append(("You", user_input))
+    st.session_state.history.append(("Bot", reply))
 
-#
+# 6. Render chat history
 for sender, msg in reversed(st.session_state.history):
     st.markdown(f"**{sender}:** {msg}")
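For a quick sanity check of the new inference path outside Streamlit, a minimal sketch like the following exercises the same call the app now makes. The client setup and model ID are taken from the diff above; the sample question is only an illustration, and the token must be exported as HUGGINGFACEHUB_API_TOKEN:

import os
from huggingface_hub import InferenceClient

# Same setup as app.py: let huggingface_hub route to an inference provider.
client = InferenceClient(
    provider="auto",
    api_key=os.environ["HUGGINGFACEHUB_API_TOKEN"],
)

# One-shot chat completion against the same model the Space uses.
completion = client.chat.completions.create(
    model="deepseek-ai/DeepSeek-R1",
    messages=[{"role": "user", "content": "Explain photosynthesis in one sentence."}],
)

# The output message supports attribute access; app.py reads the
# equivalent dict-style message["content"].
print(completion.choices[0].message.content)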
requirements.txt
CHANGED
@@ -3,3 +3,5 @@ python-dotenv
 transformers>=4.40.0
 torch>=2.1.0
 accelerate
+huggingface-hub>=0.14.1
+
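One caveat: the provider="auto" argument used in app.py is a relatively recent huggingface_hub feature, so the >=0.14.1 floor above may still admit installed versions that predate it. If InferenceClient raises a TypeError about an unexpected provider keyword, a tighter pin is the likely fix; the exact version below is an assumption, not something stated in this commit:

# requirements.txt sketch — tighter floor so InferenceClient(provider=...) exists
huggingface-hub>=0.28.0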