Create app.py
app.py
ADDED
@@ -0,0 +1,35 @@
+from fastapi import FastAPI, Query
+from fastapi.responses import StreamingResponse
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+import torch
+import time
+
+app = FastAPI()
+
+model_name = "prajjwal1/bert-tiny"  # Pretrained BERT-Tiny on Hugging Face
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForSequenceClassification.from_pretrained(model_name)
+model.eval()  # inference only: disable dropout
+
+
+# SSE generator: classify the text and emit the prediction as a single SSE event
+def event_stream(text: str):
+    time.sleep(1)  # small artificial delay before the event is sent
+    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
+    with torch.no_grad():
+        outputs = model(**inputs)
+    probs = torch.nn.functional.softmax(outputs.logits, dim=1)
+    prediction = torch.argmax(probs, dim=1).item()
+    yield f"data: {prediction}\n\n"  # SSE frame: a "data: ..." line plus a blank line
+
+
+@app.get("/chatstrm")
+async def chat(query: str = Query(..., description="User's message")):
+    return StreamingResponse(event_stream(query), media_type="text/event-stream")
+
+
+# Entry point
+if __name__ == "__main__":
+    import uvicorn
+
+    uvicorn.run("app:app", host="0.0.0.0", port=7899)
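For reference, a minimal sketch of a client that consumes the /chatstrm stream. It assumes the app above is running locally on port 7899 and that the requests package is installed; neither is part of the Space itself.

# client.py - hypothetical consumer for the SSE endpoint above
import requests

with requests.get(
    "http://localhost:7899/chatstrm",
    params={"query": "hello world"},
    stream=True,
) as resp:
    # Each SSE event arrives as a "data: <payload>" line followed by a blank line
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith("data: "):
            print("predicted label:", line[len("data: "):])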