Update handler.py
Browse files- handler.py +37 -21
handler.py
CHANGED
@@ -1,23 +1,21 @@
|
|
1 |
# handler.py
|
2 |
|
3 |
-
import os
|
4 |
import requests
|
|
|
5 |
from fastapi import APIRouter, HTTPException
|
6 |
from pydantic import BaseModel
|
7 |
from typing import Optional
|
8 |
-
from dotenv import load_dotenv
|
9 |
|
10 |
-
#
|
11 |
-
|
|
|
12 |
|
13 |
-
# Securely read environment variables
|
14 |
-
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
|
15 |
-
HUGGINGFACE_MODEL_URL = os.getenv("HUGGINGFACE_MODEL_URL")
|
16 |
-
|
17 |
-
# FastAPI router setup
|
18 |
router = APIRouter()
|
19 |
|
20 |
-
#
|
|
|
|
|
|
|
21 |
class PromptInput(BaseModel):
|
22 |
prompt: str
|
23 |
max_tokens: Optional[int] = 250
|
@@ -25,17 +23,28 @@ class PromptInput(BaseModel):
|
|
25 |
top_p: Optional[float] = 0.95
|
26 |
top_k: Optional[int] = 50
|
27 |
repetition_penalty: Optional[float] = 1.2
|
|
|
28 |
|
29 |
-
#
|
30 |
-
@router.
|
31 |
-
async def
|
32 |
-
|
33 |
-
raise HTTPException(status_code=500, detail="Hugging Face API token or model URL not configured.")
|
34 |
|
35 |
-
|
36 |
-
|
37 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
38 |
|
|
|
|
|
|
|
39 |
payload = {
|
40 |
"inputs": input_data.prompt,
|
41 |
"parameters": {
|
@@ -48,18 +57,25 @@ async def generate_text(input_data: PromptInput):
|
|
48 |
}
|
49 |
|
50 |
try:
|
51 |
-
|
|
|
52 |
|
53 |
if response.status_code != 200:
|
|
|
54 |
raise HTTPException(status_code=response.status_code, detail=response.json())
|
55 |
|
56 |
result = response.json()
|
57 |
-
|
|
|
|
|
|
|
|
|
58 |
|
59 |
return {
|
60 |
"status": "success",
|
61 |
-
"output":
|
62 |
}
|
63 |
|
64 |
except Exception as e:
|
|
|
65 |
raise HTTPException(status_code=500, detail=f"Text generation failed: {str(e)}")
|
|
|
1 |
# handler.py
|
2 |
|
|
|
3 |
import logging
import os

import requests
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from typing import Optional
|
|
8 |
|
9 |
+
# Setup logger: one module-level logger, INFO so request/response flow is traceable.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# FastAPI router mounted by the application entry point.
router = APIRouter()

# Hugging Face Inference API endpoint for the model.
# Overridable via the HUGGINGFACE_MODEL_URL environment variable (restores the
# env-driven configuration this file previously supported); falls back to the
# public model URL so existing deployments behave exactly as before.
MODEL_URL = os.getenv(
    "HUGGINGFACE_MODEL_URL",
    "https://api-inference.huggingface.co/models/CLASSIFIED-HEX/X",
)
|
17 |
+
|
18 |
+
# Input model
|
19 |
class PromptInput(BaseModel):
|
20 |
prompt: str
|
21 |
max_tokens: Optional[int] = 250
|
|
|
23 |
top_p: Optional[float] = 0.95
|
24 |
top_k: Optional[int] = 50
|
25 |
repetition_penalty: Optional[float] = 1.2
|
26 |
+
trim_output: Optional[bool] = False # New feature to remove prompt from result
|
27 |
|
28 |
+
# Root health check
@router.get("/")
async def root():
    """Liveness probe: report that the service itself is up and serving."""
    status_message = "AI text generation backend is running 🚀"
    return {"message": status_message}
|
|
|
32 |
|
33 |
+
# Ping model check
@router.get("/ping-model")
async def ping_model():
    """Health-check the Hugging Face model endpoint.

    Sends a trivial inference request and reports whether the model
    answered. Returns a status dict on any HTTP response; raises
    HTTP 500 when the endpoint cannot be reached at all.
    """
    # NOTE(review): requests.post is a blocking call inside an async route;
    # it stalls the event loop for the duration of the request. Consider an
    # async HTTP client or run_in_executor if this becomes a bottleneck.
    try:
        # Timeout added so an unresponsive inference API cannot hang this
        # request (and the event loop) forever; a timeout surfaces as the
        # HTTP 500 below, same as any other unreachable-endpoint failure.
        response = requests.post(MODEL_URL, json={"inputs": "ping test"}, timeout=30)
        if response.status_code == 200:
            return {"status": "Model is online ✅"}
        else:
            # response.json() may itself raise on a non-JSON error body;
            # that is caught below and reported as a 500.
            return {"status": "Model responded with error ❌", "details": response.json()}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Could not reach model: {str(e)}")
|
44 |
|
45 |
+
# Main generation route
|
46 |
+
@router.post("/generate")
|
47 |
+
async def generate_text(input_data: PromptInput):
|
48 |
payload = {
|
49 |
"inputs": input_data.prompt,
|
50 |
"parameters": {
|
|
|
57 |
}
|
58 |
|
59 |
try:
|
60 |
+
logger.info(f"Sending prompt to model: {input_data.prompt}")
|
61 |
+
response = requests.post(MODEL_URL, json=payload)
|
62 |
|
63 |
if response.status_code != 200:
|
64 |
+
logger.error(f"Model error: {response.status_code} - {response.text}")
|
65 |
raise HTTPException(status_code=response.status_code, detail=response.json())
|
66 |
|
67 |
result = response.json()
|
68 |
+
raw_output = result[0].get("generated_text") if isinstance(result, list) else result.get("generated_text", "")
|
69 |
+
|
70 |
+
# Optionally trim prompt from beginning
|
71 |
+
if input_data.trim_output and raw_output.startswith(input_data.prompt):
|
72 |
+
raw_output = raw_output[len(input_data.prompt):].lstrip()
|
73 |
|
74 |
return {
|
75 |
"status": "success",
|
76 |
+
"output": raw_output
|
77 |
}
|
78 |
|
79 |
except Exception as e:
|
80 |
+
logger.exception("Text generation failed")
|
81 |
raise HTTPException(status_code=500, detail=f"Text generation failed: {str(e)}")
|