import os
import logging
from typing import Optional
from datetime import datetime
from contextlib import asynccontextmanager

from fastapi import FastAPI, HTTPException, Depends, Security, status
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
import uvicorn

# torch is optional at import time; without it the app falls back to demo-mode
# responses. Importing it at module level makes it visible to the generation
# functions below, not just to the startup hook.
try:
    import torch
except ImportError:
    torch = None

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Global variables for model
model = None
tokenizer = None
model_loaded = False

@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup
    global model, tokenizer, model_loaded
    logger.info("Real LLM AI Assistant starting up...")
    
    try:
        # Try to load the actual LLM model (torch itself is imported at module level)
        if torch is None:
            raise ImportError("torch is not installed")
        from transformers import AutoTokenizer, AutoModelForCausalLM
        
        # Use a better conversational model
        model_name = os.getenv("MODEL_NAME", "microsoft/DialoGPT-medium")
        logger.info(f"Loading real LLM model: {model_name}")
        
        # Load tokenizer
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        
        # Load model with optimizations
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True,
            pad_token_id=tokenizer.eos_token_id
        )
        
        model_loaded = True
        logger.info("Real LLM model loaded successfully!")
        
    except Exception as e:
        logger.warning(f"Could not load LLM model: {e}")
        logger.info("Will use fallback responses")
        model_loaded = False
    
    yield
    # Shutdown
    logger.info("AI Assistant shutting down...")

# Initialize FastAPI app with lifespan
app = FastAPI(
    title="Real LLM AI Agent API",
    description="AI Agent powered by actual LLM models",
    version="4.0.0",
    lifespan=lifespan
)

# CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Security
security = HTTPBearer()

# Configuration
API_KEYS = {
    os.getenv("API_KEY_1", "27Eud5J73j6SqPQAT2ioV-CtiCg-p0WNqq6I4U0Ig6E"): "user1",
    os.getenv("API_KEY_2", "QbzG2CqHU1Nn6F1EogZ1d3dp8ilRTMJQBzS-U"): "user2",
}
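
# The hardcoded keys above are development placeholders. A sketch of overriding
# them with generated values (hypothetical commands; any opaque string works):
#
#   export API_KEY_1="$(python -c 'import secrets; print(secrets.token_urlsafe(32))')"
#   export API_KEY_2="$(python -c 'import secrets; print(secrets.token_urlsafe(32))')"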

# Request/Response models
class ChatRequest(BaseModel):
    message: str = Field(..., min_length=1, max_length=2000)
    max_length: Optional[int] = Field(200, ge=50, le=500)
    temperature: Optional[float] = Field(0.8, ge=0.1, le=1.5)
    top_p: Optional[float] = Field(0.9, ge=0.1, le=1.0)
    do_sample: Optional[bool] = Field(True)

class ChatResponse(BaseModel):
    response: str
    model_used: str
    timestamp: str
    processing_time: float
    tokens_used: int
    model_loaded: bool

class HealthResponse(BaseModel):
    status: str
    model_loaded: bool
    timestamp: str
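
# Example ChatRequest body (illustrative values; every field except "message"
# is optional and falls back to the Field defaults above):
#
#   {"message": "Explain what this API does",
#    "max_length": 150, "temperature": 0.7, "top_p": 0.9, "do_sample": true}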

def verify_api_key(credentials: HTTPAuthorizationCredentials = Security(security)) -> str:
    """Verify API key authentication"""
    api_key = credentials.credentials
    
    if api_key not in API_KEYS:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid API key"
        )
    
    return API_KEYS[api_key]

def generate_llm_response(
    message: str,
    max_length: int = 200,
    temperature: float = 0.8,
    top_p: float = 0.9,
    do_sample: bool = True,
) -> tuple[str, str, int]:
    """Generate a response using the loaded LLM; returns (text, model_used, tokens_used)."""
    global model, tokenizer, model_loaded
    
    if not model_loaded or model is None or tokenizer is None:
        return "I'm currently running in demo mode. The LLM model couldn't be loaded, but I'm still here to help! Please try asking your question again.", "demo_mode", 0
    
    try:
        # Prepare input with conversation format
        input_text = f"Human: {message}\nAssistant:"
        
        # Tokenize input; calling the tokenizer directly also returns the
        # attention mask, which generate() expects alongside the input IDs
        inputs = tokenizer(input_text, return_tensors="pt")
        
        # Generate response
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=max_length,
                temperature=temperature,
                top_p=top_p,
                do_sample=do_sample,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
                num_return_sequences=1,
                repetition_penalty=1.1
            )
        
        # Decode response
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        
        # Extract only the assistant's response
        if "Assistant:" in response:
            response = response.split("Assistant:")[-1].strip()
        
        # Remove the input text if it's still there
        if input_text.replace("Assistant:", "").strip() in response:
            response = response.replace(input_text.replace("Assistant:", "").strip(), "").strip()
        
        # Clean up the response
        response = response.strip()
        if not response:
            response = "I understand your question, but I'm having trouble generating a proper response right now. Could you please rephrase your question?"
        
        # Count tokens
        tokens_used = len(tokenizer.encode(response))
        
        return response, os.getenv("MODEL_NAME", "microsoft/DialoGPT-medium"), tokens_used
        
    except Exception as e:
        logger.error(f"Error generating LLM response: {str(e)}")
        return f"I encountered an issue while processing your request. Error: {str(e)}", "error_mode", 0

@app.get("/", response_model=HealthResponse)
async def root():
    """Health check endpoint"""
    return HealthResponse(
        status="healthy",
        model_loaded=model_loaded,
        timestamp=datetime.now().isoformat()
    )

@app.get("/health", response_model=HealthResponse)
async def health_check():
    """Detailed health check"""
    return HealthResponse(
        status="healthy" if model_loaded else "demo_mode",
        model_loaded=model_loaded,
        timestamp=datetime.now().isoformat()
    )

@app.post("/chat", response_model=ChatResponse)
async def chat(
    request: ChatRequest,
    user: str = Depends(verify_api_key)
):
    """Main chat endpoint using real LLM model"""
    start_time = datetime.now()
    
    try:
        # Generate response using actual LLM
        response_text, model_used, tokens_used = generate_llm_response(
            request.message,
            request.max_length,
            request.temperature,
            request.top_p,
            request.do_sample
        )
        
        # Calculate processing time
        processing_time = (datetime.now() - start_time).total_seconds()
        
        return ChatResponse(
            response=response_text,
            model_used=model_used,
            timestamp=datetime.now().isoformat(),
            processing_time=processing_time,
            tokens_used=tokens_used,
            model_loaded=model_loaded
        )
        
    except Exception as e:
        logger.error(f"Error in chat endpoint: {str(e)}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Error generating response: {str(e)}"
        )
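
# Example call against /chat (a sketch: assumes the server is running locally on
# the default port 7860 and API_KEY_1 is unset, so the placeholder key above is
# active):
#
#   curl -X POST http://localhost:7860/chat \
#        -H "Authorization: Bearer 27Eud5J73j6SqPQAT2ioV-CtiCg-p0WNqq6I4U0Ig6E" \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Hello!"}'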

@app.get("/models")
async def get_model_info(user: str = Depends(verify_api_key)):
    """Get information about the loaded model"""
    return {
        "model_name": os.getenv("MODEL_NAME", "microsoft/DialoGPT-medium"),
        "model_loaded": model_loaded,
        "status": "active" if model_loaded else "demo_mode",
        "capabilities": [
            "Real LLM text generation",
            "Conversational AI responses",
            "Dynamic response generation",
            "Adjustable temperature and top_p",
            "Natural language understanding"
        ],
        "version": "4.0.0",
        "type": "Real LLM Model" if model_loaded else "Demo Mode"
    }

@app.post("/generate")
async def generate_text(
    request: ChatRequest,
    user: str = Depends(verify_api_key)
):
    """Direct text generation endpoint"""
    start_time = datetime.now()
    
    try:
        # Generate using LLM without conversation formatting
        if model_loaded and model is not None and tokenizer is not None:
            inputs = tokenizer(request.message, return_tensors="pt")
            
            with torch.no_grad():
                outputs = model.generate(
                    **inputs,
                    max_new_tokens=request.max_length,
                    temperature=request.temperature,
                    top_p=request.top_p,
                    do_sample=request.do_sample,
                    pad_token_id=tokenizer.eos_token_id,
                    num_return_sequences=1
                )
            
            # Decode only the newly generated tokens, so the prompt text is
            # excluded exactly (decoding does not round-trip the raw input
            # string, so character-based slicing would be unreliable)
            prompt_len = inputs["input_ids"].shape[1]
            response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()
            tokens_used = len(tokenizer.encode(response))
            model_used = os.getenv("MODEL_NAME", "microsoft/DialoGPT-medium")
        else:
            response = "Model not loaded. Running in demo mode."
            tokens_used = 0
            model_used = "demo_mode"
        
        processing_time = (datetime.now() - start_time).total_seconds()
        
        return ChatResponse(
            response=response,
            model_used=model_used,
            timestamp=datetime.now().isoformat(),
            processing_time=processing_time,
            tokens_used=tokens_used,
            model_loaded=model_loaded
        )
        
    except Exception as e:
        logger.error(f"Error in generate endpoint: {str(e)}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Error generating text: {str(e)}"
        )

if __name__ == "__main__":
    # For Hugging Face Spaces
    port = int(os.getenv("PORT", "7860"))
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=port,
        reload=False
    )
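
# Running locally (a sketch; assumes the stack is installed, e.g. something like
# "pip install fastapi uvicorn transformers torch" — this file does not pin
# exact versions, and "<this_file>.py" stands in for its actual name):
#
#   MODEL_NAME=microsoft/DialoGPT-medium PORT=7860 python <this_file>.py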