import os
import json
import datetime
from pathlib import Path
from typing import Generator, Optional

import requests
import cloudscraper
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import StreamingResponse, HTMLResponse, JSONResponse, FileResponse
from pydantic import BaseModel

load_dotenv()

app = FastAPI()

# Get API keys and secret endpoint from environment variables
api_keys_str = os.getenv('API_KEYS')
valid_api_keys = api_keys_str.split(',') if api_keys_str else []
secret_api_endpoint = os.getenv('SECRET_API_ENDPOINT')
secret_api_endpoint_2 = os.getenv('SECRET_API_ENDPOINT_2')
secret_api_endpoint_3 = os.getenv('SECRET_API_ENDPOINT_3')  # New endpoint for searchgpt

# Fail fast if the secret API endpoints are not configured.
# HTTPException only makes sense inside a request handler, so raise a plain error at import time.
if not secret_api_endpoint or not secret_api_endpoint_2 or not secret_api_endpoint_3:
    raise RuntimeError("SECRET_API_ENDPOINT, SECRET_API_ENDPOINT_2 and SECRET_API_ENDPOINT_3 must be set as environment variables.")
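
# Example .env (illustrative placeholder values; the variable names match the os.getenv calls above):
#   API_KEYS=key1,key2
#   SECRET_API_ENDPOINT=https://primary.example.com
#   SECRET_API_ENDPOINT_2=https://secondary.example.com
#   SECRET_API_ENDPOINT_3=https://search.example.com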

# Define models that should use the secondary endpoint
alternate_models = {"gpt-4o-mini", "claude-3-haiku", "llama-3.1-70b", "mixtral-8x7b"}

class Payload(BaseModel):
    model: str
    messages: list
    stream: bool
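
# Example request body for /chat/completions matching the Payload model
# (illustrative only; the model name must appear in available_model_ids below):
# {
#     "model": "gpt-4o-mini",
#     "messages": [{"role": "user", "content": "Hello"}],
#     "stream": true
# }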
@app.get("/favicon.ico")
async def favicon():
    # The favicon.ico file is in the same directory as the app
    favicon_path = Path(__file__).parent / "favicon.ico"
    return FileResponse(favicon_path, media_type="image/x-icon")
    
def generate_search(query: str, systemprompt: Optional[str] = None, stream: bool = True) -> Generator[str, None, None]:
    headers = {"User-Agent": ""}
    
    # Use the provided system prompt, or default to "Be Helpful and Friendly"
    system_message = systemprompt or "Be Helpful and Friendly"
    
    # Create the prompt history with the user query and system message
    prompt = [
        {"role": "user", "content": query},
    ]
    
    prompt.insert(0, {"content": system_message, "role": "system"})
    
    # Prepare the payload for the API request
    payload = {
        "is_vscode_extension": True,
        "message_history": prompt,
        "requested_model": "searchgpt",
        "user_input": prompt[-1]["content"],
    }
    
    # Send the request to the chat endpoint
    response = requests.post(secret_api_endpoint_3, headers=headers, json=payload, stream=True)
    
    streaming_text = ""
    
    # Process the streaming response
    for value in response.iter_lines(decode_unicode=True):
        if value.startswith("data: "):  
            try:
                json_modified_value = json.loads(value[6:])
                content = json_modified_value.get("choices", [{}])[0].get("delta", {}).get("content", "")

                if content.strip():  # Only process non-empty content
                    cleaned_response = {
                        "created": json_modified_value.get("created"),
                        "id": json_modified_value.get("id"),
                        "model": "searchgpt",
                        "object": "chat.completion",
                        "choices": [
                            {
                                "message": {
                                    "content": content
                                }
                            }
                        ]
                    }
                    
                    if stream:
                        yield f"data: {json.dumps(cleaned_response)}\n\n"
                    
                    streaming_text += content
            except json.JSONDecodeError:
                continue
    
    if not stream:
        yield streaming_text
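
# Each upstream SSE line parsed above is expected to look roughly like (illustrative):
#   data: {"id": "...", "created": 1700000000, "choices": [{"delta": {"content": "Hel"}}]}
# and is re-emitted to the client as a simplified "chat.completion" chunk.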


@app.get("/searchgpt")
async def search_gpt(q: str, stream: Optional[bool] = False, systemprompt: Optional[str] = None):
    if not q:
        raise HTTPException(status_code=400, detail="Query parameter 'q' is required")
    
    if stream:
        return StreamingResponse(
            generate_search(q, systemprompt=systemprompt, stream=True),
            media_type="text/event-stream"
        )
    else:
        # For non-streaming requests, collect the full text and return it as JSON
        response_text = "".join(generate_search(q, systemprompt=systemprompt, stream=False))
        return JSONResponse(content={"response": response_text})
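

# Example (assuming the server is running locally on port 8000, as in the uvicorn
# call at the bottom of this file):
#   curl "http://localhost:8000/searchgpt?q=latest+python+release&stream=false"
# which returns: {"response": "..."}
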
@app.get("/", response_class=HTMLResponse)
async def root():
    # Open and read the content of index.html (in the same folder as the app)
    file_path = "index.html"

    try:
        with open(file_path, "r") as file:
            html_content = file.read()
        return HTMLResponse(content=html_content)
    except FileNotFoundError:
        return HTMLResponse(content="<h1>File not found</h1>", status_code=404)

async def get_models():
    try:
        # Load the models from models.json in the same folder
        file_path = Path(__file__).parent / 'models.json'
        with open(file_path, 'r') as f:
            return json.load(f)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="models.json not found")
    except json.JSONDecodeError:
        raise HTTPException(status_code=500, detail="Error decoding models.json")

@app.get("/models")
async def fetch_models():
    return await get_models()

available_model_ids = [
    "gpt-4o", "gpt-4o-mini", "claude-3-haiku", "llama-3.1-405b", "llama-3.1-70b",
    "llama-3.1-8b", "gemini-1.5-flash", "mixtral-8x7b", "command-r", "gemini-pro",
    "gpt-3.5-turbo", "command"
]

@app.post("/chat/completions")
@app.post("/v1/chat/completions")
async def get_completion(payload: Payload, request: Request):
    model_to_use = payload.model if payload.model else "gpt-4o-mini"
    # Validate model availability
    if model_to_use not in available_model_ids:
        raise HTTPException(
            status_code=400, 
            detail=f"Model '{model_to_use}' is not available. Check /models for the available model list."
        )

    # Proceed with the request handling
    payload_dict = payload.dict()
    payload_dict["model"] = model_to_use

    # Select the appropriate endpoint
    endpoint = secret_api_endpoint_2 if model_to_use in alternate_models else secret_api_endpoint
    # Log the time, client IP and payload of each request
    current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    client_ip = request.client.host
    print(f"Time: {current_time}, IP: {client_ip}")
    print(payload_dict)

    async def stream_generator(payload_dict):
        # cloudscraper (a requests-based client) is used to reach Cloudflare-protected upstream endpoints
        scraper = cloudscraper.create_scraper()
        try:
            # Send POST request using CloudScraper
            response = scraper.post(f"{endpoint}/v1/chat/completions", json=payload_dict, stream=True)
            
            # Check response status
            if response.status_code == 422:
                raise HTTPException(status_code=422, detail="Unprocessable entity. Check your payload.")
            elif response.status_code == 400:
                raise HTTPException(status_code=400, detail="Bad request. Verify input data.")
            elif response.status_code == 403:
                raise HTTPException(status_code=403, detail="Forbidden. You do not have access to this resource.")
            elif response.status_code == 404:
                raise HTTPException(status_code=404, detail="The requested resource was not found.")
            elif response.status_code >= 500:
                raise HTTPException(status_code=500, detail="Server error. Try again later.")

            # Stream response lines to the client
            for line in response.iter_lines():
                if line:
                    yield line.decode('utf-8') + "\n"

        except requests.exceptions.RequestException as req_err:
            # Handle request-specific errors
            raise HTTPException(status_code=500, detail=f"Request failed: {req_err}")
        except Exception as e:
            # Handle unexpected errors
            raise HTTPException(status_code=500, detail=f"An unexpected error occurred: {e}")

    return StreamingResponse(stream_generator(payload_dict), media_type="application/json")
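

# Example (illustrative; assumes the server is running locally on port 8000):
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "gpt-4o-mini", "messages": [{"role": "user", "content": "Hi"}], "stream": true}'
# The upstream response is streamed back to the caller line by line.
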
@app.get("/playground", response_class=HTMLResponse)
async def playground():
    # Open and read the content of playground.html (in the same folder as the app)
    file_path = "playground.html"

    try:
        with open(file_path, "r") as file:
            html_content = file.read()
        return HTMLResponse(content=html_content)
    except FileNotFoundError:
        return HTMLResponse(content="<h1>playground.html not found</h1>", status_code=404)


@app.on_event("startup")
async def startup_event():
    print("API endpoints:")
    print("GET /")
    print("GET /models")
    print("GET /searchgpt")  # We now have the new search API
    print("POST /chat/completions")

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)