File size: 2,179 Bytes
fdfb7a5
3d94315
 
 
fdfb7a5
 
 
 
 
3d94315
 
 
fdfb7a5
3d94315
 
 
fdfb7a5
3d94315
 
 
 
 
fdfb7a5
 
 
3d94315
 
 
 
 
 
 
 
 
 
fdfb7a5
3d94315
 
 
 
 
fdfb7a5
 
 
 
 
 
 
3d94315
fdfb7a5
 
3d94315
 
 
 
fdfb7a5
3d94315
fdfb7a5
 
 
 
 
 
 
 
 
 
 
 
 
3d94315
 
 
fdfb7a5
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from langchain_google_genai import ChatGoogleGenerativeAI
import os
from dotenv import load_dotenv

load_dotenv()  

app = FastAPI()


# Allow requests from your front-end's origin.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000", "chrome-extension://*"],  # Allow specific origins
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# It's recommended to load secrets from environment variables
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

# Define the request model that expects a JSON body with "text"
class MeaningRequest(BaseModel):
    text: str

# Define the response model that will return the meaning
class MeaningResponse(BaseModel):
    meaning: str

def get_meaning_from_llm(text: str) -> str:
    """
    Get meaning of text using Google's Generative AI.
    """
    # Create a prompt for your LLM
    prompt = f"Explain the meaning of the following text in simple terms in only one or two lines not more than that: '{text}'"
    
    # Make sure this URL is accessible and valid
    llm = ChatGoogleGenerativeAI(
        model="gemini-1.5-flash",
        temperature=0.1,
        max_tokens=None,
        timeout=None,
        max_retries=2,
        google_api_key=GOOGLE_API_KEY
    )
    response = llm.invoke(prompt)
    return response.content

@app.post("/get_meaning", response_model=MeaningResponse)
async def get_meaning(request: MeaningRequest):
    """
    Endpoint to return meaning.
    """
    try:
        print(f"Received text: {request.text}")
        # Extract text from the request
        text = request.text
        # Generate meaning using the LLM call
        meaning = get_meaning_from_llm(text)
        
        return MeaningResponse(
            meaning=meaning
        )
    except Exception as e:
        print(f"An error occurred: {e}")
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == "__main__":
    # Run the FastAPI app with Uvicorn
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)