from fastapi import FastAPI
from llama_cpp import Llama

# Project-local modules
import crawl_archive
import GenerateAIPodcast

app = FastAPI()

# Load the quantized Llama 3.2 1B Instruct model (GGUF). from_pretrained fetches the
# file from the Hugging Face Hub on first use and reuses the local cache afterwards.
llm = Llama.from_pretrained(
    repo_id="hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF",
    filename="llama-3.2-1b-instruct-q8_0.gguf",
)
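# Note: Llama.from_pretrained relies on the optional huggingface_hub dependency being
# installed (e.g. `pip install huggingface_hub`) so it can download the model file.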

@app.get("/")
def home():
    return {"message":"Hello World"}

# Handle GET requests at `/generate`: ask the model to summarize the given link.
@app.get("/generate")
def generate(link:str):
    ## use the pipeline to generate text from given input text
    output= llm.create_chat_completion(
	messages = [
		{"role": "system",
		 "content": "Always answer short and most detailled and dont use * in your answers. It should be good to hear as a Podcast"},
		{"role": "user", "content": f"Please summarize this website: {link}."}
	]
)

    ## return the generate text in Json reposnfe
    return output['choices'][0]['message']['content']


# Run the TLDR crawler once at import time (i.e. when the server process loads this module).
crawl_archive.run_tldr_crawler()
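
# A minimal way to run this app locally (assuming this file is saved as main.py):
#   uvicorn main:app --reload
# Then, for example: GET http://127.0.0.1:8000/generate?link=https://example.com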