Defender117 committed on
Commit
a36c95d
·
verified ·
1 Parent(s): 26ac486

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -5
app.py CHANGED
@@ -1,13 +1,27 @@
1
  from fastapi import FastAPI
2
  from transformers import pipeline
3
 
4
- ## create a new FASTAPI app instance
5
- app=FastAPI()
6
 
7
- # Initialize the text generation pipeline
8
- pipe = pipeline("image-text-to-text", model="deepseek-ai/deepseek-vl2-small")
 
 
 
 
9
 
 
 
 
 
 
 
 
 
10
 
 
 
 
 
11
@app.get("/")
def home():
    """Root endpoint: static liveness greeting."""
    greeting = {"message":"Hello World"}
    return greeting
@@ -18,7 +32,14 @@ def home():
@app.get("/generate")
def generate(text:str):
    """Run the module-level text-generation pipeline on *text*.

    Returns the generated text wrapped in a JSON-serializable dict.
    """
    # Delegate to the shared `pipe` and unwrap the first result's text.
    result = pipe(text)
    generated = result[0]['generated_text']
    ## Return the generated text in a JSON response.
    return {"output": generated}
 
1
  from fastapi import FastAPI
2
  from transformers import pipeline
from llama_cpp import Llama

# Download (if needed) and load the quantized Llama 3.2 1B Instruct model
# from the Hugging Face Hub.
llm = Llama.from_pretrained(
    repo_id="hugging-quants/Llama-3.2-1B-Instruct-Q8_0-GGUF",
    filename="llama-3.2-1b-instruct-q8_0.gguf",
)

# One-off startup smoke test: ask a known question and print the reply so the
# container logs show the model is actually responding.
check = llm.create_chat_completion(
    messages=[
        {
            "role": "user",
            "content": "What is the capital of France?"
        }
    ]
)
print(check['choices'][0]['message']['content'])

## Create a new FastAPI app instance.
app=FastAPI()
  @app.get("/")
26
  def home():
27
  return {"message":"Hello World"}
 
@app.get("/generate")
def generate(text:str):
    """Generate a chat completion for *text* with the module-level Llama model.

    Parameters
    ----------
    text : str
        User prompt, passed verbatim as the chat message content.

    Returns
    -------
    dict
        ``{"output": <assistant reply text>}``.
    """
    ## Use the model to generate text from the given input text.
    output = llm.create_chat_completion(
        messages=[
            {
                "role": "user",
                "content": f"{text}"
            }
        ]
    )
    # BUG FIX: create_chat_completion returns an OpenAI-style response dict,
    # not a transformers-pipeline list, so `output[0]['generated_text']` raised
    # KeyError. Extract the assistant message the same way the startup `check`
    # call does.
    ## Return the generated text in a JSON response.
    return {"output": output['choices'][0]['message']['content']}