Update app.py
Browse files
app.py
CHANGED
@@ -2,7 +2,8 @@ import gradio as gr
|
|
2 |
import spaces
|
3 |
import torch
|
4 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
5 |
-
from retriever.vectordb_rerank import search_documents # 🧠 RAG 검색기 불러오기
|
|
|
6 |
|
7 |
model_name = "dasomaru/gemma-3-4bit-it-demo"
|
8 |
|
@@ -57,5 +58,9 @@ def generate_response(query):
|
|
57 |
|
58 |
return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
59 |
|
60 |
-
|
|
|
|
|
|
|
|
|
61 |
demo.launch()
|
|
|
2 |
import spaces
|
3 |
import torch
|
4 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
5 |
+
# from retriever.vectordb_rerank import search_documents # 🧠 RAG 검색기 불러오기
|
6 |
+
from services.rag_pipeline import rag_pipeline
|
7 |
|
8 |
model_name = "dasomaru/gemma-3-4bit-it-demo"
|
9 |
|
|
|
58 |
|
59 |
return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
60 |
|
# Entry point for the Gradio UI: runs on a GPU-backed Spaces worker for up
# to 300 s per call (the heavy model work happens inside rag_pipeline).
@spaces.GPU(duration=300)
def generate_response_with_pipeline(query):
    """Answer *query* by delegating to the full RAG pipeline.

    Args:
        query: Raw user question text from the Gradio textbox.

    Returns:
        Whatever rag_pipeline produces for the query (presumably the
        generated answer text — confirm against services.rag_pipeline).
    """
    answer = rag_pipeline(query)
    return answer
# Minimal Gradio front-end: a single text input wired to the GPU-backed
# RAG responder, with a single text output. launch() starts the web server.
demo = gr.Interface(fn=generate_response_with_pipeline, inputs="text", outputs="text")
demo.launch()
|