Update app.py

app.py CHANGED
@@ -5,6 +5,9 @@ import inspect
 import pandas as pd
 import logging
 from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, OpenAIServerModel
+import io               # used by Image.open(io.BytesIO(...)) below
+from PIL import Image   # used by the image branch in __call__ below
+from pathlib import Path
 
 from llama_index.core import (
     VectorStoreIndex,
@@ -49,7 +50,7 @@ class BasicAgent:
         # Prepare the query engine
         self.query_engine = self.index.as_query_engine()
         print("Agent ready.")
-
+    '''
     def __call__(self, question: str) -> str:
         print(f"Received question: {question[:50]}...")
         response = self.query_engine.query(question)
@@ -59,6 +60,83 @@ class BasicAgent:
         print("Response text:", str(response))
 
         return str(response)
+    '''
+
+    def __call__(self, question: str, context=None) -> str:
+        if isinstance(context, str):
+            # prompt = f"{context}\n\nQuestion: {question}"
+            response = self.query_engine.query(question)
+            # Print the internal reasoning
+            print("Query response object:", response)
+            print("Response text:", str(response))
+            return str(response)
+
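The string branch above is plain LlamaIndex usage: `QueryEngine.query()` takes a query string and returns a `Response` whose answer text is rendered by `str(response)` (the original `return response.text` would raise, since `Response` has no `.text` attribute). A minimal self-contained sketch of that path, assuming an index built from a hypothetical local `data/` folder (the Space builds `self.index` elsewhere in app.py):

```python
# Minimal sketch of the text query path (the "data/" corpus is hypothetical).
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader

documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()

response = query_engine.query("What is this corpus about?")
print(str(response))  # Response objects render their answer text via str()
```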
+        elif context and hasattr(context, "read"):  # image or audio file
+            file_bytes = context.read()
+            filename = context.name
+
+            if filename.endswith((".png", ".jpg", ".jpeg", ".webp", ".bmp", ".gif")):
+                print("Entered image branch:", filename)
+                image = Image.open(io.BytesIO(file_bytes))
+                response = self.query_engine.query(
+                    messages=[
+                        {"role": "user", "content": [
+                            {"type": "text", "text": question},
+                            {"type": "image_url", "image_url": image}
+                        ]}
+                    ]
+                )
+                return response.choices[0].message.content
+
+            elif filename.endswith((".mp3", ".wav", ".ogg")):
+                print("Entered audio branch:", filename)
+                response = self.query_engine.query(
+                    messages=[
+                        {"role": "user", "content": [
+                            {"type": "text", "text": question},
+                            {"type": "audio_url", "audio_url": file_bytes}
+                        ]}
+                    ]
+                )
+                return response.choices[0].message.content
+
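Both media branches pass a `messages=` keyword to `query_engine.query()` and then read `response.choices[0].message.content`. That calling convention belongs to the OpenAI chat-completions API, not to LlamaIndex query engines, whose `query()` accepts a query string, so these branches raise a `TypeError` as written. A sketch of what the image call appears to intend, expressed against the OpenAI client (the client setup and model name are assumptions, not from this commit; chat completions wants a data URL or remote URL, not a `PIL.Image` object):

```python
# Hypothetical rewrite of the image branch against the OpenAI chat-completions API.
import base64
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the Space secrets

def ask_about_image(question: str, file_bytes: bytes) -> str:
    b64 = base64.b64encode(file_bytes).decode("utf-8")
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # assumed vision-capable model
        messages=[
            {"role": "user", "content": [
                {"type": "text", "text": question},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64}"}},
            ]},
        ],
    )
    return response.choices[0].message.content
```

The audio branch has the same shape; on audio-capable models, chat completions takes audio as a `{"type": "input_audio", "input_audio": {"data": <base64>, "format": "wav"}}` content part rather than an `audio_url` of raw bytes.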
+            else:
+                print("Entered fallback branch")
+                response = self.llm.complete(question)
+                return response.text
+
+
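The fallback branch relies on `self.llm`, which this diff never defines; it presumably exists on `BasicAgent` as a LlamaIndex LLM, whose `complete()` returns a `CompletionResponse` carrying the generated string in `.text`. A sketch under that assumption:

```python
# Assumed: self.llm is a LlamaIndex LLM such as llama_index.llms.openai.OpenAI.
from llama_index.llms.openai import OpenAI

llm = OpenAI(model="gpt-4o-mini")      # hypothetical model choice
completion = llm.complete("Say hello.")
print(completion.text)                 # CompletionResponse exposes the string via .text
```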
+def answer_with_media(question, uploaded_file):
+    file_content = ""
+    file_path = None
+
+    if uploaded_file is not None:
+        file_path = Path(uploaded_file.name)
+        suffix = file_path.suffix.lower()
+
+        if suffix in [".txt", ".md", ".csv"]:
+            with open(file_path, "r", encoding="utf-8") as f:
+                file_content = f.read()
+        elif suffix in [".png", ".jpg", ".jpeg", ".webp", ".bmp", ".gif", ".mp3", ".wav", ".ogg"]:
+            # Images/audio: pass the uploaded file object through to the model
+            file_content = uploaded_file
+        else:
+            return "Unsupported file format."
+
+    response = agent(question, context=file_content)
+    return response
+
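`answer_with_media` calls a module-level `agent` that never appears in this diff; presumably app.py instantiates it earlier, along the lines of:

```python
# Assumed, not shown in this commit: the global agent the handler closes over.
agent = BasicAgent()
```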
+gr.Interface(
+    fn=answer_with_media,
+    inputs=[
+        gr.Textbox(label="Question"),
+        gr.File(label="Upload a file (text, image, or audio)")
+    ],
+    outputs=gr.Textbox(label="Answer")
+).launch()
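To sanity-check the wiring without the UI, the handler can be called directly. The diff treats the upload as an object exposing the file path as `.name`, which this hypothetical stub imitates:

```python
# Hypothetical smoke test for answer_with_media, bypassing Gradio.
class FakeUpload:
    def __init__(self, name: str):
        self.name = name  # the handler reads the path from .name

print(answer_with_media("Summarize this file.", FakeUpload("notes.txt")))  # assumes notes.txt exists
```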