import gradio as gr
import numpy as np
import sqlite3
import json
from PIL import Image, ImageDraw
# ------ Tool Implementations ------
def get_recipe_by_ingredients(ingredients):
    """Find recipes based on available ingredients"""
    return {
        "recipes": [
            {"name": "Vegetable Stir Fry", "time": 20, "difficulty": "Easy"},
            {"name": "Pasta Primavera", "time": 30, "difficulty": "Medium"}
        ]
    }
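# (Hedged sketch, not part of the original app) get_recipe_by_ingredients returns a
# canned list; a database-backed variant could match against the JSON ingredient
# lists created in init_recipe_db below. The helper name is hypothetical:
#
#   def get_recipe_by_ingredients_db(ingredients, conn):
#       rows = conn.execute("SELECT name, ingredients, prep_time FROM recipes").fetchall()
#       return {"recipes": [{"name": name, "time": prep, "difficulty": "Unknown"}
#                           for name, ing, prep in rows
#                           if set(ingredients) & set(json.loads(ing))]}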
def get_recipe_image(recipe_name):
    """Generate an image of the finished recipe"""
    # Create placeholder image
    img = Image.new('RGB', (300, 200), color=(73, 109, 137))
    d = ImageDraw.Draw(img)
    d.text((10, 10), f"Image of: {recipe_name}", fill=(255, 255, 0))
    return img
def convert_measurements(amount, from_unit, to_unit):
    """Convert cooking measurements between units"""
    conversions = {
        ("tbsp", "tsp"): lambda x: x * 3,
        ("cups", "ml"): lambda x: x * 240,
        ("oz", "g"): lambda x: x * 28.35
    }
    conversion_key = (from_unit.lower(), to_unit.lower())
    if conversion_key in conversions:
        result = conversions[conversion_key](amount)
        return {"result": round(result, 2), "unit": to_unit}
    return {"error": "Conversion not supported"}
# ------ Recipe Database ------
def init_recipe_db():
    conn = sqlite3.connect(':memory:')
    c = conn.cursor()
    c.execute('''CREATE TABLE recipes
                 (id INTEGER PRIMARY KEY, name TEXT, ingredients TEXT, instructions TEXT, prep_time INT)''')
    recipes = [
        ("Classic Pancakes", json.dumps(["flour", "eggs", "milk", "baking powder"]),
         "1. Mix dry ingredients\n2. Add wet ingredients\n3. Cook on griddle", 15),
        ("Tomato Soup", json.dumps(["tomatoes", "onion", "garlic", "vegetable stock"]),
         "1. Sauté onions\n2. Add tomatoes\n3. Simmer and blend", 30),
        ("Chocolate Cake", json.dumps(["flour", "sugar", "cocoa", "eggs", "milk"]),
         "1. Mix dry ingredients\n2. Add wet ingredients\n3. Bake at 350°F", 45)
    ]
    c.executemany("INSERT INTO recipes (name, ingredients, instructions, prep_time) VALUES (?,?,?,?)", recipes)
    conn.commit()
    return conn
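# (Hedged note) ingredients are stored as JSON text, so callers that need them as a
# Python list should decode them with json.loads, e.g.:
#
#   conn = init_recipe_db()
#   row = conn.execute("SELECT ingredients FROM recipes WHERE name = ?", ("Tomato Soup",)).fetchone()
#   json.loads(row[0])  # -> ['tomatoes', 'onion', 'garlic', 'vegetable stock']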
# ------ Agent Logic ------
def process_query(query, db_conn):
    """Process user query"""
    print(f"Processing query: {query}")
    # Simple keyword-based intent recognition
    if "recipe" in query.lower() or "make" in query.lower() or "cook" in query.lower():
        ingredients = [word for word in ["eggs", "flour", "milk", "tomatoes"] if word in query.lower()]
        if not ingredients:
            ingredients = ["eggs", "flour"]
        return {
            "type": "recipes",
            "data": get_recipe_by_ingredients(ingredients)
        }
    elif "image" in query.lower() or "show" in query.lower():
        recipe_name = next((r for r in ["pancakes", "soup", "cake"] if r in query.lower()), "pancakes")
        return {
            "type": "image",
            "data": get_recipe_image(recipe_name)
        }
    elif "convert" in query.lower():
        words = query.split()
        try:
            amount = float(words[words.index("convert") + 1])
            from_unit = words[words.index("convert") + 2]
            to_unit = words[words.index("to") + 1]
        except (ValueError, IndexError):
            # Fall back to a sensible default when the query can't be parsed
            amount = 2
            from_unit = "cups"
            to_unit = "ml"
        return {
            "type": "conversion",
            "data": convert_measurements(amount, from_unit, to_unit)
        }
    else:
        # Fall back to a name search in the recipe database
        c = db_conn.cursor()
        c.execute("SELECT * FROM recipes WHERE name LIKE ?", (f"%{query}%",))
        return {
            "type": "db_recipes",
            "data": c.fetchall()
        }
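# (Hedged sketch) examples of how queries are routed by the keyword checks above,
# assuming conn = init_recipe_db():
#
#   process_query("What can I make with eggs and milk?", conn)  # "recipes" branch
#   process_query("Show me the pancakes", conn)                 # "image" branch
#   process_query("convert 2 cups to ml", conn)                 # "conversion" branch
#   process_query("Chocolate", conn)                            # "db_recipes" LIKE search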
# ------ Gradio Interface ------
def process_voice_command(audio):
    """Process voice command"""
    # For demo purposes, we use a fixed text query instead of transcribing the audio.
    # In a real implementation, this would convert the recorded audio to text.
    if audio is not None:
        sample_rate, audio_data = audio
    query = "What can I make with eggs and flour?"  # Fixed for demo
    # Initialize database on first run
    if not hasattr(process_voice_command, "db_conn"):
        process_voice_command.db_conn = init_recipe_db()
    # Process query
    result = process_query(query, process_voice_command.db_conn)
    # Generate response
    response_text = ""
    image = None
    if result["type"] == "recipes":
        recipes = result["data"]["recipes"]
        response_text = f"Found {len(recipes)} recipes:\n"
        for recipe in recipes:
            response_text += f"- {recipe['name']} ({recipe['time']} mins)\n"
    elif result["type"] == "image":
        image = result["data"]
        response_text = "Here's an image of the recipe!"
    elif result["type"] == "conversion":
        conv = result["data"]
        if "error" in conv:
            response_text = f"Error: {conv['error']}"
        else:
            response_text = f"Result: {conv['result']} {conv['unit']}"
    elif result["type"] == "db_recipes":
        recipes = result["data"]
        response_text = f"Found {len(recipes)} recipes:\n" if recipes else "No recipes found."
        for recipe in recipes:
            response_text += f"- {recipe[1]} ({recipe[4]} mins)\n"
    # Return results (no audio output in this simplified version)
    return None, response_text, image
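# (Hedged sketch, not part of the original app) to replace the fixed demo query with
# real speech-to-text, one option is a Hugging Face pipeline; this assumes the
# transformers package is installed and that gr.Audio delivers 16-bit PCM samples:
#
#   from transformers import pipeline
#   asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")
#   query = asr({"sampling_rate": sample_rate,
#                "raw": audio_data.astype("float32") / 32768.0})["text"]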
# ------ Create Gradio Interface ------
with gr.Blocks(title="Culinary Voice Assistant") as demo:
    gr.Markdown("# 🧑‍🍳 MCP-Powered Culinary Voice Assistant")
    with gr.Row():
        audio_input = gr.Audio(source="microphone", type="numpy", label="Speak to Chef")
        with gr.Column():
            text_output = gr.Textbox(label="Assistant Response", interactive=False)
            image_output = gr.Image(label="Recipe Image", interactive=False)
    submit_btn = gr.Button("Process Command", variant="primary")
    submit_btn.click(
        fn=process_voice_command,
        inputs=[audio_input],
        outputs=[gr.Audio(visible=False), text_output, image_output]
    )
if __name__ == "__main__":
    demo.launch()
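# (Hedged note, version-dependent) on recent Gradio releases with the "mcp" extra
# installed, the tool functions could also be exposed to MCP clients via
# demo.launch(mcp_server=True), matching the "MCP-Powered" framing in the title.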