import os
import re
from http import HTTPStatus
from typing import Dict, List, Optional, Tuple
import base64
import mimetypes
import PyPDF2
import docx
import cv2
import numpy as np
from PIL import Image
import pytesseract
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import html2text
import json
import time
import webbrowser
import urllib.parse
import copy
import html
import gradio as gr
from huggingface_hub import InferenceClient
from tavily import TavilyClient
from huggingface_hub import HfApi
import tempfile
from openai import OpenAI
import uuid
from mistralai import Mistral
import shutil
import threading
import atexit
import asyncio
from datetime import datetime, timedelta
# Gradio supported languages for syntax highlighting
GRADIO_SUPPORTED_LANGUAGES = [
"python", "c", "cpp", "markdown", "latex", "json", "html", "css", "javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell", "r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite", "sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql", "sql-gpSQL", "sql-sparkSQL", "sql-esper", None
]
def get_gradio_language(language):
    # Map composite app types to a supported syntax-highlighting language
    if language in ("streamlit", "gradio"):
        return "python"
return language if language in GRADIO_SUPPORTED_LANGUAGES else None
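# Illustrative behavior:
#   get_gradio_language("streamlit")  -> "python"  (composite app type)
#   get_gradio_language("html")       -> "html"    (directly supported)
#   get_gradio_language("fortran")    -> None      (unsupported -> no highlighting)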
# Search/Replace Constants
SEARCH_START = "<<<<<<< SEARCH"
DIVIDER = "======="
REPLACE_END = ">>>>>>> REPLACE"
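# Shape of a complete edit block built from the markers above (illustrative only):
#
#   <<<<<<< SEARCH
#   <h1>Old heading</h1>
#   =======
#   <h1>New heading</h1>
#   >>>>>>> REPLACE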
# Gradio Documentation Auto-Update System
GRADIO_LLMS_TXT_URL = "https://www.gradio.app/llms.txt"
GRADIO_DOCS_CACHE_FILE = ".gradio_docs_cache.txt"
GRADIO_DOCS_LAST_UPDATE_FILE = ".gradio_docs_last_update.txt"
GRADIO_DOCS_UPDATE_ON_APP_UPDATE = True # Only update when app is updated, not on a timer
# Global variable to store the current Gradio documentation
_gradio_docs_content: Optional[str] = None
_gradio_docs_last_fetched: Optional[datetime] = None
def fetch_gradio_docs() -> Optional[str]:
"""Fetch the latest Gradio documentation from llms.txt"""
try:
response = requests.get(GRADIO_LLMS_TXT_URL, timeout=10)
response.raise_for_status()
return response.text
except Exception as e:
print(f"Warning: Failed to fetch Gradio docs from {GRADIO_LLMS_TXT_URL}: {e}")
return None
def load_cached_gradio_docs() -> Optional[str]:
"""Load cached Gradio documentation from file"""
try:
if os.path.exists(GRADIO_DOCS_CACHE_FILE):
with open(GRADIO_DOCS_CACHE_FILE, 'r', encoding='utf-8') as f:
return f.read()
except Exception as e:
print(f"Warning: Failed to load cached Gradio docs: {e}")
return None
def save_gradio_docs_cache(content: str):
"""Save Gradio documentation to cache file"""
try:
with open(GRADIO_DOCS_CACHE_FILE, 'w', encoding='utf-8') as f:
f.write(content)
with open(GRADIO_DOCS_LAST_UPDATE_FILE, 'w', encoding='utf-8') as f:
f.write(datetime.now().isoformat())
except Exception as e:
print(f"Warning: Failed to save Gradio docs cache: {e}")
def get_last_update_time() -> Optional[datetime]:
"""Get the last update time from file"""
try:
if os.path.exists(GRADIO_DOCS_LAST_UPDATE_FILE):
with open(GRADIO_DOCS_LAST_UPDATE_FILE, 'r', encoding='utf-8') as f:
return datetime.fromisoformat(f.read().strip())
except Exception as e:
print(f"Warning: Failed to read last update time: {e}")
return None
def should_update_gradio_docs() -> bool:
"""Check if Gradio documentation should be updated"""
# Only update if we don't have cached content (first run or cache deleted)
return not os.path.exists(GRADIO_DOCS_CACHE_FILE)
def force_update_gradio_docs():
"""
Force an update of Gradio documentation (useful when app is updated).
To manually refresh docs, you can call this function or simply delete the cache file:
rm .gradio_docs_cache.txt && restart the app
"""
global _gradio_docs_content, _gradio_docs_last_fetched
print("π Forcing Gradio documentation update...")
latest_content = fetch_gradio_docs()
if latest_content:
_gradio_docs_content = latest_content
_gradio_docs_last_fetched = datetime.now()
save_gradio_docs_cache(latest_content)
update_gradio_system_prompts()
print("β Gradio documentation updated successfully")
return True
else:
print("β Failed to update Gradio documentation")
return False
def get_gradio_docs_content() -> str:
"""Get the current Gradio documentation content, updating if necessary"""
global _gradio_docs_content, _gradio_docs_last_fetched
# Check if we need to update
if (_gradio_docs_content is None or
_gradio_docs_last_fetched is None or
should_update_gradio_docs()):
print("Updating Gradio documentation...")
# Try to fetch latest content
latest_content = fetch_gradio_docs()
if latest_content:
_gradio_docs_content = latest_content
_gradio_docs_last_fetched = datetime.now()
save_gradio_docs_cache(latest_content)
print("β Gradio documentation updated successfully")
else:
# Fallback to cached content
cached_content = load_cached_gradio_docs()
if cached_content:
_gradio_docs_content = cached_content
_gradio_docs_last_fetched = datetime.now()
print("β οΈ Using cached Gradio documentation (network fetch failed)")
else:
# Fallback to minimal content
_gradio_docs_content = """
# Gradio API Reference (Offline Fallback)
This is a minimal fallback when documentation cannot be fetched.
Please check your internet connection for the latest API reference.
Basic Gradio components: Button, Textbox, Slider, Image, Audio, Video, File, etc.
Use gr.Blocks() for custom layouts and gr.Interface() for simple apps.
"""
print("β Using minimal fallback documentation")
return _gradio_docs_content or ""
def update_gradio_system_prompts():
"""Update the global Gradio system prompts with latest documentation"""
global GRADIO_SYSTEM_PROMPT, GRADIO_SYSTEM_PROMPT_WITH_SEARCH
docs_content = get_gradio_docs_content()
# Base system prompt
base_prompt = """You are an expert Gradio developer. Write clean, idiomatic, and runnable Gradio applications for the user's request. Use the latest Gradio API and best practices. Output ONLY the code inside a ``` code block, and do not include any explanations or extra text. If the user provides a file or other context, use it as a reference. Make the app as self-contained as possible. Do NOT add the language name at the top of the code output.
🚨 IMPORTANT: If the user is asking to use external APIs (like OpenRouter, OpenAI API, Hugging Face Inference API, etc.), DO NOT use @spaces.GPU decorators or any ZeroGPU features. External APIs handle the model inference remotely, so GPU allocation on the Spaces instance is not needed.
🚨 CRITICAL REQUIREMENT: If the user provides ANY diffusion model code (FLUX, Stable Diffusion, etc.) that runs locally (not via API), you MUST implement ZeroGPU ahead-of-time (AoT) compilation. This is mandatory and provides 1.3x-1.8x performance improvements. Do not create basic Gradio apps without AoT optimization for diffusion models.
## ZeroGPU Integration (MANDATORY)
ALWAYS use ZeroGPU for GPU-dependent functions in Gradio apps:
1. Import the spaces module: `import spaces`
2. Decorate GPU-dependent functions with `@spaces.GPU`
3. Specify appropriate duration based on expected runtime:
- Quick inference (< 30s): `@spaces.GPU(duration=30)`
- Standard generation (30-60s): `@spaces.GPU` (default 60s)
- Complex generation (60-120s): `@spaces.GPU(duration=120)`
- Heavy processing (120-180s): `@spaces.GPU(duration=180)`
Example usage:
```python
import spaces
from diffusers import DiffusionPipeline
pipe = DiffusionPipeline.from_pretrained(...)
pipe.to('cuda')
@spaces.GPU(duration=120)
def generate(prompt):
return pipe(prompt).images
gr.Interface(
fn=generate,
inputs=gr.Text(),
outputs=gr.Gallery(),
).launch()
```
Duration Guidelines:
- Shorter durations improve queue priority for users
- Text-to-image: typically 30-60 seconds
- Image-to-image: typically 20-40 seconds
- Video generation: typically 60-180 seconds
- Audio/music generation: typically 30-90 seconds
- Model loading + inference: add 10-30s buffer
- AoT compilation during startup: use @spaces.GPU(duration=1500) for maximum allowed duration
Functions that typically need @spaces.GPU:
- Image generation (text-to-image, image-to-image)
- Video generation
- Audio/music generation
- Model inference with transformers, diffusers
- Any function using .to('cuda') or GPU operations
## CRITICAL: Use ZeroGPU AoT Compilation for ALL Diffusion Models
FOR ANY DIFFUSION MODEL (FLUX, Stable Diffusion, etc.), YOU MUST IMPLEMENT AHEAD-OF-TIME COMPILATION.
This is NOT optional - it provides 1.3x-1.8x speedup and is essential for production ZeroGPU Spaces.
ALWAYS implement this pattern for diffusion models:
### MANDATORY: Basic AoT Compilation Pattern
YOU MUST USE THIS EXACT PATTERN for any diffusion model (FLUX, Stable Diffusion, etc.):
1. ALWAYS add AoT compilation function with @spaces.GPU(duration=1500)
2. ALWAYS use spaces.aoti_capture to capture inputs
3. ALWAYS use torch.export.export to export the transformer
4. ALWAYS use spaces.aoti_compile to compile
5. ALWAYS use spaces.aoti_apply to apply to pipeline
### Required AoT Implementation
```python
import spaces
import torch
from diffusers import DiffusionPipeline
MODEL_ID = 'black-forest-labs/FLUX.1-dev'
pipe = DiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)
pipe.to('cuda')
@spaces.GPU(duration=1500) # Maximum duration allowed during startup
def compile_transformer():
# 1. Capture example inputs
with spaces.aoti_capture(pipe.transformer) as call:
pipe("arbitrary example prompt")
# 2. Export the model
exported = torch.export.export(
pipe.transformer,
args=call.args,
kwargs=call.kwargs,
)
# 3. Compile the exported model
return spaces.aoti_compile(exported)
# 4. Apply compiled model to pipeline
compiled_transformer = compile_transformer()
spaces.aoti_apply(compiled_transformer, pipe.transformer)
@spaces.GPU
def generate(prompt):
return pipe(prompt).images
```
### Advanced Optimizations
#### FP8 Quantization (Additional 1.2x speedup on H200)
```python
from torchao.quantization import quantize_, Float8DynamicActivationFloat8WeightConfig
@spaces.GPU(duration=1500)
def compile_transformer_with_quantization():
# Quantize before export for FP8 speedup
quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
with spaces.aoti_capture(pipe.transformer) as call:
pipe("arbitrary example prompt")
exported = torch.export.export(
pipe.transformer,
args=call.args,
kwargs=call.kwargs,
)
return spaces.aoti_compile(exported)
```
#### Dynamic Shapes (Variable input sizes)
```python
from torch.utils._pytree import tree_map
@spaces.GPU(duration=1500)
def compile_transformer_dynamic():
with spaces.aoti_capture(pipe.transformer) as call:
pipe("arbitrary example prompt")
# Define dynamic dimension ranges (model-dependent)
transformer_hidden_dim = torch.export.Dim('hidden', min=4096, max=8212)
# Map argument names to dynamic dimensions
transformer_dynamic_shapes = {
"hidden_states": {1: transformer_hidden_dim},
"img_ids": {0: transformer_hidden_dim},
}
# Create dynamic shapes structure
dynamic_shapes = tree_map(lambda v: None, call.kwargs)
dynamic_shapes.update(transformer_dynamic_shapes)
exported = torch.export.export(
pipe.transformer,
args=call.args,
kwargs=call.kwargs,
dynamic_shapes=dynamic_shapes,
)
return spaces.aoti_compile(exported)
```
#### Multi-Compile for Different Resolutions
```python
@spaces.GPU(duration=1500)
def compile_multiple_resolutions():
compiled_models = {}
resolutions = [(512, 512), (768, 768), (1024, 1024)]
for width, height in resolutions:
# Capture inputs for specific resolution
with spaces.aoti_capture(pipe.transformer) as call:
pipe(f"test prompt {width}x{height}", width=width, height=height)
exported = torch.export.export(
pipe.transformer,
args=call.args,
kwargs=call.kwargs,
)
compiled_models[f"{width}x{height}"] = spaces.aoti_compile(exported)
return compiled_models
# Usage with resolution dispatch
compiled_models = compile_multiple_resolutions()
@spaces.GPU
def generate_with_resolution(prompt, width=1024, height=1024):
resolution_key = f"{width}x{height}"
if resolution_key in compiled_models:
# Temporarily apply the right compiled model
spaces.aoti_apply(compiled_models[resolution_key], pipe.transformer)
return pipe(prompt, width=width, height=height).images
```
#### FlashAttention-3 Integration
```python
from kernels import get_kernel
# Load pre-built FA3 kernel compatible with H200
try:
vllm_flash_attn3 = get_kernel("kernels-community/vllm-flash-attn3")
print("β FlashAttention-3 kernel loaded successfully")
except Exception as e:
print(f"β οΈ FlashAttention-3 not available: {e}")
# Custom attention processor example
class FlashAttention3Processor:
def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None):
# Use FA3 kernel for attention computation
return vllm_flash_attn3(hidden_states, encoder_hidden_states, attention_mask)
# Apply FA3 processor to model
if 'vllm_flash_attn3' in locals():
for name, module in pipe.transformer.named_modules():
if hasattr(module, 'processor'):
module.processor = FlashAttention3Processor()
```
### Complete Optimized Example
```python
import spaces
import torch
from diffusers import DiffusionPipeline
from torchao.quantization import quantize_, Float8DynamicActivationFloat8WeightConfig
MODEL_ID = 'black-forest-labs/FLUX.1-dev'
pipe = DiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)
pipe.to('cuda')
@spaces.GPU(duration=1500)
def compile_optimized_transformer():
# Apply FP8 quantization
quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
# Capture inputs
with spaces.aoti_capture(pipe.transformer) as call:
pipe("optimization test prompt")
# Export and compile
exported = torch.export.export(
pipe.transformer,
args=call.args,
kwargs=call.kwargs,
)
return spaces.aoti_compile(exported)
# Compile during startup
compiled_transformer = compile_optimized_transformer()
spaces.aoti_apply(compiled_transformer, pipe.transformer)
@spaces.GPU
def generate(prompt):
return pipe(prompt).images
```
**Expected Performance Gains:**
- Basic AoT: 1.3x-1.8x speedup
- + FP8 Quantization: Additional 1.2x speedup
- + FlashAttention-3: Additional attention speedup
- Total potential: 2x-3x faster inference
**Hardware Requirements:**
- FP8 quantization requires CUDA compute capability ≥ 9.0 (H200 ✅)
- FlashAttention-3 works on H200 hardware via kernels library
- Dynamic shapes add flexibility for variable input sizes
## Complete Gradio API Reference
This reference is automatically synced from https://www.gradio.app/llms.txt to ensure accuracy.
"""
# Search-enabled prompt
search_prompt = """You are an expert Gradio developer with access to real-time web search. Write clean, idiomatic, and runnable Gradio applications for the user's request. Use the latest Gradio API and best practices. When needed, use web search to find current best practices or verify latest Gradio features. Output ONLY the code inside a ``` code block, and do not include any explanations or extra text. If the user provides a file or other context, use it as a reference. Make the app as self-contained as possible. Do NOT add the language name at the top of the code output.
🚨 IMPORTANT: If the user is asking to use external APIs (like OpenRouter, OpenAI API, Hugging Face Inference API, etc.), DO NOT use @spaces.GPU decorators or any ZeroGPU features. External APIs handle the model inference remotely, so GPU allocation on the Spaces instance is not needed.
🚨 CRITICAL REQUIREMENT: If the user provides ANY diffusion model code (FLUX, Stable Diffusion, etc.) that runs locally (not via API), you MUST implement ZeroGPU ahead-of-time (AoT) compilation. This is mandatory and provides 1.3x-1.8x performance improvements. Do not create basic Gradio apps without AoT optimization for diffusion models.
## ZeroGPU Integration (MANDATORY)
ALWAYS use ZeroGPU for GPU-dependent functions in Gradio apps:
1. Import the spaces module: `import spaces`
2. Decorate GPU-dependent functions with `@spaces.GPU`
3. Specify appropriate duration based on expected runtime:
- Quick inference (< 30s): `@spaces.GPU(duration=30)`
- Standard generation (30-60s): `@spaces.GPU` (default 60s)
- Complex generation (60-120s): `@spaces.GPU(duration=120)`
- Heavy processing (120-180s): `@spaces.GPU(duration=180)`
Example usage:
```python
import spaces
from diffusers import DiffusionPipeline
pipe = DiffusionPipeline.from_pretrained(...)
pipe.to('cuda')
@spaces.GPU(duration=120)
def generate(prompt):
return pipe(prompt).images
gr.Interface(
fn=generate,
inputs=gr.Text(),
outputs=gr.Gallery(),
).launch()
```
Duration Guidelines:
- Shorter durations improve queue priority for users
- Text-to-image: typically 30-60 seconds
- Image-to-image: typically 20-40 seconds
- Video generation: typically 60-180 seconds
- Audio/music generation: typically 30-90 seconds
- Model loading + inference: add 10-30s buffer
- AoT compilation during startup: use @spaces.GPU(duration=1500) for maximum allowed duration
Functions that typically need @spaces.GPU:
- Image generation (text-to-image, image-to-image)
- Video generation
- Audio/music generation
- Model inference with transformers, diffusers
- Any function using .to('cuda') or GPU operations
## CRITICAL: Use ZeroGPU AoT Compilation for ALL Diffusion Models
FOR ANY DIFFUSION MODEL (FLUX, Stable Diffusion, etc.), YOU MUST IMPLEMENT AHEAD-OF-TIME COMPILATION.
This is NOT optional - it provides 1.3x-1.8x speedup and is essential for production ZeroGPU Spaces.
ALWAYS implement this pattern for diffusion models:
### MANDATORY: Basic AoT Compilation Pattern
YOU MUST USE THIS EXACT PATTERN for any diffusion model (FLUX, Stable Diffusion, etc.):
1. ALWAYS add AoT compilation function with @spaces.GPU(duration=1500)
2. ALWAYS use spaces.aoti_capture to capture inputs
3. ALWAYS use torch.export.export to export the transformer
4. ALWAYS use spaces.aoti_compile to compile
5. ALWAYS use spaces.aoti_apply to apply to pipeline
### Required AoT Implementation
For production Spaces with heavy models, use ahead-of-time (AoT) compilation for 1.3x-1.8x speedups:
```python
import spaces
import torch
from diffusers import DiffusionPipeline
MODEL_ID = 'black-forest-labs/FLUX.1-dev'
pipe = DiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)
pipe.to('cuda')
@spaces.GPU(duration=1500) # Maximum duration allowed during startup
def compile_transformer():
# 1. Capture example inputs
with spaces.aoti_capture(pipe.transformer) as call:
pipe("arbitrary example prompt")
# 2. Export the model
exported = torch.export.export(
pipe.transformer,
args=call.args,
kwargs=call.kwargs,
)
# 3. Compile the exported model
return spaces.aoti_compile(exported)
# 4. Apply compiled model to pipeline
compiled_transformer = compile_transformer()
spaces.aoti_apply(compiled_transformer, pipe.transformer)
@spaces.GPU
def generate(prompt):
return pipe(prompt).images
```
### Advanced Optimizations
#### FP8 Quantization (Additional 1.2x speedup on H200)
```python
from torchao.quantization import quantize_, Float8DynamicActivationFloat8WeightConfig
@spaces.GPU(duration=1500)
def compile_transformer_with_quantization():
# Quantize before export for FP8 speedup
quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
with spaces.aoti_capture(pipe.transformer) as call:
pipe("arbitrary example prompt")
exported = torch.export.export(
pipe.transformer,
args=call.args,
kwargs=call.kwargs,
)
return spaces.aoti_compile(exported)
```
#### Dynamic Shapes (Variable input sizes)
```python
from torch.utils._pytree import tree_map
@spaces.GPU(duration=1500)
def compile_transformer_dynamic():
with spaces.aoti_capture(pipe.transformer) as call:
pipe("arbitrary example prompt")
# Define dynamic dimension ranges (model-dependent)
transformer_hidden_dim = torch.export.Dim('hidden', min=4096, max=8212)
# Map argument names to dynamic dimensions
transformer_dynamic_shapes = {
"hidden_states": {1: transformer_hidden_dim},
"img_ids": {0: transformer_hidden_dim},
}
# Create dynamic shapes structure
dynamic_shapes = tree_map(lambda v: None, call.kwargs)
dynamic_shapes.update(transformer_dynamic_shapes)
exported = torch.export.export(
pipe.transformer,
args=call.args,
kwargs=call.kwargs,
dynamic_shapes=dynamic_shapes,
)
return spaces.aoti_compile(exported)
```
#### Multi-Compile for Different Resolutions
```python
@spaces.GPU(duration=1500)
def compile_multiple_resolutions():
compiled_models = {}
resolutions = [(512, 512), (768, 768), (1024, 1024)]
for width, height in resolutions:
# Capture inputs for specific resolution
with spaces.aoti_capture(pipe.transformer) as call:
pipe(f"test prompt {width}x{height}", width=width, height=height)
exported = torch.export.export(
pipe.transformer,
args=call.args,
kwargs=call.kwargs,
)
compiled_models[f"{width}x{height}"] = spaces.aoti_compile(exported)
return compiled_models
# Usage with resolution dispatch
compiled_models = compile_multiple_resolutions()
@spaces.GPU
def generate_with_resolution(prompt, width=1024, height=1024):
resolution_key = f"{width}x{height}"
if resolution_key in compiled_models:
# Temporarily apply the right compiled model
spaces.aoti_apply(compiled_models[resolution_key], pipe.transformer)
return pipe(prompt, width=width, height=height).images
```
#### FlashAttention-3 Integration
```python
from kernels import get_kernel
# Load pre-built FA3 kernel compatible with H200
try:
vllm_flash_attn3 = get_kernel("kernels-community/vllm-flash-attn3")
print("β FlashAttention-3 kernel loaded successfully")
except Exception as e:
print(f"β οΈ FlashAttention-3 not available: {e}")
# Custom attention processor example
class FlashAttention3Processor:
def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None):
# Use FA3 kernel for attention computation
return vllm_flash_attn3(hidden_states, encoder_hidden_states, attention_mask)
# Apply FA3 processor to model
if 'vllm_flash_attn3' in locals():
for name, module in pipe.transformer.named_modules():
if hasattr(module, 'processor'):
module.processor = FlashAttention3Processor()
```
### Complete Optimized Example
```python
import spaces
import torch
from diffusers import DiffusionPipeline
from torchao.quantization import quantize_, Float8DynamicActivationFloat8WeightConfig
MODEL_ID = 'black-forest-labs/FLUX.1-dev'
pipe = DiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)
pipe.to('cuda')
@spaces.GPU(duration=1500)
def compile_optimized_transformer():
# Apply FP8 quantization
quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
# Capture inputs
with spaces.aoti_capture(pipe.transformer) as call:
pipe("optimization test prompt")
# Export and compile
exported = torch.export.export(
pipe.transformer,
args=call.args,
kwargs=call.kwargs,
)
return spaces.aoti_compile(exported)
# Compile during startup
compiled_transformer = compile_optimized_transformer()
spaces.aoti_apply(compiled_transformer, pipe.transformer)
@spaces.GPU
def generate(prompt):
return pipe(prompt).images
```
**Expected Performance Gains:**
- Basic AoT: 1.3x-1.8x speedup
- + FP8 Quantization: Additional 1.2x speedup
- + FlashAttention-3: Additional attention speedup
- Total potential: 2x-3x faster inference
**Hardware Requirements:**
- FP8 quantization requires CUDA compute capability ≥ 9.0 (H200 ✅)
- FlashAttention-3 works on H200 hardware via kernels library
- Dynamic shapes add flexibility for variable input sizes
## Complete Gradio API Reference
This reference is automatically synced from https://www.gradio.app/llms.txt to ensure accuracy.
"""
# Update the prompts
GRADIO_SYSTEM_PROMPT = base_prompt + docs_content + "\n\nAlways use the exact function signatures from this API reference and follow modern Gradio patterns."
GRADIO_SYSTEM_PROMPT_WITH_SEARCH = search_prompt + docs_content + "\n\nAlways use the exact function signatures from this API reference and follow modern Gradio patterns."
# Initialize Gradio documentation on startup
def initialize_gradio_docs():
"""Initialize Gradio documentation on application startup"""
try:
update_gradio_system_prompts()
if should_update_gradio_docs():
print("π Gradio documentation system initialized (fetched fresh content)")
else:
print("π Gradio documentation system initialized (using cached content)")
except Exception as e:
print(f"Warning: Failed to initialize Gradio documentation: {e}")
# Configuration
HTML_SYSTEM_PROMPT = """ONLY USE HTML, CSS AND JAVASCRIPT. If you want to use ICONS, make sure to import the icon library first. Try to create the best UI possible using only HTML, CSS and JAVASCRIPT. MAKE IT RESPONSIVE USING MODERN CSS. Lean on modern CSS for the styling; if something cannot be done with modern CSS alone, fall back to custom CSS. Also, elaborate as much as you can to create something unique. ALWAYS RETURN THE RESPONSE AS A SINGLE HTML FILE
For website redesign tasks:
- Use the provided original HTML code as the starting point for redesign
- Preserve all original content, structure, and functionality
- Keep the same semantic HTML structure but enhance the styling
- Reuse all original images and their URLs from the HTML code
- Create a modern, responsive design with improved typography and spacing
- Use modern CSS frameworks and design patterns
- Ensure accessibility and mobile responsiveness
- Maintain the same navigation and user flow
- Enhance the visual design while keeping the original layout structure
If an image is provided, analyze it and use the visual information to better understand the user's requirements.
Always respond with code that can be executed or rendered directly.
Always output only the HTML code inside a ```html ... ``` code block, and do not include any explanations or extra text. Do NOT add the language name at the top of the code output."""
def validate_video_html(video_html: str) -> bool:
"""Validate that the video HTML is well-formed and safe to insert."""
try:
# Basic checks for video HTML structure
if not video_html or not video_html.strip():
return False
# Check for required video elements
        if '<video' not in video_html:
return False
return True
except Exception:
return False
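# Illustrative checks (assuming the '<video' marker test above):
#   validate_video_html('<video controls src="clip.mp4"></video>')  # True
#   validate_video_html('<img src="poster.png">')                   # False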
def llm_place_media(html_content: str, media_html_tag: str, media_kind: str = "image") -> str:
"""Ask a lightweight model to produce search/replace blocks that insert media_html_tag in the best spot.
The model must return ONLY our block format using SEARCH_START/DIVIDER/REPLACE_END.
"""
try:
client = get_inference_client("Qwen/Qwen3-Coder-480B-A35B-Instruct", "auto")
system_prompt = (
"You are a code editor. Insert the provided media tag into the given HTML in the most semantically appropriate place.\n"
"For video elements: prefer replacing placeholder images or inserting in hero sections with proper container divs.\n"
"For image elements: prefer replacing placeholder images or inserting near related content.\n"
"CRITICAL: Ensure proper HTML structure - videos should be wrapped in appropriate containers.\n"
"Return ONLY search/replace blocks using the exact markers: <<<<<<< SEARCH, =======, >>>>>>> REPLACE.\n"
"Do NOT include any commentary. Ensure the SEARCH block matches exact lines from the input.\n"
"When inserting videos, ensure they are properly contained within semantic HTML elements.\n"
)
# Truncate very long media tags for LLM prompt only to prevent token limits
truncated_media_tag_for_prompt = media_html_tag
if len(media_html_tag) > 2000:
# For very long data URIs, show structure but truncate the data for LLM prompt
if 'data:video/mp4;base64,' in media_html_tag:
start_idx = media_html_tag.find('data:video/mp4;base64,')
end_idx = media_html_tag.find('"', start_idx)
if start_idx != -1 and end_idx != -1:
truncated_media_tag_for_prompt = (
media_html_tag[:start_idx] +
'data:video/mp4;base64,[TRUNCATED_BASE64_DATA]' +
media_html_tag[end_idx:]
)
user_payload = (
"HTML Document:\n" + html_content + "\n\n" +
f"Media ({media_kind}):\n" + truncated_media_tag_for_prompt + "\n\n" +
"Produce search/replace blocks now."
)
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_payload},
]
completion = client.chat.completions.create(
model="Qwen/Qwen3-Coder-480B-A35B-Instruct",
messages=messages,
max_tokens=2000,
temperature=0.2,
)
text = (completion.choices[0].message.content or "") if completion and completion.choices else ""
# Replace any truncated placeholders with the original full media HTML
if '[TRUNCATED_BASE64_DATA]' in text and 'data:video/mp4;base64,[TRUNCATED_BASE64_DATA]' in truncated_media_tag_for_prompt:
# Extract the original base64 data from the full media tag
original_start = media_html_tag.find('data:video/mp4;base64,')
original_end = media_html_tag.find('"', original_start)
if original_start != -1 and original_end != -1:
original_data_uri = media_html_tag[original_start:original_end]
text = text.replace('data:video/mp4;base64,[TRUNCATED_BASE64_DATA]', original_data_uri)
return text.strip()
except Exception as e:
print(f"[LLMPlaceMedia] Fallback due to error: {e}")
return ""
# Stricter prompt for GLM-4.5V to ensure a complete, runnable HTML document with no escaped characters
GLM45V_HTML_SYSTEM_PROMPT = """You are an expert front-end developer.
Output a COMPLETE, STANDALONE HTML document that renders directly in a browser.
Hard constraints:
- DO NOT use React, ReactDOM, JSX, Babel, Vue, Angular, Svelte, or any SPA framework.
- Use ONLY plain HTML, CSS, and vanilla JavaScript.
- Allowed external resources: Tailwind CSS CDN, Font Awesome CDN, Google Fonts.
- Do NOT escape characters (no \\n, \\t, or escaped quotes). Output raw HTML/JS/CSS.
Structural requirements:
- Include <!DOCTYPE html>, <html>, <head>, and <body> with proper nesting
- Include required <link> tags for any CSS you reference (e.g., Tailwind, Font Awesome, Google Fonts)
- Keep everything in ONE file; inline CSS/JS as needed
Return ONLY the code inside a single ```html ... ``` code block. No additional text before or after.
"""
# ---------------------------------------------------------------------------
# Video temp-file management (per-session tracking and cleanup)
# ---------------------------------------------------------------------------
VIDEO_TEMP_DIR = os.path.join(tempfile.gettempdir(), "anycoder_videos")
VIDEO_FILE_TTL_SECONDS = 6 * 60 * 60 # 6 hours
_SESSION_VIDEO_FILES: Dict[str, List[str]] = {}
_VIDEO_FILES_LOCK = threading.Lock()
def _ensure_video_dir_exists() -> None:
try:
os.makedirs(VIDEO_TEMP_DIR, exist_ok=True)
except Exception:
pass
def _register_video_for_session(session_id: Optional[str], file_path: str) -> None:
if not session_id or not file_path:
return
with _VIDEO_FILES_LOCK:
if session_id not in _SESSION_VIDEO_FILES:
_SESSION_VIDEO_FILES[session_id] = []
_SESSION_VIDEO_FILES[session_id].append(file_path)
def cleanup_session_videos(session_id: Optional[str]) -> None:
if not session_id:
return
with _VIDEO_FILES_LOCK:
file_list = _SESSION_VIDEO_FILES.pop(session_id, [])
for path in file_list:
try:
if path and os.path.exists(path):
os.unlink(path)
except Exception:
# Best-effort cleanup
pass
def reap_old_videos(ttl_seconds: int = VIDEO_FILE_TTL_SECONDS) -> None:
"""Delete old video files in the temp directory based on modification time."""
try:
_ensure_video_dir_exists()
now_ts = time.time()
for name in os.listdir(VIDEO_TEMP_DIR):
path = os.path.join(VIDEO_TEMP_DIR, name)
try:
if not os.path.isfile(path):
continue
mtime = os.path.getmtime(path)
if now_ts - mtime > ttl_seconds:
os.unlink(path)
except Exception:
pass
except Exception:
# Temp dir might not exist or be accessible; ignore
pass
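# Sketch of how these helpers could be wired up elsewhere in the app (the
# hourly interval and `session_id` variable are illustrative assumptions):
#   atexit.register(reap_old_videos)                  # best-effort sweep on shutdown
#   threading.Timer(3600, reap_old_videos).start()    # periodic background sweep
#   cleanup_session_videos(session_id)                # when a user session ends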
# ---------------------------------------------------------------------------
# Audio temp-file management (per-session tracking and cleanup)
# ---------------------------------------------------------------------------
AUDIO_TEMP_DIR = os.path.join(tempfile.gettempdir(), "anycoder_audio")
AUDIO_FILE_TTL_SECONDS = 6 * 60 * 60 # 6 hours
_SESSION_AUDIO_FILES: Dict[str, List[str]] = {}
_AUDIO_FILES_LOCK = threading.Lock()
def _ensure_audio_dir_exists() -> None:
try:
os.makedirs(AUDIO_TEMP_DIR, exist_ok=True)
except Exception:
pass
def _register_audio_for_session(session_id: Optional[str], file_path: str) -> None:
if not session_id or not file_path:
return
with _AUDIO_FILES_LOCK:
if session_id not in _SESSION_AUDIO_FILES:
_SESSION_AUDIO_FILES[session_id] = []
_SESSION_AUDIO_FILES[session_id].append(file_path)
def cleanup_session_audio(session_id: Optional[str]) -> None:
if not session_id:
return
with _AUDIO_FILES_LOCK:
file_list = _SESSION_AUDIO_FILES.pop(session_id, [])
for path in file_list:
try:
if path and os.path.exists(path):
os.unlink(path)
except Exception:
pass
def reap_old_audio(ttl_seconds: int = AUDIO_FILE_TTL_SECONDS) -> None:
try:
_ensure_audio_dir_exists()
now_ts = time.time()
for name in os.listdir(AUDIO_TEMP_DIR):
path = os.path.join(AUDIO_TEMP_DIR, name)
try:
if not os.path.isfile(path):
continue
mtime = os.path.getmtime(path)
if now_ts - mtime > ttl_seconds:
os.unlink(path)
except Exception:
pass
except Exception:
pass
TRANSFORMERS_JS_SYSTEM_PROMPT = """You are an expert web developer creating a transformers.js application. You will generate THREE separate files: index.html, index.js, and style.css.
IMPORTANT: You MUST output ALL THREE files in the following format:
```html
<!-- index.html content here -->
```
```javascript
// index.js content here
```
```css
/* style.css content here */
```
Requirements:
1. Create a modern, responsive web application using transformers.js
2. Use the transformers.js library for AI/ML functionality
3. Create a clean, professional UI with good user experience
4. Make the application fully responsive for mobile devices
5. Use modern CSS practices and JavaScript ES6+ features
6. Include proper error handling and loading states
7. Follow accessibility best practices
Library import (required): Add the following snippet to index.html to import transformers.js:
<script type="module">
  import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers';
</script>
Device Options: By default, transformers.js runs on CPU (via WASM). For better performance, you can run models on GPU using WebGPU:
- CPU (default): const pipe = await pipeline('task', 'model-name');
- GPU (WebGPU): const pipe = await pipeline('task', 'model-name', { device: 'webgpu' });
Consider providing users with a toggle option to choose between CPU and GPU execution based on their browser's WebGPU support.
The index.html should contain the basic HTML structure and link to the CSS and JS files.
The index.js should contain all the JavaScript logic including transformers.js integration.
The style.css should contain all the styling for the application.
Always output only the three code blocks as shown above, and do not include any explanations or extra text."""
SVELTE_SYSTEM_PROMPT = """You are an expert Svelte developer creating a modern Svelte application.
File selection policy (dynamic, model-decided):
- Generate ONLY the files actually needed for the user's request.
- MUST include src/App.svelte (entry component) and src/main.ts (entry point).
- Usually include src/app.css for global styles.
- Add additional files when needed, e.g. src/lib/*.svelte, src/components/*.svelte, src/stores/*.ts, static/* assets, etc.
- Other base template files (package.json, vite.config.ts, tsconfig, svelte.config.js, src/vite-env.d.ts) are provided by the template and should NOT be generated unless explicitly requested by the user.
CRITICAL: Always generate src/main.ts with correct Svelte 5 syntax:
```typescript
import './app.css'
import App from './App.svelte'
const app = new App({
target: document.getElementById('app')!,
})
export default app
```
Do NOT use the old mount syntax: `import { mount } from 'svelte'` - this will cause build errors.
Output format (CRITICAL):
- Return ONLY a series of file sections, each starting with a filename line:
=== src/App.svelte ===
...file content...
=== src/app.css ===
...file content...
(repeat for all files you decide to create)
- Do NOT wrap files in Markdown code fences.
Dependency policy:
- If you import any third-party npm packages (e.g., "@gradio/dataframe"), include a package.json at the project root with a "dependencies" section listing them. Keep scripts and devDependencies compatible with the default Svelte + Vite template.
Requirements:
1. Create a modern, responsive Svelte application based on the user's specific request
2. Prefer TypeScript where applicable for better type safety
3. Create a clean, professional UI with good user experience
4. Make the application fully responsive for mobile devices
5. Use modern CSS practices and Svelte best practices
6. Include proper error handling and loading states
7. Follow accessibility best practices
8. Use Svelte's reactive features effectively
9. Include proper component structure and organization (only what's needed)
"""
SVELTE_SYSTEM_PROMPT_WITH_SEARCH = """You are an expert Svelte developer. You have access to real-time web search.
File selection policy (dynamic, model-decided):
- Generate ONLY the files actually needed for the user's request.
- MUST include src/App.svelte (entry component) and src/main.ts (entry point).
- Usually include src/app.css for global styles.
- Add additional files when needed, e.g. src/lib/*.svelte, src/components/*.svelte, src/stores/*.ts, static/* assets, etc.
- Other base template files (package.json, vite.config.ts, tsconfig, svelte.config.js, src/vite-env.d.ts) are provided by the template and should NOT be generated unless explicitly requested by the user.
CRITICAL: Always generate src/main.ts with correct Svelte 5 syntax:
```typescript
import './app.css'
import App from './App.svelte'
const app = new App({
target: document.getElementById('app')!,
})
export default app
```
Do NOT use the old mount syntax: `import { mount } from 'svelte'` - this will cause build errors.
Output format (CRITICAL):
- Return ONLY a series of file sections, each starting with a filename line:
=== src/App.svelte ===
...file content...
=== src/app.css ===
...file content...
(repeat for all files you decide to create)
- Do NOT wrap files in Markdown code fences.
Dependency policy:
- If you import any third-party npm packages, include a package.json at the project root with a "dependencies" section listing them. Keep scripts and devDependencies compatible with the default Svelte + Vite template.
Requirements:
1. Create a modern, responsive Svelte application
2. Prefer TypeScript where applicable
3. Clean, professional UI and UX
4. Mobile-first responsiveness
5. Svelte best practices and modern CSS
6. Error handling and loading states
7. Accessibility best practices
8. Use search to apply current best practices
9. Keep component structure organized and minimal
"""
TRANSFORMERS_JS_SYSTEM_PROMPT_WITH_SEARCH = """You are an expert web developer creating a transformers.js application. You have access to real-time web search. When needed, use web search to find the latest information, best practices, or specific technologies for transformers.js.
You will generate THREE separate files: index.html, index.js, and style.css.
IMPORTANT: You MUST output ALL THREE files in the following format:
```html
<!-- index.html content here -->
```
```javascript
// index.js content here
```
```css
/* style.css content here */
```
Requirements:
1. Create a modern, responsive web application using transformers.js
2. Use the transformers.js library for AI/ML functionality
3. Use web search to find current best practices and latest transformers.js features
4. Create a clean, professional UI with good user experience
5. Make the application fully responsive for mobile devices
6. Use modern CSS practices and JavaScript ES6+ features
7. Include proper error handling and loading states
8. Follow accessibility best practices
Library import (required): Add the following snippet to index.html to import transformers.js:
<script type="module">
  import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers';
</script>
Device Options: By default, transformers.js runs on CPU (via WASM). For better performance, you can run models on GPU using WebGPU:
- CPU (default): const pipe = await pipeline('task', 'model-name');
- GPU (WebGPU): const pipe = await pipeline('task', 'model-name', { device: 'webgpu' });
Consider providing users with a toggle option to choose between CPU and GPU execution based on their browser's WebGPU support.
The index.html should contain the basic HTML structure and link to the CSS and JS files.
The index.js should contain all the JavaScript logic including transformers.js integration.
The style.css should contain all the styling for the application.
Always output only the three code blocks as shown above, and do not include any explanations or extra text."""
# Gradio system prompts will be dynamically populated by update_gradio_system_prompts()
GRADIO_SYSTEM_PROMPT = ""
GRADIO_SYSTEM_PROMPT_WITH_SEARCH = ""
# GRADIO_SYSTEM_PROMPT_WITH_SEARCH will be dynamically populated by update_gradio_system_prompts()
# All Gradio API documentation is now dynamically loaded from https://www.gradio.app/llms.txt
GENERIC_SYSTEM_PROMPT = """You are an expert {language} developer. Write clean, idiomatic, and runnable {language} code for the user's request. If possible, include comments and best practices. Output ONLY the code inside a ``` code block, and do not include any explanations or extra text. If the user provides a file or other context, use it as a reference. If the code is for a script or app, make it as self-contained as possible. Do NOT add the language name at the top of the code output."""
# System prompt with search capability
HTML_SYSTEM_PROMPT_WITH_SEARCH = """You are an expert front-end developer. You have access to real-time web search.
Output a COMPLETE, STANDALONE HTML document that renders directly in a browser. Requirements:
- Include <!DOCTYPE html>, <html>, <head>, and <body> with proper nesting
- Include all required <link> and <script> tags
- Use web search when needed to apply current best practices
Always output only the HTML code inside a ```html ... ``` code block, and do not include any explanations or extra text. Do NOT add the language name at the top of the code output."""
# Follow-up system prompt for modifying existing HTML files with search/replace blocks
FollowUpSystemPrompt = f"""You are an expert web developer modifying an existing project.
The user wants to apply changes based on their request.
You MUST output ONLY the changes required using the following SEARCH/REPLACE block format. Do NOT output the entire file.
Format Rules:
1. Start with {SEARCH_START}
2. Provide the exact lines from the current code that need to be replaced.
3. Use {DIVIDER} to separate the search block from the replacement.
4. Provide the new lines that should replace the original lines.
5. End with {REPLACE_END}
6. Use multiple SEARCH/REPLACE blocks if changes are needed in different parts of the file.
7. The SEARCH block must *exactly* match the current code, including indentation and whitespace.
Example Modifying HTML:
```
Changing the title...
{SEARCH_START}
<title>Old Title</title>
{DIVIDER}
<title>New Title</title>
{REPLACE_END}
```
Example Fixing Dependencies (requirements.txt):
```
Adding missing dependency to fix ImportError...
=== requirements.txt ===
{SEARCH_START}
gradio
streamlit
{DIVIDER}
gradio
streamlit
mistral-common
{REPLACE_END}
```
Example Deleting Code:
```
Removing the paragraph...
{SEARCH_START}
<p>This paragraph will be deleted.</p>
{DIVIDER}
{REPLACE_END}
```"""
# Follow-up system prompt for modifying existing transformers.js applications
TransformersJSFollowUpSystemPrompt = f"""You are an expert web developer modifying an existing transformers.js application.
The user wants to apply changes based on their request.
You MUST output ONLY the changes required using the following SEARCH/REPLACE block format. Do NOT output the entire file.
Explain the changes briefly *before* the blocks if necessary, but the code changes THEMSELVES MUST be within the blocks.
IMPORTANT: When the user reports an ERROR MESSAGE, analyze it carefully to determine which file needs fixing:
- JavaScript errors/module loading issues β Fix index.js
- HTML rendering/DOM issues β Fix index.html
- Styling/visual issues β Fix style.css
- CDN/library loading errors β Fix script tags in index.html
The transformers.js application consists of three files: index.html, index.js, and style.css.
When making changes, specify which file you're modifying by starting your search/replace blocks with the file name.
Format Rules:
1. Start with {SEARCH_START}
2. Provide the exact lines from the current code that need to be replaced.
3. Use {DIVIDER} to separate the search block from the replacement.
4. Provide the new lines that should replace the original lines.
5. End with {REPLACE_END}
6. You can use multiple SEARCH/REPLACE blocks if changes are needed in different parts of the file.
7. To insert code, use an empty SEARCH block (only {SEARCH_START} and {DIVIDER} on their lines) if inserting at the very beginning, otherwise provide the line *before* the insertion point in the SEARCH block and include that line plus the new lines in the REPLACE block.
8. To delete code, provide the lines to delete in the SEARCH block and leave the REPLACE block empty (only {DIVIDER} and {REPLACE_END} on their lines).
9. IMPORTANT: The SEARCH block must *exactly* match the current code, including indentation and whitespace.
Example Modifying HTML:
```
Changing the title in index.html...
=== index.html ===
{SEARCH_START}
<title>Old Title</title>
{DIVIDER}
<title>New Title</title>
{REPLACE_END}
```
Example Modifying JavaScript:
```
Adding a new function to index.js...
=== index.js ===
{SEARCH_START}
// Existing code
{DIVIDER}
// Existing code
function newFunction() {{
console.log("New function added");
}}
{REPLACE_END}
```
Example Modifying CSS:
```
Changing background color in style.css...
=== style.css ===
{SEARCH_START}
body {{
background-color: white;
}}
{DIVIDER}
body {{
background-color: #f0f0f0;
}}
{REPLACE_END}
```
Example Fixing Library Loading Error:
```
Fixing transformers.js CDN loading error...
=== index.html ===
{SEARCH_START}
<script src="https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.2"></script>
{DIVIDER}
<script type="module">
import {{ pipeline }} from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers';
</script>
{REPLACE_END}
```"""
# Available models
AVAILABLE_MODELS = [
{
"name": "Moonshot Kimi-K2",
"id": "moonshotai/Kimi-K2-Instruct",
"description": "Moonshot AI Kimi-K2-Instruct model for code generation and general tasks"
},
{
"name": "Kimi K2 Turbo (Preview)",
"id": "kimi-k2-turbo-preview",
"description": "Moonshot AI Kimi K2 Turbo via OpenAI-compatible API"
},
{
"name": "Carrot",
"id": "stealth-model-1",
"description": "High-performance AI model for code generation and complex reasoning tasks"
},
{
"name": "DeepSeek V3",
"id": "deepseek-ai/DeepSeek-V3-0324",
"description": "DeepSeek V3 model for code generation"
},
{
"name": "DeepSeek V3.1",
"id": "deepseek-ai/DeepSeek-V3.1",
"description": "DeepSeek V3.1 model for code generation and general tasks"
},
{
"name": "DeepSeek R1",
"id": "deepseek-ai/DeepSeek-R1-0528",
"description": "DeepSeek R1 model for code generation"
},
{
"name": "ERNIE-4.5-VL",
"id": "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT",
"description": "ERNIE-4.5-VL model for multimodal code generation with image support"
},
{
"name": "MiniMax M1",
"id": "MiniMaxAI/MiniMax-M1-80k",
"description": "MiniMax M1 model for code generation and general tasks"
},
{
"name": "Qwen3-235B-A22B",
"id": "Qwen/Qwen3-235B-A22B",
"description": "Qwen3-235B-A22B model for code generation and general tasks"
},
{
"name": "SmolLM3-3B",
"id": "HuggingFaceTB/SmolLM3-3B",
"description": "SmolLM3-3B model for code generation and general tasks"
},
{
"name": "GLM-4.5",
"id": "zai-org/GLM-4.5",
"description": "GLM-4.5 model with thinking capabilities for advanced code generation"
},
{
"name": "GLM-4.5V",
"id": "zai-org/GLM-4.5V",
"description": "GLM-4.5V multimodal model with image understanding for code generation"
},
{
"name": "GLM-4.1V-9B-Thinking",
"id": "THUDM/GLM-4.1V-9B-Thinking",
"description": "GLM-4.1V-9B-Thinking model for multimodal code generation with image support"
},
{
"name": "Qwen3-235B-A22B-Instruct-2507",
"id": "Qwen/Qwen3-235B-A22B-Instruct-2507",
"description": "Qwen3-235B-A22B-Instruct-2507 model for code generation and general tasks"
},
{
"name": "Qwen3-Coder-480B-A35B-Instruct",
"id": "Qwen/Qwen3-Coder-480B-A35B-Instruct",
"description": "Qwen3-Coder-480B-A35B-Instruct model for advanced code generation and programming tasks"
},
{
"name": "Qwen3-32B",
"id": "Qwen/Qwen3-32B",
"description": "Qwen3-32B model for code generation and general tasks"
},
{
"name": "Qwen3-4B-Instruct-2507",
"id": "Qwen/Qwen3-4B-Instruct-2507",
"description": "Qwen3-4B-Instruct-2507 model for code generation and general tasks"
},
{
"name": "Qwen3-4B-Thinking-2507",
"id": "Qwen/Qwen3-4B-Thinking-2507",
"description": "Qwen3-4B-Thinking-2507 model with advanced reasoning capabilities for code generation and general tasks"
},
{
"name": "Qwen3-235B-A22B-Thinking",
"id": "Qwen/Qwen3-235B-A22B-Thinking-2507",
"description": "Qwen3-235B-A22B-Thinking model with advanced reasoning capabilities"
},
{
"name": "Qwen3-30B-A3B-Instruct-2507",
"id": "qwen3-30b-a3b-instruct-2507",
"description": "Qwen3-30B-A3B-Instruct model via Alibaba Cloud DashScope API"
},
{
"name": "Qwen3-30B-A3B-Thinking-2507",
"id": "qwen3-30b-a3b-thinking-2507",
"description": "Qwen3-30B-A3B-Thinking model with advanced reasoning via Alibaba Cloud DashScope API"
},
{
"name": "Qwen3-Coder-30B-A3B-Instruct",
"id": "qwen3-coder-30b-a3b-instruct",
"description": "Qwen3-Coder-30B-A3B-Instruct model for advanced code generation via Alibaba Cloud DashScope API"
},
{
"name": "Cohere Command-A Reasoning 08-2025",
"id": "CohereLabs/command-a-reasoning-08-2025",
"description": "Cohere Labs Command-A Reasoning (Aug 2025) via Hugging Face InferenceClient"
},
{
"name": "StepFun Step-3",
"id": "step-3",
"description": "StepFun Step-3 model - AI chat assistant by ιΆθ·ζθΎ° with multilingual capabilities"
},
{
"name": "Codestral 2508",
"id": "codestral-2508",
"description": "Mistral Codestral model - specialized for code generation and programming tasks"
},
{
"name": "Mistral Medium 2508",
"id": "mistral-medium-2508",
"description": "Mistral Medium 2508 model via Mistral API for general tasks and coding"
},
{
"name": "Gemini 2.5 Flash",
"id": "gemini-2.5-flash",
"description": "Google Gemini 2.5 Flash via OpenAI-compatible API"
},
{
"name": "Gemini 2.5 Pro",
"id": "gemini-2.5-pro",
"description": "Google Gemini 2.5 Pro via OpenAI-compatible API"
},
{
"name": "GPT-OSS-120B",
"id": "openai/gpt-oss-120b",
"description": "OpenAI GPT-OSS-120B model for advanced code generation and general tasks"
},
{
"name": "GPT-OSS-20B",
"id": "openai/gpt-oss-20b",
"description": "OpenAI GPT-OSS-20B model for code generation and general tasks"
},
{
"name": "GPT-5",
"id": "gpt-5",
"description": "OpenAI GPT-5 model for advanced code generation and general tasks"
},
{
"name": "Grok-4",
"id": "grok-4",
"description": "Grok-4 model via Poe (OpenAI-compatible) for advanced tasks"
},
{
"name": "Grok-Code-Fast-1",
"id": "Grok-Code-Fast-1",
"description": "Grok-Code-Fast-1 model via Poe (OpenAI-compatible) for fast code generation"
},
{
"name": "Claude-Opus-4.1",
"id": "claude-opus-4.1",
"description": "Anthropic Claude Opus 4.1 via Poe (OpenAI-compatible)"
},
{
"name": "Qwen3 Max Preview",
"id": "qwen3-max-preview",
"description": "Qwen3 Max Preview model via DashScope International API"
},
{
"name": "Sonoma Dusk Alpha",
"id": "openrouter/sonoma-dusk-alpha",
"description": "OpenRouter Sonoma Dusk Alpha model with vision capabilities"
}
]
# Default model selection
DEFAULT_MODEL_NAME = "Qwen3 Max Preview"
DEFAULT_MODEL = None
for _m in AVAILABLE_MODELS:
if _m.get("name") == DEFAULT_MODEL_NAME:
DEFAULT_MODEL = _m
break
if DEFAULT_MODEL is None and AVAILABLE_MODELS:
DEFAULT_MODEL = AVAILABLE_MODELS[0]
DEMO_LIST = [
{
"title": "Todo App",
"description": "Create a simple todo application with add, delete, and mark as complete functionality"
},
{
"title": "Calculator",
"description": "Build a basic calculator with addition, subtraction, multiplication, and division"
},
{
"title": "Chat Interface",
"description": "Build a chat interface with message history and user input"
},
{
"title": "E-commerce Product Card",
"description": "Create a product card component for an e-commerce website"
},
{
"title": "Login Form",
"description": "Build a responsive login form with validation"
},
{
"title": "Dashboard Layout",
"description": "Create a dashboard layout with sidebar navigation and main content area"
},
{
"title": "Data Table",
"description": "Build a data table with sorting and filtering capabilities"
},
{
"title": "Image Gallery",
"description": "Create an image gallery with lightbox functionality and responsive grid layout"
},
{
"title": "UI from Image",
"description": "Upload an image of a UI design and I'll generate the HTML/CSS code for it"
},
{
"title": "Extract Text from Image",
"description": "Upload an image containing text and I'll extract and process the text content"
},
{
"title": "Website Redesign",
"description": "Enter a website URL to extract its content and redesign it with a modern, responsive layout"
},
{
"title": "Modify HTML",
"description": "After generating HTML, ask me to modify it with specific changes using search/replace format"
},
{
"title": "Search/Replace Example",
"description": "Generate HTML first, then ask: 'Change the title to My New Title' or 'Add a blue background to the body'"
},
{
"title": "Transformers.js App",
"description": "Create a transformers.js application with AI/ML functionality using the transformers.js library"
},
{
"title": "Svelte App",
"description": "Create a modern Svelte application with TypeScript, Vite, and responsive design"
}
]
# HF Inference Client
HF_TOKEN = os.getenv('HF_TOKEN')
if not HF_TOKEN:
raise RuntimeError("HF_TOKEN environment variable is not set. Please set it to your Hugging Face API token.")
def get_inference_client(model_id, provider="auto"):
"""Return an InferenceClient with provider based on model_id and user selection."""
    if model_id in ("qwen3-30b-a3b-instruct-2507", "qwen3-30b-a3b-thinking-2507", "qwen3-coder-30b-a3b-instruct"):
        # Use DashScope (OpenAI-compatible) client for the Qwen3 30B variants
        return OpenAI(
            api_key=os.getenv("DASHSCOPE_API_KEY"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
elif model_id == "gpt-5":
# Use Poe (OpenAI-compatible) client for GPT-5 model
return OpenAI(
api_key=os.getenv("POE_API_KEY"),
base_url="https://api.poe.com/v1"
)
elif model_id == "grok-4":
# Use Poe (OpenAI-compatible) client for Grok-4 model
return OpenAI(
api_key=os.getenv("POE_API_KEY"),
base_url="https://api.poe.com/v1"
)
elif model_id == "Grok-Code-Fast-1":
# Use Poe (OpenAI-compatible) client for Grok-Code-Fast-1 model
return OpenAI(
api_key=os.getenv("POE_API_KEY"),
base_url="https://api.poe.com/v1"
)
elif model_id == "claude-opus-4.1":
# Use Poe (OpenAI-compatible) client for Claude-Opus-4.1
return OpenAI(
api_key=os.getenv("POE_API_KEY"),
base_url="https://api.poe.com/v1"
)
elif model_id == "qwen3-max-preview":
# Use DashScope International OpenAI client for Qwen3 Max Preview
return OpenAI(
api_key=os.getenv("DASHSCOPE_API_KEY"),
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)
elif model_id == "openrouter/sonoma-dusk-alpha":
# Use OpenRouter client for Sonoma Dusk Alpha model
return OpenAI(
api_key=os.getenv("OPENROUTER_API_KEY"),
base_url="https://openrouter.ai/api/v1",
)
elif model_id == "step-3":
# Use StepFun API client for Step-3 model
return OpenAI(
api_key=os.getenv("STEP_API_KEY"),
base_url="https://api.stepfun.com/v1"
)
elif model_id == "codestral-2508" or model_id == "mistral-medium-2508":
# Use Mistral client for Mistral models
return Mistral(api_key=os.getenv("MISTRAL_API_KEY"))
    elif model_id in ("gemini-2.5-flash", "gemini-2.5-pro"):
        # Use Google Gemini (OpenAI-compatible) client
        return OpenAI(
            api_key=os.getenv("GEMINI_API_KEY"),
            base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
        )
elif model_id == "kimi-k2-turbo-preview":
# Use Moonshot AI (OpenAI-compatible) client for Kimi K2 Turbo (Preview)
return OpenAI(
api_key=os.getenv("MOONSHOT_API_KEY"),
base_url="https://api.moonshot.ai/v1",
)
elif model_id == "stealth-model-1":
# Use stealth model with generic configuration
api_key = os.getenv("STEALTH_MODEL_1_API_KEY")
if not api_key:
raise ValueError("STEALTH_MODEL_1_API_KEY environment variable is required for Carrot model")
base_url = os.getenv("STEALTH_MODEL_1_BASE_URL")
if not base_url:
raise ValueError("STEALTH_MODEL_1_BASE_URL environment variable is required for Carrot model")
return OpenAI(
api_key=api_key,
base_url=base_url,
)
    # Map remaining HF-hosted models to their preferred inference providers
    provider_overrides = {
        "openai/gpt-oss-120b": "groq",
        "openai/gpt-oss-20b": "groq",
        "moonshotai/Kimi-K2-Instruct": "groq",
        "Qwen/Qwen3-235B-A22B": "cerebras",
        "Qwen/Qwen3-235B-A22B-Instruct-2507": "cerebras",
        "Qwen/Qwen3-32B": "cerebras",
        "Qwen/Qwen3-235B-A22B-Thinking-2507": "cerebras",
        "Qwen/Qwen3-Coder-480B-A35B-Instruct": "cerebras",
        "deepseek-ai/DeepSeek-V3.1": "novita",
        "zai-org/GLM-4.5": "fireworks-ai",
    }
    provider = provider_overrides.get(model_id, provider)
return InferenceClient(
provider=provider,
api_key=HF_TOKEN,
bill_to="huggingface"
)
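# Example usage (assumes the relevant API keys are set in the environment):
#   get_inference_client("Qwen/Qwen3-Coder-480B-A35B-Instruct")  # InferenceClient routed to Cerebras
#   get_inference_client("gpt-5")                                # OpenAI client pointed at Poe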
# Helper function to get real model ID for stealth models
def get_real_model_id(model_id: str) -> str:
"""Get the real model ID, checking environment variables for stealth models"""
if model_id == "stealth-model-1":
# Get the real model ID from environment variable
real_model_id = os.getenv("STEALTH_MODEL_1_ID")
if not real_model_id:
raise ValueError("STEALTH_MODEL_1_ID environment variable is required for Carrot model")
return real_model_id
return model_id
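# Illustrative sketch of the stealth-model indirection above. The env var names
# are the real ones used by this module; the model ID value is an assumption,
# not a known model:
#
#   os.environ["STEALTH_MODEL_1_ID"] = "vendor/some-model"   # hypothetical value
#   get_real_model_id("stealth-model-1")   # -> "vendor/some-model"
#   get_real_model_id("gemini-2.5-pro")    # -> "gemini-2.5-pro" (passthrough)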
# Type definitions
History = List[Tuple[str, str]]
Messages = List[Dict[str, str]]
# Tavily Search Client
TAVILY_API_KEY = os.getenv('TAVILY_API_KEY')
tavily_client = None
if TAVILY_API_KEY:
try:
tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
except Exception as e:
print(f"Failed to initialize Tavily client: {e}")
tavily_client = None
def history_to_messages(history: History, system: str) -> Messages:
messages = [{'role': 'system', 'content': system}]
for h in history:
# Handle multimodal content in history
user_content = h[0]
if isinstance(user_content, list):
# Extract text from multimodal content
text_content = ""
for item in user_content:
if isinstance(item, dict) and item.get("type") == "text":
text_content += item.get("text", "")
user_content = text_content if text_content else str(user_content)
messages.append({'role': 'user', 'content': user_content})
messages.append({'role': 'assistant', 'content': h[1]})
return messages
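# Example (pure function, illustrative values): a history turn whose user side
# is multimodal collapses to its text parts before being sent to the model.
#
#   history_to_messages([([{"type": "text", "text": "hi"}], "hello")], "be terse")
#   # -> [{'role': 'system', 'content': 'be terse'},
#   #     {'role': 'user', 'content': 'hi'},
#   #     {'role': 'assistant', 'content': 'hello'}]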
def messages_to_history(messages: Messages) -> History:
assert messages[0]['role'] == 'system'
history = []
for q, r in zip(messages[1::2], messages[2::2]):
# Extract text content from multimodal messages for history
user_content = q['content']
if isinstance(user_content, list):
text_content = ""
for item in user_content:
if isinstance(item, dict) and item.get("type") == "text":
text_content += item.get("text", "")
user_content = text_content if text_content else str(user_content)
history.append([user_content, r['content']])
return history
def history_to_chatbot_messages(history: History) -> List[Dict[str, str]]:
"""Convert history tuples to chatbot message format"""
messages = []
for user_msg, assistant_msg in history:
# Handle multimodal content
if isinstance(user_msg, list):
text_content = ""
for item in user_msg:
if isinstance(item, dict) and item.get("type") == "text":
text_content += item.get("text", "")
user_msg = text_content if text_content else str(user_msg)
messages.append({"role": "user", "content": user_msg})
messages.append({"role": "assistant", "content": assistant_msg})
return messages
def remove_code_block(text):
# Try to match code blocks with language markers
patterns = [
r'```(?:html|HTML)\n([\s\S]+?)\n```', # Match ```html or ```HTML
r'```\n([\s\S]+?)\n```', # Match code blocks without language markers
r'```([\s\S]+?)```' # Match code blocks without line breaks
]
for pattern in patterns:
match = re.search(pattern, text, re.DOTALL)
if match:
extracted = match.group(1).strip()
# Remove a leading language marker line (e.g., 'python') if present
            if extracted.split('\n', 1)[0].strip().lower() in ['python', 'html', 'css', 'javascript', 'json', 'c', 'cpp', 'markdown', 'latex', 'jinja2', 'typescript', 'yaml', 'dockerfile', 'shell', 'r', 'sql', 'sql-mssql', 'sql-mysql', 'sql-mariadb', 'sql-sqlite', 'sql-cassandra', 'sql-plsql', 'sql-hive', 'sql-pgsql', 'sql-gql', 'sql-gpsql', 'sql-sparksql', 'sql-esper']:
return extracted.split('\n', 1)[1] if '\n' in extracted else ''
            # If HTML markup starts later in the block (e.g., Poe injected preface), trim to first HTML root
            html_root_idx = None
            for tag in ['<!doctype html', '<html']:
                idx = extracted.lower().find(tag)
                if idx != -1 and (html_root_idx is None or idx < html_root_idx):
                    html_root_idx = idx
            if html_root_idx is not None and html_root_idx > 0:
                return extracted[html_root_idx:].strip()
            return extracted
    # If no code block is found, check if the entire text is HTML
    stripped = text.strip()
    if stripped.startswith('<!DOCTYPE html>') or stripped.startswith('<html'):
        return stripped
    # If HTML markup starts later in the text, trim to the first HTML root
    lower = stripped.lower()
    candidates = [i for i in (lower.find('<!doctype html'), lower.find('<html')) if i != -1]
    idx = min(candidates) if candidates else -1
    if idx > 0:
        return stripped[idx:].strip()
# Special handling for python: remove python marker
if text.strip().startswith('```python'):
return text.strip()[9:-3].strip()
# Remove a leading language marker line if present (fallback)
lines = text.strip().split('\n', 1)
    if lines[0].strip().lower() in ['python', 'html', 'css', 'javascript', 'json', 'c', 'cpp', 'markdown', 'latex', 'jinja2', 'typescript', 'yaml', 'dockerfile', 'shell', 'r', 'sql', 'sql-mssql', 'sql-mysql', 'sql-mariadb', 'sql-sqlite', 'sql-cassandra', 'sql-plsql', 'sql-hive', 'sql-pgsql', 'sql-gql', 'sql-gpsql', 'sql-sparksql', 'sql-esper']:
return lines[1] if len(lines) > 1 else ''
return text.strip()
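# Example of the fence stripping above (illustrative inputs; the second call
# exercises the leading language-marker path):
#
#   remove_code_block("```html\n<h1>Hi</h1>\n```")    # -> '<h1>Hi</h1>'
#   remove_code_block("```\npython\nprint(1)\n```")   # -> 'print(1)'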
## React CDN compatibility fixer removed per user preference
def strip_placeholder_thinking(text: str) -> str:
"""Remove placeholder 'Thinking...' status lines from streamed text."""
if not text:
return text
# Matches lines like: "Thinking..." or "Thinking... (12s elapsed)"
return re.sub(r"(?mi)^[\t ]*Thinking\.\.\.(?:\s*\(\d+s elapsed\))?[\t ]*$\n?", "", text)
def is_placeholder_thinking_only(text: str) -> bool:
"""Return True if text contains only 'Thinking...' placeholder lines (with optional elapsed)."""
if not text:
return False
stripped = text.strip()
if not stripped:
return False
return re.fullmatch(r"(?s)(?:\s*Thinking\.\.\.(?:\s*\(\d+s elapsed\))?\s*)+", stripped) is not None
def extract_last_thinking_line(text: str) -> str:
"""Extract the last 'Thinking...' line to display as status."""
matches = list(re.finditer(r"Thinking\.\.\.(?:\s*\(\d+s elapsed\))?", text))
return matches[-1].group(0) if matches else "Thinking..."
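# Example of the three "Thinking..." helpers on one stream chunk (illustrative):
#
#   chunk = "Thinking... (3s elapsed)\npartial answer"
#   is_placeholder_thinking_only(chunk)   # -> False (real text follows)
#   strip_placeholder_thinking(chunk)     # -> 'partial answer'
#   extract_last_thinking_line(chunk)     # -> 'Thinking... (3s elapsed)'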
def parse_transformers_js_output(text):
"""Parse transformers.js output and extract the three files (index.html, index.js, style.css)"""
files = {
'index.html': '',
'index.js': '',
'style.css': ''
}
# Multiple patterns to match the three code blocks with different variations
html_patterns = [
r'```html\s*\n([\s\S]*?)(?:```|\Z)',
r'```htm\s*\n([\s\S]*?)(?:```|\Z)',
r'```\s*(?:index\.html|html)\s*\n([\s\S]*?)(?:```|\Z)'
]
js_patterns = [
r'```javascript\s*\n([\s\S]*?)(?:```|\Z)',
r'```js\s*\n([\s\S]*?)(?:```|\Z)',
r'```\s*(?:index\.js|javascript|js)\s*\n([\s\S]*?)(?:```|\Z)'
]
css_patterns = [
r'```css\s*\n([\s\S]*?)(?:```|\Z)',
r'```\s*(?:style\.css|css)\s*\n([\s\S]*?)(?:```|\Z)'
]
# Extract HTML content
for pattern in html_patterns:
html_match = re.search(pattern, text, re.IGNORECASE)
if html_match:
files['index.html'] = html_match.group(1).strip()
break
# Extract JavaScript content
for pattern in js_patterns:
js_match = re.search(pattern, text, re.IGNORECASE)
if js_match:
files['index.js'] = js_match.group(1).strip()
break
# Extract CSS content
for pattern in css_patterns:
css_match = re.search(pattern, text, re.IGNORECASE)
if css_match:
files['style.css'] = css_match.group(1).strip()
break
# Fallback: support === index.html === format if any file is missing
if not (files['index.html'] and files['index.js'] and files['style.css']):
# Use regex to extract sections
html_fallback = re.search(r'===\s*index\.html\s*===\s*\n([\s\S]+?)(?=\n===|$)', text, re.IGNORECASE)
js_fallback = re.search(r'===\s*index\.js\s*===\s*\n([\s\S]+?)(?=\n===|$)', text, re.IGNORECASE)
css_fallback = re.search(r'===\s*style\.css\s*===\s*\n([\s\S]+?)(?=\n===|$)', text, re.IGNORECASE)
if html_fallback:
files['index.html'] = html_fallback.group(1).strip()
if js_fallback:
files['index.js'] = js_fallback.group(1).strip()
if css_fallback:
files['style.css'] = css_fallback.group(1).strip()
# Additional fallback: extract from numbered sections or file headers
if not (files['index.html'] and files['index.js'] and files['style.css']):
# Try patterns like "1. index.html:" or "**index.html**"
patterns = [
(r'(?:^\d+\.\s*|^##\s*|^\*\*\s*)index\.html(?:\s*:|\*\*:?)\s*\n([\s\S]+?)(?=\n(?:\d+\.|##|\*\*|===)|$)', 'index.html'),
(r'(?:^\d+\.\s*|^##\s*|^\*\*\s*)index\.js(?:\s*:|\*\*:?)\s*\n([\s\S]+?)(?=\n(?:\d+\.|##|\*\*|===)|$)', 'index.js'),
(r'(?:^\d+\.\s*|^##\s*|^\*\*\s*)style\.css(?:\s*:|\*\*:?)\s*\n([\s\S]+?)(?=\n(?:\d+\.|##|\*\*|===)|$)', 'style.css')
]
for pattern, file_key in patterns:
if not files[file_key]:
match = re.search(pattern, text, re.IGNORECASE | re.MULTILINE)
if match:
# Clean up the content by removing any code block markers
content = match.group(1).strip()
content = re.sub(r'^```\w*\s*\n', '', content)
content = re.sub(r'\n```\s*$', '', content)
files[file_key] = content.strip()
return files
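# Example (illustrative): a reply with three fenced blocks parses into the
# three-file mapping; the fallback paths also accept '=== index.html ===' sections.
#
#   text = "```html\n<p>Hi</p>\n```\n```js\nconsole.log(1)\n```\n```css\np{}\n```"
#   parse_transformers_js_output(text)
#   # -> {'index.html': '<p>Hi</p>', 'index.js': 'console.log(1)', 'style.css': 'p{}'}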
def format_transformers_js_output(files):
"""Format the three files into a single display string"""
output = []
output.append("=== index.html ===")
output.append(files['index.html'])
output.append("\n=== index.js ===")
output.append(files['index.js'])
output.append("\n=== style.css ===")
output.append(files['style.css'])
return '\n'.join(output)
def build_transformers_inline_html(files: dict) -> str:
"""Merge transformers.js three-file output into a single self-contained HTML document.
- Inlines style.css into a " if css else ""
if style_tag:
if '' in doc.lower():
# Preserve original casing by finding closing head case-insensitively
match = _re.search(r"", doc, flags=_re.IGNORECASE)
if match:
idx = match.start()
doc = doc[:idx] + style_tag + doc[idx:]
else:
# No head; insert at top of body
match = _re.search(r"]*>", doc, flags=_re.IGNORECASE)
if match:
idx = match.end()
doc = doc[:idx] + "\n" + style_tag + doc[idx:]
else:
# Append at beginning
doc = style_tag + doc
# Inline JS: insert before
script_tag = f"" if js else ""
# Lightweight debug console overlay to surface runtime errors inside the iframe
debug_overlay = (
"\n"
"\n"
""
)
# Cleanup script to clear Cache Storage and IndexedDB on unload to free model weights
cleanup_tag = (
""
)
if script_tag:
match = _re.search(r"", doc, flags=_re.IGNORECASE)
if match:
idx = match.start()
doc = doc[:idx] + debug_overlay + script_tag + cleanup_tag + doc[idx:]
else:
# Append at end
doc = doc + debug_overlay + script_tag + cleanup_tag
return doc
def send_transformers_to_sandbox(files: dict) -> str:
"""Build a self-contained HTML document from transformers.js files and return an iframe preview."""
merged_html = build_transformers_inline_html(files)
return send_to_sandbox(merged_html)
def parse_multipage_html_output(text: str) -> Dict[str, str]:
"""Parse multi-page HTML output formatted as repeated "=== filename ===" sections.
    Returns a mapping of filename -> file content. Supports nested paths like assets/css/styles.css.
"""
if not text:
return {}
# First, strip any markdown fences
cleaned = remove_code_block(text)
files: Dict[str, str] = {}
import re as _re
pattern = _re.compile(r"^===\s*([^=\n]+?)\s*===\s*\n([\s\S]*?)(?=\n===\s*[^=\n]+?\s*===|\Z)", _re.MULTILINE)
for m in pattern.finditer(cleaned):
name = m.group(1).strip()
content = m.group(2).strip()
# Remove accidental trailing fences if present
content = _re.sub(r"^```\w*\s*\n|\n```\s*$", "", content)
files[name] = content
return files
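# Example (illustrative) of the multi-page format this parser expects:
#
#   text = "=== index.html ===\n<h1>Home</h1>\n=== assets/css/styles.css ===\nh1{color:red}"
#   parse_multipage_html_output(text)
#   # -> {'index.html': '<h1>Home</h1>', 'assets/css/styles.css': 'h1{color:red}'}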
def format_multipage_output(files: Dict[str, str]) -> str:
"""Format a dict of files back into === filename === sections.
Ensures `index.html` appears first if present; others follow sorted by path.
"""
if not isinstance(files, dict) or not files:
return ""
ordered_paths = []
if 'index.html' in files:
ordered_paths.append('index.html')
for path in sorted(files.keys()):
if path == 'index.html':
continue
ordered_paths.append(path)
parts: list[str] = []
for path in ordered_paths:
parts.append(f"=== {path} ===")
# Avoid trailing extra newlines to keep blocks compact
parts.append((files.get(path) or '').rstrip())
return "\n".join(parts)
def validate_and_autofix_files(files: Dict[str, str]) -> Dict[str, str]:
"""Ensure minimal contract for multi-file sites; auto-fix missing pieces.
Rules:
- Ensure at least one HTML entrypoint (index.html). If none, synthesize a simple index.html linking discovered pages.
- For each HTML file, ensure referenced local assets exist in files; if missing, add minimal stubs.
- Normalize relative paths (strip leading '/').
"""
if not isinstance(files, dict) or not files:
return files or {}
import re as _re
normalized: Dict[str, str] = {}
for k, v in files.items():
safe_key = k.strip().lstrip('/')
normalized[safe_key] = v
html_files = [p for p in normalized.keys() if p.lower().endswith('.html')]
has_index = 'index.html' in normalized
# If no index.html but some HTML pages exist, create a simple hub index linking to them
if not has_index and html_files:
    if not has_index and html_files:
        links = '\n'.join([f'<li><a href="{p}">{p}</a></li>' for p in html_files])
        normalized['index.html'] = (
            "<!DOCTYPE html>\n<html>\n<head>\n"
            '<meta charset="utf-8">\n'
            "<title>Site Index</title>\n</head>\n<body>\n"
            "<h1>Site</h1>\n<ul>\n"
            + links + "\n</ul>\n"
            "</body>\n</html>"
        )
    # Collect referenced local assets from HTML files and stub any that are missing
    asset_refs: set[str] = set()
    link_href = _re.compile(r'<link[^>]+href="([^"]+)"', _re.IGNORECASE)
    script_src = _re.compile(r'<script[^>]+src="([^"]+)"', _re.IGNORECASE)
    for page in list(normalized.keys()):
        if not page.lower().endswith('.html'):
            continue
        content = normalized.get(page) or ''
        for rx in (link_href, script_src):
            for m in rx.finditer(content):
                ref = m.group(1).strip().lstrip('/')
                if ref and not ref.startswith(('http://', 'https://', 'data:', '#')):
                    asset_refs.add(ref)
    for ref in asset_refs:
        if ref not in normalized:
            if ref.lower().endswith('.css'):
                normalized[ref] = "/* auto-generated stub */"
            elif ref.lower().endswith('.js'):
                normalized[ref] = "// auto-generated stub"
    return normalized
def build_multipage_inline_html(files: Dict[str, str]) -> str:
    """Merge a multi-page site into a single HTML document for iframe preview.
    Inlines local CSS/JS referenced from index.html so the sandbox needs no extra requests.
    """
    import re as _re
    doc = files.get('index.html', '') or ''
    # Inline local stylesheets referenced via <link href="...">
    def _inline_css(match):
        href = match.group(1).strip().lstrip('/')
        if href in files:
            return f"<style>\n{files[href]}\n</style>"
        return match.group(0)
    doc = _re.sub(r'<link[^>]+href="([^"]+)"[^>]*/?>', _inline_css, doc, flags=_re.IGNORECASE)
    # Inline local scripts referenced via <script src="...">
    def _inline_js(match):
        src = match.group(1).strip().lstrip('/')
        if src in files:
            return f"<script>\n{files[src]}\n</script>"
        return match.group(0)
    doc = _re.sub(r'<script[^>]+src="([^"]+)"[^>]*>\s*</script>', _inline_js, doc, flags=_re.IGNORECASE)
# Inject a lightweight in-iframe client-side navigator to load other HTML files
try:
import json as _json
import base64 as _b64
import re as _re
html_pages = {k: v for k, v in files.items() if k.lower().endswith('.html')}
# Ensure index.html entry restores the current body's HTML
_m_body = _re.search(r"]*>([\s\S]*?)", doc, flags=_re.IGNORECASE)
_index_body = _m_body.group(1) if _m_body else doc
html_pages['index.html'] = _index_body
encoded = _b64.b64encode(_json.dumps(html_pages).encode('utf-8')).decode('ascii')
        nav_script = (
            '<script>(function(){'
            'var PAGES=JSON.parse(atob("' + encoded + '"));'
            'function nav(name){var html=PAGES[name];if(html==null)return false;'
            'document.body.innerHTML=html;return true;}'
            'document.addEventListener("click",function(e){'
            'var a=e.target.closest&&e.target.closest("a");if(!a)return;'
            'var href=a.getAttribute("href")||"";'
            'if(PAGES[href]){e.preventDefault();nav(href);}});'
            '})();</script>'
        )
        m = _re.search(r"</body>", doc, flags=_re.IGNORECASE)
if m:
i = m.start()
doc = doc[:i] + nav_script + doc[i:]
else:
doc = doc + nav_script
except Exception:
# Non-fatal in preview
pass
return doc
def extract_html_document(text: str) -> str:
    """Return substring starting from the first <!DOCTYPE html> or <html> if present, else original text.
    This ignores prose or planning notes before the actual HTML so previews don't break.
    """
    if not text:
        return text
    lower = text.lower()
    idx = lower.find("<!doctype html")
    if idx == -1:
        idx = lower.find("<html")
    return text[idx:] if idx != -1 else text
def infer_svelte_dependencies(files: Dict[str, str]) -> Dict[str, str]:
"""Infer npm dependencies from Svelte/TS imports across generated files.
Returns mapping of package name -> semver (string). Uses conservative defaults
when versions aren't known. Adds special-cased versions when known.
"""
import re as _re
deps: Dict[str, str] = {}
import_from = _re.compile(r"import\s+[^;]*?from\s+['\"]([^'\"]+)['\"]", _re.IGNORECASE)
bare_import = _re.compile(r"import\s+['\"]([^'\"]+)['\"]", _re.IGNORECASE)
    def maybe_add(pkg: str):
        if not pkg or pkg.startswith('.') or pkg.startswith('/') or pkg.startswith('http'):
            return
        if pkg.startswith('svelte'):
            return
        # Normalize deep imports to the package root ('@scope/pkg/sub' -> '@scope/pkg', 'pkg/sub' -> 'pkg')
        parts = pkg.split('/')
        pkg = '/'.join(parts[:2]) if pkg.startswith('@') else parts[0]
        if pkg not in deps:
            # Default to wildcard; adjust known packages below
            deps[pkg] = "*"
for path, content in (files or {}).items():
if not isinstance(content, str):
continue
for m in import_from.finditer(content):
maybe_add(m.group(1))
for m in bare_import.finditer(content):
maybe_add(m.group(1))
# Pin known versions when sensible
if '@gradio/dataframe' in deps:
deps['@gradio/dataframe'] = '^0.19.1'
return deps
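# Example (illustrative; file and import names are assumptions): imports become
# wildcard deps, relative and svelte imports are skipped, deep imports normalize
# to their package root, and known packages get pinned versions.
#
#   infer_svelte_dependencies({'App.svelte':
#       "import { x } from '@gradio/dataframe/sub';\nimport './local.js';"})
#   # -> {'@gradio/dataframe': '^0.19.1'}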
def build_svelte_package_json(existing_json_text: Optional[str], detected_dependencies: Dict[str, str]) -> str:
"""Create or merge a package.json for Svelte spaces.
- If existing_json_text is provided, merge detected deps into its dependencies.
- Otherwise, start from the template defaults provided by the user and add deps.
- Always preserve template scripts and devDependencies.
"""
import json as _json
# Template from the user's Svelte space scaffold
template = {
"name": "svelte",
"private": True,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"preview": "vite preview",
"check": "svelte-check --tsconfig ./tsconfig.app.json && tsc -p tsconfig.node.json"
},
"devDependencies": {
"@sveltejs/vite-plugin-svelte": "^5.0.3",
"@tsconfig/svelte": "^5.0.4",
"svelte": "^5.28.1",
"svelte-check": "^4.1.6",
"typescript": "~5.8.3",
"vite": "^6.3.5"
}
}
result = template
if existing_json_text:
try:
parsed = _json.loads(existing_json_text)
# Merge with template as base, keeping template scripts/devDependencies if missing in parsed
result = {
**template,
**{k: v for k, v in parsed.items() if k not in ("scripts", "devDependencies")},
}
# If parsed contains its own scripts/devDependencies, prefer parsed to respect user's file
if isinstance(parsed.get("scripts"), dict):
result["scripts"] = parsed["scripts"]
if isinstance(parsed.get("devDependencies"), dict):
result["devDependencies"] = parsed["devDependencies"]
except Exception:
# Fallback to template if parse fails
result = template
# Merge dependencies
existing_deps = result.get("dependencies", {})
if not isinstance(existing_deps, dict):
existing_deps = {}
merged = {**existing_deps, **(detected_dependencies or {})}
if merged:
result["dependencies"] = merged
else:
result.pop("dependencies", None)
return _json.dumps(result, indent=2, ensure_ascii=False) + "\n"
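# Merge sketch (illustrative, hypothetical deps): detected dependencies are
# layered onto an existing package.json without touching scripts/devDependencies.
#
#   import json
#   pkg = build_svelte_package_json('{"dependencies": {"left-pad": "1.0.0"}}', {"d3": "*"})
#   json.loads(pkg)["dependencies"]   # -> {'left-pad': '1.0.0', 'd3': '*'}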
def history_render(history: History):
return gr.update(visible=True), history
def clear_history():
return [], [], None, "" # Empty lists for both tuple format and chatbot messages, None for file, empty string for website URL
def update_image_input_visibility(model):
"""Update image input visibility based on selected model"""
is_ernie_vl = model.get("id") == "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT"
is_glm_vl = model.get("id") == "THUDM/GLM-4.1V-9B-Thinking"
is_glm_45v = model.get("id") == "zai-org/GLM-4.5V"
return gr.update(visible=is_ernie_vl or is_glm_vl or is_glm_45v)
def process_image_for_model(image):
"""Convert image to base64 for model input"""
if image is None:
return None
# Convert numpy array to PIL Image if needed
import io
import base64
import numpy as np
from PIL import Image
# Handle numpy array from Gradio
if isinstance(image, np.ndarray):
image = Image.fromarray(image)
buffer = io.BytesIO()
image.save(buffer, format='PNG')
img_str = base64.b64encode(buffer.getvalue()).decode('utf-8')
return f"data:image/png;base64,{img_str}"
def compress_video_for_data_uri(video_bytes: bytes, max_size_mb: int = 8) -> bytes:
"""Compress video bytes for data URI embedding with size limit"""
import subprocess
import tempfile
import os
max_size = max_size_mb * 1024 * 1024
# If already small enough, return as-is
if len(video_bytes) <= max_size:
return video_bytes
print(f"[VideoCompress] Video size {len(video_bytes)} bytes exceeds {max_size_mb}MB limit, attempting compression")
try:
# Create temp files
with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_input:
temp_input.write(video_bytes)
temp_input_path = temp_input.name
temp_output_path = temp_input_path.replace('.mp4', '_compressed.mp4')
try:
# Compress with ffmpeg - aggressive settings for small size
subprocess.run([
'ffmpeg', '-i', temp_input_path,
'-vcodec', 'libx264', '-crf', '30', '-preset', 'fast',
'-vf', 'scale=480:-1', '-r', '15', # Lower resolution and frame rate
'-an', # Remove audio to save space
'-y', temp_output_path
            ], check=True, capture_output=True)  # capture_output already captures stderr; passing both raises ValueError
# Read compressed video
with open(temp_output_path, 'rb') as f:
compressed_bytes = f.read()
print(f"[VideoCompress] Compressed from {len(video_bytes)} to {len(compressed_bytes)} bytes")
return compressed_bytes
except (subprocess.CalledProcessError, FileNotFoundError):
print("[VideoCompress] ffmpeg compression failed, using original video")
return video_bytes
finally:
# Clean up temp files
for path in [temp_input_path, temp_output_path]:
try:
if os.path.exists(path):
os.remove(path)
except Exception:
pass
except Exception as e:
print(f"[VideoCompress] Compression failed: {e}, using original video")
return video_bytes
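# Shell equivalent of the ffmpeg invocation above (illustrative filenames):
#
#   ffmpeg -i in.mp4 -vcodec libx264 -crf 30 -preset fast -vf scale=480:-1 -r 15 -an -y out.mp4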
def compress_audio_for_data_uri(audio_bytes: bytes, max_size_mb: int = 4) -> bytes:
"""Compress audio bytes for data URI embedding with size limit"""
import subprocess
import tempfile
import os
max_size = max_size_mb * 1024 * 1024
# If already small enough, return as-is
if len(audio_bytes) <= max_size:
return audio_bytes
print(f"[AudioCompress] Audio size {len(audio_bytes)} bytes exceeds {max_size_mb}MB limit, attempting compression")
try:
# Create temp files
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_input:
temp_input.write(audio_bytes)
temp_input_path = temp_input.name
temp_output_path = temp_input_path.replace('.wav', '_compressed.mp3')
try:
# Compress with ffmpeg - convert to MP3 with lower bitrate
subprocess.run([
'ffmpeg', '-i', temp_input_path,
'-codec:a', 'libmp3lame', '-b:a', '64k', # Low bitrate MP3
'-y', temp_output_path
            ], check=True, capture_output=True)  # capture_output already captures stderr; passing both raises ValueError
# Read compressed audio
with open(temp_output_path, 'rb') as f:
compressed_bytes = f.read()
print(f"[AudioCompress] Compressed from {len(audio_bytes)} to {len(compressed_bytes)} bytes")
return compressed_bytes
except (subprocess.CalledProcessError, FileNotFoundError):
print("[AudioCompress] ffmpeg compression failed, using original audio")
return audio_bytes
finally:
# Clean up temp files
for path in [temp_input_path, temp_output_path]:
try:
if os.path.exists(path):
os.remove(path)
except Exception:
pass
except Exception as e:
print(f"[AudioCompress] Compression failed: {e}, using original audio")
return audio_bytes
# ---------------------------------------------------------------------------
# General temp media file management (per-session tracking and cleanup)
# ---------------------------------------------------------------------------
MEDIA_TEMP_DIR = os.path.join(tempfile.gettempdir(), "anycoder_media")
MEDIA_FILE_TTL_SECONDS = 6 * 60 * 60 # 6 hours
_SESSION_MEDIA_FILES: Dict[str, List[str]] = {}
_MEDIA_FILES_LOCK = threading.Lock()
# Global dictionary to store temporary media files for the session
temp_media_files = {}
def _ensure_media_dir_exists() -> None:
"""Ensure the media temp directory exists."""
try:
os.makedirs(MEDIA_TEMP_DIR, exist_ok=True)
except Exception:
pass
def track_session_media_file(session_id: Optional[str], file_path: str) -> None:
"""Track a media file for session-based cleanup."""
if not session_id or not file_path:
return
with _MEDIA_FILES_LOCK:
if session_id not in _SESSION_MEDIA_FILES:
_SESSION_MEDIA_FILES[session_id] = []
_SESSION_MEDIA_FILES[session_id].append(file_path)
def cleanup_session_media(session_id: Optional[str]) -> None:
"""Clean up media files for a specific session."""
if not session_id:
return
with _MEDIA_FILES_LOCK:
files_to_clean = _SESSION_MEDIA_FILES.pop(session_id, [])
for path in files_to_clean:
try:
if path and os.path.exists(path):
os.unlink(path)
except Exception:
# Best-effort cleanup
pass
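# Lifecycle sketch (illustrative session ID and path): files created during a
# session are tracked, then deleted together when that session ends.
#
#   track_session_media_file("sess-1", "/tmp/anycoder_media/a.png")  # hypothetical path
#   cleanup_session_media("sess-1")   # best-effort unlink of everything tracked above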
def reap_old_media(ttl_seconds: int = MEDIA_FILE_TTL_SECONDS) -> None:
"""Delete old media files in the temp directory based on modification time."""
try:
_ensure_media_dir_exists()
now_ts = time.time()
for name in os.listdir(MEDIA_TEMP_DIR):
path = os.path.join(MEDIA_TEMP_DIR, name)
if os.path.isfile(path):
try:
mtime = os.path.getmtime(path)
if (now_ts - mtime) > ttl_seconds:
os.unlink(path)
except Exception:
pass
except Exception:
# Temp dir might not exist or be accessible; ignore
pass
def cleanup_all_temp_media_on_startup() -> None:
"""Clean up all temporary media files on app startup."""
try:
# Clean up temp_media_files registry
temp_media_files.clear()
# Clean up actual files from disk (assume all are orphaned on startup)
_ensure_media_dir_exists()
for name in os.listdir(MEDIA_TEMP_DIR):
path = os.path.join(MEDIA_TEMP_DIR, name)
if os.path.isfile(path):
try:
os.unlink(path)
except Exception:
pass
# Clear session tracking
with _MEDIA_FILES_LOCK:
_SESSION_MEDIA_FILES.clear()
print("[StartupCleanup] Cleaned up orphaned temporary media files")
except Exception as e:
print(f"[StartupCleanup] Error during media cleanup: {str(e)}")
def cleanup_all_temp_media_on_shutdown() -> None:
"""Clean up all temporary media files on app shutdown."""
try:
print("[ShutdownCleanup] Cleaning up temporary media files...")
# Clean up temp_media_files registry and remove files
for file_id, file_info in temp_media_files.items():
try:
if os.path.exists(file_info['path']):
os.unlink(file_info['path'])
except Exception:
pass
temp_media_files.clear()
# Clean up all session files
with _MEDIA_FILES_LOCK:
for session_id, file_paths in _SESSION_MEDIA_FILES.items():
for path in file_paths:
try:
if path and os.path.exists(path):
os.unlink(path)
except Exception:
pass
_SESSION_MEDIA_FILES.clear()
print("[ShutdownCleanup] Temporary media cleanup completed")
except Exception as e:
print(f"[ShutdownCleanup] Error during cleanup: {str(e)}")
# Register shutdown cleanup handler
atexit.register(cleanup_all_temp_media_on_shutdown)
def create_temp_media_url(media_bytes: bytes, filename: str, media_type: str = "image", session_id: Optional[str] = None) -> str:
"""Create a temporary file and return a local URL for preview.
Args:
media_bytes: Raw bytes of the media file
filename: Name for the file (will be made unique)
media_type: Type of media ('image', 'video', 'audio')
session_id: Session ID for tracking cleanup
Returns:
Temporary file URL for preview or error message
"""
try:
# Create unique filename with timestamp and UUID
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")  # datetime class imported directly above
unique_id = str(uuid.uuid4())[:8]
base_name, ext = os.path.splitext(filename)
unique_filename = f"{media_type}_{timestamp}_{unique_id}_{base_name}{ext}"
# Create temporary file in the dedicated directory
_ensure_media_dir_exists()
temp_path = os.path.join(MEDIA_TEMP_DIR, unique_filename)
# Write media bytes to temporary file
with open(temp_path, 'wb') as f:
f.write(media_bytes)
# Track file for session-based cleanup
if session_id:
track_session_media_file(session_id, temp_path)
# Store the file info for later upload
file_id = f"{media_type}_{unique_id}"
temp_media_files[file_id] = {
'path': temp_path,
'filename': filename,
'media_type': media_type,
'media_bytes': media_bytes
}
# Return file:// URL for preview
file_url = f"file://{temp_path}"
print(f"[TempMedia] Created temporary {media_type} file: {file_url}")
return file_url
except Exception as e:
print(f"[TempMedia] Failed to create temporary file: {str(e)}")
return f"Error creating temporary {media_type} file: {str(e)}"
def upload_media_to_hf(media_bytes: bytes, filename: str, media_type: str = "image", token: gr.OAuthToken | None = None, use_temp: bool = True) -> str:
"""Upload media file to user's Hugging Face account or create temporary file.
Args:
media_bytes: Raw bytes of the media file
filename: Name for the file (will be made unique)
media_type: Type of media ('image', 'video', 'audio')
token: OAuth token from gr.login (takes priority over env var)
use_temp: If True, create temporary file for preview; if False, upload to HF
Returns:
Permanent URL to the uploaded file, temporary URL, or error message
"""
try:
# If use_temp is True, create temporary file for preview
if use_temp:
return create_temp_media_url(media_bytes, filename, media_type)
# Otherwise, upload to Hugging Face for permanent URL
# Try to get token from OAuth first, then fall back to environment variable
hf_token = None
if token and token.token:
hf_token = token.token
else:
hf_token = os.getenv('HF_TOKEN')
if not hf_token:
return "Error: Please log in with your Hugging Face account to upload media, or set HF_TOKEN environment variable."
# Initialize HF API
api = HfApi(token=hf_token)
# Get current user info to determine username
try:
user_info = api.whoami()
username = user_info.get('name', 'unknown-user')
except Exception as e:
print(f"[HFUpload] Could not get user info: {e}")
username = 'anycoder-user'
# Create repository name for media storage
repo_name = f"{username}/anycoder-media"
# Try to create the repository if it doesn't exist
try:
api.create_repo(
repo_id=repo_name,
repo_type="dataset",
private=False,
exist_ok=True
)
print(f"[HFUpload] Repository {repo_name} ready")
except Exception as e:
print(f"[HFUpload] Repository creation/access issue: {e}")
# Continue anyway, repo might already exist
# Create unique filename with timestamp and UUID
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")  # datetime class imported directly above
unique_id = str(uuid.uuid4())[:8]
base_name, ext = os.path.splitext(filename)
unique_filename = f"{media_type}/{timestamp}_{unique_id}_{base_name}{ext}"
# Create temporary file for upload
with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as temp_file:
temp_file.write(media_bytes)
temp_path = temp_file.name
try:
# Upload file to HF repository
api.upload_file(
path_or_fileobj=temp_path,
path_in_repo=unique_filename,
repo_id=repo_name,
repo_type="dataset",
commit_message=f"Upload {media_type} generated by AnyCoder"
)
# Generate permanent URL
permanent_url = f"https://huggingface.co/datasets/{repo_name}/resolve/main/{unique_filename}"
print(f"[HFUpload] Successfully uploaded {media_type} to {permanent_url}")
return permanent_url
finally:
# Clean up temporary file
try:
os.unlink(temp_path)
except Exception:
pass
except Exception as e:
print(f"[HFUpload] Upload failed: {str(e)}")
return f"Error uploading {media_type} to Hugging Face: {str(e)}"
def upload_temp_files_to_hf_and_replace_urls(html_content: str, token: gr.OAuthToken | None = None) -> str:
"""Upload all temporary media files to HF and replace their URLs in HTML content.
Args:
html_content: HTML content containing temporary file URLs
token: OAuth token for HF authentication
Returns:
Updated HTML content with permanent HF URLs
"""
try:
if not temp_media_files:
print("[DeployUpload] No temporary media files to upload")
return html_content
print(f"[DeployUpload] Uploading {len(temp_media_files)} temporary media files to HF")
updated_content = html_content
for file_id, file_info in temp_media_files.items():
try:
# Upload to HF with permanent URL
permanent_url = upload_media_to_hf(
file_info['media_bytes'],
file_info['filename'],
file_info['media_type'],
token,
use_temp=False # Force permanent upload
)
if not permanent_url.startswith("Error"):
# Replace the temporary file URL with permanent URL
temp_url = f"file://{file_info['path']}"
updated_content = updated_content.replace(temp_url, permanent_url)
print(f"[DeployUpload] Replaced {temp_url} with {permanent_url}")
else:
print(f"[DeployUpload] Failed to upload {file_id}: {permanent_url}")
except Exception as e:
print(f"[DeployUpload] Error uploading {file_id}: {str(e)}")
continue
# Clean up temporary files after upload
cleanup_temp_media_files()
return updated_content
except Exception as e:
print(f"[DeployUpload] Failed to upload temporary files: {str(e)}")
return html_content
def cleanup_temp_media_files():
"""Clean up temporary media files from disk and memory."""
try:
for file_id, file_info in temp_media_files.items():
try:
if os.path.exists(file_info['path']):
os.remove(file_info['path'])
print(f"[TempCleanup] Removed {file_info['path']}")
except Exception as e:
print(f"[TempCleanup] Failed to remove {file_info['path']}: {str(e)}")
# Clear the global dictionary
temp_media_files.clear()
print("[TempCleanup] Cleared temporary media files registry")
except Exception as e:
print(f"[TempCleanup] Error during cleanup: {str(e)}")
def generate_image_with_gemini(prompt: str, image_index: int = 0, token: gr.OAuthToken | None = None) -> str:
"""Generate image using Google Gemini 2.5 Flash Image Preview via OpenRouter.
Uses google/gemini-2.5-flash-image-preview:free via OpenRouter chat completions API.
    Returns an HTML <img> tag whose src is an uploaded temporary URL.
"""
try:
print(f"[Text2Image] Starting generation with prompt: {prompt[:100]}...")
# Check for OpenRouter API key
openrouter_key = os.getenv('OPENROUTER_API_KEY')
if not openrouter_key:
print("[Text2Image] Missing OPENROUTER_API_KEY")
return "Error: OPENROUTER_API_KEY environment variable is not set. Please set it to your OpenRouter API key."
import requests
import json as _json
import base64
import io as _io
from PIL import Image
# Create the chat completion request for text-to-image
headers = {
"Authorization": f"Bearer {openrouter_key}",
"Content-Type": "application/json"
}
data = {
"model": "google/gemini-2.5-flash-image-preview:free",
"messages": [
{
"role": "user",
"content": f"Generate an image based on this description: {prompt}"
}
],
"temperature": 0.7,
"max_tokens": 1000
}
try:
print("[Text2Image] Making API request to OpenRouter...")
response = requests.post(
"https://openrouter.ai/api/v1/chat/completions",
headers=headers,
json=data,
timeout=60
)
response.raise_for_status()
result_data = response.json()
print(f"[Text2Image] Received API response: {_json.dumps(result_data, indent=2)}")
# Extract the generated image from the response (using same pattern as image-to-image)
message = result_data.get('choices', [{}])[0].get('message', {})
if message and 'images' in message and message['images']:
# Get the first image from the 'images' list
image_data = message['images'][0]
base64_string = image_data.get('image_url', {}).get('url', '')
if base64_string and ',' in base64_string:
# Remove the "data:image/png;base64," prefix
base64_content = base64_string.split(',')[1]
# Decode the base64 string and create a PIL image
img_bytes = base64.b64decode(base64_content)
generated_image = Image.open(_io.BytesIO(img_bytes))
# Convert PIL image to JPEG bytes for upload
out_buf = _io.BytesIO()
generated_image.convert('RGB').save(out_buf, format='JPEG', quality=90, optimize=True)
image_bytes = out_buf.getvalue()
else:
raise RuntimeError(f"API returned an invalid image format. Response: {_json.dumps(result_data, indent=2)}")
else:
raise RuntimeError(f"API did not return an image. Full Response: {_json.dumps(result_data, indent=2)}")
except requests.exceptions.HTTPError as err:
error_body = err.response.text
if err.response.status_code == 401:
return "Error: Authentication failed. Check your OpenRouter API key."
elif err.response.status_code == 429:
return "Error: Rate limit exceeded or insufficient credits. Check your OpenRouter account."
else:
return f"Error: An API error occurred: {error_body}"
except Exception as e:
return f"Error: An unexpected error occurred: {str(e)}"
# Upload and return HTML tag
print("[Text2Image] Uploading image to HF...")
filename = f"generated_image_{image_index}.jpg"
temp_url = upload_media_to_hf(image_bytes, filename, "image", token, use_temp=True)
if temp_url.startswith("Error"):
print(f"[Text2Image] Upload failed: {temp_url}")
return temp_url
print(f"[Text2Image] Successfully generated image: {temp_url}")
return f""
except Exception as e:
print(f"Text-to-image generation error: {str(e)}")
return f"Error generating image (text-to-image): {str(e)}"
def generate_image_with_qwen(prompt: str, image_index: int = 0, token: gr.OAuthToken | None = None) -> str:
"""Generate image using Qwen image model via Hugging Face InferenceClient and upload to HF for permanent URL"""
try:
# Check if HF_TOKEN is available
if not os.getenv('HF_TOKEN'):
return "Error: HF_TOKEN environment variable is not set. Please set it to your Hugging Face API token."
# Create InferenceClient for Qwen image generation
client = InferenceClient(
provider="auto",
api_key=os.getenv('HF_TOKEN'),
bill_to="huggingface",
)
# Generate image using Qwen/Qwen-Image model
image = client.text_to_image(
prompt,
model="Qwen/Qwen-Image",
)
# Resize image to reduce size while maintaining quality
max_size = 1024 # Increased size since we're not using data URIs
if image.width > max_size or image.height > max_size:
image.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)
# Convert PIL Image to bytes for upload
import io
buffer = io.BytesIO()
# Save as JPEG with good quality since we're not embedding
image.convert('RGB').save(buffer, format='JPEG', quality=90, optimize=True)
image_bytes = buffer.getvalue()
# Create temporary URL for preview (will be uploaded to HF during deploy)
filename = f"generated_image_{image_index}.jpg"
temp_url = upload_media_to_hf(image_bytes, filename, "image", token, use_temp=True)
# Check if creation was successful
if temp_url.startswith("Error"):
return temp_url
# Return HTML img tag with temporary URL
        return f'<img src="{temp_url}" alt="Generated image" />'
except Exception as e:
print(f"Image generation error: {str(e)}")
return f"Error generating image: {str(e)}"
def generate_image_to_image(input_image_data, prompt: str, token: gr.OAuthToken | None = None) -> str:
"""Generate an image using image-to-image via OpenRouter.
Uses Google Gemini 2.5 Flash Image Preview via OpenRouter chat completions API.
    Returns an HTML <img> tag whose src is an uploaded temporary URL.
"""
try:
# Check for OpenRouter API key
openrouter_key = os.getenv('OPENROUTER_API_KEY')
if not openrouter_key:
return "Error: OPENROUTER_API_KEY environment variable is not set. Please set it to your OpenRouter API key."
# Normalize input image to bytes
import io
from PIL import Image
import base64
import requests
import json as _json
try:
import numpy as np
except Exception:
np = None
if hasattr(input_image_data, 'read'):
raw = input_image_data.read()
pil_image = Image.open(io.BytesIO(raw))
elif hasattr(input_image_data, 'mode') and hasattr(input_image_data, 'size'):
pil_image = input_image_data
elif np is not None and isinstance(input_image_data, np.ndarray):
pil_image = Image.fromarray(input_image_data)
elif isinstance(input_image_data, (bytes, bytearray)):
pil_image = Image.open(io.BytesIO(input_image_data))
else:
pil_image = Image.open(io.BytesIO(bytes(input_image_data)))
if pil_image.mode != 'RGB':
pil_image = pil_image.convert('RGB')
# Resize input image to avoid request body size limits
max_input_size = 1024
if pil_image.width > max_input_size or pil_image.height > max_input_size:
pil_image.thumbnail((max_input_size, max_input_size), Image.Resampling.LANCZOS)
# Convert to base64
import io as _io
buffered = _io.BytesIO()
pil_image.save(buffered, format='PNG')
img_b64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
# Call OpenRouter API
headers = {
"Authorization": f"Bearer {openrouter_key}",
"Content-Type": "application/json",
"HTTP-Referer": os.getenv("YOUR_SITE_URL", "https://example.com"),
"X-Title": os.getenv("YOUR_SITE_NAME", "AnyCoder Image I2I"),
}
payload = {
"model": "google/gemini-2.5-flash-image-preview:free",
"messages": [
{
"role": "user",
"content": [
{"type": "text", "text": prompt},
{"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_b64}"}},
],
}
],
"max_tokens": 2048,
}
try:
resp = requests.post(
"https://openrouter.ai/api/v1/chat/completions",
headers=headers,
data=_json.dumps(payload),
timeout=60,
)
resp.raise_for_status()
result_data = resp.json()
# Corrected response parsing logic
message = result_data.get('choices', [{}])[0].get('message', {})
if message and 'images' in message and message['images']:
# Get the first image from the 'images' list
image_data = message['images'][0]
base64_string = image_data.get('image_url', {}).get('url', '')
if base64_string and ',' in base64_string:
# Remove the "data:image/png;base64," prefix
base64_content = base64_string.split(',')[1]
# Decode the base64 string and create a PIL image
img_bytes = base64.b64decode(base64_content)
edited_image = Image.open(_io.BytesIO(img_bytes))
# Convert PIL image to JPEG bytes for upload
out_buf = _io.BytesIO()
edited_image.convert('RGB').save(out_buf, format='JPEG', quality=90, optimize=True)
image_bytes = out_buf.getvalue()
else:
raise RuntimeError(f"API returned an invalid image format. Response: {_json.dumps(result_data, indent=2)}")
else:
raise RuntimeError(f"API did not return an image. Full Response: {_json.dumps(result_data, indent=2)}")
except requests.exceptions.HTTPError as err:
error_body = err.response.text
if err.response.status_code == 401:
return "Error: Authentication failed. Check your OpenRouter API key."
elif err.response.status_code == 429:
return "Error: Rate limit exceeded or insufficient credits. Check your OpenRouter account."
else:
return f"Error: An API error occurred: {error_body}"
except Exception as e:
return f"Error: An unexpected error occurred: {str(e)}"
# Upload and return HTML tag
filename = "image_to_image_result.jpg"
temp_url = upload_media_to_hf(image_bytes, filename, "image", token, use_temp=True)
if temp_url.startswith("Error"):
return temp_url
return f""
except Exception as e:
print(f"Image-to-image generation error: {str(e)}")
return f"Error generating image (image-to-image): {str(e)}"
def generate_video_from_image(input_image_data, prompt: str, session_id: Optional[str] = None, token: gr.OAuthToken | None = None) -> str:
"""Generate a video from an input image and prompt using Hugging Face InferenceClient.
    Returns an HTML <video> tag whose src is an uploaded temporary URL.