# builder / app.py
# mgbam's picture
# Update app.py
# c558be9 verified
# raw
# history blame
# 4.77 kB
# app.py
# --------------------------------------------------------------------
# AnyCoder / Shasha AI – Gradio back‑end
# --------------------------------------------------------------------
"""
β€’ Renders the custom front‑end stored in index.html (+ static assets).
β€’ Provides one API route (`POST /run/predict`) the JS front‑end calls
to run model inference.
β€’ Relies on helper modules (inference.py, models.py, utils.py, …)
exactly as you already have them.
"""
from pathlib import Path
from typing import List, Tuple
import gradio as gr
# ── local helpers (unchanged) ────────────────────────────────────────
from inference import chat_completion
from tavily_search import enhance_query_with_search
from models import AVAILABLE_MODELS, find_model, ModelInfo
from deploy import send_to_sandbox
from utils import (
extract_text_from_file,
extract_website_content,
history_to_messages,
history_to_chatbot_messages,
apply_search_replace_changes,
remove_code_block,
parse_transformers_js_output,
format_transformers_js_output,
)
# ── constants ────────────────────────────────────────────────────────
# Chat history: list of (user_prompt, generated_code) pairs.
History = List[Tuple[str, str]]
# Per-language system prompts; generate() falls back to a generic
# "expert {language} developer" prompt for languages not listed here.
SYSTEM_PROMPTS = {
    "html": (
        "ONLY USE HTML, CSS AND JAVASCRIPT. Return ONE html file "
        "wrapped in ```html ...```."
    ),
    "transformers.js": (
        "Generate THREE separate files (index.html / index.js / style.css) "
        "as three fenced blocks."
    ),
}
# ── core back‑end callback ───────────────────────────────────────────
def generate(
    prompt: str,
    file_path: str | None,
    website_url: str | None,
    model_id: str,
    language: str,
    enable_search: bool,
    history: History | None,
) -> Tuple[str, History]:
    """Backend for /run/predict.

    Builds the chat context from the prompt plus optional file/website
    extracts, runs the selected model, post-processes the answer for the
    target language, and returns ``(code, updated_history)``.
    """
    history = [] if history is None else history

    # 1) system prompt + prior turns
    sys_prompt = SYSTEM_PROMPTS.get(
        language, f"You are an expert {language} developer."
    )
    messages = history_to_messages(history, sys_prompt)

    context: list[str] = [prompt.strip()]
    if file_path:
        # Attach (at most) the first 5 000 chars of the uploaded file.
        context += ["[File]", extract_text_from_file(file_path)[:5_000]]
    if website_url:
        site_html = extract_website_content(website_url)
        if not site_html.startswith("Error"):
            # Attach (at most) the first 8 000 chars of the scraped page.
            context += ["[Website]", site_html[:8_000]]

    query = enhance_query_with_search("\n\n".join(filter(None, context)), enable_search)
    messages.append({"role": "user", "content": query})

    # 2) model call (unknown model ids fall back to the first available one)
    model: ModelInfo = find_model(model_id) or AVAILABLE_MODELS[0]
    raw_answer = chat_completion(model.id, messages)

    # 3) language-specific post-processing
    if language == "transformers.js":
        code = format_transformers_js_output(parse_transformers_js_output(raw_answer))
    else:
        code = remove_code_block(raw_answer)
        if history and not history[-1][1].startswith("❌"):
            # Follow-up turn: interpret the answer as search/replace edits
            # against the previously generated code.
            code = apply_search_replace_changes(history[-1][1], code)

    history.append((prompt, code))
    return code, history
# ── read the custom HTML front‑end ───────────────────────────────────
# Loaded once at import time; read_text raises FileNotFoundError if
# index.html is not in the working directory, so startup fails fast.
INDEX = Path("index.html").read_text(encoding="utf-8")
# ── Gradio UI (wrapper only) ─────────────────────────────────────────
# Fix: the window title contained mojibake ("AnyCoderΒ AI" — a
# double-encoded non-breaking space rendered as Greek Beta); it is a
# user-visible string and now reads "AnyCoder AI".
with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
    # 1) Visible part: the custom static front-end from index.html.
    gr.HTML(INDEX)  # rendered as-is — no unsafe_allow_html / sanitize args

    # 2) Hidden components that exist only to wire up the API call the
    #    JS front-end makes; they are never shown to the user.
    with gr.Group(visible=False) as api:
        prompt_in = gr.Textbox()
        file_in = gr.File()
        url_in = gr.Textbox()
        model_in = gr.Textbox()
        lang_in = gr.Textbox()
        search_in = gr.Checkbox()
        hist_state = gr.State([])
        code_out, hist_out = gr.Textbox(), gr.State([])

        # Expose `generate` as POST /run/predict for the JS front-end.
        trig = gr.Button(visible=False)
        trig.click(
            generate,
            inputs=[prompt_in, file_in, url_in, model_in, lang_in, search_in, hist_state],
            outputs=[code_out, hist_out],
            api_name="predict",
        )
# ── static assets (.css / .js) are picked up automatically by HF Spaces
if __name__ == "__main__":
    # Enable Gradio's request queue, then start the server.
    demo.queue().launch()