File size: 7,394 Bytes
c558be9
1bd1ac4
c558be9
1bd1ac4
13a7675
1bd1ac4
 
 
 
 
48f06a6
c558be9
1bd1ac4
 
13a7675
1bd1ac4
13a7675
583310d
13a7675
1bd1ac4
 
 
a18bd58
1bd1ac4
c558be9
 
 
1bd1ac4
13a7675
c558be9
 
 
 
1bd1ac4
 
 
 
 
 
 
 
 
 
13a7675
 
 
1bd1ac4
 
13a7675
 
1bd1ac4
 
13a7675
 
 
1bd1ac4
13a7675
1bd1ac4
 
 
13a7675
 
 
1bd1ac4
 
48f06a6
1bd1ac4
 
 
 
 
13a7675
1bd1ac4
 
 
13a7675
1bd1ac4
13a7675
1bd1ac4
13a7675
 
 
1bd1ac4
48f06a6
1bd1ac4
 
13a7675
1bd1ac4
 
 
 
 
 
 
 
13a7675
1bd1ac4
13a7675
1bd1ac4
13a7675
1bd1ac4
13a7675
1bd1ac4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c558be9
f7cf3be
2deb7a7
c558be9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
# app.py
# ────────────────────────────────────────────────────────────────
"""
AnyCoder / Shasha AI – lightweight Gradio front-end

• Pick an AI model from models.py ➜ AVAILABLE_MODELS
• Provide context (prompt / file / website)
• Choose a target language from 25+ options
• Optional Tavily web-search enrichment
• Generate code & live-preview HTML
"""

from __future__ import annotations

from pathlib import Path
from typing import List, Tuple, Dict, Any, Optional

import gradio as gr

# ── local helpers ───────────────────────────────────────────────
from models       import AVAILABLE_MODELS, find_model, ModelInfo
from inference    import chat_completion
from tavily_search import enhance_query_with_search
from utils        import (
    extract_text_from_file,
    extract_website_content,
    history_to_messages,
    history_to_chatbot_messages,
    apply_search_replace_changes,
    remove_code_block,
    parse_transformers_js_output,
    format_transformers_js_output,
)
from deploy       import send_to_sandbox

# ── constants ───────────────────────────────────────────────────
# Languages offered in the "Target Language" dropdown; the `sql-*`
# entries mirror CodeMirror SQL dialect names.
SUPPORTED_LANGUAGES = [
    "python", "c", "cpp", "markdown", "latex", "json", "html", "css",
    "javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell",
    "r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite",
    "sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql",
    "sql-gpSQL", "sql-sparkSQL", "sql-esper",
]

# Language-specific system prompts. Any language not listed here falls
# back to a generic "expert <language> developer" prompt (built inline
# by the generate_code callback).
SYSTEM_PROMPTS = {
    "html": (
        "ONLY USE HTML, CSS AND JAVASCRIPT. Produce ONE complete HTML file "
        "wrapped in ```html ...```."
    ),
    "transformers.js": (
        "Generate THREE fenced blocks (index.html / index.js / style.css) "
        "for a transformers.js web-app."
    ),
}

# Chat history: one (user_message, assistant_reply) tuple per turn.
History = List[Tuple[str, str]]


# ── core callback ───────────────────────────────────────────────
def generate_code(
    prompt: str,
    file_path: str | None,
    website_url: str | None,
    model_name: str,
    enable_search: bool,
    language: str,
    hist: History | None,
) -> Tuple[str, History, str, List[Dict[str, str]]]:
    """Back-end for the 'Generate Code' button.

    Builds a chat request from the prompt plus optional file/website
    context, runs the selected model, post-processes the reply for the
    chosen target language, and returns
    ``(code, updated_history, html_preview, chatbot_messages)``.
    """
    hist = hist or []
    prompt = (prompt or "").strip()

    # 1 — assemble messages: system prompt, prior turns, new user turn.
    sys_prompt = SYSTEM_PROMPTS.get(language, f"You are an expert {language} developer.")
    messages = history_to_messages(hist, sys_prompt)

    # Skip an empty prompt so the join below has no leading separator.
    ctx_parts: list[str] = [prompt] if prompt else []
    if file_path:
        # Cap file context at 5 kB to keep the request small.
        ctx_parts += ["[File]", extract_text_from_file(file_path)[:5000]]
    if website_url:
        html = extract_website_content(website_url)
        # The helper signals failure via an "Error..." string — skip those.
        if not html.startswith("Error"):
            ctx_parts += ["[Website]", html[:8000]]

    user_msg = enhance_query_with_search("\n\n".join(ctx_parts), enable_search)
    messages.append({"role": "user", "content": user_msg})

    # 2 — run the model (provider selection handled in inference.chat_completion).
    model: ModelInfo = find_model(model_name) or AVAILABLE_MODELS[0]
    try:
        raw_out = chat_completion(model.id, messages)
    except Exception as exc:  # pragma: no cover — surfaced to the UI as a chat turn
        err = f"❌ **Error**\n```{exc}```"
        hist.append((prompt, err))
        return "", hist, "", history_to_chatbot_messages(hist)

    # 3 — post-process per target language.
    if language == "transformers.js":
        # Model was asked for three fenced files; preview only index.html.
        files = parse_transformers_js_output(raw_out)
        code = format_transformers_js_output(files)
        preview = send_to_sandbox(files.get("index.html", ""))
    else:
        cleaned = remove_code_block(raw_out)
        # Follow-up turns may come back as search/replace patches; apply
        # them against the previous reply — unless that reply was an
        # error turn (marked with the "❌" sentinel above).
        if hist and not hist[-1][1].startswith("❌"):
            cleaned = apply_search_replace_changes(hist[-1][1], cleaned)
        code = cleaned
        preview = send_to_sandbox(cleaned) if language == "html" else ""

    hist.append((prompt, code))
    chat_view = history_to_chatbot_messages(hist)
    return code, hist, preview, chat_view


# ── UI ──────────────────────────────────────────────────────────
# Gradio theme shared by the whole Blocks app.
THEME = gr.themes.Soft(primary_hue="indigo")
# Minimal CSS overrides: system font stack plus centred title/subtitle
# (the elem_ids below are set on the Markdown headers in the layout).
custom_css = """
body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; }
#main_title { text-align:center;font-size:2.4rem;margin-top:1rem }
#subtitle   { text-align:center;color:#5a6475;margin-bottom:2rem }
"""

with gr.Blocks(title="AnyCoder AI", theme=THEME, css=custom_css) as demo:
    # Conversation history (History = list of (user, assistant) tuples),
    # persisted per browser session via gr.State.
    state_hist = gr.State([])

    gr.Markdown("## 🚀 AnyCoder AI", elem_id="main_title")
    gr.Markdown("Your AI partner for generating, modifying & understanding code.", elem_id="subtitle")

    with gr.Row():
        # ────────── inputs (sidebar) ──────────
        with gr.Column(scale=1):
            gr.Markdown("#### 1 · Select Model")
            model_dd = gr.Dropdown(
                choices=[m.name for m in AVAILABLE_MODELS],
                value=AVAILABLE_MODELS[0].name,
                label="AI Model",
            )

            gr.Markdown("#### 2 · Provide Context")
            with gr.Tabs():
                with gr.Tab("Prompt"):
                    prompt_box = gr.Textbox(lines=6, placeholder="Describe what you want…")
                with gr.Tab("File"):
                    file_box = gr.File(type="filepath")
                with gr.Tab("Website"):
                    url_box = gr.Textbox(placeholder="https://example.com")

            gr.Markdown("#### 3 · Configure Output")
            lang_dd = gr.Dropdown(SUPPORTED_LANGUAGES, value="html", label="Target Language")
            search_ck = gr.Checkbox(label="Enable Tavily Web Search")

            with gr.Row():
                clear_btn = gr.Button("Clear Session", variant="secondary")
                gen_btn = gr.Button("Generate Code", variant="primary")

        # ────────── outputs (main panel) ─────
        with gr.Column(scale=2):
            with gr.Tabs():
                with gr.Tab("Code"):
                    code_out = gr.Code(interactive=True)
                with gr.Tab("Live Preview"):
                    preview_out = gr.HTML()
                with gr.Tab("History"):
                    chat_out = gr.Chatbot(type="messages")

    # ── wiring ───────────────────────────────────────────
    gen_btn.click(
        generate_code,
        inputs=[prompt_box, file_box, url_box, model_dd, search_ck, lang_dd, state_hist],
        outputs=[code_out, state_hist, preview_out, chat_out],
    )

    # Reset every input/output to its initial value. Note the per-component
    # types: code_out (gr.Code) takes a string, chat_out (gr.Chatbot with
    # type="messages") takes a list — the original had these two swapped.
    clear_btn.click(
        lambda: ("", None, "", "html", False, [], "", "", []),
        outputs=[prompt_box, file_box, url_box, lang_dd, search_ck,
                 state_hist, code_out, preview_out, chat_out],
        queue=False,
    )

if __name__ == "__main__":
    demo.queue().launch()