fix search and replace
app.py CHANGED
@@ -3610,6 +3610,108 @@ def generation_code(query: Optional[str], vlm_image: Optional[gr.Image], gen_ima
          '=== src/App.svelte ===' in last_assistant_msg):
         has_existing_content = True
 
+    # If this is a modification request, try to apply search/replace first
+    if has_existing_content and query.strip():
+        try:
+            # Use the current model to generate search/replace instructions
+            client = get_inference_client(_current_model['id'], provider)
+
+            system_prompt = """You are a code editor assistant. Given existing code and modification instructions, generate EXACT search/replace blocks.
+
+CRITICAL REQUIREMENTS:
+1. Use EXACTLY these markers: <<<<<<< SEARCH, =======, >>>>>>> REPLACE
+2. The SEARCH block must match the existing code EXACTLY (including whitespace, indentation, line breaks)
+3. The REPLACE block should contain the modified version
+4. Only include the specific lines that need to change, with enough context to make them unique
+5. Generate multiple search/replace blocks if needed for different changes
+6. Do NOT include any explanations or comments outside the blocks
+
+Example format:
+<<<<<<< SEARCH
+function oldFunction() {
+    return "old";
+}
+=======
+function newFunction() {
+    return "new";
+}
+>>>>>>> REPLACE"""
+
+            user_prompt = f"""Existing code:
+{last_assistant_msg}
+
+Modification instructions:
+{query}
+
+Generate the exact search/replace blocks needed to make these changes."""
+
+            messages = [
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": user_prompt}
+            ]
+
+            # Generate search/replace instructions
+            if _current_model.get('type') == 'openai':
+                response = client.chat.completions.create(
+                    model=_current_model['id'],
+                    messages=messages,
+                    max_tokens=4000,
+                    temperature=0.1
+                )
+                changes_text = response.choices[0].message.content
+            elif _current_model.get('type') == 'mistral':
+                response = client.chat.complete(
+                    model=_current_model['id'],
+                    messages=messages,
+                    max_tokens=4000,
+                    temperature=0.1
+                )
+                changes_text = response.choices[0].message.content
+            else:  # Hugging Face or other
+                completion = client.chat.completions.create(
+                    model=_current_model['id'],
+                    messages=messages,
+                    max_tokens=4000,
+                    temperature=0.1
+                )
+                changes_text = completion.choices[0].message.content
+
+            # Apply the search/replace changes
+            if language == "transformers.js" and ('=== index.html ===' in last_assistant_msg):
+                modified_content = apply_transformers_js_search_replace_changes(last_assistant_msg, changes_text)
+            else:
+                modified_content = apply_search_replace_changes(last_assistant_msg, changes_text)
+
+            # If changes were successfully applied, return the modified content
+            if modified_content != last_assistant_msg:
+                _history.append([query, modified_content])
+
+                # Generate preview based on language
+                preview_val = None
+                if language == "html":
+                    # Use full content for multipage detection, then extract for single-page rendering
+                    _mpf2 = parse_multipage_html_output(modified_content)
+                    _mpf2 = validate_and_autofix_files(_mpf2)
+                    if _mpf2 and _mpf2.get('index.html'):
+                        preview_val = send_to_sandbox_with_refresh(inline_multipage_into_single_preview(_mpf2))
+                    else:
+                        safe_preview = extract_html_document(modified_content)
+                        preview_val = send_to_sandbox_with_refresh(safe_preview)
+                elif language == "python" and is_streamlit_code(modified_content):
+                    preview_val = send_streamlit_to_stlite(modified_content)
+
+                yield {
+                    code_output: modified_content,
+                    history: _history,
+                    sandbox: preview_val or "<div style='padding:1em;color:#888;text-align:center;'>Preview updated with your changes.</div>",
+                    history_output: history_to_chatbot_messages(_history),
+                }
+                return
+
+        except Exception as e:
+            print(f"Search/replace failed, falling back to normal generation: {e}")
+            # If search/replace fails, continue with normal generation
+
     # Create/lookup a session id for temp-file tracking and cleanup
     if _setting is not None and isinstance(_setting, dict):
         session_id = _setting.get("__session_id__")
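The helpers `apply_search_replace_changes` and `apply_transformers_js_search_replace_changes` called in this hunk are defined elsewhere in app.py and are not part of the diff. As a rough illustration of the block format the system prompt requests, here is a minimal sketch of how such SEARCH/REPLACE blocks could be parsed and applied; the function name and behavior are assumptions, not the Space's actual implementation.

```python
import re

# Hypothetical helper for illustration; not the Space's apply_search_replace_changes.
def apply_search_replace_blocks(original: str, changes_text: str) -> str:
    """Apply model-generated <<<<<<< SEARCH / ======= / >>>>>>> REPLACE blocks."""
    block_re = re.compile(
        r"<<<<<<< SEARCH\n(.*?)\n=======\n(.*?)\n>>>>>>> REPLACE",
        re.DOTALL,
    )
    result = original
    for search_block, replace_block in block_re.findall(changes_text):
        if search_block in result:
            # Replace only the first occurrence so each block stays a targeted edit.
            result = result.replace(search_block, replace_block, 1)
        # If the SEARCH text does not match exactly, leave the content unchanged;
        # the caller then falls back to full regeneration.
    return result
```

If the returned text equals the input, no block matched, which is exactly the `modified_content != last_assistant_msg` check the hunk performs before yielding the edited code.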
@@ -5696,6 +5798,9 @@ with gr.Blocks(
         interactive=True,
         label="Generated code"
     )
+
+
+
     # Transformers.js multi-file editors (hidden by default)
     with gr.Group(visible=False) as tjs_group:
         with gr.Tabs():
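The group shown above holds the Transformers.js multi-file editors, and the related branch in the first hunk detects multi-file output via the `=== index.html ===` marker. A sketch of how output in that `=== filename ===` format might be split per file before edits are applied; this is a guess at the format handling, not the Space's actual parser.

```python
def split_multifile_output(content: str) -> dict:
    """Split '=== filename ===' delimited output (e.g. index.html, index.js,
    style.css) into a {filename: source} mapping. Illustrative only."""
    files = {}
    current_name = None
    current_lines = []
    for line in content.splitlines():
        stripped = line.strip()
        is_marker = stripped.startswith("===") and stripped.endswith("===")
        name = stripped.strip("= ").strip() if is_marker else ""
        if name:
            # Close the previous section before starting a new one.
            if current_name is not None:
                files[current_name] = "\n".join(current_lines).strip()
            current_name, current_lines = name, []
        elif current_name is not None:
            current_lines.append(line)
    if current_name is not None:
        files[current_name] = "\n".join(current_lines).strip()
    return files
```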
@@ -5962,6 +6067,10 @@ with gr.Blocks(
         ],
     )
 
+
+
+
+
     def begin_generation_ui():
         # Collapse the sidebar when generation starts; keep status hidden
         return [gr.update(open=False), gr.update(visible=False)]
|