add quantize my repo url
app.py CHANGED
@@ -422,7 +422,7 @@ repo_owner = gr.Dropdown(
 org_token = gr.Textbox(label="Org Access Token", type="password", visible=False)
 
 private_repo = gr.Checkbox(
-    value=False, label="Private Repo", info="Create a private repo
+    value=False, label="Private Repo", info="Create a private repo"
 )
 
 def create_interface():
@@ -432,6 +432,8 @@ def create_interface():
     gr.Markdown("Logged in, you must be. Classy, secure, and victorious, it keeps us.")
     gr.LoginButton(min_width=250)
 
+    gr.Markdown("## If you wish to use llama.cpp to quantize the generated model, we warmly welcome and encourage you to try our other Space: **[Quantize My Repo](https://huggingface.co/spaces/Antigma/quantize-my-repo)**")
+
     gr.Markdown("# π Abliteration - Model Refusal Removal Tool")
     gr.Markdown("Remove refusal behavior from language models to make them more willing to answer various questions")
 
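For orientation, here is a minimal sketch of how the changed lines might sit together in a Gradio Blocks app. Only the component calls shown in the diff come from the commit; the `with gr.Blocks()` wrapper, the placement inside `create_interface()`, and the `launch()` call are assumptions made so the example is self-contained and runnable.

```python
# Minimal sketch, NOT the Space's actual app.py: it only shows how the lines
# touched by this commit could fit together inside a Gradio Blocks layout.
# The Blocks wrapper, function boundaries, and launch() call are assumptions.
import gradio as gr


def create_interface():
    with gr.Blocks() as demo:
        gr.Markdown("Logged in, you must be. Classy, secure, and victorious, it keeps us.")
        gr.LoginButton(min_width=250)

        # New in this commit: pointer to the companion quantization Space.
        gr.Markdown(
            "## If you wish to use llama.cpp to quantize the generated model, "
            "we warmly welcome and encourage you to try our other Space: "
            "**[Quantize My Repo](https://huggingface.co/spaces/Antigma/quantize-my-repo)**"
        )

        gr.Markdown("# π Abliteration - Model Refusal Removal Tool")
        gr.Markdown(
            "Remove refusal behavior from language models to make them "
            "more willing to answer various questions"
        )

        # Upload options from the first hunk; in the real app these would be
        # wired to event handlers that push the abliterated model to the Hub.
        org_token = gr.Textbox(label="Org Access Token", type="password", visible=False)
        private_repo = gr.Checkbox(
            value=False, label="Private Repo", info="Create a private repo"
        )

    return demo


if __name__ == "__main__":
    create_interface().launch()
```

The second `+` line in the commit is just a blank spacer, which keeps the new banner visually separated from the tool title rendered directly below it.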