import spaces

import gradio as gr
from gradio import update
from functools import lru_cache
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
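
# On ZeroGPU Spaces, a GPU is attached only while a @spaces.GPU-decorated function
# runs, so the CUDA work below (loading the model and moving it to the device)
# happens lazily inside suggest_next() rather than at import time.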

MODEL_LIST = [
    "unsloth/gemma-3-1b-pt",
    "ckiplab/gpt2-tiny-chinese",
    "ckiplab/gpt2-base-chinese",
    "liswei/Taiwan-ELM-270M",
    "liswei/Taiwan-ELM-1_1B",
    "benchang1110/Qwen2.5-Taiwan-1.5B-Instruct",
    "benchang1110/Taiwan-tinyllama-v1.0-base",
]
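
# Judging by their names, these checkpoints range from tens of millions to roughly
# 1.5B parameters, which keeps download and load times short on a shared Space.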


@lru_cache(maxsize=None)
def get_pipeline(model_name):
    """Load a tokenizer/model pair and wrap it in a cached text-generation pipeline."""
    tok = AutoTokenizer.from_pretrained(model_name)
    # weights_only=False relaxes torch.load's safe-loading default so older
    # pickle-based .bin checkpoints can still be deserialized.
    mdl = AutoModelForCausalLM.from_pretrained(model_name, weights_only=False)
    mdl.to("cuda")
    return pipeline("text-generation", model=mdl, tokenizer=tok, device=0)
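
# lru_cache(maxsize=None) keeps every pipeline that has ever been loaded resident
# in memory; switching back to a previously used model is then instant, at the cost
# of RAM/VRAM. A bounded cache (e.g. maxsize=1) would be the leaner alternative.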


@spaces.GPU
def suggest_next(text, model_name, k, m):
    """Use beam search to generate the M most likely continuations and refresh the
    suggestion list in one update."""
    gen_pipe = get_pipeline(model_name)
    # num_return_sequences may not exceed num_beams, so M drives both.
    outs = gen_pipe(
        text,
        max_new_tokens=k,
        num_beams=m,
        num_return_sequences=m,
        do_sample=False,
        early_stopping=True,
    )
    # The pipeline echoes the prompt, so strip it and keep only the continuation.
    suggestions = [out["generated_text"][len(text):] for out in outs]
    return update(choices=suggestions, value=None)
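
# value=None clears any previous selection when the choices are refreshed; the
# `if choice is None` guard in append_suggestion() ignores that reset event.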


def append_suggestion(current, choice):
    """Append the selected suggestion to the current text; ignore cleared selections."""
    if choice is None:
        return current
    return current + choice


with gr.Blocks() as demo:
    # Header: "Taiwan Chinese input-method accelerator: small language models plus
    # ZeroGPU for a realtime, IME-style suggestion bar."
    gr.Markdown(
        "## 🇹🇼 台灣中文輸入法加速器 \n"
        "結合小型語言模型與 ZeroGPU,即時 IME 風格建議條。"
    )

    # Suggestion bar ("建議清單"); starts empty and is repopulated by suggest_next().
    suggestions = gr.Radio(
        [], label="建議清單", interactive=True, type="value", elem_id="suggestions-bar"
    )

    with gr.Row():
        with gr.Column(scale=5):
            input_text = gr.TextArea(
                label="輸入文字", lines=6,
                placeholder="請在此輸入起始片段…"
            )
        with gr.Column(scale=1, min_width=80):
            gpu_button = gr.Button("使用 GPU 生成建議")

    with gr.Row():
        model_selector = gr.Dropdown(
            MODEL_LIST, value=MODEL_LIST[0], label="選擇模型"
        )
        k_slider = gr.Slider(
            minimum=1, maximum=50, step=1, value=5, label="K(最大新生成詞元)"
        )
        m_slider = gr.Slider(
            minimum=1, maximum=30, step=1, value=10, label="M(建議數量 / Beam 數)"
        )

    # Button click generates suggestions; selecting one appends it to the text box.
    gpu_button.click(
        fn=suggest_next,
        inputs=[input_text, model_selector, k_slider, m_slider],
        outputs=suggestions,
    )
    suggestions.change(
        fn=append_suggestion,
        inputs=[input_text, suggestions],
        outputs=input_text,
    )


demo.launch()