# gemma/generator/llm_inference.py
from transformers import pipeline
import spaces
# 1. Load the model (initialized only once, at import time)
generator = pipeline(
    "text-generation",
    model="dasomaru/gemma-3-4bit-it-demo",  # the model you uploaded
    tokenizer="dasomaru/gemma-3-4bit-it-demo",
    device=0,  # use CUDA:0 (GPU); set device=-1 for CPU only
    max_new_tokens=512,
    temperature=0.7,
    top_p=0.9,
    repetition_penalty=1.1,
)
# 2. Answer-generation function
@spaces.GPU(duration=300)
def generate_answer(prompt: str) -> str:
    """
    Generate an answer from the given prompt.
    """
    print(f"🔵 Prompt length: {len(prompt)} characters")  # debug log
    outputs = generator(
        prompt,
        do_sample=True,
        top_k=50,
        num_return_sequences=1,
        return_full_text=False,  # return only the completion, not the echoed prompt
    )
    return outputs[0]["generated_text"].strip()
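

# Minimal usage sketch (an assumption, not part of the original file): a local
# smoke test of generate_answer. The prompt below is illustrative only. Outside
# a ZeroGPU Space, the spaces.GPU decorator should act as a pass-through.
if __name__ == "__main__":
    sample_prompt = "Explain what a tokenizer does in one sentence."
    print(generate_answer(sample_prompt))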