dasomaru commited on
Commit
f755cf7
·
verified ·
1 Parent(s): 27b3397

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -18,12 +18,12 @@ def generate_response(prompt):
18
  # λͺ¨λΈ 및 ν† ν¬λ‚˜μ΄μ € λ‘œλ”©μ€ ν•¨μˆ˜ λ‚΄λΆ€μ—μ„œ μˆ˜ν–‰
19
  # tokenizer = AutoTokenizer.from_pretrained("dasomaru/gemma-3-4bit-it-demo")
20
  # model = AutoModelForCausalLM.from_pretrained("dasomaru/gemma-3-4bit-it-demo")
21
- # tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
22
- # model = AutoModelForCausalLM.from_pretrained(
23
- # model_name,
24
- # torch_dtype=torch.float16,
25
- # trust_remote_code=True,
26
- # )
27
  model.to("cuda")
28
 
29
  # inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
 
18
  # λͺ¨λΈ 및 ν† ν¬λ‚˜μ΄μ € λ‘œλ”©μ€ ν•¨μˆ˜ λ‚΄λΆ€μ—μ„œ μˆ˜ν–‰
19
  # tokenizer = AutoTokenizer.from_pretrained("dasomaru/gemma-3-4bit-it-demo")
20
  # model = AutoModelForCausalLM.from_pretrained("dasomaru/gemma-3-4bit-it-demo")
21
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
22
+ model = AutoModelForCausalLM.from_pretrained(
23
+ model_name,
24
+ torch_dtype=torch.float16,
25
+ trust_remote_code=True,
26
+ )
27
  model.to("cuda")
28
 
29
  # inputs = tokenizer(prompt, return_tensors="pt").to(model.device)