Jeong-hun Kim committed on
Commit
52718fa
·
1 Parent(s): c9a15a7

hf gpu error fix

Browse files
Files changed (1) hide show
  1. app.py +0 -3
app.py CHANGED
@@ -16,12 +16,9 @@ access_token = os.environ.get("HF_TOKEN")
16
  tokenizer = AutoTokenizer.from_pretrained(model_id, token=access_token)
17
  model = AutoModelForCausalLM.from_pretrained(
18
  model_id,
19
- torch_dtype=torch.float16,
20
  token=access_token
21
  )
22
  model.eval()
23
- if torch.cuda.is_available():
24
- model.to("cuda")
25
  llm = pipeline(
26
  "text-generation",
27
  model=model,
 
16
  tokenizer = AutoTokenizer.from_pretrained(model_id, token=access_token)
17
  model = AutoModelForCausalLM.from_pretrained(
18
  model_id,
 
19
  token=access_token
20
  )
21
  model.eval()
 
 
22
  llm = pipeline(
23
  "text-generation",
24
  model=model,