import os

import streamlit as st
import torch
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer

# Authenticate with the Hugging Face Hub (HF_LOGIN holds an access token).
login(os.getenv('HF_LOGIN'))

# Number of new tokens generated per step before streaming the chunk to the page.
token_step_size = 20
model_id = "utter-project/EuroLLM-1.7B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
model.generation_config.pad_token_id = tokenizer.pad_token_id

inner = st.text_area('enter some input!')
# Wrap the user input in the model's ChatML-style chat template.
text = '<|im_start|>user\n' + inner + '<|im_end|>\n<|im_start|>assistant\n'

inputs = tokenizer(text, return_tensors="pt")
outputs = inputs['input_ids']
if inner:
    # Generate token_step_size tokens at a time and stream each decoded chunk
    # to the page, stopping once the stop token (hard-coded id 4) appears in
    # the newest chunk.
    while not torch.any(outputs[0][-token_step_size:] == 4):
        outputs = model.generate(
            input_ids=outputs,
            attention_mask=torch.ones_like(outputs),
            max_new_tokens=token_step_size,
        )
        st.write(tokenizer.decode(outputs[0][-token_step_size:], skip_special_tokens=True))