```python
import torch
from transformers import pipeline


def get_tiny_llama():
    # Load TinyLlama-1.1B-Chat in half precision and let Accelerate place it
    # on the available device(s).
    pipe = pipeline(
        "text-generation",
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        torch_dtype=torch.float16,
        device_map="auto",
    )
    return pipe


def response_tiny_llama(
    pipe=None,
    content="How many helicopters can a human eat in one sitting?",
):
    if pipe is None:
        pipe = get_tiny_llama()
    # We use the tokenizer's chat template to format each message - see
    # https://huggingface.co/docs/transformers/main/en/chat_templating
    messages = [
        {
            "role": "system",
            "content": "You are a friendly chatbot who always responds in the style of a pirate",
        },
        {"role": "user", "content": content},
    ]
    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    outputs = pipe(prompt, max_new_tokens=32, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
    # The pipeline echoes the prompt before the completion; keep only the text
    # after the <|assistant|> marker.
    return outputs[0]["generated_text"].split("<|assistant|>")[1]
```
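For reference, here is a minimal usage sketch. It assumes the two functions above live in an importable module; the module name `app` is an assumption, not something the original specifies. Loading the pipeline once and reusing it avoids re-downloading and re-initializing the model on every call:

```python
# Hypothetical usage sketch: the module name `app` is an assumption.
from app import get_tiny_llama, response_tiny_llama

# Load the ~1.1B-parameter model once and reuse the pipeline for every request.
pipe = get_tiny_llama()

# Ask a question and print the pirate-style reply.
print(response_tiny_llama(pipe=pipe, content="What is the capital of France?"))
```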