Spaces:
Runtime error
Runtime error
Update models.py
Browse files
models.py
CHANGED
@@ -1,9 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import os
|
|
|
2 |
from transformers import LlamaTokenizer, LlamaForCausalLM
|
3 |
import torch
|
4 |
|
|
|
5 |
# Cache downloaded Hugging Face model files under /tmp/model_cache
# (a writable location) instead of the library default.
os.environ['TRANSFORMERS_CACHE'] = '/tmp/model_cache'
|
6 |
|
|
|
|
|
|
|
|
|
|
|
7 |
def carregar_modelo():
    """Load the Emollama-7b tokenizer and model.

    Returns:
        tuple: ``(tokenizer, model)`` ready for inference; the model's
        weights are distributed across available devices via
        ``device_map="auto"``.
    """
    tokenizer = LlamaTokenizer.from_pretrained("lzw1008/Emollama-7b")
    model = LlamaForCausalLM.from_pretrained("lzw1008/Emollama-7b", device_map="auto")
    # Bug fix: the original body discarded both objects (implicit return
    # of None), making the function useless to callers.
    return tokenizer, model
|
|
|
1 |
+
python
|
2 |
+
|
3 |
+
Executar
|
4 |
+
|
5 |
+
Copiar
|
6 |
import os
|
7 |
+
import shutil
|
8 |
from transformers import LlamaTokenizer, LlamaForCausalLM
|
9 |
import torch
|
10 |
|
11 |
+
# Directory used to cache downloaded model weights.  Defined once so the
# env-var assignment and the cleanup below cannot drift apart.
cache_dir = '/tmp/model_cache'

# Point Hugging Face's download cache at our writable temp directory.
# NOTE(review): TRANSFORMERS_CACHE is ideally set *before* `import
# transformers` so the library is sure to honor it — confirm the import
# order in the final file.
os.environ['TRANSFORMERS_CACHE'] = cache_dir

# Remove any existing (possibly stale or corrupted) cache so the model
# is re-downloaded fresh on startup.
if os.path.exists(cache_dir):
    shutil.rmtree(cache_dir)
18 |
+
|
19 |
def carregar_modelo():
    """Load the Emollama-7b tokenizer and model.

    Returns:
        tuple: ``(tokenizer, model)`` ready for inference; the model's
        weights are distributed across available devices via
        ``device_map="auto"``.
    """
    tokenizer = LlamaTokenizer.from_pretrained("lzw1008/Emollama-7b")
    model = LlamaForCausalLM.from_pretrained("lzw1008/Emollama-7b", device_map="auto")
    # Bug fix: the original body discarded both objects (implicit return
    # of None), making the function useless to callers.
    return tokenizer, model
|