Spaces:
Running
on
Zero
Running
on
Zero
malvin noel
committed on
Commit
·
11210e5
1
Parent(s):
1e3c8be
Change model for qwen3
Browse files
scripts/generate_scripts.py
CHANGED
@@ -13,7 +13,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
13 |
|
14 |
@spaces.GPU()
|
15 |
def generate_local(prompt: str, max_new_tokens: int = 350, temperature: float = 0.7) -> str:
|
16 |
-
model_id = "Qwen/
|
17 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # get the device the model is on
|
18 |
|
19 |
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
|
|
|
13 |
|
14 |
@spaces.GPU()
|
15 |
def generate_local(prompt: str, max_new_tokens: int = 350, temperature: float = 0.7) -> str:
|
16 |
+
model_id = "Qwen/Qwen3-30B-A3B"
|
17 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # get the device the model is on
|
18 |
|
19 |
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
|