malvin noel committed on
Commit
11210e5
·
1 Parent(s): 1e3c8be

Change model to Qwen3

Browse files
Files changed (1) hide show
  1. scripts/generate_scripts.py +1 -1
scripts/generate_scripts.py CHANGED
@@ -13,7 +13,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
13
 
14
  @spaces.GPU()
15
  def generate_local(prompt: str, max_new_tokens: int = 350, temperature: float = 0.7) -> str:
16
- model_id = "Qwen/Qwen2.5-3B"
17
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # get the device the model is on
18
 
19
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
 
13
 
14
  @spaces.GPU()
15
  def generate_local(prompt: str, max_new_tokens: int = 350, temperature: float = 0.7) -> str:
16
+ model_id = "Qwen/Qwen3-30B-A3B"
17
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # get the device the model is on
18
 
19
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)