mcamargo00 committed
Commit 2aa4dcf · verified · 1 Parent(s): c41fd65

Upload app.py

Files changed (1): app.py (+10, -0)
app.py CHANGED

@@ -32,6 +32,11 @@ def load_model():
         # Load tokenizer from the same directory
         tokenizer = AutoTokenizer.from_pretrained("./lora_adapter")
 
+        # Fix padding token issue
+        if tokenizer.pad_token is None:
+            tokenizer.pad_token = tokenizer.eos_token
+            logger.info("Set pad_token to eos_token")
+
         logger.info("LoRA model loaded successfully")
         return "LoRA model loaded successfully!"
 
@@ -42,6 +47,11 @@ def load_model():
 
         model_name = "microsoft/DialoGPT-medium"  # Closer to Phi-4 architecture
         tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+        # Fix padding token for fallback model too
+        if tokenizer.pad_token is None:
+            tokenizer.pad_token = tokenizer.eos_token
+
         model = AutoModelForSequenceClassification.from_pretrained(
             model_name,
             num_labels=3,
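
For context, a minimal sketch of why the added lines matter: GPT-2-derived tokenizers such as DialoGPT's define no pad_token by default, so any batched encoding with padding fails until one is assigned. The snippet below is illustrative only; the model name matches the fallback used in app.py, but the padded tokenizer call is an assumed usage, not code from this commit.

```python
from transformers import AutoTokenizer

# DialoGPT is GPT-2 based and ships without a pad token.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")

if tokenizer.pad_token is None:
    # Reuse the end-of-sequence token for padding, as the commit does.
    tokenizer.pad_token = tokenizer.eos_token

# Padded batch encoding now works instead of raising
# "Asking to pad but the tokenizer does not have a padding token."
batch = tokenizer(
    ["short example", "a somewhat longer example sentence"],
    padding=True,
    return_tensors="pt",
)
print(batch["input_ids"].shape)
```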