suryadev1 committed on
Commit ed9c71c · verified · 1 Parent(s): 3d3c5a2

confirmed for 8vcpu

Files changed (1)
  1. new_test_saved_finetuned_model.py +8 -10
new_test_saved_finetuned_model.py CHANGED
@@ -46,16 +46,16 @@ class BERTFineTuneTrainer:
         """
 
         # Setup cuda device for BERT training, argument -c, --cuda should be true
-        cuda_condition = torch.cuda.is_available() and with_cuda
-        self.device = torch.device("cuda:0" if cuda_condition else "cpu")
-        #self.device = torch.device("cpu") #torch.device("cuda:0" if cuda_condition else "cpu")
+        # cuda_condition = torch.cuda.is_available() and with_cuda
+        # self.device = torch.device("cuda:0" if cuda_condition else "cpu")
+        self.device = torch.device("cpu") #torch.device("cuda:0" if cuda_condition else "cpu")
         # print(cuda_condition, " Device used = ", self.device)
         print(" Device used = ", self.device)
 
         # available_gpus = list(range(torch.cuda.device_count()))
 
         # This BERT model will be saved every epoch
-        self.model = bertFinetunedClassifierwithFeats.to(self.device)
+        self.model = bertFinetunedClassifierwithFeats.to("cpu")
         print(self.model.parameters())
         for param in self.model.parameters():
             param.requires_grad = False
@@ -159,11 +159,10 @@ class BERTFineTuneTrainer:
                 logits = self.model.forward(data["input"], data["segment_label"], data["feat"])
             else:
                 with torch.no_grad():
-                    logits = self.model.forward(data["input"], data["segment_label"], data["feat"])
+                    logits = self.model.forward(data["input"].cpu(), data["segment_label"].cpu(), data["feat"].cpu())
 
             logits = logits.cpu()
-            labels = data["label"].to(logits.device)
-            loss = self.criterion(logits, labels)
+            loss = self.criterion(logits, data["label"])
             # if torch.cuda.device_count() > 1:
             # loss = loss.mean()
 
@@ -183,8 +182,7 @@ class BERTFineTuneTrainer:
             tlabels.extend(data['label'].cpu().numpy())
             positive_class_probs = [prob[1] for prob in probabs]
             # Compare predicted labels to true labels and calculate accuracy
-            correct = (data['label'].to(predicted_labels.device) == predicted_labels).sum().item()
-
+            correct = (data['label'] == predicted_labels).sum().item()
 
             avg_loss += loss.item()
             total_correct += correct
@@ -560,7 +558,7 @@ def train():
     parser.add_argument("-w", "--num_workers", type=int, default=7, help="dataloader worker size")
 
     # Later run with cuda
-    parser.add_argument("--with_cuda", type=bool, default=True, help="training with CUDA: true, or false")
+    parser.add_argument("--with_cuda", type=bool, default=False, help="training with CUDA: true, or false")
     parser.add_argument("--log_freq", type=int, default=10, help="printing loss every n iter: setting n")
     # parser.add_argument("--corpus_lines", type=int, default=None, help="total number of lines in corpus")
     parser.add_argument("--cuda_devices", type=int, nargs='+', default=None, help="CUDA device ids")