ejschwartz committed on
Commit
ddcc7db
·
1 Parent(s): cfda2d8
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -35,7 +35,7 @@ def predict(type, normalized_asm):
35
  input_ids = torch.LongTensor(tokenizer_output['input_ids'].tolist()).unsqueeze(0)
36
  nova_attention_mask = torch.LongTensor(tokenizer_output['nova_attention_mask']).unsqueeze(0)
37
 
38
- outputs = model.generate(
39
  inputs=input_ids.cuda(), max_new_tokens=512, temperature=0.2, top_p=0.95,
40
  num_return_sequences=20, do_sample=True, nova_attention_mask=nova_attention_mask.cuda(),
41
  no_mask_idx=torch.LongTensor([tokenizer_output['no_mask_idx']]).cuda(),
@@ -48,7 +48,7 @@ def predict(type, normalized_asm):
48
 
49
  demo = gr.Interface(
50
  fn=predict,
51
- inputs=[gr.Text(label="Optimization Type", default="O0"), gr.Text(label="Normalized Assembly Code")],
52
  outputs=gr.Text(label="Raw Nova Output"),
53
  description=frontmatter.load("README.md").content,
54
  #examples=examples
 
35
  input_ids = torch.LongTensor(tokenizer_output['input_ids'].tolist()).unsqueeze(0)
36
  nova_attention_mask = torch.LongTensor(tokenizer_output['nova_attention_mask']).unsqueeze(0)
37
 
38
+ output = model.generate(
39
  inputs=input_ids.cuda(), max_new_tokens=512, temperature=0.2, top_p=0.95,
40
  num_return_sequences=20, do_sample=True, nova_attention_mask=nova_attention_mask.cuda(),
41
  no_mask_idx=torch.LongTensor([tokenizer_output['no_mask_idx']]).cuda(),
 
48
 
49
  demo = gr.Interface(
50
  fn=predict,
51
+ inputs=[gr.Text(label="Optimization Type", value="O0"), gr.Text(label="Normalized Assembly Code")],
52
  outputs=gr.Text(label="Raw Nova Output"),
53
  description=frontmatter.load("README.md").content,
54
  #examples=examples