fkalpana committed on
Commit 3c94101 · verified · 1 Parent(s): de64e79

Update app.py

Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -6,7 +6,8 @@ from datasets import load_dataset
 tokenizer = T5Tokenizer.from_pretrained('t5-small', legacy=False)
 model = T5ForConditionalGeneration.from_pretrained('t5-small')
 
-dataset = load_dataset("b-mc2/sql-create-context")
+# dataset = load_dataset("b-mc2/sql-create-context")
+dataset = load_dataset("rotten_tomatoes", split="train")
 
 # examples = []
 
@@ -17,8 +18,8 @@ dataset = load_dataset("b-mc2/sql-create-context")
 
 def generate_sql(question):
     # Format the question for the model if needed. For example:
-    input_text = f"translate English to SQL: {question}"
-    # input_text = f"{question}"  # Directly use the question if the model is fine-tuned for SQL generation
+    # input_text = f"translate English to SQL: {question}"
+    input_text = f"{question}"  # Directly use the question if the model is fine-tuned for SQL generation
 
     # Tokenize the input text
     input_ids = tokenizer.encode(input_text, return_tensors="pt")
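
The hunk ends at the tokenizer.encode call, so the rest of generate_sql is not visible in this diff. A minimal sketch of how the function could continue, assuming the usual T5 generate-and-decode flow (the max_length value, the output variable names, and the return statement are illustrative and not taken from app.py):

    from transformers import T5Tokenizer, T5ForConditionalGeneration

    tokenizer = T5Tokenizer.from_pretrained('t5-small', legacy=False)
    model = T5ForConditionalGeneration.from_pretrained('t5-small')

    def generate_sql(question):
        # Matches the updated diff: pass the question through unchanged.
        input_text = f"{question}"
        input_ids = tokenizer.encode(input_text, return_tensors="pt")
        # Assumed continuation: generate token ids and decode them back to text.
        output_ids = model.generate(input_ids, max_length=128)
        return tokenizer.decode(output_ids[0], skip_special_tokens=True)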