import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import logging

# Set up logging
logging.basicConfig(level=logging.INFO)

# Load tokenizer and model
model_name = "mrm8488/t5-base-finetuned-wikiSQL"  # Alternative model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

def preprocess_query(query):
    # Minimal preprocessing: lowercase the question before tokenization
    return query.lower()
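
# Sketch (assumption, not from the original file): T5 checkpoints fine-tuned on WikiSQL are
# typically prompted with a task prefix such as "translate English to SQL: <question>".
# If raw questions yield poor SQL, a variant along these lines may work better.
def preprocess_query_with_prefix(query):
    # Prepend the assumed task prefix; the exact prefix wording is an assumption, not from this app
    return "translate English to SQL: " + query.lower()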

def generate_sql(query):
    try:
        processed_query = preprocess_query(query)
        # Tokenize the question, generate with the seq2seq model, and decode to a SQL string
        inputs = tokenizer(processed_query, return_tensors="pt", padding=True)
        outputs = model.generate(**inputs, max_length=512)
        sql_query = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return sql_query
    except Exception as e:
        logging.error(f"Error generating SQL query: {e}")
        return "Error generating SQL query"

# Create a Gradio interface
interface = gr.Interface(
    fn=generate_sql,
    inputs=gr.Textbox(lines=2, placeholder="Enter your natural language query here..."),
    outputs="text",
    title="NL to SQL with T5",
    description="This model converts natural language queries into SQL. Enter your query!"
)

# Launch the app
if __name__ == "__main__":
    interface.launch()
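
# Example usage from a Python shell (hypothetical question; the exact SQL returned
# depends on the model):
#
#     >>> generate_sql("How many singers are from France?")
#     'SELECT ...'  # a single WikiSQL-style SELECT statement (illustrative, not a real output)
#
# Running this script directly launches a local Gradio server (http://127.0.0.1:7860 by default).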