import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("hrshtsharma2012/NL2SQL-Picard-final")
model = AutoModelForSeq2SeqLM.from_pretrained("hrshtsharma2012/NL2SQL-Picard-final")

# Initialize the text-to-text generation pipeline
nl2sql_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

def generate_sql(query):
    # Use the model to generate SQL from the natural language query
    results = nl2sql_pipeline(query)
    # Extract the first (highest-likelihood) result
    sql_query = results[0]["generated_text"]
    return sql_query

# Example questions from the Spider dataset
example_questions = [
    "How many heads of the departments are older than 56?",
    "List the name, born state and age of the heads of departments ordered by age.",
    "List the creation year, name and budget of each department.",
    "What are the maximum and minimum budget of the departments?",
    "In which year were most departments established?",
]

# Create a Gradio interface
interface = gr.Interface(
    fn=generate_sql,
    inputs=gr.Textbox(lines=2, placeholder="Enter your natural language query here..."),
    outputs="text",
    examples=example_questions,
    title="NL to SQL with Picard",
    description="This model converts natural language queries into SQL. It's based on the Spider dataset. Try one of the example questions or enter your own!",
)

# Launch the app
if __name__ == "__main__":
    interface.launch()
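
# Quick programmatic sanity check, without launching the UI (a minimal sketch;
# the exact output depends on the checkpoint, and Spider-style text-to-SQL
# models may expect the database schema to be serialized alongside the
# question, so results on a bare question can vary):
#
#     print(generate_sql("How many heads of the departments are older than 56?"))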