import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer once; st.cache_resource keeps them in memory
# across Streamlit reruns instead of reloading them on every interaction.
@st.cache_resource
def load_model(model_name: str):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    # Causal-LM tokenizers often ship without a pad token; fall back to EOS
    # so that padding=True in generate_response does not raise an error.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    return tokenizer, model

model_name = "InvestmentResearchAI/LLM-ADE_tiny-v0.001"
tokenizer, model = load_model(model_name)

def generate_response(input_text):
    """Generate a response from the model based on the input text."""
    inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True, max_length=512)
    # max_new_tokens bounds only the generated continuation; max_length would
    # count the prompt tokens too and could leave no room for a reply.
    output = model.generate(**inputs, max_new_tokens=512, num_return_sequences=1)
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response

# Streamlit interface
st.title("IRAI LLM-ADE Model")
user_input = st.text_area("Enter your text here:", "")
if st.button("Generate"):
    if user_input:
        response = generate_response(user_input)
        st.text_area("Model Response:", response, height=300)
    else:
        st.warning("Please enter some text to generate a response.")
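
# To try the app locally, save this script and launch it with Streamlit.
# A minimal sketch of the commands, assuming the file is saved as app.py
# (the file name is illustrative, not part of the original):
#
#   pip install streamlit transformers torch
#   streamlit run app.py
#
# Streamlit serves the interface at http://localhost:8501 by default; the
# first run also downloads the model weights from the Hugging Face Hub.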