import gradio as gr
from auto_gptq import AutoGPTQForCausalLM
from transformers import AutoTokenizer, pipeline

from utils import build_faiss_index, retrieve

# Load the knowledge base: one document per non-empty line
with open("documents/1mg_rag.txt") as f:
    docs = [line.strip() for line in f if line.strip()]

# Build a FAISS index over the document embeddings
index, _ = build_faiss_index(docs)

# Load the GPTQ-quantized Mistral 7B Instruct model
model_id = "TheBloke/Mistral-7B-Instruct-v0.2-GPTQ"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoGPTQForCausalLM.from_quantized(model_id, device_map="auto", trust_remote_code=True)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

def answer_question(query):
    """Retrieve relevant documents, then answer the query with the LLM."""
    context = "\n".join(retrieve(query, index, docs))
    # Mistral Instruct expects the [INST] ... [/INST] chat format
    prompt = (
        "[INST] Use the following context to answer the question.\n\n"
        f"Context:\n{context}\n\nQuestion: {query} [/INST]"
    )
    # return_full_text=False strips the prompt so only the generated answer
    # is returned, instead of the prompt echoed back with the answer appended
    result = generator(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        return_full_text=False,
    )
    return result[0]["generated_text"].strip()

gr.Interface(fn=answer_question, inputs="text", outputs="text", title="Mistral RAG").launch()
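
# ---------------------------------------------------------------------------
# The script above imports build_faiss_index and retrieve from a utils module
# that is not shown here. Below is a minimal sketch of what utils.py might
# contain, assuming sentence-transformers embeddings and an exact (IndexFlatL2)
# FAISS index. The embedding model name ("all-MiniLM-L6-v2") and the top-k
# default (k=3) are illustrative assumptions, not taken from the original.
# ---------------------------------------------------------------------------

import faiss
import numpy as np
from sentence_transformers import SentenceTransformer

# Small, fast embedding model; any sentence-transformers checkpoint works here
_embedder = SentenceTransformer("all-MiniLM-L6-v2")

def build_faiss_index(docs):
    """Embed each document and add the vectors to a flat L2 FAISS index.

    Returns the index and the (n_docs, dim) embedding matrix, matching the
    `index, _ = build_faiss_index(docs)` call in the script above.
    """
    embeddings = _embedder.encode(docs, convert_to_numpy=True).astype("float32")
    index = faiss.IndexFlatL2(embeddings.shape[1])  # exact L2 search
    index.add(embeddings)
    return index, embeddings

def retrieve(query, index, docs, k=3):
    """Return the k documents whose embeddings are nearest to the query."""
    query_vec = _embedder.encode([query], convert_to_numpy=True).astype("float32")
    _, indices = index.search(query_vec, k)  # (distances, indices), each (1, k)
    return [docs[i] for i in indices[0]]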