import faiss
import numpy as np
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Set paths
FAISS_PATH = "asa_faiss.index"
DATASET_PATH = "responses.txt"  # One response per line, in the same order the index was built

# Load FAISS index
print(f"Loading FAISS index from {FAISS_PATH}...")
faiss_index = faiss.read_index(FAISS_PATH)
print("✅ FAISS index loaded successfully!")

# Load dataset responses
with open(DATASET_PATH, "r", encoding="utf-8") as f:
    dataset = f.readlines()
print("✅ Responses dataset loaded!")

# Load model & tokenizer (Ensure model path is correct)
MODEL_NAME = "Futuresony/my_model"  # Change this if using a local model
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# FAISS acceptance threshold. For an L2 index, search returns distances where
# lower means closer; tune this against the distances printed below. A lower
# value is stricter and falls back to the model more often.
THRESHOLD = 100


@torch.no_grad()
def embed(text):
    """Embed text the same way the FAISS index was built. Mean-pooling the
    model's last hidden state is assumed here; raw token IDs are not valid
    embeddings, since their length varies with the input."""
    inputs = tokenizer(text, return_tensors="pt")
    hidden = model(**inputs, output_hidden_states=True).hidden_states[-1]
    return hidden.mean(dim=1).numpy().astype(np.float32)  # shape (1, hidden_size)
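# For reference, a minimal sketch (an assumption, not the original build script)
# of how asa_faiss.index could be created so build- and query-time vectors match:
#
#   vectors = np.vstack([embed(line) for line in dataset])
#   index = faiss.IndexFlatL2(vectors.shape[1])
#   index.add(vectors)
#   faiss.write_index(index, FAISS_PATH)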


def chatbot_response(user_query):
    """Fetches response from FAISS or falls back to the model."""
    query_vector = embed(user_query)  # Convert input to vector
    D, I = faiss_index.search(query_vector, k=1)  # Search FAISS

    print(f"Closest FAISS match index: {I[0][0]}, Distance: {D[0][0]}")  # Debugging info

    if I[0][0] != -1 and D[0][0] < THRESHOLD:  # FAISS returns index -1 when no neighbour exists
        response = dataset[I[0][0]].strip()  # Fetch matched response
        print("✅ FAISS response used!")
    else:
        # Fallback to model-generated response
        print("⚠️ FAISS match too weak, using model instead.")
        inputs = tokenizer(user_query, return_tensors="pt")
        outputs = model.generate(**inputs, max_new_tokens=150)
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    return response

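# Quick smoke test before launching the UI (hypothetical query):
#   print(chatbot_response("How do I open a savings account?"))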

# Gradio UI
iface = gr.Interface(fn=chatbot_response, inputs="text", outputs="text", title="ASA Microfinance Chatbot")
iface.launch()