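"""SmartManuals-AI: a small retrieval-augmented QA demo.

Indexes PDF/DOCX manuals into a persistent ChromaDB collection using
all-MiniLM-L6-v2 sentence embeddings, then answers questions over the
retrieved chunks with an IBM Granite model through a Gradio UI.
"""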
import os
import fitz  # PyMuPDF
import gradio as gr
import pytesseract
import chromadb
import torch
import nltk
import docx2txt
from PIL import Image
from io import BytesIO
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
from sentence_transformers import SentenceTransformer
from nltk.tokenize import sent_tokenize

# Ensure punkt is downloaded
try:
    nltk.data.find("tokenizers/punkt")
except LookupError:
    nltk.download("punkt")

# Configuration
HF_TOKEN = os.getenv("HF_TOKEN")
MANUALS_DIR = "Manuals"
CHROMA_PATH = "chroma_store"
COLLECTION_NAME = "manual_chunks"
CHUNK_SIZE = 750
CHUNK_OVERLAP = 100
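# CHUNK_SIZE and CHUNK_OVERLAP are counted in whitespace-delimited words
# (see split_chunks), not model tokens.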
MAX_CONTEXT_CHUNKS = 3
MODEL_ID = "ibm-granite/granite-vision-3.2-2b"
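# NOTE: this checkpoint is a Granite *vision* model; if AutoModelForCausalLM
# cannot load it, a text checkpoint (e.g. ibm-granite/granite-3.2-2b-instruct,
# an assumption, not verified here) would be a drop-in alternative.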

device = "cuda" if torch.cuda.is_available() else "cpu"

# ---------------- Text Helpers ----------------
def clean(text):
    return "\n".join(line.strip() for line in text.splitlines() if line.strip())

def split_sentences(text):
    try:
        return sent_tokenize(text)
    except Exception:
        print("⚠️ Tokenizer fallback: simple split.")
        return text.split(". ")

def split_chunks(sentences, max_tokens=CHUNK_SIZE, overlap=CHUNK_OVERLAP):
    chunks = []
    current_chunk, length = [], 0

    for sent in sentences:
        words = sent.split()
        if length + len(words) > max_tokens and current_chunk:
            chunks.append(" ".join(current_chunk))
            # Carry over trailing sentences totalling roughly `overlap` words so
            # consecutive chunks share context (overlap is in words, not sentences).
            carried, carried_len = [], 0
            for prev in reversed(current_chunk):
                carried.insert(0, prev)
                carried_len += len(prev.split())
                if carried_len >= overlap:
                    break
            current_chunk, length = carried, carried_len
        current_chunk.append(sent)
        length += len(words)

    if current_chunk:
        chunks.append(" ".join(current_chunk))
    return chunks
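# Example: with max_tokens=8 and overlap=3, ["a b c d e", "f g h", "i j"]
# yields ["a b c d e f g h", "f g h i j"]; the 3-word tail "f g h" is carried
# into the second chunk.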

# ---------------- File Readers ----------------
def extract_pdf_text(path):
    chunks = []
    try:
        doc = fitz.open(path)
        for i, page in enumerate(doc):
            text = page.get_text().strip()
            if not text:
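                # No embedded text layer: rasterize the page at 300 DPI and OCR it.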
                img = Image.open(BytesIO(page.get_pixmap(dpi=300).tobytes("png")))
                text = pytesseract.image_to_string(img)
            chunks.append((path, i + 1, clean(text)))
    except Exception as e:
        print("❌ PDF read error:", path, e)
    return chunks

def extract_docx_text(path):
    try:
        return [(path, 1, clean(docx2txt.process(path)))]
    except Exception as e:
        print("❌ DOCX read error:", path, e)
        return []

# ---------------- Embedding ----------------
def embed_all():
    embedder = SentenceTransformer("all-MiniLM-L6-v2")
    embedder.eval()
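    # (encode() already switches the model to eval mode and runs without
    # gradients, so the explicit eval() call above is redundant but harmless.)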
    client = chromadb.PersistentClient(path=CHROMA_PATH)

    # Rebuild the collection from scratch on every startup.
    try:
        client.delete_collection(COLLECTION_NAME)
    except Exception:
        pass
    collection = client.get_or_create_collection(COLLECTION_NAME)

    docs, ids, metas = [], [], []
    total_chunks = 0
    print("πŸ“„ Processing manuals...")

    for fname in os.listdir(MANUALS_DIR):
        fpath = os.path.join(MANUALS_DIR, fname)
        if fname.lower().endswith(".pdf"):
            pages = extract_pdf_text(fpath)
        elif fname.lower().endswith(".docx"):
            pages = extract_docx_text(fpath)
        else:
            continue

        for path, page, text in pages:
            for i, chunk in enumerate(split_chunks(split_sentences(text))):
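                # Deterministic, human-readable IDs: "<file>::<page>::<chunk index>".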
                chunk_id = f"{fname}::{page}::{i}"
                docs.append(chunk)
                ids.append(chunk_id)
                metas.append({"source": fname, "page": page})

                # Flush in small batches to keep encoder memory bounded.
                if len(docs) >= 16:
                    embs = embedder.encode(docs).tolist()
                    collection.add(documents=docs, ids=ids, metadatas=metas, embeddings=embs)
                    total_chunks += len(docs)
                    docs, ids, metas = [], [], []

    if docs:
        embs = embedder.encode(docs).tolist()
        collection.add(documents=docs, ids=ids, metadatas=metas, embeddings=embs)
        total_chunks += len(docs)

    print(f"✅ Embedded {total_chunks} chunks.")
    return collection, embedder

# ---------------- Model Setup ----------------
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=HF_TOKEN)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        token=HF_TOKEN,
        device_map="auto" if torch.cuda.is_available() else None,
        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
    )
    if not torch.cuda.is_available():
        model = model.to(device)
    # With device_map="auto" the model is already dispatched by accelerate,
    # so the pipeline must not be given an explicit device (and calling
    # .to() on a dispatched model raises an error).
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return pipe, tokenizer

def ask_model(question, context, pipe, tokenizer):
    prompt = f"""Use only the following context to answer. If uncertain, say "I don't know."

<context>
{context}
</context>

Q: {question}
A:"""
    output = pipe(prompt, max_new_tokens=512)[0]["generated_text"]
    return output.split("A:")[-1].strip()

# ---------------- Query ----------------
def get_answer(question):
    try:
        # Embed the query with the same model used at indexing time and pass
        # the vector explicitly; query_texts would fall back to Chroma's
        # default embedding function instead.
        query_emb = embedder.encode([question]).tolist()
        results = db.query(query_embeddings=query_emb, n_results=MAX_CONTEXT_CHUNKS)
        context = "\n\n".join(results["documents"][0])
        return ask_model(question, context, model_pipe, model_tokenizer)
    except Exception as e:
        print("❌ Query error:", e)
        return f"Error: {e}"

# ---------------- UI ----------------
with gr.Blocks() as demo:
    gr.Markdown("## πŸ€– SmartManuals-AI (Granite 3.2-2B)")
    with gr.Row():
        question = gr.Textbox(label="Ask your question")
        ask = gr.Button("Ask")
    answer = gr.Textbox(label="Answer", lines=8)
    ask.click(fn=get_answer, inputs=question, outputs=answer)

# Embed + Load Model at Startup
try:
    db, embedder = embed_all()
    model_pipe, model_tokenizer = load_model()
except Exception as e:
    print("❌ Startup failure:", e)
    db, embedder = None, None
    model_pipe, model_tokenizer = None, None
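# If startup failed, the globals above remain None and get_answer will
# report the resulting exception as an error string in the UI.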

if __name__ == "__main__":
    demo.launch()