# βœ… SmartManuals-AI App for Hugging Face Spaces
# Full app.py with spaCy-based sentence segmentation and model dropdown selection

import os
import io
import fitz  # PyMuPDF
import chromadb
import torch
import docx  # python-docx
import gradio as gr
import pytesseract
import spacy
from PIL import Image
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
from sentence_transformers import SentenceTransformer
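
# Dependency note (assumed setup, not part of the original file): on a Space this
# roughly maps to a requirements.txt with pymupdf, chromadb, torch, python-docx,
# gradio, pytesseract, pillow, spacy, transformers, sentence-transformers and
# accelerate, plus the tesseract-ocr system package (e.g. via packages.txt) for
# the OCR fallback used below.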

# ---------------------------
# βš™οΈ Configuration
# ---------------------------
MANUALS_DIR = "./Manuals"
CHROMA_PATH = "./chroma_store"
CHROMA_COLLECTION = "manual_chunks"
CHUNK_SIZE = 750      # approximate words per chunk
CHUNK_OVERLAP = 100   # approximate words carried over between adjacent chunks
EMBED_MODEL = "all-MiniLM-L6-v2"
DEFAULT_MODEL = "meta-llama/Meta-Llama-3-8B-Instruct"
AVAILABLE_MODELS = [
    "meta-llama/Meta-Llama-3-8B-Instruct",
    "meta-llama/Llama-4-Scout-17B-16E-Instruct",
    "google/gemma-1.1-7b-it",
    "mistralai/Mistral-7B-Instruct-v0.3",
    "Qwen/Qwen1.5-7B-Chat"
]
HF_TOKEN = os.environ.get("HF_TOKEN")
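# NOTE: the Llama and Gemma checkpoints above are gated repos, so HF_TOKEN must
# belong to an account that has accepted their licenses; on a Space it is
# typically provided as a repository secret.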

# ---------------------------
# πŸ“š Load NLP model for sentence splitting
# ---------------------------
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    # Model not installed yet (e.g. first run on a fresh Space) -- download it.
    os.system("python -m spacy download en_core_web_sm")
    nlp = spacy.load("en_core_web_sm")

def split_sentences(text):
    return [sent.text.strip() for sent in nlp(text).sents if sent.text.strip()]

# ---------------------------
# 🧹 Text cleanup
# ---------------------------
def clean(text):
    return "\n".join([line.strip() for line in text.splitlines() if line.strip()])

# ---------------------------
# πŸ“„ PDF and DOCX extractors
# ---------------------------
def extract_pdf_text(path):
    doc = fitz.open(path)
    pages = []
    for i, page in enumerate(doc):
        text = page.get_text()
        if not text.strip():
            # No embedded text layer (scanned page) -- render at 300 dpi and OCR it.
            pix = page.get_pixmap(dpi=300)
            img = Image.open(io.BytesIO(pix.tobytes("png")))
            text = pytesseract.image_to_string(img)
        pages.append((i + 1, text))
    doc.close()
    return pages

def extract_docx_text(path):
    doc = docx.Document(path)
    full_text = "\n".join([para.text for para in doc.paragraphs if para.text.strip()])
    return [(1, full_text)]

# ---------------------------
# πŸ“¦ Chunk splitter
# ---------------------------
def chunkify(sentences, max_tokens=CHUNK_SIZE, overlap=CHUNK_OVERLAP):
    """Pack sentences into chunks of roughly `max_tokens` words, carrying up to
    `overlap` words of trailing sentences into the next chunk as context."""
    chunks = []
    current = []
    length = 0
    for s in sentences:
        tokens = len(s.split())
        if current and length + tokens > max_tokens:
            chunks.append(" ".join(current))
            # Keep trailing sentences totalling at most `overlap` words.
            kept, kept_len = [], 0
            for prev in reversed(current):
                w = len(prev.split())
                if kept_len + w > overlap:
                    break
                kept.insert(0, prev)
                kept_len += w
            current, length = kept, kept_len
        current.append(s)
        length += tokens
    if current:
        chunks.append(" ".join(current))
    return chunks
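
# Illustrative behaviour (hypothetical numbers): a page splitting into 60
# sentences of ~20 words each (~1,200 words) yields two chunks; the second
# chunk starts with the trailing sentences of the first, giving up to
# CHUNK_OVERLAP (~100) words of shared context between them.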

# ---------------------------
# πŸ”Ž Metadata from file
# ---------------------------
def extract_meta(name):
    name = name.lower()
    return {
        "model": next((m for m in ["se3", "se4", "symbio", "explore"] if m in name), "unknown"),
        "doc_type": next((d for d in ["owner", "service", "parts"] if d in name), "unknown"),
        "brand": "life fitness"
    }
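
# Assumption: the keyword lists above reflect Life Fitness file-naming
# conventions (e.g. a hypothetical "se3_console_service.pdf"); other brands or
# naming schemes would need their own keywords here.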

# ---------------------------
# πŸ”  Embed and store chunks
# ---------------------------
def embed_all():
    embedder = SentenceTransformer(EMBED_MODEL)
    client = chromadb.PersistentClient(path=CHROMA_PATH)
    try:
        client.delete_collection(CHROMA_COLLECTION)
    except Exception:
        pass  # collection did not exist yet
    db = client.create_collection(CHROMA_COLLECTION)

    os.makedirs(MANUALS_DIR, exist_ok=True)  # avoid crashing on a fresh Space
    for fname in os.listdir(MANUALS_DIR):
        path = os.path.join(MANUALS_DIR, fname)
        if fname.lower().endswith(".pdf"):
            pages = extract_pdf_text(path)
        elif fname.lower().endswith(".docx"):
            pages = extract_docx_text(path)
        else:
            continue
        meta = extract_meta(fname)
        for page, text in pages:
            sents = split_sentences(clean(text))
            chunks = chunkify(sents)
            for i, chunk in enumerate(chunks):
                db.add(
                    ids=[f"{fname}::p{page}::c{i}"],
                    documents=[chunk],
                    embeddings=[embedder.encode(chunk).tolist()],
                    metadatas=[{**meta, "source": fname, "page": page}]
                )
    return db, embedder
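
# Note: the collection is dropped and rebuilt on every startup, so large manual
# sets are re-embedded on each launch; a possible refinement (not implemented
# here) is to reuse the persisted collection when it already contains data.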

# ---------------------------
# πŸ€– Load selected LLM model
# ---------------------------
def load_model(repo):
    tokenizer = AutoTokenizer.from_pretrained(repo, token=HF_TOKEN)
    model = AutoModelForCausalLM.from_pretrained(
        repo,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto" if torch.cuda.is_available() else None,
        token=HF_TOKEN
    )
    # device_map already places the model on GPU; only pass `device` on CPU-only machines.
    if torch.cuda.is_available():
        return pipeline("text-generation", model=model, tokenizer=tokenizer)
    return pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)
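
# Possible optimization (assumption, not in the original): answer_query() below
# reloads the selected model for every question; a small dict cache keyed by
# repo id, checked before calling load_model(), would avoid repeated loads when
# the same model is reused.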

# ---------------------------
# πŸ“₯ Retrieval-Augmented QA
# ---------------------------
def answer_query(q, model_choice):
    # Retrieve the most relevant chunks with the same embedder used at indexing time.
    results = db.query(query_embeddings=[embedder.encode(q).tolist()], n_results=3)
    context = "\n\n".join(results["documents"][0])
    prompt = f"""
You are a helpful assistant. Answer using only the context below. If the answer is not in the context, say "I don't know".

Context:
{context}

Question: {q}
Answer:
"""
    pipe = load_model(model_choice)
    out = pipe(prompt, max_new_tokens=300, do_sample=False)[0]["generated_text"]
    return out.split("Answer:")[-1].strip()

# ---------------------------
# πŸš€ Initialize app
# ---------------------------
print("Embedding documents...")
db, embedder = embed_all()
print("Done embedding.")

# ---------------------------
# πŸŽ›οΈ Gradio UI
# ---------------------------
demo = gr.Blocks()

with demo:
    gr.Markdown("""# 🧠 SmartManuals-AI
Ask any question and let the model answer from your uploaded manuals.
""")
    with gr.Row():
        qbox = gr.Textbox(label="Ask a Question", placeholder="e.g. How to reset the SE3 console?")
        model_select = gr.Dropdown(choices=AVAILABLE_MODELS, label="Choose LLM", value=DEFAULT_MODEL)
    ansbox = gr.Textbox(label="Answer", lines=10)
    btn = gr.Button("πŸ” Submit")
    btn.click(fn=answer_query, inputs=[qbox, model_select], outputs=ansbox)

demo.launch()
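
# Deployment note (assumption): on Hugging Face Spaces, demo.launch() is served
# automatically; for local testing, demo.launch(share=True) exposes a temporary
# public link.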