Commit 6f034a7
Parent(s): test
- .env-example +2 -0
- .gitignore +61 -0
- Procfile +1 -0
- __init__.py +0 -0
- app.py +36 -0
- config.py +2 -0
- features/text_classifier/__init__.py +1 -0
- features/text_classifier/controller.py +125 -0
- features/text_classifier/inferencer.py +40 -0
- features/text_classifier/model_loader.py +55 -0
- features/text_classifier/preprocess.py +32 -0
- features/text_classifier/routes.py +49 -0
- readme.md +401 -0
- requirements.txt +12 -0
.env-example
ADDED
@@ -0,0 +1,2 @@
MY_SECRET_TOKEN="SECRET_CODE_TOKEN"
.gitignore
ADDED
@@ -0,0 +1,61 @@
# ---- Python Environment ----
venv/
.venv/
env/
ENV/
*.pyc
*.pyo
*.pyd
__pycache__/
**/__pycache__/

# ---- VS Code / IDEs ----
.vscode/
.idea/
*.swp

# ---- Jupyter / IPython ----
.ipynb_checkpoints/
*.ipynb

# ---- Model & Data Artifacts ----
*.pth
*.pt
*.h5
*.ckpt
*.onnx
*.joblib
*.pkl

# ---- Hugging Face Cache ----
~/.cache/huggingface/
huggingface_cache/

# ---- Logs and Dumps ----
*.log
*.out
*.err

# ---- Build Artifacts ----
build/
dist/
*.egg-info/

# ---- System Files ----
.DS_Store
Thumbs.db

# ---- Environment Configs ----
.env
.env.*

# ---- Project-specific ----
Ai-Text-Detector/
HuggingFace/model/

# ---- Node Projects (if applicable) ----
node_modules/
model/
# Keep this entry if you only want to ignore that single file, not the whole folder
models/.gitattributes

todo.md
Procfile
ADDED
@@ -0,0 +1 @@
web: uvicorn app:app --host 0.0.0.0 --port ${PORT:-8000}
__init__.py
ADDED
File without changes
app.py
ADDED
@@ -0,0 +1,36 @@
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from slowapi import Limiter
from slowapi.middleware import SlowAPIMiddleware
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address

from features.text_classifier.routes import router as text_classifier_router
from config import ACCESS_RATE

limiter = Limiter(key_func=get_remote_address, default_limits=[ACCESS_RATE])

app = FastAPI()

# Set up SlowAPI rate limiting
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, lambda request, exc: JSONResponse(
    status_code=429,
    content={
        "status_code": 429,
        "error": "Rate limit exceeded",
        "message": "Too many requests. Chill for a bit and try again."
    }
))
app.add_middleware(SlowAPIMiddleware)

# Include the text classifier routes
app.include_router(text_classifier_router, prefix="/text")

@app.get("/")
@limiter.limit(ACCESS_RATE)
async def root(request: Request):
    return {
        "message": "API is working",
        "endpoints": ["/text/analyse", "/text/upload", "/text/analyse-sentences", "/text/analyse-sentance-file"]
    }
config.py
ADDED
@@ -0,0 +1,2 @@
ACCESS_RATE = "20/minute"
features/text_classifier/__init__.py
ADDED
@@ -0,0 +1 @@
features/text_classifier/controller.py
ADDED
@@ -0,0 +1,125 @@
import os
import asyncio
import logging
from io import BytesIO

from fastapi import HTTPException, UploadFile, status, Depends
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from nltk.tokenize import sent_tokenize

from .inferencer import classify_text
from .preprocess import parse_docx, parse_pdf, parse_txt

security = HTTPBearer()

# Verify Bearer token from Authorization header
async def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)):
    token = credentials.credentials
    expected_token = os.getenv("MY_SECRET_TOKEN")
    if token != expected_token:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Invalid or expired token"
        )
    return token

# Classify plain text input
async def handle_text_analysis(text: str):
    text = text.strip()
    if not text or len(text.split()) < 10:
        raise HTTPException(status_code=400, detail="Text must contain at least 10 words")
    if len(text) > 10000:
        raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")

    label, perplexity, ai_likelihood = await asyncio.to_thread(classify_text, text)
    return {
        "result": label,
        "perplexity": round(perplexity, 2),
        "ai_likelihood": ai_likelihood
    }

# Extract text from uploaded files (.docx, .pdf, .txt)
async def extract_file_contents(file: UploadFile) -> str:
    content = await file.read()
    file_stream = BytesIO(content)

    if file.content_type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        return parse_docx(file_stream)
    elif file.content_type == "application/pdf":
        return parse_pdf(file_stream)
    elif file.content_type == "text/plain":
        return parse_txt(file_stream)
    else:
        raise HTTPException(
            status_code=415,
            detail="Invalid file type. Only .docx, .pdf, and .txt are allowed."
        )

# Classify text from uploaded file
async def handle_file_upload(file: UploadFile):
    try:
        file_contents = await extract_file_contents(file)
        if len(file_contents) > 10000:
            return {"message": "File contains more than 10,000 characters."}

        cleaned_text = file_contents.replace("\n", " ").replace("\t", " ").strip()
        if not cleaned_text:
            raise HTTPException(status_code=404, detail="The file is empty or only contains whitespace.")

        label, perplexity, ai_likelihood = await asyncio.to_thread(classify_text, cleaned_text)
        return {
            "content": file_contents,
            "result": label,
            "perplexity": round(perplexity, 2),
            "ai_likelihood": ai_likelihood
        }
    except HTTPException:
        # Let deliberate HTTP errors (415, 404, ...) pass through unchanged
        raise
    except Exception as e:
        logging.error(f"Error processing file: {e}")
        raise HTTPException(status_code=500, detail="Error processing the file")

# Analyze each sentence in plain text input
async def handle_sentence_level_analysis(text: str):
    text = text.strip()
    if text and not text.endswith("."):
        text += "."
    if len(text) > 10000:
        raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")

    sentences = sent_tokenize(text, language="english")
    results = []
    for sentence in sentences:
        if not sentence.strip():
            continue
        label, perplexity, ai_likelihood = await asyncio.to_thread(classify_text, sentence)
        results.append({
            "sentence": sentence,
            "label": label,
            "perplexity": round(perplexity, 2),
            "ai_likelihood": ai_likelihood
        })
    return {"analysis": results}

# Analyze each sentence from uploaded file
async def handle_file_sentence(file: UploadFile):
    try:
        file_contents = await extract_file_contents(file)
        if len(file_contents) > 10000:
            return {"message": "File contains more than 10,000 characters."}

        cleaned_text = file_contents.replace("\n", " ").replace("\t", " ").strip()
        if not cleaned_text:
            raise HTTPException(status_code=404, detail="The file is empty or only contains whitespace.")

        result = await handle_sentence_level_analysis(cleaned_text)
        return {
            "content": file_contents,
            **result
        }
    except HTTPException:
        raise
    except Exception as e:
        logging.error(f"Error processing file: {e}")
        raise HTTPException(status_code=500, detail="Error processing the file")

# Optional synchronous helper function
def classify(text: str):
    return classify_text(text)
features/text_classifier/inferencer.py
ADDED
@@ -0,0 +1,40 @@
import torch
from .model_loader import get_model_tokenizer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def perplexity_to_ai_likelihood(ppl: float) -> float:
    # Tunable bounds for the mapping
    min_ppl = 10   # at or below this, very confident it's AI
    max_ppl = 100  # at or above this, very confident it's human

    # Clamp to bounds
    ppl = max(min_ppl, min(ppl, max_ppl))

    # Invert and scale: lower perplexity -> higher AI likelihood
    likelihood = 1 - ((ppl - min_ppl) / (max_ppl - min_ppl))

    return round(likelihood * 100, 2)


def classify_text(text: str):
    model, tokenizer = get_model_tokenizer()
    inputs = tokenizer(text, return_tensors="pt",
                       truncation=True, padding=True)
    input_ids = inputs["input_ids"].to(device)
    attention_mask = inputs["attention_mask"].to(device)

    with torch.no_grad():
        outputs = model(
            input_ids, attention_mask=attention_mask, labels=input_ids)
        loss = outputs.loss
        perplexity = torch.exp(loss).item()

    if perplexity < 55:
        result = "AI-generated"
    elif perplexity < 80:
        result = "Probably AI-generated"
    else:
        result = "Human-written"
    likelihood_result = perplexity_to_ai_likelihood(perplexity)
    return result, perplexity, likelihood_result
features/text_classifier/model_loader.py
ADDED
@@ -0,0 +1,55 @@
import os
import shutil
import logging

import torch
import nltk
from transformers import GPT2LMHeadModel, GPT2TokenizerFast, GPT2Config
from huggingface_hub import snapshot_download
from dotenv import load_dotenv

load_dotenv()

REPO_ID = "Pujan-Dev/AI-Text-Detector"
MODEL_DIR = "./models"
TOKENIZER_DIR = os.path.join(MODEL_DIR, "model")
WEIGHTS_PATH = os.path.join(MODEL_DIR, "model_weights.pth")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_model, _tokenizer = None, None


def warmup():
    global _model, _tokenizer
    # Ensure the NLTK sentence tokenizer data is available
    nltk.download("punkt")
    nltk.download("punkt_tab")

    download_model_repo()
    _model, _tokenizer = load_model()
    logging.info("Model and tokenizer are ready")


def download_model_repo():
    if os.path.exists(MODEL_DIR) and os.path.isdir(MODEL_DIR):
        logging.info("Model already exists, skipping download.")
        return
    snapshot_path = snapshot_download(repo_id=REPO_ID)
    os.makedirs(MODEL_DIR, exist_ok=True)
    shutil.copytree(snapshot_path, MODEL_DIR, dirs_exist_ok=True)


def load_model():
    tokenizer = GPT2TokenizerFast.from_pretrained(TOKENIZER_DIR)
    config = GPT2Config.from_pretrained(TOKENIZER_DIR)
    model = GPT2LMHeadModel(config)
    model.load_state_dict(torch.load(WEIGHTS_PATH, map_location=device))
    model.to(device)
    model.eval()
    return model, tokenizer


def get_model_tokenizer():
    global _model, _tokenizer
    if _model is None or _tokenizer is None:
        download_model_repo()
        _model, _tokenizer = load_model()
    return _model, _tokenizer
features/text_classifier/preprocess.py
ADDED
@@ -0,0 +1,32 @@
import logging
from io import BytesIO

import fitz  # PyMuPDF
import docx
from fastapi import HTTPException


def parse_docx(file: BytesIO):
    doc = docx.Document(file)
    text = ""
    for para in doc.paragraphs:
        text += para.text + "\n"
    return text


def parse_pdf(file: BytesIO):
    try:
        doc = fitz.open(stream=file, filetype="pdf")
        text = ""
        for page_num in range(doc.page_count):
            page = doc.load_page(page_num)
            text += page.get_text()
        return text
    except Exception as e:
        logging.error(f"Error while processing PDF: {str(e)}")
        raise HTTPException(
            status_code=500, detail="Error processing PDF file")


def parse_txt(file: BytesIO):
    return file.read().decode("utf-8")
features/text_classifier/routes.py
ADDED
@@ -0,0 +1,49 @@
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Request
from pydantic import BaseModel
from slowapi.util import get_remote_address
from slowapi import Limiter

from config import ACCESS_RATE
from .controller import (
    handle_text_analysis,
    handle_file_upload,
    handle_sentence_level_analysis,
    handle_file_sentence,
    verify_token
)

limiter = Limiter(key_func=get_remote_address)
router = APIRouter()

class TextInput(BaseModel):
    text: str

@router.post("/analyse")
@limiter.limit(ACCESS_RATE)
async def analyze(request: Request, data: TextInput, token: str = Depends(verify_token)):
    return await handle_text_analysis(data.text)

@router.post("/upload")
@limiter.limit(ACCESS_RATE)
async def upload_file(request: Request, file: UploadFile = File(...), token: str = Depends(verify_token)):
    return await handle_file_upload(file)

@router.post("/analyse-sentences")
@limiter.limit(ACCESS_RATE)
async def analyze_sentences(request: Request, data: TextInput, token: str = Depends(verify_token)):
    if not data.text:
        raise HTTPException(status_code=400, detail="Missing 'text' in request body")
    return await handle_sentence_level_analysis(data.text)

@router.post("/analyse-sentance-file")
@limiter.limit(ACCESS_RATE)
async def analyze_sentance_file(request: Request, file: UploadFile = File(...), token: str = Depends(verify_token)):
    return await handle_file_sentence(file)

@router.get("/health")
@limiter.limit(ACCESS_RATE)
def health(request: Request):
    return {"status": "ok"}
readme.md
ADDED
@@ -0,0 +1,401 @@
### **FastAPI AI**

This FastAPI app loads a GPT-2 model, tokenizes input text, classifies it, and returns whether the text is AI-generated or human-written.

### **Install Dependencies**

```bash
pip install -r requirements.txt
```

This command installs all the dependencies listed in the `requirements.txt` file. It ensures that your environment has the required packages to run the project smoothly.

**NOTE: IF YOU ADD OR CHANGE ANY DEPENDENCIES, DON'T FORGET TO UPDATE `requirements.txt` USING `pip freeze > requirements.txt`**

---
### File Structure

```
├── app.py
├── features
│   └── text_classifier
│       ├── controller.py
│       ├── inferencer.py
│       ├── __init__.py
│       ├── model_loader.py
│       ├── preprocess.py
│       └── routes.py
├── __init__.py
├── Procfile
├── readme.md
└── requirements.txt
```

- **`app.py`**: Entry point initializing the FastAPI app and routes
- **`Procfile`**: Tells Railway how to run the program
- **`requirements.txt`**: Lists all the packages used in the project
- **`__init__.py`**: Package initializer for the root module

**FOLDER: features/text_classifier**

- **`controller.py`**: Handles logic between routes and model
- **`inferencer.py`**: Runs inference and returns predictions
- **`__init__.py`**: Initializes the module as a package
- **`model_loader.py`**: Loads the ML model and tokenizer
- **`preprocess.py`**: Prepares input text for the model
- **`routes.py`**: Defines API routes for text classification
### **Functions**

1. **`load_model()`**
   Loads the GPT-2 model and tokenizer from the specified directory paths.

2. **`perplexity_to_ai_likelihood()`**
   Maps a perplexity score to an AI-likelihood percentage by clamping it to a fixed range and linearly rescaling it.

3. **`classify_text()`**
   Tokenizes the input text and performs classification using the GPT-2 model. Returns the classification label, perplexity score, and AI likelihood.

4. **`handle_text_analysis()`**
   Validates the input (at least 10 words, under 10,000 characters), then runs `classify_text()` in a thread pool for non-blocking text classification.

5. **`analyze()`**
   **POST** endpoint: Accepts text input, classifies it via `handle_text_analysis()`, and returns the result along with perplexity.

6. **`health()`**
   **GET** endpoint: Performs a simple health check to confirm the API is operational.

7. **`parse_docx()`, `parse_pdf()`, `parse_txt()`**
   Utility functions to extract and convert the contents of `.docx`, `.pdf`, and `.txt` files into plain text for classification.

8. **`warmup()`**
   Downloads the model repository and initializes the model and tokenizer using the `load_model()` function.

9. **`download_model_repo()`**
   Downloads the model files from the Hugging Face repository into the local `models` folder, skipping the download if they already exist.

10. **`get_model_tokenizer()`**
    Similar to `warmup()`, but includes a check to see if the model is already loaded. If not, it downloads and loads the model; otherwise, it reuses the previously loaded one.

11. **`handle_file_upload()`**
    Manages file uploads from the `/upload` route. Extracts text from the uploaded file, classifies it, and returns the results.

12. **`extract_file_contents()`**
    Extracts and returns plain text content from uploaded files (e.g., PDF, DOCX, TXT).

13. **`handle_file_sentence()`**
    Processes uploaded files by analyzing each sentence. Ensures the total file text is under 10,000 characters before classification.

14. **`handle_sentence_level_analysis()`**
    Strips and checks each sentence's length, then evaluates the likelihood of AI vs. human generation for each sentence.

15. **`analyze_sentences()`**
    Divides long paragraphs into individual sentences, classifies each one, and returns a list of their classification results.

16. **`analyze_sentance_file()`**
    A route function that analyzes sentences in uploaded files, similar to `handle_file_sentence()`.
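The `ai_likelihood` value returned throughout the API comes from `perplexity_to_ai_likelihood()` in `inferencer.py`, which is a plain linear rescaling. A worked sketch of the same arithmetic, with the constants copied from that function:

```python
# Linear map: perplexity 10 -> 100% AI-likelihood, perplexity 100 -> 0%
MIN_PPL, MAX_PPL = 10, 100

def likelihood(ppl: float) -> float:
    ppl = max(MIN_PPL, min(ppl, MAX_PPL))  # clamp into [10, 100]
    return round((1 - (ppl - MIN_PPL) / (MAX_PPL - MIN_PPL)) * 100, 2)

print(likelihood(8.17))    # 100.0 (clamped to MIN_PPL)
print(likelihood(40.31))   # 66.32
print(likelihood(510.28))  # 0.0 (clamped to MAX_PPL)
```

The sample responses further down show exactly these numbers next to their perplexity scores.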
---

### **Code Overview**

### **Running and Load Balancing:**

To run the app in production:

```bash
uvicorn app:app --host 0.0.0.0 --port 8000
```

This command launches the FastAPI app. For simple load balancing across CPU cores, uvicorn's `--workers` flag (e.g., `--workers 4`) can be added.
### **Endpoints**

#### 1. **`/text/analyse`**

- **Method:** `POST`
- **Description:** Classifies whether the text is AI-generated or human-written.
- **Request:**
  ```json
  { "text": "sample text" }
  ```
- **Response:**
  ```json
  { "result": "AI-generated", "perplexity": 55.67, "ai_likelihood": 66.6 }
  ```
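For reference, a minimal Python client for this endpoint might look like the sketch below (the token value is the placeholder from `.env-example`; the server compares it against `MY_SECRET_TOKEN`):

```python
import requests  # assumes `pip install requests`

API_URL = "https://can-org-canspace.hf.space/text/analyse"
TOKEN = "SECRET_CODE_TOKEN"  # must match MY_SECRET_TOKEN on the server

response = requests.post(
    API_URL,
    json={"text": "Some passage of at least ten words to satisfy the length check."},
    headers={"Authorization": f"Bearer {TOKEN}"},
)
print(response.status_code)  # 200 on success, 403 on a bad token
print(response.json())       # {"result": ..., "perplexity": ..., "ai_likelihood": ...}
```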
#### 2. **`/text/health`**

- **Method:** `GET`
- **Description:** Returns the status of the API.
- **Response:**
  ```json
  { "status": "ok" }
  ```

#### 3. **`/text/upload`**

- **Method:** `POST`
- **Description:** Takes a file (`.docx`, `.pdf`, or `.txt`), checks the contents inside, and returns the results.
- **Request:** File

- **Response:**
  ```json
  { "result": "AI-generated", "perplexity": 55.67, "ai_likelihood": 66.6 }
  ```
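Uploading a file from Python follows the same pattern; a hedged sketch with `requests` (the multipart field name `file` matches the route's `UploadFile` parameter, and `essay.txt` is a hypothetical local file):

```python
import requests  # assumes `pip install requests`

API_URL = "https://can-org-canspace.hf.space/text/upload"
TOKEN = "SECRET_CODE_TOKEN"  # must match MY_SECRET_TOKEN on the server

# The content type must be one the API accepts: .docx, .pdf, or .txt
with open("essay.txt", "rb") as fh:
    response = requests.post(
        API_URL,
        files={"file": ("essay.txt", fh, "text/plain")},
        headers={"Authorization": f"Bearer {TOKEN}"},
    )
print(response.json())  # content, result, perplexity, ai_likelihood
```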
#### 4. **`/text/analyse-sentance-file`**

- **Method:** `POST`
- **Description:** Takes a file, analyzes each sentence inside, and returns the results.
- **Request:** File

- **Response:**
  ```json
  {
    "content": "Artificial Intelligence (AI) and Machine Learning (ML) are rapidly transforming the way we \ninteract with technology. AI refers to the broader concept of machines being able to carry out \ntasks in a way that we would consider \"smart,\" while ML is a subset of AI that focuses on the \ndevelopment of algorithms that allow computers to learn from and make decisions based on \ndata. These technologies are behind innovations such as voice assistants, recommendation \nsystems, self-driving cars, and medical diagnosis tools. By analyzing large amounts of data, \nAI and ML can identify patterns, make predictions, and continuously improve their \nperformance over time, making them essential tools in modern industries ranging from \nhealthcare and finance to education and entertainment. \n \n",
    "analysis": [
      {
        "sentence": "Artificial Intelligence (AI) and Machine Learning (ML) are rapidly transforming the way we interact with technology.",
        "label": "AI-generated",
        "perplexity": 8.17,
        "ai_likelihood": 100
      },
      {
        "sentence": "AI refers to the broader concept of machines being able to carry out tasks in a way that we would consider \"smart,\" while ML is a subset of AI that focuses on the development of algorithms that allow computers to learn from and make decisions based on data.",
        "label": "AI-generated",
        "perplexity": 19.34,
        "ai_likelihood": 89.62
      },
      {
        "sentence": "These technologies are behind innovations such as voice assistants, recommendation systems, self-driving cars, and medical diagnosis tools.",
        "label": "AI-generated",
        "perplexity": 40.31,
        "ai_likelihood": 66.32
      },
      {
        "sentence": "By analyzing large amounts of data, AI and ML can identify patterns, make predictions, and continuously improve their performance over time, making them essential tools in modern industries ranging from healthcare and finance to education and entertainment.",
        "label": "AI-generated",
        "perplexity": 26.15,
        "ai_likelihood": 82.05
      }
    ]
  }
  ```
#### 5. **`/text/analyse-sentences`**

- **Method:** `POST`
- **Description:** Takes text, analyzes each sentence inside, and returns the results.
- **Request:**
  ```json
  {
    "text": "This is an test text. This is an another Text "
  }
  ```

- **Response:**
  ```json
  {
    "analysis": [
      {
        "sentence": "This is an test text.",
        "label": "Human-written",
        "perplexity": 510.28,
        "ai_likelihood": 0
      },
      {
        "sentence": "This is an another Text",
        "label": "Human-written",
        "perplexity": 3926.05,
        "ai_likelihood": 0
      }
    ]
  }
  ```
---

### **Running the API**

Start the server with:

```bash
uvicorn app:app --host 0.0.0.0 --port 8000
```

---

### **🧪 Testing the API**

You can test the FastAPI endpoint using `curl` like this:

```bash
curl -X POST https://can-org-canspace.hf.space/text/analyse \
  -H "Authorization: Bearer SECRET_CODE_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"text": "This is a sample sentence for analysis with more than ten words."}'
```

- The `-H "Authorization: Bearer SECRET_CODE_TOKEN"` part is used to simulate the **handshake**.
- FastAPI checks this token against the one loaded from the `.env` file.
- If the token matches, the request is accepted and processed (see the Python sketch below for a local, in-process equivalent).
- Otherwise, it responds with a `403 Forbidden` error.
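For local testing without a deployed Space, a minimal sketch using FastAPI's bundled `TestClient` (this assumes `MY_SECRET_TOKEN` is set in your environment, the model files are available locally, and the `httpx` package is installed, which `TestClient` needs but `requirements.txt` does not list):

```python
import os
from fastapi.testclient import TestClient

from app import app  # the FastAPI instance from app.py

client = TestClient(app)
token = os.getenv("MY_SECRET_TOKEN", "SECRET_CODE_TOKEN")

resp = client.post(
    "/text/analyse",
    json={"text": "A throwaway example sentence that easily clears the ten word minimum."},
    headers={"Authorization": f"Bearer {token}"},
)
assert resp.status_code == 200, resp.text
print(resp.json())
```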
---

### **API Documentation**

- **Swagger UI:** `https://can-org-canspace.hf.space/docs` -> `/docs`
- **ReDoc:** `https://can-org-canspace.hf.space/redoc` -> `/redoc`

### **🔐 Handshake Mechanism**

In this part, we're implementing a simple handshake to verify that the request is coming from a trusted source (e.g., our NestJS server). Here's how it works:

- We load a secret token from the `.env` file.
- When a request is made to the FastAPI server, we extract the `Authorization` header and compare it with our expected secret token.
- If the token does **not** match, we immediately return a **403 Forbidden** response with the message `"Unauthorized"`.
- If the token **does** match, we allow the request to proceed to the next step.

The verification function looks like this (the deployed version in `controller.py` uses FastAPI's `HTTPBearer` dependency to the same effect):

```python
def verify_token(auth: str):
    if auth != f"Bearer {EXPECTED_TOKEN}":
        raise HTTPException(status_code=403, detail="Unauthorized")
```

This provides a basic but effective layer of security to prevent unauthorized access to the API.
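The token is just a shared string, so anything sufficiently random works. One way to mint one, sketched with Python's standard `secrets` module (the variable name matches `.env-example`):

```python
import secrets

# Generate a URL-safe random token and print an .env-ready line
print(f'MY_SECRET_TOKEN="{secrets.token_urlsafe(32)}"')
```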
### **Implement it with NEST.js**

NOTE: Make a microservice in NEST.js, implement it there, and call it from `app.controller.ts`.

What we have done in the `fastapi.service.ts` file is shown below.

### Project Structure

```files
nestjs-fastapi-bridge/
├── src/
│   ├── app.controller.ts
│   ├── app.module.ts
│   └── fastapi.service.ts
├── .env
```

---

### Step-by-Step Setup

#### 1. `.env`

Create a `.env` file at the root with the following:

```environment
FASTAPI_BASE_URL=https://can-org-canspace.hf.space
SECRET_TOKEN="SECRET_CODE_TOKEN"
```

#### 2. `fastapi.service.ts`

```typescript
// src/fastapi.service.ts
import { Injectable } from "@nestjs/common";
import { HttpService } from "@nestjs/axios";
import { ConfigService } from "@nestjs/config";
import { firstValueFrom } from "rxjs";

@Injectable()
export class FastAPIService {
  constructor(
    private http: HttpService,
    private config: ConfigService,
  ) {}

  async analyzeText(text: string) {
    const url = `${this.config.get("FASTAPI_BASE_URL")}/text/analyse`;
    const token = this.config.get("SECRET_TOKEN");

    const response = await firstValueFrom(
      this.http.post(
        url,
        { text },
        {
          headers: {
            Authorization: `Bearer ${token}`,
          },
        },
      ),
    );

    return response.data;
  }
}
```

#### 3. `app.module.ts`

```typescript
// src/app.module.ts
import { Module } from "@nestjs/common";
import { ConfigModule } from "@nestjs/config";
import { HttpModule } from "@nestjs/axios";
import { AppController } from "./app.controller";
import { FastAPIService } from "./fastapi.service";

@Module({
  imports: [ConfigModule.forRoot(), HttpModule],
  controllers: [AppController],
  providers: [FastAPIService],
})
export class AppModule {}
```

---

#### 4. `app.controller.ts`

```typescript
// src/app.controller.ts
import { Body, Controller, Post, Get } from '@nestjs/common';
import { FastAPIService } from './fastapi.service';

@Controller()
export class AppController {
  constructor(private readonly fastapiService: FastAPIService) {}

  @Post('analyze-text')
  async callFastAPI(@Body('text') text: string) {
    return this.fastapiService.analyzeText(text);
  }

  @Get()
  getHello(): string {
    return 'NestJS is connected to FastAPI';
  }
}
```

### 🚀 How to Run

Run the FastAPI and Nest.js servers:

- For Nest.js:

  ```bash
  npm run start
  ```

- For FastAPI:

  ```bash
  uvicorn app:app --reload
  ```

Make sure your FastAPI service is running at `http://localhost:8000` (and that `FASTAPI_BASE_URL` points at it when testing locally).

### Test with CURL

`http://localhost:3000/` -> the Nest.js server

```bash
curl -X POST http://localhost:3000/analyze-text \
  -H 'Content-Type: application/json' \
  -d '{"text": "This is a test input with more than ten words in it"}'
```
requirements.txt
ADDED
@@ -0,0 +1,12 @@
fastapi
uvicorn
torch
transformers
huggingface_hub
python-dotenv
python-docx
pydantic
PyMuPDF
nltk
python-multipart
slowapi