feat: added basic
Files changed:
- README.md +2 -1
- app.py +2 -1
- __init__.py → features/nepali_text_classifier/__init__.py +0 -0
- features/nepali_text_classifier/controller.py +36 -0
- features/nepali_text_classifier/inferencer.py +21 -0
- features/nepali_text_classifier/model_loader.py +54 -0
- features/nepali_text_classifier/preprocess.py +32 -0
- features/nepali_text_classifier/routes.py +29 -0
- features/text_classifier/controller.py +2 -4
- features/text_classifier/model_loader.py +1 -1
- np_text_model/.gitattributes +36 -0
- np_text_model/classifier/sentencepiece.bpe.model +3 -0
- np_text_model/classifier/special_tokens_map.json +15 -0
- np_text_model/classifier/tokenizer.json +3 -0
- np_text_model/classifier/tokenizer_config.json +55 -0
README.md
CHANGED
@@ -5,4 +5,5 @@ colorFrom: yellow
 colorTo: blue
 sdk: docker
 pinned: false
----
+---
+
app.py
CHANGED
@@ -5,6 +5,7 @@ from slowapi.errors import RateLimitExceeded
 from slowapi.util import get_remote_address
 from fastapi.responses import JSONResponse
 from features.text_classifier.routes import router as text_classifier_router
+from features.nepali_text_classifier.routes import router as nepali_text_classifier_router
 from config import ACCESS_RATE
 import requests
 limiter = Limiter(key_func=get_remote_address, default_limits=[ACCESS_RATE])
@@ -25,7 +26,7 @@ app.add_middleware(SlowAPIMiddleware)
 
 # Include your routes
 app.include_router(text_classifier_router, prefix="/text")
-
+app.include_router(nepali_text_classifier_router,prefix="/NP")
 @app.get("/")
 @limiter.limit(ACCESS_RATE)
 async def root(request: Request):
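With both routers mounted, the existing English endpoints stay under /text and the new Nepali classifier is exposed under /NP. A quick way to confirm the resulting route table, assuming app.py can be imported outside the running server, is a sketch like:

from app import app  # the FastAPI instance configured above

# Print every registered path; the new router should contribute /NP/analyse and /NP/health.
for route in app.routes:
    print(route.path)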
__init__.py → features/nepali_text_classifier/__init__.py
RENAMED
File without changes
features/nepali_text_classifier/controller.py
ADDED
@@ -0,0 +1,36 @@
import asyncio
from fastapi import HTTPException, status, Depends
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
import os

from features.nepali_text_classifier.inferencer import classify_text

security = HTTPBearer()

async def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)):
    token = credentials.credentials
    expected_token = os.getenv("MY_SECRET_TOKEN")
    if token != expected_token:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Invalid or expired token"
        )
    return token

async def nepali_text_analysis(text: str):
    # Fix: split once and reuse
    words = text.split()
    if len(words) < 10:
        raise HTTPException(status_code=400, detail="Text must contain at least 10 words")
    if len(text) > 10000:
        raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")

    label, confidence = await asyncio.to_thread(classify_text, text)
    return {
        "result": label,
        "ai_likelihood": confidence
    }

def classify(text: str):
    return classify_text(text)
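One thing worth flagging: nepali_text_analysis unpacks the value returned by classify_text into a (label, confidence) pair, but classify_text (see inferencer.py below) returns a dict, and unpacking a two-key dict yields its keys rather than its values. A small adapter along these lines, a sketch rather than part of this commit, would reconcile the two:

import asyncio

from features.nepali_text_classifier.inferencer import classify_text


async def classify_text_pair(text: str):
    # Run the blocking inference off the event loop, then convert the
    # {"label": ..., "confidence": ...} dict into the (label, confidence)
    # pair that nepali_text_analysis expects.
    result = await asyncio.to_thread(classify_text, text)
    return result["label"], result["confidence"]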
features/nepali_text_classifier/inferencer.py
ADDED
@@ -0,0 +1,21 @@
import torch
from .model_loader import get_model_tokenizer
import torch.nn.functional as F

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def classify_text(text: str):
    model, tokenizer = get_model_tokenizer()
    inputs = tokenizer(text, return_tensors='pt', padding=True, truncation=True, max_length=512)
    inputs = {k: v.to(device) for k, v in inputs.items()}

    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs if isinstance(outputs, torch.Tensor) else outputs.logits
        probs = F.softmax(logits, dim=1)
        pred = torch.argmax(probs, dim=1).item()
        prob_percent = probs[0][pred].item() * 100

    return {"label": "Human" if pred == 0 else "AI", "confidence": round(prob_percent, 2)}
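classify_text is synchronous and loads the cached model on first use, so it can be exercised directly; a minimal usage sketch (the sample string is a placeholder) is:

from features.nepali_text_classifier.inferencer import classify_text

sample = "..."  # replace with the Nepali passage to classify
result = classify_text(sample)
# classify_text returns a dict, e.g. {"label": "Human", "confidence": 97.42}
print(result["label"], result["confidence"])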
features/nepali_text_classifier/model_loader.py
ADDED
@@ -0,0 +1,54 @@
import os
import shutil
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from huggingface_hub import snapshot_download
from transformers import AutoTokenizer, AutoModel

# Configs
REPO_ID = "Pujan-Dev/Nepali-AI-VS-HUMAN"
BASE_DIR = "./np_text_model"
TOKENIZER_DIR = os.path.join(BASE_DIR, "classifier")  # <- update this to match your uploaded folder
WEIGHTS_PATH = os.path.join(BASE_DIR, "model_95_acc.pth")  # <- change to match actual uploaded weight
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Define model class
class XLMRClassifier(nn.Module):
    def __init__(self):
        super(XLMRClassifier, self).__init__()
        self.bert = AutoModel.from_pretrained("xlm-roberta-base")
        self.classifier = nn.Linear(self.bert.config.hidden_size, 2)

    def forward(self, input_ids, attention_mask):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        cls_output = outputs.last_hidden_state[:, 0, :]
        return self.classifier(cls_output)

# Globals for caching
_model = None
_tokenizer = None

def download_model_repo():
    if os.path.exists(BASE_DIR) and os.path.isdir(BASE_DIR):
        logging.info("Model already downloaded.")
        return
    snapshot_path = snapshot_download(repo_id=REPO_ID)
    os.makedirs(BASE_DIR, exist_ok=True)
    shutil.copytree(snapshot_path, BASE_DIR, dirs_exist_ok=True)

def load_model():
    download_model_repo()
    tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_DIR)
    model = XLMRClassifier().to(device)
    model.load_state_dict(torch.load(WEIGHTS_PATH, map_location=device))
    model.eval()
    return model, tokenizer

def get_model_tokenizer():
    global _model, _tokenizer
    if _model is None or _tokenizer is None:
        _model, _tokenizer = load_model()
    return _model, _tokenizer
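The first call to get_model_tokenizer triggers snapshot_download of Pujan-Dev/Nepali-AI-VS-HUMAN into ./np_text_model and loads the weights, which can take a while, so warming it up once at startup avoids paying that cost on the first request. A sketch, assuming the caller only wants the cached pair, is:

from features.nepali_text_classifier.model_loader import get_model_tokenizer

# First call downloads the Hugging Face snapshot into ./np_text_model (if missing),
# builds XLMRClassifier, and loads model_95_acc.pth; later calls reuse the cached globals.
model, tokenizer = get_model_tokenizer()
print(type(model).__name__, tokenizer.__class__.__name__)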
features/nepali_text_classifier/preprocess.py
ADDED
@@ -0,0 +1,32 @@
import fitz  # PyMuPDF
import docx
from io import BytesIO
import logging
from fastapi import HTTPException


def parse_docx(file: BytesIO):
    doc = docx.Document(file)
    text = ""
    for para in doc.paragraphs:
        text += para.text + "\n"
    return text


def parse_pdf(file: BytesIO):
    try:
        doc = fitz.open(stream=file, filetype="pdf")
        text = ""
        for page_num in range(doc.page_count):
            page = doc.load_page(page_num)
            text += page.get_text()
        return text
    except Exception as e:
        logging.error(f"Error while processing PDF: {str(e)}")
        raise HTTPException(
            status_code=500, detail="Error processing PDF file")


def parse_txt(file: BytesIO):
    return file.read().decode("utf-8")
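These parsers are not wired into the new routes yet; a hedged sketch of an upload handler that dispatches on the filename extension (the handler itself is an assumption, not part of this commit) could look like:

from io import BytesIO

from fastapi import HTTPException, UploadFile

from features.nepali_text_classifier.preprocess import parse_docx, parse_pdf, parse_txt


async def extract_text(file: UploadFile) -> str:
    # Read the upload into memory and hand it to the matching parser.
    data = BytesIO(await file.read())
    if file.filename.endswith(".pdf"):
        return parse_pdf(data)
    if file.filename.endswith(".docx"):
        return parse_docx(data)
    if file.filename.endswith(".txt"):
        return parse_txt(data)
    raise HTTPException(status_code=415, detail="Unsupported file type")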
features/nepali_text_classifier/routes.py
ADDED
@@ -0,0 +1,29 @@
from slowapi import Limiter
from config import ACCESS_RATE
from .controller import nepali_text_analysis
from .inferencer import classify_text
from fastapi import APIRouter, Request, Depends, HTTPException
from fastapi.security import HTTPBearer
from slowapi import Limiter
from slowapi.util import get_remote_address
from pydantic import BaseModel
router = APIRouter()
limiter = Limiter(key_func=get_remote_address)
security = HTTPBearer()

# Input schema
class TextInput(BaseModel):
    text: str

@router.post("/analyse")
@limiter.limit(ACCESS_RATE)
async def analyse(request: Request, data: TextInput, token: str = Depends(security)):
    # Token is available as `token.credentials`, add validation if needed
    result = classify_text(data.text)
    return result

@router.get("/health")
@limiter.limit(ACCESS_RATE)
def health(request: Request):
    return {"status": "ok"}
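Because app.py mounts this router with prefix="/NP", the endpoints resolve to POST /NP/analyse and GET /NP/health, with /analyse requiring a Bearer token header (its value is not yet checked against MY_SECRET_TOKEN here) and both subject to the ACCESS_RATE limit. A client sketch, where the base URL and token value are assumptions, is:

import requests

BASE_URL = "http://localhost:7860"  # assumed local address of the Space
TOKEN = "anything-for-now"          # only the header's presence is required by this route

resp = requests.post(
    f"{BASE_URL}/NP/analyse",
    json={"text": "..."},  # Nepali text to classify
    headers={"Authorization": f"Bearer {TOKEN}"},
)
print(resp.json())  # e.g. {"label": "AI", "confidence": 88.13}

print(requests.get(f"{BASE_URL}/NP/health").json())  # {"status": "ok"}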
features/text_classifier/controller.py
CHANGED
@@ -60,7 +60,7 @@ async def handle_file_upload(file: UploadFile):
     try:
         file_contents = await extract_file_contents(file)
         if len(file_contents) > 10000:
-
+            raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
 
         cleaned_text = file_contents.replace("\n", " ").replace("\t", " ").strip()
         if not cleaned_text:
@@ -87,7 +87,6 @@ async def handle_sentence_level_analysis(text: str):
     if len(text) > 10000:
         raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
 
-    # Use SpaCy for sentence splitting
     doc = nlp(text)
     sentences = [sent.text.strip() for sent in doc.sents]
 
@@ -108,7 +107,7 @@ async def handle_file_sentence(file: UploadFile):
     try:
         file_contents = await extract_file_contents(file)
         if len(file_contents) > 10000:
-
+            raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
 
         cleaned_text = file_contents.replace("\n", " ").replace("\t", " ").strip()
         if not cleaned_text:
@@ -123,7 +122,6 @@ async def handle_file_sentence(file: UploadFile):
         logging.error(f"Error processing file: {e}")
         raise HTTPException(status_code=500, detail="Error processing the file")
 
-# Optional synchronous helper function
 def classify(text: str):
     return classify_text(text)
 
features/text_classifier/model_loader.py
CHANGED
@@ -18,9 +18,9 @@ _model, _tokenizer = None, None
 def warmup():
     global _model, _tokenizer
     # Ensure punkt is available
-
     download_model_repo()
     _model, _tokenizer = load_model()
+    logging.info("Its ready")
 
 
 def download_model_repo():
np_text_model/.gitattributes
ADDED
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
classifier/tokenizer.json filter=lfs diff=lfs merge=lfs -text
np_text_model/classifier/sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
size 5069051
np_text_model/classifier/special_tokens_map.json
ADDED
@@ -0,0 +1,15 @@
{
  "bos_token": "<s>",
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "unk_token": "<unk>"
}
np_text_model/classifier/tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:883b037111086fd4dfebbbc9b7cee11e1517b5e0c0514879478661440f137085
size 17082987
np_text_model/classifier/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "250001": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "extra_special_tokens": {},
  "mask_token": "<mask>",
  "model_max_length": 512,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "XLMRobertaTokenizer",
  "unk_token": "<unk>"
}