First basic SMS filter for iOS app.

- .gitignore +71 -0
- Dockerfile +5 -1
- app.py +60 -3
- requirements.txt +3 -0
.gitignore
ADDED
@@ -0,0 +1,71 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+build/
+dist/
+*.egg-info/
+.eggs/
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Jupyter Notebook checkpoints
+.ipynb_checkpoints
+
+# VSCode settings
+.vscode/
+
+# PyCharm
+.idea/
+
+# macOS and system files
+.DS_Store
+Thumbs.db
+
+# Logs and temporary files
+*.log
+*.tmp
+*.swp
+
+# Hugging Face cache
+~/.cache/huggingface/
+huggingface_cache/
+cached_models/
+
+# Datasets or model outputs
+data/
+output/
+results/
+*.ckpt
+*.pth
+
+# Hugging Face Spaces
+# (if using Gradio, Streamlit, etc.)
+app/__pycache__/
+app/*.pyc
+*.gradio/
+
+# Optional: ignore weights or tokenizer files (use with care)
+pytorch_model.bin
+tf_model.h5
+tokenizer_config.json
+special_tokens_map.json
+merges.txt
+vocab.json
+config.json
Dockerfile
CHANGED
@@ -1,13 +1,17 @@
 # Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
 # you will also find guides on how best to write your Dockerfile
 
-FROM python:3.12
+FROM python:3.12-slim
 
 WORKDIR /app
 
 COPY requirements.txt .
 
+RUN pip install --upgrade pip
 RUN pip install --no-cache-dir --upgrade -r requirements.txt
 
 COPY . /app
+
+ENV HF_HOME=/tmp/huggingface_home
+
 CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
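The new ENV HF_HOME=/tmp/huggingface_home line points the Hugging Face cache at a path the Space container can write to, so the transformers pipeline in app.py can download its model at startup. A minimal local sketch of the same idea follows; the path simply mirrors the Dockerfile value, and the variable has to be set before transformers is imported because the cache location is resolved when the library loads.

# Sketch: send the transformers download cache to the same writable path the Dockerfile uses.
import os
os.environ.setdefault("HF_HOME", "/tmp/huggingface_home")  # set before importing transformers

from transformers import pipeline

# First use downloads the model files into $HF_HOME.
pipe = pipeline(task="text-classification", model="mrm8488/bert-tiny-finetuned-sms-spam-detection")
print(os.listdir(os.environ["HF_HOME"]))  # cached files (e.g. a "hub" directory) should appear here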
app.py
CHANGED
@@ -1,8 +1,65 @@
 from fastapi import FastAPI
+from fastapi.responses import JSONResponse
+from pydantic import BaseModel
+from enum import Enum
+from transformers import pipeline
 
 app = FastAPI()
 
-
-
-    return {"Hello": "World!"}
+class MessageModel(BaseModel):
+    text: str
 
+class QueryModel(BaseModel):
+    sender: str
+    message: MessageModel
+
+class AppModel(BaseModel):
+    version: str
+
+class InputModel(BaseModel):
+    _version: int
+    query: QueryModel
+    app: AppModel
+
+class ActionModel(Enum):
+    # Insufficient information to determine an action to take. In a query response, has the effect of allowing the message to be shown normally.
+    NONE = 0
+    # Allow the message to be shown normally.
+    ALLOW = 1
+    # Prevent the message from being shown normally, filtered as Junk message.
+    JUNK = 2
+    # Prevent the message from being shown normally, filtered as Promotional message.
+    PROMOTION = 3
+    # Prevent the message from being shown normally, filtered as Transactional message.
+    TRANSACTION = 4
+
+class SubActionModel(Enum):
+    NONE = 0
+
+class OutputModel(BaseModel):
+    action: ActionModel
+    sub_action: SubActionModel
+
+pipe = pipeline(task="text-classification", model="mrm8488/bert-tiny-finetuned-sms-spam-detection")
+
+@app.get("/.well-known/apple-app-site-association", include_in_schema=False)
+def get_well_known_aasa():
+    return JSONResponse(
+        content={
+            "messagefilter": {
+                "apps": [
+                    "X9NN3FSS3T.com.lela.Serenity.SerenityMessageFilterExtension",
+                    "X9NN3FSS3T.com.lela.Serenity"
+                ]
+            }
+        },
+        media_type="application/json"
+    )
+
+@app.post("/predict")
+def predict(input: InputModel) -> OutputModel:
+    label = pipe(input.query.message.text)
+    if label[0]['label'] == 'LABEL_1':
+        return OutputModel(action=ActionModel.JUNK, sub_action=SubActionModel.NONE)
+    else:
+        return OutputModel(action=ActionModel.NONE, sub_action=SubActionModel.NONE)
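As a quick way to exercise the new /predict route, the sketch below posts a hand-written payload matching the InputModel/QueryModel/MessageModel shapes added above. The sender, message text, and app version are invented for illustration, and it assumes httpx is installed so FastAPI's TestClient can run.

# Illustrative smoke test for /predict (payload values are made up).
from fastapi.testclient import TestClient

from app import app

client = TestClient(app)

payload = {
    "query": {
        "sender": "+15551234567",  # hypothetical sender
        "message": {"text": "Congratulations! You won a free prize, tap the link to claim it."},
    },
    "app": {"version": "1.0"},
}

response = client.post("/predict", json=payload)
print(response.status_code, response.json())
# Spam-looking text should come back as {"action": 2, "sub_action": 0} (ActionModel.JUNK);
# ordinary text should map to {"action": 0, "sub_action": 0} (ActionModel.NONE).

With Pydantic v2 (what an unpinned fastapi/pydantic install pulls in), the underscore-prefixed _version annotation on InputModel becomes a private attribute rather than a request field, which is why the payload above can omit it.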
requirements.txt
CHANGED
@@ -1,2 +1,5 @@
 fastapi
 uvicorn[standard]
+pydantic
+transformers
+torch