Adonai Vera committed · a20b345
Parent(s): 1b9e40e

Improvements in message feedback

- .DS_Store +0 -0
- app.py +2 -3
- app_save.py +50 -0
- examples/.DS_Store +0 -0

.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ

app.py CHANGED
@@ -1,4 +1,3 @@
-from turtle import title
 import gradio as gr
 from transformers import pipeline
 from PIL import Image
@@ -8,7 +7,7 @@ import os
 # Initialize the pipeline with your model
 pipe = pipeline("image-classification", model="SubterraAI/ofwat_cleaner_classification")
 HF_TOKEN = os.environ.get('HF_TOKEN')
-hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN,
+hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "ofwat_cleaner_loop", private=True, separate_dirs=True)
 
 
 def classify_image(image):
@@ -40,4 +39,4 @@ iface = gr.Interface(
 )
 
 # Launch the interface
-iface.launch()
+iface.launch()
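
For context, the line added above configures Gradio's HuggingFaceDatasetSaver, which writes flagged samples to the private ofwat_cleaner_loop dataset on the Hub. Below is a minimal sketch of how such a saver is typically attached to an Interface in Gradio 3.x, assuming a valid HF_TOKEN; the placeholder classifier fn is illustrative and not part of this commit.

import os
import gradio as gr

HF_TOKEN = os.environ.get("HF_TOKEN")
# Each flagged example is appended to the private "ofwat_cleaner_loop" dataset;
# separate_dirs=True keeps every flagging session in its own directory.
hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "ofwat_cleaner_loop", private=True, separate_dirs=True)

demo = gr.Interface(
    fn=lambda image: {"obstruction": 0.5, "no_obstruction": 0.5},  # placeholder classifier, not the real model
    inputs=gr.Image(),
    outputs=gr.Label(),
    allow_flagging="manual",
    flagging_options=["obstruction", "no_obstruction"],
    flagging_callback=hf_writer,  # the saver itself implements the FlaggingCallback interface
)

demo.launch()

Passing the saver directly as flagging_callback lets the Interface's built-in Flag button handle the dataset write, which is the documented route for collecting feedback without custom callback code.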

app_save.py ADDED
@@ -0,0 +1,50 @@
+import gradio as gr
+from transformers import pipeline
+from PIL import Image
+import os
+
+# Initialize the pipeline with your model
+pipe = pipeline("image-classification", model="SubterraAI/ofwat_cleaner_classification")
+HF_TOKEN = os.environ.get('HF_TOKEN')
+hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, dataset_name="ofwat_cleaner_loop", private=True, separate_dirs=True)
+
+def classify_image(image):
+    # Convert the input image to PIL format
+    PIL_image = Image.fromarray(image).convert('RGB')
+
+    # Classify the image using the pipeline
+    res = pipe(PIL_image)
+
+    # Extract labels and scores
+    return {dic["label"]: dic["score"] for dic in res}
+
+def flag_feedback(image, option, flag_status):
+    # Perform flagging action here using hf_writer
+    hf_writer.flag((image, option))
+
+    # Update the flag status to indicate feedback has been submitted
+    flag_status.update("Feedback submitted. Thank you!")
+    return flag_status
+
+# Create a state variable for the flag status
+flag_status = gr.State("")
+
+# Create the Gradio interface
+iface = gr.Interface(
+    classify_image,
+    inputs=[gr.Image(), gr.Radio(["obstruction", "no_obstruction"])],
+    outputs=[gr.Label(), gr.Textbox(label="Flag Status", value=flag_status)],
+    examples=[
+        ["examples/CS.jpg"],
+        ["examples/GI.jpg"],
+        ["examples/PP.jpg"]
+    ],
+    description="Upload an image to view a classification demonstration...",
+    title="Sewer Obstruction Classification with AI by Subterra",
+    allow_flagging="manual",
+    flagging_options=["obstruction", "no_obstruction"],
+    flagging_callback=lambda image, option: flag_feedback(image, option, flag_status)
+)
+
+# Launch the interface
+iface.launch()
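
One note on the new app_save.py: in Gradio 3.x, Interface's flagging_callback expects a FlaggingCallback object with setup()/flag() methods rather than a plain function, and a gr.State value is normally updated by returning it from an event handler rather than calling .update() on the state object. A feedback message like the one above is therefore usually wired up explicitly in gr.Blocks. The following is a rough sketch of that pattern, assuming Gradio 3.x and a valid HF_TOKEN; component names such as flag_btn and status_out are illustrative and not part of the commit.

import os
import gradio as gr
from PIL import Image
from transformers import pipeline

pipe = pipeline("image-classification", model="SubterraAI/ofwat_cleaner_classification")
HF_TOKEN = os.environ.get("HF_TOKEN")
hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "ofwat_cleaner_loop", private=True, separate_dirs=True)

def classify_image(image):
    # Same logic as app_save.py: run the pipeline and return a label -> score dict
    pil_image = Image.fromarray(image).convert("RGB")
    return {d["label"]: d["score"] for d in pipe(pil_image)}

def flag_feedback(image, option):
    # Record the flagged example in the Hub dataset, then report back to the UI
    hf_writer.flag([image, option], flag_option=option)
    return "Feedback submitted. Thank you!"

with gr.Blocks() as demo:
    image_in = gr.Image()
    option_in = gr.Radio(["obstruction", "no_obstruction"], label="Correct label")
    label_out = gr.Label()
    status_out = gr.Textbox(label="Flag Status")
    classify_btn = gr.Button("Classify")
    flag_btn = gr.Button("Flag")

    # Tell the saver which components it will be recording
    hf_writer.setup([image_in, option_in], "flagged")

    classify_btn.click(classify_image, inputs=image_in, outputs=label_out)
    # preprocess=False keeps the image in its serialized form, which the saver expects
    flag_btn.click(flag_feedback, inputs=[image_in, option_in], outputs=status_out, preprocess=False)

demo.launch()

Exact serialization details vary between Gradio 3.x releases, so treat this as a sketch of the pattern rather than a drop-in replacement for the committed file.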

examples/.DS_Store CHANGED
Binary files a/examples/.DS_Store and b/examples/.DS_Store differ