Commit 48cddcb · Parent(s): 06ad0a5
Update app.py

app.py CHANGED
@@ -1,136 +1,131 @@
Deleted lines recoverable from the old version (the bodies of the old markdown blocks, take_photo, upload branches, and prediction block are not shown in the diff view):
-import
-import streamlit as st
-from IPython.display import display, Javascript, Image
-import cv2
-import numpy as np
-import html
-from google.colab.patches import cv2_imshow
-from IPython.display import clear_output
-import matplotlib.pyplot as plt
-model = model_arc()
-def take_photo(filename='photo.jpg', quality=
+import cv2
 import numpy as np
+import streamlit as st
 from PIL import Image
 import urllib.request
 import io
 from utils import *
 from google.colab.output import eval_js
 from base64 import b64decode, b64encode

 # Initialize labels and model
 labels = gen_labels()
+model = model_arc() # Assuming this function initializes and returns a trained model


 # Streamlit UI
 st.markdown('''
+<div style="padding-bottom: 20px; padding-top: 20px; padding-left: 5px; padding-right: 5px">
+<center><h1>EcoIdentify (Test)</h1></center>
+</div>
 ''', unsafe_allow_html=True)

 st.markdown('''
+<div>
+<center><h3>Please upload Waste Image to find its Category</h3></center>
+</div>
 ''', unsafe_allow_html=True)

 # Function to take a photo using the webcam
+def take_photo(filename='photo.jpg', quality=
+               0.8):
+    js = Javascript('''
+        async function takePhoto(quality) {
+            const div = document.createElement('div');
+            const capture = document.createElement('button');
+            capture.textContent = 'Capture';
+            div.appendChild(capture);
+
+            const video = document.createElement('video');
+            video.style.display = 'block';
+            const stream = await navigator.mediaDevices.getUserMedia({ video: true });
+
+            document.body.appendChild(div);
+            div.appendChild(video);
+            video.srcObject = stream;
+            await video.play();
+
+            // Resize the output to fit the video element.
+            google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
+
+            // Wait for Capture to be clicked.
+            await new Promise((resolve) => capture.onclick = resolve);
+
+            const canvas = document.createElement('canvas');
+            canvas.width = video.videoWidth;
+            canvas.height = video.videoHeight;
+            canvas.getContext('2d').drawImage(video, 0, 0);
+            stream.getVideoTracks()[0].stop();
+            div.remove();
+            return canvas.toDataURL('image/jpeg', quality);
+        }
+    ''')
+    display(js)
+    data = eval_js('takePhoto({})'.format(quality))
+    binary = b64decode(data.split(',')[1])
+    with open(filename, 'wb') as f:
+        f.write(binary)
+    return filename
+

 opt = st.selectbox("How do you want to upload the image for classification?",
+                   ('Please Select', 'Upload image via link', 'Upload image from device', 'Capture a picture'))

 # Image processing based on user selection
 image = None
 if opt == 'Upload image from device':
+    file = st.file_uploader('Select', type=['jpg', 'png', 'jpeg'])
+    if file:
+        try:
+            image = cv2.imread(file)
+            image = cv2.resize(image, (256, 256))
+        except Exception as e:
+            st.error(f"Error reading the file: {e}")

 elif opt == 'Upload image via link':
+    img_url = st.text_input('Enter the Image Address')
+    if st.button('Submit'):
+        try:
+            response = urllib.request.urlopen(img_url)
+            image = cv2.imread(io.BytesIO(response.read()))
+            image = cv2.resize(image, (256, 256))
+        except ValueError:
+            st.error("Please Enter a valid Image Address!")

 elif opt == 'Capture a picture':
+    take_photo()

 try:
+    if image is not None:
+        st.image(image, width=256, caption='Uploaded Image')
+        if st.button('Predict'):
+            img = preprocess(image)
+            #model = model_arc()
+            # model.load_weights("classify_model.h5")
+
+            print("---------------img-array---------------------")
+            print(img[np.newaxis, ...])
+            prediction = model.predict(img[np.newaxis, ...])
+
+            print("------------summary------------------------")
+            print(model.summary())
+            print("------------------------------------")
+            print(prediction)
+
+            st.info('Hey! The uploaded image has been classified as " {} waste " '.format(labels[np.argmax(prediction[0], axis=-1)]))
+
+            def message(img):
+                if img == 'paper' or 'cardboard' or 'metal' or 'glass':
+                    return (
+                        " therefore your item is recyclable. Please refer to https://www.wm.com/us/en/drop-off-locations to find a drop-off location near you.")
+                elif img == 'plastic':
+                    return (
+                        ' therefore you item may have a chance of being recyclable. Since this model has yet to recognize types of plastics, please refer to https://www.bing.com/ck/a?!&&p=c1474e95017548dfJmltdHM9MTcwMzcyMTYwMCZpZ3VpZD0xNmNjOTFiOS1hMDgwLTY5MmItMzBmNi04MmE1YTE3ODY4NDImaW5zaWQ9NTIyMA&ptn=3&ver=2&hsh=3&fclid=16cc91b9-a080-692b-30f6-82a5a1786842&psq=what+type+of+plastic+can+be+recycled&u=a1aHR0cHM6Ly93d3cucGxhc3RpY3Nmb3JjaGFuZ2Uub3JnL2Jsb2cvd2hpY2gtcGxhc3RpYy1jYW4tYmUtcmVjeWNsZWQ&ntb=1 to check if this item can be recycled.')
+                else:
+                    return ('Your item is not recyclable. Please discard it safely.')
+
+            st.info(message(labels[np.argmax(prediction[0], axis=-1)]))

 except Exception as e:
+    st.info(e)
+    pass