import streamlit as st
import torch
import torchvision.transforms as transforms
from PIL import Image
from transformers import ViTForImageClassification

from utils import preprocess_image
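
# Note: utils.py is not included in this file. A minimal equivalent of the imported
# preprocess_image might look like the sketch below (an assumption about its
# behavior, kept under a different name so the real import above is not shadowed).
def _example_preprocess_image(file):
    """Hypothetical sketch: open an uploaded file as an RGB PIL image."""
    return Image.open(file).convert("RGB")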

labels = ['cardboard', 'glass', 'metal', 'paper', 'plastic', 'trash']  # class order used in training

# Initialize a ViT classifier with a head sized for our labels; ignore_mismatched_sizes
# avoids shape errors from the checkpoint's original classification head.
model = ViTForImageClassification.from_pretrained(
    "facebook/deit-base-distilled-patch16-224",
    num_labels=len(labels),
    ignore_mismatched_sizes=True,
)

# Load the fine-tuned weights
model.load_state_dict(torch.load("best.pt", map_location="cpu"))
model.eval()  # Set to evaluation mode
# Customized Streamlit layout
st.set_page_config(
    page_title="EcoIdentify by EcoClim Solutions",
    page_icon="https://ecoclimsolutions.files.wordpress.com/2024/01/rmcai-removebg.png?resize=48%2C48",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Customized Streamlit styles
st.markdown(
    """
    <style>
        body {
            color: #333333;
            background-color: #f9f9f9;
            font-family: 'Helvetica', sans-serif;
        }
        .st-bb {
            padding: 0rem;
        }
        .st-ec {
            color: #666666;
        }
        .st-ef {
            color: #666666;
        }
        .st-ei {
            color: #333333;
        }
        .st-dh {
            font-size: 36px;
            font-weight: bold;
            color: #4CAF50;
            text-align: center;
            margin-bottom: 20px;
        }
        .st-gf {
            background-color: #4CAF50;
            color: white;
            padding: 15px 30px;
            font-size: 18px;
            border: none;
            border-radius: 8px;
            cursor: pointer;
            transition: background-color 0.3s;
        }
        .st-gf:hover {
            background-color: #45a049;
        }
        .st-gh {
            text-align: center;
            font-size: 24px;
            font-weight: bold;
            margin-bottom: 20px;
        }
        .st-logo {
            max-width: 100%;
            height: auto;
            margin: 20px auto;
            display: block;
        }
    </style>
    """,
    unsafe_allow_html=True,
)

# Logo
st.image("https://ecoclimsolutions.files.wordpress.com/2024/01/rmcai-removebg.png?resize=48%2C48")

# Page title
st.title("EcoIdentify by EcoClim Solutions")

# Subheader
st.header("Upload a waste image to find its category")

# Note
st.markdown("* Please note that our dataset is trained primarily with images that contain a white background. Therefore, images with white background would produce maximum accuracy *")

# Image upload section
opt = st.selectbox("How do you want to upload the image for classification?", ("Please Select", "Upload image from device"))

image = None

if opt == 'Upload image from device':
    file = st.file_uploader('Select', type=['jpg', 'png', 'jpeg'])
    if file:
        image = preprocess_image(file)

try:
    if image is not None:
        st.image(image, width=256, caption='Uploaded Image')
        if st.button('Predict'):
            # Match the 224x224 input size expected by the patch16-224 checkpoint
            transform = transforms.Compose([
                transforms.Resize((224, 224)),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            ])
            image = transform(image).unsqueeze(0)  # add batch dimension
            with torch.no_grad():
                logits = model(image).logits  # HF classifiers return an output object; take the logits
            st.success(f'Prediction: {labels[torch.argmax(logits, dim=1).item()]}')
except Exception as e:
    st.error(f"An error occurred: {e}. Please contact EcoClim Solutions at EcoClimSolutions.wordpress.com.")