Update src/streamlit_app.py

src/streamlit_app.py  CHANGED  +72 -61
@@ -1,40 +1,50 @@
 import streamlit as st
+import av
 import cv2
 import numpy as np
-from PIL import Image
-import time
 import mediapipe as mp
+from streamlit_webrtc import webrtc_streamer, WebRtcMode
 
+# Initialize MediaPipe Pose
 mp_pose = mp.solutions.pose
-pose = mp_pose.Pose(
-    min_detection_confidence=0.5,
-    min_tracking_confidence=0.5,
-    model_complexity=1
-)
 mp_drawing = mp.solutions.drawing_utils
 
+# Session state initialization
+if 'camera_access' not in st.session_state:
+    st.session_state.camera_access = False
+if 'posture_status' not in st.session_state:
+    st.session_state.posture_status = "カメラを起動してください (Please enable camera)"
+if 'last_status' not in st.session_state:
+    st.session_state.last_status = ""
+
 def analyze_posture(image):
-    """Analyze posture on the image and return …
-    [… removed function body truncated in this view]
+    """Analyze posture on the image and return annotated image and status"""
+    with mp_pose.Pose(
+        min_detection_confidence=0.5,
+        min_tracking_confidence=0.5,
+        model_complexity=1
+    ) as pose:
+
+        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+        results = pose.process(image_rgb)
+
+        annotated_image = image.copy()
+        if results.pose_landmarks:
+            mp_drawing.draw_landmarks(
+                annotated_image,
+                results.pose_landmarks,
+                mp_pose.POSE_CONNECTIONS,
+                mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
+                mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2)
+            )
+            posture_status = check_posture(results.pose_landmarks, image.shape)
+        else:
+            posture_status = "キーポイントが検出されませんでした (Key points not detected)"
+
+        return annotated_image, posture_status
 
 def check_posture(landmarks, image_shape):
-    """Analyze posture and return …
+    """Analyze posture and return text report"""
     h, w, _ = image_shape
 
     # Get key points
@@ -93,54 +103,55 @@ def check_posture(landmarks, image_shape):
 
     return "\n\n".join(report)
 
+def video_frame_callback(frame):
+    """Process each video frame"""
+    img = frame.to_ndarray(format="bgr24")
+
+    try:
+        analyzed_img, posture_status = analyze_posture(img)
+        if posture_status != st.session_state.last_status:
+            st.session_state.posture_status = posture_status
+            st.session_state.last_status = posture_status
+        return av.VideoFrame.from_ndarray(analyzed_img, format="bgr24")
+    except Exception as e:
+        st.error(f"処理エラー: {str(e)} (Processing error)")
+        return av.VideoFrame.from_ndarray(img, format="bgr24")
+
 def main():
     st.set_page_config(layout="wide")
-    st.title("📷 …
+    st.title("📷 リアルタイム姿勢分析アプリ (Real-time Posture Analyzer)")
 
+    # Create columns
     col1, col2 = st.columns([2, 1])
 
     with col1:
        st.header("カメラビュー (Camera View)")
 
-        if st.…
-            st.session_state.camera_allowed = True
-
-        if not st.session_state.get('camera_allowed', False):
+        if not st.session_state.camera_access:
            st.warning("⚠️ カメラを使用するには許可が必要です (Camera access requires permission)")
-            st.…
-            …
+            if st.button("カメラアクセスを許可 (Allow camera access)"):
+                st.session_state.camera_access = True
+                st.rerun()
        else:
-            …
-            …
+            webrtc_ctx = webrtc_streamer(
+                key="posture-analysis",
+                mode=WebRtcMode.SENDRECV,
+                video_frame_callback=video_frame_callback,
+                media_stream_constraints={"video": True, "audio": False},
+                async_processing=True,
+                rtc_configuration={
+                    "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
+                }
+            )
+
+            if not webrtc_ctx.state.playing:
+                st.session_state.posture_status = "カメラを停止しました (Camera stopped)"
+                st.session_state.last_status = ""
 
     with col2:
-        st.header("…
+        st.header("姿勢分析結果 (Posture Analysis)")
        status_placeholder = st.empty()
-
-        uploaded_file = st.file_uploader("または画像をアップロード (Or upload image)",
-                                         type=["jpg", "png", "jpeg"])
-
-        if run and st.session_state.camera_allowed:
-            camera = cv2.VideoCapture(0)
-            if not camera.isOpened():
-                st.error("カメラエラー: デバイスに接続できません (Camera error: Device not accessible)")
-                run = False
-
-            while run:
-                ret, frame = camera.read()
-                if ret:
-                    analyzed_frame, posture_status = analyze_posture(frame)
-                    FRAME_WINDOW.image(analyzed_frame)
-                    status_placeholder.markdown(posture_status)
-                time.sleep(0.1)
-            camera.release()
-
-        elif uploaded_file:
-            file_bytes = np.frombuffer(uploaded_file.read(), np.uint8)
-            frame = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
-            analyzed_frame, posture_status = analyze_posture(frame)
-            col1.image(analyzed_frame)
-            status_placeholder.markdown(posture_status)
+        status_placeholder.markdown(st.session_state.posture_status)
 
 if __name__ == "__main__":
     main()
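
The new imports (av, streamlit_webrtc) imply dependencies the old cv2.VideoCapture version did not need. A requirements.txt consistent with this file might look like the sketch below; the exact package set, the absence of pins, and the headless OpenCV choice are assumptions, not part of this commit:

    # requirements.txt — assumed dependency set, not part of this commit
    streamlit
    streamlit-webrtc        # webrtc_streamer, WebRtcMode
    av                      # PyAV; VideoFrame conversion in the callback
    mediapipe               # mp.solutions.pose, mp.solutions.drawing_utils
    opencv-python-headless  # cv2; headless build suits server-side use
    numpy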
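A caveat on the new code path: streamlit-webrtc runs video_frame_callback on a worker thread, outside the Streamlit script run, so the writes to st.session_state and the st.error call inside it are not guaranteed to behave as they would in the main script. A common pattern is to publish results through a lock-protected container that the script thread polls on each rerun; a minimal sketch, with shared_status as an illustrative name rather than anything from this commit:

    import threading

    import av

    # Shared between the WebRTC worker thread and the Streamlit script thread.
    status_lock = threading.Lock()
    shared_status = {"text": ""}

    def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
        img = frame.to_ndarray(format="bgr24")
        analyzed_img, posture_status = analyze_posture(img)
        with status_lock:
            # Publish the result instead of touching st.session_state here.
            shared_status["text"] = posture_status
        return av.VideoFrame.from_ndarray(analyzed_img, format="bgr24")

In main(), the script thread can then read shared_status under the same lock and write it into status_placeholder.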
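The body of check_posture (new lines 51-102) is unchanged and therefore outside this diff's context. For orientation, checks of this kind usually measure geometry between normalized MediaPipe landmarks; the function below is a hypothetical illustration of that pattern, not the code in this file:

    import math

    import mediapipe as mp

    mp_pose = mp.solutions.pose

    def shoulder_tilt_degrees(landmarks, image_shape):
        """Hypothetical example: tilt of the shoulder line from horizontal."""
        h, w, _ = image_shape
        left = landmarks.landmark[mp_pose.PoseLandmark.LEFT_SHOULDER]
        right = landmarks.landmark[mp_pose.PoseLandmark.RIGHT_SHOULDER]
        # Landmark coordinates are normalized to [0, 1]; scale to pixels first.
        dy = (left.y - right.y) * h
        dx = (left.x - right.x) * w
        return abs(math.degrees(math.atan2(dy, dx)))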