ivorobyev committed
Commit 8535b44 · verified
1 Parent(s): 1543d19

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +72 -61
src/streamlit_app.py CHANGED
@@ -1,40 +1,50 @@
 import streamlit as st
+import av
 import cv2
 import numpy as np
-from PIL import Image
-import time
 import mediapipe as mp
+from streamlit_webrtc import webrtc_streamer, WebRtcMode
 
+# Initialize MediaPipe Pose
 mp_pose = mp.solutions.pose
-pose = mp_pose.Pose(
-    min_detection_confidence=0.5,
-    min_tracking_confidence=0.5,
-    model_complexity=1
-)
 mp_drawing = mp.solutions.drawing_utils
 
+# Session state initialization
+if 'camera_access' not in st.session_state:
+    st.session_state.camera_access = False
+if 'posture_status' not in st.session_state:
+    st.session_state.posture_status = "カメラを起動してください (Please enable camera)"
+if 'last_status' not in st.session_state:
+    st.session_state.last_status = ""
+
 def analyze_posture(image):
-    """Analyze posture on the image and return the image with keypoints and analysis text"""
-    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-    results = pose.process(image_rgb)
-
-    annotated_image = image.copy()
-    if results.pose_landmarks:
-        mp_drawing.draw_landmarks(
-            annotated_image,
-            results.pose_landmarks,
-            mp_pose.POSE_CONNECTIONS,
-            mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
-            mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2)
-        )
-        posture_status = check_posture(results.pose_landmarks, image.shape)
-    else:
-        posture_status = "キーポイントが検出されませんでした (Key points not detected)"
-
-    return annotated_image, posture_status
+    """Analyze posture on the image and return annotated image and status"""
+    with mp_pose.Pose(
+        min_detection_confidence=0.5,
+        min_tracking_confidence=0.5,
+        model_complexity=1
+    ) as pose:
+
+        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+        results = pose.process(image_rgb)
+
+        annotated_image = image.copy()
+        if results.pose_landmarks:
+            mp_drawing.draw_landmarks(
+                annotated_image,
+                results.pose_landmarks,
+                mp_pose.POSE_CONNECTIONS,
+                mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
+                mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2)
+            )
+            posture_status = check_posture(results.pose_landmarks, image.shape)
+        else:
+            posture_status = "キーポイントが検出されませんでした (Key points not detected)"
+
+        return annotated_image, posture_status
 
 def check_posture(landmarks, image_shape):
-    """Analyze posture and return a text report"""
+    """Analyze posture and return text report"""
     h, w, _ = image_shape
 
     # Get key points
@@ -93,54 +103,55 @@ def check_posture(landmarks, image_shape):
 
     return "\n\n".join(report)
 
+def video_frame_callback(frame):
+    """Process each video frame"""
+    img = frame.to_ndarray(format="bgr24")
+
+    try:
+        analyzed_img, posture_status = analyze_posture(img)
+        if posture_status != st.session_state.last_status:
+            st.session_state.posture_status = posture_status
+            st.session_state.last_status = posture_status
+        return av.VideoFrame.from_ndarray(analyzed_img, format="bgr24")
+    except Exception as e:
+        st.error(f"処理エラー: {str(e)} (Processing error)")
+        return av.VideoFrame.from_ndarray(img, format="bgr24")
+
 def main():
     st.set_page_config(layout="wide")
-    st.title("📷 姿勢分析アプリ (Posture Analyzer)")
+    st.title("📷 リアルタイム姿勢分析アプリ (Real-time Posture Analyzer)")
 
+    # Create columns
     col1, col2 = st.columns([2, 1])
 
     with col1:
         st.header("カメラビュー (Camera View)")
 
-        if st.button("カメラアクセスを許可 (Allow camera access)"):
-            st.session_state.camera_allowed = True
-
-        if not st.session_state.get('camera_allowed', False):
+        if not st.session_state.camera_access:
             st.warning("⚠️ カメラを使用するには許可が必要です (Camera access requires permission)")
-            st.image("demo_pose.jpg")  # Placeholder
-            run = False
+            if st.button("カメラアクセスを許可 (Allow camera access)"):
+                st.session_state.camera_access = True
+                st.rerun()
         else:
-            run = st.checkbox("カメラを起動 (Enable camera)", value=True)
-            FRAME_WINDOW = st.image([])
+            webrtc_ctx = webrtc_streamer(
+                key="posture-analysis",
+                mode=WebRtcMode.SENDRECV,
+                video_frame_callback=video_frame_callback,
+                media_stream_constraints={"video": True, "audio": False},
+                async_processing=True,
+                rtc_configuration={
+                    "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
+                }
+            )
+
+            if not webrtc_ctx.state.playing:
+                st.session_state.posture_status = "カメラが停止しました (Camera stopped)"
+                st.session_state.last_status = ""
 
     with col2:
-        st.header("姿勢分析 (Posture Analysis)")
+        st.header("姿勢分析結果 (Posture Analysis)")
         status_placeholder = st.empty()
-
-        uploaded_file = st.file_uploader("または画像をアップロード (Or upload image)",
-                                         type=["jpg", "png", "jpeg"])
-
-        if run and st.session_state.camera_allowed:
-            camera = cv2.VideoCapture(0)
-            if not camera.isOpened():
-                st.error("カメラエラー: デバイスに接続できません (Camera error: Device not accessible)")
-                run = False
-
-            while run:
-                ret, frame = camera.read()
-                if ret:
-                    analyzed_frame, posture_status = analyze_posture(frame)
-                    FRAME_WINDOW.image(analyzed_frame)
-                    status_placeholder.markdown(posture_status)
-                time.sleep(0.1)
-            camera.release()
-
-        elif uploaded_file:
-            file_bytes = np.frombuffer(uploaded_file.read(), np.uint8)
-            frame = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
-            analyzed_frame, posture_status = analyze_posture(frame)
-            col1.image(analyzed_frame)
-            status_placeholder.markdown(posture_status)
+        status_placeholder.markdown(st.session_state.posture_status)
 
 if __name__ == "__main__":
     main()
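
For reference, the callback-based streamlit-webrtc pattern this commit switches to can be tried in isolation. The sketch below is not part of the commit: the file name, the flip_callback function, and the mirror effect are made up for illustration, while the webrtc_streamer, WebRtcMode, and av calls are the same ones the new code uses. It assumes streamlit, streamlit-webrtc, av, and opencv-python are installed.

# Minimal sketch of the streamlit-webrtc callback pattern (hypothetical example,
# not from the commit). Run with: streamlit run sketch.py
import av
import cv2
from streamlit_webrtc import webrtc_streamer, WebRtcMode

def flip_callback(frame: av.VideoFrame) -> av.VideoFrame:
    # Called from a worker thread for every incoming frame; heavy per-frame
    # work (pose estimation in the app above) would go here.
    img = frame.to_ndarray(format="bgr24")
    img = cv2.flip(img, 1)  # stand-in processing: mirror the image
    return av.VideoFrame.from_ndarray(img, format="bgr24")

webrtc_streamer(
    key="sketch",
    mode=WebRtcMode.SENDRECV,
    video_frame_callback=flip_callback,
    media_stream_constraints={"video": True, "audio": False},
)

Because the callback runs outside the Streamlit script thread, any result it wants to surface on the page has to be handed back through shared state, which appears to be what the new code's st.session_state bookkeeping is for.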