Sor0ush committed on
Commit 83dfbe9 · verified · 1 Parent(s): c4e5bf3

Upload folder using huggingface_hub
.gitignore ADDED
@@ -0,0 +1,14 @@
1
+ .eggs/
2
+ dist/
3
+ *.pyc
4
+ __pycache__/
5
+ *.py[cod]
6
+ *$py.class
7
+ __tmp/*
8
+ *.pyi
9
+ .mypy_cache
10
+ .ruff_cache
11
+ node_modules
12
+ backend/**/templates/
13
+
14
+ .venv
README.md CHANGED
@@ -1,12 +1,227 @@
1
  ---
2
- title: Gradio Screenrecorder
3
- emoji: 🐢
4
- colorFrom: purple
5
- colorTo: green
 
6
  sdk: gradio
7
- sdk_version: 5.32.1
8
- app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
1
  ---
2
+ tags: [custom-component-track, gradio-custom-component, screen-recorder, PIP, picture-in-picture]
3
+ title: gradio_screenrecorder
4
+ short_description: Screen Recorder + Picture in Picture Gradio Custom Component
5
+ colorFrom: blue
6
+ colorTo: yellow
7
  sdk: gradio
 
 
8
  pinned: false
9
+ app_file: space.py
10
  ---
11
 
12
+ # `gradio_screenrecorder`
13
+ <img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.1%20-%20orange">
14
+
15
+ Screen Recorder Gradio Custom Component
16
+
17
+ ## Installation
18
+
19
+ ```bash
20
+ pip install gradio_screenrecorder
21
+ ```
22
+
23
+ ## Usage
24
+
25
+ ```python
26
+ import gradio as gr
27
+ from gradio_screenrecorder import ScreenRecorder
28
+
29
+ def handle_recording(recording_data):
30
+ """Handle recorded video data"""
31
+ print(f'Received recording data: {recording_data}')
32
+
33
+ if not recording_data or not recording_data.get('video'):
34
+ return None
35
+
36
+ try:
37
+ video_info = recording_data['video']
38
+ # Return the video path that can be used by the Video component
39
+ return video_info.get('path')
40
+ except Exception as e:
41
+ print(f'Error processing recording: {e}')
42
+ return None
43
+
44
+
45
+ css = """
46
+ .screen-recorder-demo {
47
+ max-width: 800px;
48
+ margin: 0 auto;
49
+ }
50
+ """
51
+
52
+ with gr.Blocks(css=css, title="Screen Recorder Demo") as demo:
53
+ gr.HTML("""
54
+ <h1 style='text-align: center'>
55
+ Gradio Screen Recorder Component Demo
56
+ </h1>
57
+ """)
58
+
59
+ with gr.Row():
60
+ with gr.Column():
61
+ recorder = ScreenRecorder(
62
+ audio_enabled=True,
63
+ webcam_overlay=True, # show the webcam as a picture-in-picture overlay
64
+ webcam_position="top-left",
65
+ recording_format="webm",
66
+ max_duration=60,
67
+ label="Screen Recorder"
68
+ )
69
+
70
+ with gr.Column():
71
+ output_video = gr.Video(label="Recorded Video")
72
+
73
+ # Event handler
74
+ recorder.change(
75
+ fn=handle_recording,
76
+ inputs=recorder,
77
+ outputs=output_video
78
+ )
79
+
80
+ if __name__ == "__main__":
81
+ demo.launch()
82
+
83
+ ```
84
+
85
+ ## `ScreenRecorder`
86
+
87
+ ### Initialization
88
+
89
+ <table>
90
+ <thead>
91
+ <tr>
92
+ <th align="left">name</th>
93
+ <th align="left" style="width: 25%;">type</th>
94
+ <th align="left">default</th>
95
+ <th align="left">description</th>
96
+ </tr>
97
+ </thead>
98
+ <tbody>
99
+ <tr>
100
+ <td align="left"><code>audio_enabled</code></td>
101
+ <td align="left" style="width: 25%;">
102
+
103
+ ```python
104
+ bool
105
+ ```
106
+
107
+ </td>
108
+ <td align="left"><code>True</code></td>
109
+ <td align="left">None</td>
110
+ </tr>
111
+
112
+ <tr>
113
+ <td align="left"><code>webcam_overlay</code></td>
114
+ <td align="left" style="width: 25%;">
115
+
116
+ ```python
117
+ bool
118
+ ```
119
+
120
+ </td>
121
+ <td align="left"><code>False</code></td>
122
+ <td align="left">None</td>
123
+ </tr>
124
+
125
+ <tr>
126
+ <td align="left"><code>webcam_position</code></td>
127
+ <td align="left" style="width: 25%;">
128
+
129
+ ```python
130
+ "top-left" | "top-right" | "bottom-left" | "bottom-right"
131
+ ```
132
+
133
+ </td>
134
+ <td align="left"><code>"bottom-right"</code></td>
135
+ <td align="left">None</td>
136
+ </tr>
137
+
138
+ <tr>
139
+ <td align="left"><code>recording_format</code></td>
140
+ <td align="left" style="width: 25%;">
141
+
142
+ ```python
143
+ str
144
+ ```
145
+
146
+ </td>
147
+ <td align="left"><code>"webm"</code></td>
148
+ <td align="left">None</td>
149
+ </tr>
150
+
151
+ <tr>
152
+ <td align="left"><code>max_duration</code></td>
153
+ <td align="left" style="width: 25%;">
154
+
155
+ ```python
156
+ typing.Optional[int]
157
+ ```
158
+
159
+ </td>
160
+ <td align="left"><code>None</code></td>
161
+ <td align="left">None</td>
162
+ </tr>
163
+
164
+ <tr>
165
+ <td align="left"><code>interactive</code></td>
166
+ <td align="left" style="width: 25%;">
167
+
168
+ ```python
169
+ bool
170
+ ```
171
+
172
+ </td>
173
+ <td align="left"><code>True</code></td>
174
+ <td align="left">None</td>
175
+ </tr>
176
+ </tbody></table>
177
+
178
+
179
+ ### Events
180
+
181
+ | name | description |
182
+ |:-----|:------------|
183
+ | `record_start` | |
184
+ | `record_stop` | |
185
+ | `stream_update` | |
186
+ | `change` | |
187
+
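+ The events listed above are exposed as listener methods on the component instance, following the usual Gradio event pattern. A minimal sketch (assuming `record_start` and `record_stop` map directly to `.record_start()` / `.record_stop()` listeners; the status messages are illustrative only):
+
+ ```python
+ import gradio as gr
+ from gradio_screenrecorder import ScreenRecorder
+
+ with gr.Blocks() as demo:
+     recorder = ScreenRecorder(label="Screen Recorder")
+     status = gr.Textbox(label="Status")
+
+     # Hypothetical wiring: reflect the recorder state in a textbox.
+     recorder.record_start(fn=lambda: "Recording...", outputs=status)
+     recorder.record_stop(fn=lambda: "Recording stopped.", outputs=status)
+
+ demo.launch()
+ ```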
188
+
189
+
190
+ ### User function
191
+
192
+ The impact on the user's predict function varies depending on whether the component is used as an input or output for an event (or both).
193
+
194
+ - When used as an Input, the component only impacts the input signature of the user function.
195
+ - When used as an output, the component only impacts the return signature of the user function.
196
+
197
+ The code snippet below is accurate in cases where the component is used as both an input and an output.
198
+
199
+
200
+
201
+ ```python
202
+ def predict(
203
+ value: typing.Optional[
204
+ gradio_screenrecorder.screenrecorder.ScreenRecorderData
205
+ ]
206
+ ) -> Unknown:
207
+ return value
208
+ ```
209
+
210
+
211
+ ## `ScreenRecorderData`
212
+ ```python
213
+ class ScreenRecorderData(GradioModel):
214
+ video: Optional[FileData] = None
215
+ duration: Optional[float] = None
216
+ audio_enabled: bool = True
217
+ status: Literal["recording", "stopped", "error"] = (
218
+ "stopped"
219
+ )
220
+
221
+ class Config:
222
+ json_encoders = {
223
+ FileData: lambda v: v.model_dump()
224
+ if v
225
+ else None
226
+ }
227
+ ```
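+
+ As a rough sketch of how a handler might consume this model (assuming `ScreenRecorderData` is importable from the module shown above and, as with other file-backed Gradio components, that `video.path` points to a readable local file):
+
+ ```python
+ import os
+ import shutil
+
+ from gradio_screenrecorder.screenrecorder import ScreenRecorderData
+
+ def save_recording(data: ScreenRecorderData | None) -> str | None:
+     """Copy the recorded file into ./recordings and return the new path."""
+     if data is None or data.video is None:
+         return None
+     os.makedirs("recordings", exist_ok=True)
+     target = os.path.join("recordings", data.video.orig_name or "recording.webm")
+     shutil.copy(data.video.path, target)  # assumes `path` is a local temp file
+     return target
+ ```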
__init__.py ADDED
File without changes
app.py ADDED
@@ -0,0 +1,56 @@
1
+ import gradio as gr
2
+ from gradio_screenrecorder import ScreenRecorder
3
+
4
+ def handle_recording(recording_data):
5
+ """Handle recorded video data"""
6
+ print(f'Received recording data: {recording_data}')
7
+
8
+ if not recording_data or not recording_data.get('video'):
9
+ return None
10
+
11
+ try:
12
+ video_info = recording_data['video']
13
+ # Return the video path that can be used by the Video component
14
+ return video_info.get('path')
15
+ except Exception as e:
16
+ print(f'Error processing recording: {e}')
17
+ return None
18
+
19
+
20
+ css = """
21
+ .screen-recorder-demo {
22
+ max-width: 800px;
23
+ margin: 0 auto;
24
+ }
25
+ """
26
+
27
+ with gr.Blocks(css=css, title="Screen Recorder Demo") as demo:
28
+ gr.HTML("""
29
+ <h1 style='text-align: center'>
30
+ Gradio Screen Recorder Component Demo
31
+ </h1>
32
+ """)
33
+
34
+ with gr.Row():
35
+ with gr.Column():
36
+ recorder = ScreenRecorder(
37
+ audio_enabled=True,
38
+ webcam_overlay=True, # show the webcam as a picture-in-picture overlay
39
+ webcam_position="top-left",
40
+ recording_format="webm",
41
+ max_duration=60,
42
+ label="Screen Recorder"
43
+ )
44
+
45
+ with gr.Column():
46
+ output_video = gr.Video(label="Recorded Video")
47
+
48
+ # Event handler
49
+ recorder.change(
50
+ fn=handle_recording,
51
+ inputs=recorder,
52
+ outputs=output_video
53
+ )
54
+
55
+ if __name__ == "__main__":
56
+ demo.launch()
css.css ADDED
@@ -0,0 +1,157 @@
1
+ html {
2
+ font-family: Inter;
3
+ font-size: 16px;
4
+ font-weight: 400;
5
+ line-height: 1.5;
6
+ -webkit-text-size-adjust: 100%;
7
+ background: #fff;
8
+ color: #323232;
9
+ -webkit-font-smoothing: antialiased;
10
+ -moz-osx-font-smoothing: grayscale;
11
+ text-rendering: optimizeLegibility;
12
+ }
13
+
14
+ :root {
15
+ --space: 1;
16
+ --vspace: calc(var(--space) * 1rem);
17
+ --vspace-0: calc(3 * var(--space) * 1rem);
18
+ --vspace-1: calc(2 * var(--space) * 1rem);
19
+ --vspace-2: calc(1.5 * var(--space) * 1rem);
20
+ --vspace-3: calc(0.5 * var(--space) * 1rem);
21
+ }
22
+
23
+ .app {
24
+ max-width: 748px !important;
25
+ }
26
+
27
+ .prose p {
28
+ margin: var(--vspace) 0;
29
+ line-height: calc(var(--vspace) * 2);
30
+ font-size: 1rem;
31
+ }
32
+
33
+ code {
34
+ font-family: "Inconsolata", monospace;
35
+ font-size: 16px;
36
+ }
37
+
38
+ h1,
39
+ h1 code {
40
+ font-weight: 400;
41
+ line-height: calc(2.5 / var(--space) * var(--vspace));
42
+ }
43
+
44
+ h1 code {
45
+ background: none;
46
+ border: none;
47
+ letter-spacing: 0.05em;
48
+ padding-bottom: 5px;
49
+ position: relative;
50
+ padding: 0;
51
+ }
52
+
53
+ h2 {
54
+ margin: var(--vspace-1) 0 var(--vspace-2) 0;
55
+ line-height: 1em;
56
+ }
57
+
58
+ h3,
59
+ h3 code {
60
+ margin: var(--vspace-1) 0 var(--vspace-2) 0;
61
+ line-height: 1em;
62
+ }
63
+
64
+ h4,
65
+ h5,
66
+ h6 {
67
+ margin: var(--vspace-3) 0 var(--vspace-3) 0;
68
+ line-height: var(--vspace);
69
+ }
70
+
71
+ .bigtitle,
72
+ h1,
73
+ h1 code {
74
+ font-size: calc(8px * 4.5);
75
+ word-break: break-word;
76
+ }
77
+
78
+ .title,
79
+ h2,
80
+ h2 code {
81
+ font-size: calc(8px * 3.375);
82
+ font-weight: lighter;
83
+ word-break: break-word;
84
+ border: none;
85
+ background: none;
86
+ }
87
+
88
+ .subheading1,
89
+ h3,
90
+ h3 code {
91
+ font-size: calc(8px * 1.8);
92
+ font-weight: 600;
93
+ border: none;
94
+ background: none;
95
+ letter-spacing: 0.1em;
96
+ text-transform: uppercase;
97
+ }
98
+
99
+ h2 code {
100
+ padding: 0;
101
+ position: relative;
102
+ letter-spacing: 0.05em;
103
+ }
104
+
105
+ blockquote {
106
+ font-size: calc(8px * 1.1667);
107
+ font-style: italic;
108
+ line-height: calc(1.1667 * var(--vspace));
109
+ margin: var(--vspace-2) var(--vspace-2);
110
+ }
111
+
112
+ .subheading2,
113
+ h4 {
114
+ font-size: calc(8px * 1.4292);
115
+ text-transform: uppercase;
116
+ font-weight: 600;
117
+ }
118
+
119
+ .subheading3,
120
+ h5 {
121
+ font-size: calc(8px * 1.2917);
122
+ line-height: calc(1.2917 * var(--vspace));
123
+
124
+ font-weight: lighter;
125
+ text-transform: uppercase;
126
+ letter-spacing: 0.15em;
127
+ }
128
+
129
+ h6 {
130
+ font-size: calc(8px * 1.1667);
131
+ font-size: 1.1667em;
132
+ font-weight: normal;
133
+ font-style: italic;
134
+ font-family: "le-monde-livre-classic-byol", serif !important;
135
+ letter-spacing: 0px !important;
136
+ }
137
+
138
+ #start .md > *:first-child {
139
+ margin-top: 0;
140
+ }
141
+
142
+ h2 + h3 {
143
+ margin-top: 0;
144
+ }
145
+
146
+ .md hr {
147
+ border: none;
148
+ border-top: 1px solid var(--block-border-color);
149
+ margin: var(--vspace-2) 0 var(--vspace-2) 0;
150
+ }
151
+ .prose ul {
152
+ margin: var(--vspace-2) 0 var(--vspace-1) 0;
153
+ }
154
+
155
+ .gap {
156
+ gap: 0;
157
+ }
requirements.txt ADDED
@@ -0,0 +1 @@
1
+ gradio_screenrecorder
space.py ADDED
@@ -0,0 +1,199 @@
1
+
2
+ import gradio as gr
3
+ from app import demo as app
4
+ import os
5
+
6
+ _docs = {'ScreenRecorder': {'description': 'Custom Gradio component for comprehensive screen recording functionality.', 'members': {'__init__': {'audio_enabled': {'type': 'bool', 'default': 'True', 'description': None}, 'webcam_overlay': {'type': 'bool', 'default': 'False', 'description': None}, 'webcam_position': {'type': '"top-left" | "top-right" | "bottom-left" | "bottom-right"', 'default': '"bottom-right"', 'description': None}, 'recording_format': {'type': 'str', 'default': '"webm"', 'description': None}, 'max_duration': {'type': 'typing.Optional[int][int, None]', 'default': 'None', 'description': None}, 'interactive': {'type': 'bool', 'default': 'True', 'description': None}}, 'postprocess': {}, 'preprocess': {'return': {'type': 'typing.Optional[\n gradio_screenrecorder.screenrecorder.ScreenRecorderData\n][ScreenRecorderData, None]', 'description': None}, 'value': None}}, 'events': {'record_start': {'type': None, 'default': None, 'description': ''}, 'record_stop': {'type': None, 'default': None, 'description': ''}, 'stream_update': {'type': None, 'default': None, 'description': ''}, 'change': {'type': None, 'default': None, 'description': ''}}}, '__meta__': {'additional_interfaces': {'ScreenRecorderData': {'source': 'class ScreenRecorderData(GradioModel):\n video: Optional[FileData] = None\n duration: Optional[float] = None\n audio_enabled: bool = True\n status: Literal["recording", "stopped", "error"] = (\n "stopped"\n )\n\n class Config:\n json_encoders = {\n FileData: lambda v: v.model_dump()\n if v\n else None\n }'}}, 'user_fn_refs': {'ScreenRecorder': ['ScreenRecorderData']}}}
7
+
8
+ abs_path = os.path.join(os.path.dirname(__file__), "css.css")
9
+
10
+ with gr.Blocks(
11
+ css=abs_path,
12
+ theme=gr.themes.Default(
13
+ font_mono=[
14
+ gr.themes.GoogleFont("Inconsolata"),
15
+ "monospace",
16
+ ],
17
+ ),
18
+ ) as demo:
19
+ gr.Markdown(
20
+ """
21
+ # `gradio_screenrecorder`
22
+
23
+ <div style="display: flex; gap: 7px;">
24
+ <img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.1%20-%20orange">
25
+ </div>
26
+
27
+ Screen Recorder Gradio Custom Component
28
+ """, elem_classes=["md-custom"], header_links=True)
29
+ app.render()
30
+ gr.Markdown(
31
+ """
32
+ ## Installation
33
+
34
+ ```bash
35
+ pip install gradio_screenrecorder
36
+ ```
37
+
38
+ ## Usage
39
+
40
+ ```python
41
+ import gradio as gr
42
+ from gradio_screenrecorder import ScreenRecorder
43
+
44
+ def handle_recording(recording_data):
45
+ \"\"\"Handle recorded video data\"\"\"
46
+ print(f'Received recording data: {recording_data}')
47
+
48
+ if not recording_data or not recording_data.get('video'):
49
+ return None
50
+
51
+ try:
52
+ video_info = recording_data['video']
53
+ # Return the video path that can be used by the Video component
54
+ return video_info.get('path')
55
+ except Exception as e:
56
+ print(f'Error processing recording: {e}')
57
+ return None
58
+
59
+
60
+ css = \"\"\"
61
+ .screen-recorder-demo {
62
+ max-width: 800px;
63
+ margin: 0 auto;
64
+ }
65
+ \"\"\"
66
+
67
+ with gr.Blocks(css=css, title="Screen Recorder Demo") as demo:
68
+ gr.HTML(\"\"\"
69
+ <h1 style='text-align: center'>
70
+ Gradio Screen Recorder Component Demo
71
+ </h1>
72
+ \"\"\")
73
+
74
+ with gr.Row():
75
+ with gr.Column():
76
+ recorder = ScreenRecorder(
77
+ audio_enabled=True,
78
+ webcam_overlay=True, # show the webcam as a picture-in-picture overlay
79
+ webcam_position="top-left",
80
+ recording_format="webm",
81
+ max_duration=60,
82
+ label="Screen Recorder"
83
+ )
84
+
85
+ with gr.Column():
86
+ output_video = gr.Video(label="Recorded Video")
87
+
88
+ # Event handler
89
+ recorder.change(
90
+ fn=handle_recording,
91
+ inputs=recorder,
92
+ outputs=output_video
93
+ )
94
+
95
+ if __name__ == "__main__":
96
+ demo.launch()
97
+
98
+ ```
99
+ """, elem_classes=["md-custom"], header_links=True)
100
+
101
+
102
+ gr.Markdown("""
103
+ ## `ScreenRecorder`
104
+
105
+ ### Initialization
106
+ """, elem_classes=["md-custom"], header_links=True)
107
+
108
+ gr.ParamViewer(value=_docs["ScreenRecorder"]["members"]["__init__"], linkify=['ScreenRecorderData'])
109
+
110
+
111
+ gr.Markdown("### Events")
112
+ gr.ParamViewer(value=_docs["ScreenRecorder"]["events"], linkify=['Event'])
113
+
114
+
115
+
116
+
117
+ gr.Markdown("""
118
+
119
+ ### User function
120
+
121
+ The impact on the user's predict function varies depending on whether the component is used as an input or output for an event (or both).
122
+
123
+ - When used as an Input, the component only impacts the input signature of the user function.
124
+ - When used as an output, the component only impacts the return signature of the user function.
125
+
126
+ The code snippet below is accurate in cases where the component is used as both an input and an output.
127
+
128
+
129
+
130
+ ```python
131
+ def predict(
132
+ value: typing.Optional[
133
+ gradio_screenrecorder.screenrecorder.ScreenRecorderData
134
+ ]
135
+ ) -> Unknown:
136
+ return value
137
+ ```
138
+ """, elem_classes=["md-custom", "ScreenRecorder-user-fn"], header_links=True)
139
+
140
+
141
+
142
+
143
+ code_ScreenRecorderData = gr.Markdown("""
144
+ ## `ScreenRecorderData`
145
+ ```python
146
+ class ScreenRecorderData(GradioModel):
147
+ video: Optional[FileData] = None
148
+ duration: Optional[float] = None
149
+ audio_enabled: bool = True
150
+ status: Literal["recording", "stopped", "error"] = (
151
+ "stopped"
152
+ )
153
+
154
+ class Config:
155
+ json_encoders = {
156
+ FileData: lambda v: v.model_dump()
157
+ if v
158
+ else None
159
+ }
160
+ ```""", elem_classes=["md-custom", "ScreenRecorderData"], header_links=True)
161
+
162
+ demo.load(None, js=r"""function() {
163
+ const refs = {
164
+ ScreenRecorderData: [], };
165
+ const user_fn_refs = {
166
+ ScreenRecorder: ['ScreenRecorderData'], };
167
+ requestAnimationFrame(() => {
168
+
169
+ Object.entries(user_fn_refs).forEach(([key, refs]) => {
170
+ if (refs.length > 0) {
171
+ const el = document.querySelector(`.${key}-user-fn`);
172
+ if (!el) return;
173
+ refs.forEach(ref => {
174
+ el.innerHTML = el.innerHTML.replace(
175
+ new RegExp("\\b"+ref+"\\b", "g"),
176
+ `<a href="#h-${ref.toLowerCase()}">${ref}</a>`
177
+ );
178
+ })
179
+ }
180
+ })
181
+
182
+ Object.entries(refs).forEach(([key, refs]) => {
183
+ if (refs.length > 0) {
184
+ const el = document.querySelector(`.${key}`);
185
+ if (!el) return;
186
+ refs.forEach(ref => {
187
+ el.innerHTML = el.innerHTML.replace(
188
+ new RegExp("\\b"+ref+"\\b", "g"),
189
+ `<a href="#h-${ref.toLowerCase()}">${ref}</a>`
190
+ );
191
+ })
192
+ }
193
+ })
194
+ })
195
+ }
196
+
197
+ """)
198
+
199
+ demo.launch()
src/.gitignore ADDED
@@ -0,0 +1,14 @@
1
+ .eggs/
2
+ dist/
3
+ *.pyc
4
+ __pycache__/
5
+ *.py[cod]
6
+ *$py.class
7
+ __tmp/*
8
+ *.pyi
9
+ .mypy_cache
10
+ .ruff_cache
11
+ node_modules
12
+ backend/**/templates/
13
+
14
+ .venv
src/.python-version ADDED
@@ -0,0 +1 @@
1
+ 3.13
src/README.md ADDED
@@ -0,0 +1,227 @@
1
+ ---
2
+ tags: [custom-component-track, gradio-custom-component, screen-recorder, PIP, picture-in-picture]
3
+ title: gradio_screenrecorder
4
+ short_description: Screen Recorder + Picture in Picture Gradio Custom Component
5
+ colorFrom: blue
6
+ colorTo: yellow
7
+ sdk: gradio
8
+ pinned: false
9
+ app_file: space.py
10
+ ---
11
+
12
+ # `gradio_screenrecorder`
13
+ <img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.1%20-%20orange">
14
+
15
+ Screen Recorder Gradio Custom Component
16
+
17
+ ## Installation
18
+
19
+ ```bash
20
+ pip install gradio_screenrecorder
21
+ ```
22
+
23
+ ## Usage
24
+
25
+ ```python
26
+ import gradio as gr
27
+ from gradio_screenrecorder import ScreenRecorder
28
+
29
+ def handle_recording(recording_data):
30
+ """Handle recorded video data"""
31
+ print(f'Received recording data: {recording_data}')
32
+
33
+ if not recording_data or not recording_data.get('video'):
34
+ return None
35
+
36
+ try:
37
+ video_info = recording_data['video']
38
+ # Return the video path that can be used by the Video component
39
+ return video_info.get('path')
40
+ except Exception as e:
41
+ print(f'Error processing recording: {e}')
42
+ return None
43
+
44
+
45
+ css = """
46
+ .screen-recorder-demo {
47
+ max-width: 800px;
48
+ margin: 0 auto;
49
+ }
50
+ """
51
+
52
+ with gr.Blocks(css=css, title="Screen Recorder Demo") as demo:
53
+ gr.HTML("""
54
+ <h1 style='text-align: center'>
55
+ Gradio Screen Recorder Component Demo
56
+ </h1>
57
+ """)
58
+
59
+ with gr.Row():
60
+ with gr.Column():
61
+ recorder = ScreenRecorder(
62
+ audio_enabled=True,
63
+ webcam_overlay=True, # show the webcam as a picture-in-picture overlay
64
+ webcam_position="top-left",
65
+ recording_format="webm",
66
+ max_duration=60,
67
+ label="Screen Recorder"
68
+ )
69
+
70
+ with gr.Column():
71
+ output_video = gr.Video(label="Recorded Video")
72
+
73
+ # Event handler
74
+ recorder.change(
75
+ fn=handle_recording,
76
+ inputs=recorder,
77
+ outputs=output_video
78
+ )
79
+
80
+ if __name__ == "__main__":
81
+ demo.launch()
82
+
83
+ ```
84
+
85
+ ## `ScreenRecorder`
86
+
87
+ ### Initialization
88
+
89
+ <table>
90
+ <thead>
91
+ <tr>
92
+ <th align="left">name</th>
93
+ <th align="left" style="width: 25%;">type</th>
94
+ <th align="left">default</th>
95
+ <th align="left">description</th>
96
+ </tr>
97
+ </thead>
98
+ <tbody>
99
+ <tr>
100
+ <td align="left"><code>audio_enabled</code></td>
101
+ <td align="left" style="width: 25%;">
102
+
103
+ ```python
104
+ bool
105
+ ```
106
+
107
+ </td>
108
+ <td align="left"><code>True</code></td>
109
+ <td align="left">None</td>
110
+ </tr>
111
+
112
+ <tr>
113
+ <td align="left"><code>webcam_overlay</code></td>
114
+ <td align="left" style="width: 25%;">
115
+
116
+ ```python
117
+ bool
118
+ ```
119
+
120
+ </td>
121
+ <td align="left"><code>False</code></td>
122
+ <td align="left">None</td>
123
+ </tr>
124
+
125
+ <tr>
126
+ <td align="left"><code>webcam_position</code></td>
127
+ <td align="left" style="width: 25%;">
128
+
129
+ ```python
130
+ "top-left" | "top-right" | "bottom-left" | "bottom-right"
131
+ ```
132
+
133
+ </td>
134
+ <td align="left"><code>"bottom-right"</code></td>
135
+ <td align="left">None</td>
136
+ </tr>
137
+
138
+ <tr>
139
+ <td align="left"><code>recording_format</code></td>
140
+ <td align="left" style="width: 25%;">
141
+
142
+ ```python
143
+ str
144
+ ```
145
+
146
+ </td>
147
+ <td align="left"><code>"webm"</code></td>
148
+ <td align="left">None</td>
149
+ </tr>
150
+
151
+ <tr>
152
+ <td align="left"><code>max_duration</code></td>
153
+ <td align="left" style="width: 25%;">
154
+
155
+ ```python
156
+ typing.Optional[int]
157
+ ```
158
+
159
+ </td>
160
+ <td align="left"><code>None</code></td>
161
+ <td align="left">None</td>
162
+ </tr>
163
+
164
+ <tr>
165
+ <td align="left"><code>interactive</code></td>
166
+ <td align="left" style="width: 25%;">
167
+
168
+ ```python
169
+ bool
170
+ ```
171
+
172
+ </td>
173
+ <td align="left"><code>True</code></td>
174
+ <td align="left">None</td>
175
+ </tr>
176
+ </tbody></table>
177
+
178
+
179
+ ### Events
180
+
181
+ | name | description |
182
+ |:-----|:------------|
183
+ | `record_start` | |
184
+ | `record_stop` | |
185
+ | `stream_update` | |
186
+ | `change` | |
187
+
188
+
189
+
190
+ ### User function
191
+
192
+ The impact on the user's predict function varies depending on whether the component is used as an input or output for an event (or both).
193
+
194
+ - When used as an Input, the component only impacts the input signature of the user function.
195
+ - When used as an output, the component only impacts the return signature of the user function.
196
+
197
+ The code snippet below is accurate in cases where the component is used as both an input and an output.
198
+
199
+
200
+
201
+ ```python
202
+ def predict(
203
+ value: typing.Optional[
204
+ gradio_screenrecorder.screenrecorder.ScreenRecorderData
205
+ ]
206
+ ) -> Unknown:
207
+ return value
208
+ ```
209
+
210
+
211
+ ## `ScreenRecorderData`
212
+ ```python
213
+ class ScreenRecorderData(GradioModel):
214
+ video: Optional[FileData] = None
215
+ duration: Optional[float] = None
216
+ audio_enabled: bool = True
217
+ status: Literal["recording", "stopped", "error"] = (
218
+ "stopped"
219
+ )
220
+
221
+ class Config:
222
+ json_encoders = {
223
+ FileData: lambda v: v.model_dump()
224
+ if v
225
+ else None
226
+ }
227
+ ```
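+
+ For reference, a handler can also be typed against this model directly. A small sketch, using only the fields declared above (the formatting is illustrative, and the import path assumes the module layout shown in this repo):
+
+ ```python
+ from gradio_screenrecorder.screenrecorder import ScreenRecorderData
+
+ def describe(data: ScreenRecorderData | None) -> str:
+     """Summarize a recording, e.g. for display in a gr.Textbox."""
+     if data is None or data.video is None:
+         return "No recording yet."
+     duration = f"{data.duration:.1f}s" if data.duration else "unknown duration"
+     audio = "with audio" if data.audio_enabled else "without audio"
+     name = data.video.orig_name or "recording"
+     return f"{name} ({duration}, {audio}, status={data.status})"
+ ```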
src/backend/gradio_screenrecorder/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+
2
+ from .screenrecorder import ScreenRecorder
3
+
4
+ __all__ = ['ScreenRecorder']
src/backend/gradio_screenrecorder/screenrecorder.py ADDED
@@ -0,0 +1,272 @@
1
+ import gradio as gr
2
+ from gradio.components.base import Component
3
+ from gradio.data_classes import FileData, GradioModel
4
+ from typing import Optional, Literal, Any
5
+ import tempfile
6
+ import os
7
+ import json
8
+
9
+ class ScreenRecorderData(GradioModel):
10
+ video: Optional[FileData] = None
11
+ duration: Optional[float] = None
12
+ audio_enabled: bool = True
13
+ status: Literal["recording", "stopped", "error"] = "stopped"
14
+
15
+ class Config:
16
+ json_encoders = {
17
+ FileData: lambda v: v.model_dump() if v else None
18
+ }
19
+
20
+
21
+ class ScreenRecorder(Component):
22
+ """
23
+ Custom Gradio component for comprehensive screen recording functionality.
24
+ """
25
+
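+ # data_model defines the schema Gradio uses to serialize/deserialize this component's value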
26
+ data_model = ScreenRecorderData
27
+
28
+ EVENTS = [
29
+ "record_start",
30
+ "record_stop",
31
+ "stream_update",
32
+ "change"
33
+ ]
34
+
35
+ def __init__(
36
+ self,
37
+ value=None,
38
+ audio_enabled: bool = True,
39
+ webcam_overlay: bool = False,
40
+ webcam_position: Literal["top-left", "top-right", "bottom-left", "bottom-right"] = "bottom-right",
41
+ recording_format: str = "webm",
42
+ max_duration: Optional[int] = None,
43
+ interactive: bool = True,
44
+ **kwargs
45
+ ):
46
+ self.audio_enabled = audio_enabled
47
+ self.webcam_overlay = webcam_overlay
48
+ self.webcam_position = webcam_position
49
+ self.recording_format = recording_format
50
+ self.max_duration = max_duration
51
+ self._status = "stopped"
52
+
53
+ super().__init__(
54
+ value=value,
55
+ interactive=interactive,
56
+ **kwargs
57
+ )
58
+
59
+ def example_payload(self) -> dict:
60
+ """
61
+ The example inputs for this component for API usage. Must be JSON-serializable.
62
+ """
63
+ return {
64
+ "video": {
65
+ "path": "https://sample-videos.com/zip/10/mp4/SampleVideo_360x240_1mb.mp4",
66
+ "orig_name": "example_recording.webm",
67
+ "size": 1024000
68
+ },
69
+ "duration": 30.5,
70
+ "audio_enabled": True,
71
+ "status": "stopped"
72
+ }
73
+
74
+ def example_value(self) -> ScreenRecorderData:
75
+ """
76
+ An example value for this component for the default app.
77
+ """
78
+ return ScreenRecorderData(
79
+ video=FileData(
80
+ path="https://sample-videos.com/zip/10/mp4/SampleVideo_360x240_1mb.mp4",
81
+ orig_name="example_recording.webm",
82
+ size=1024000
83
+ ),
84
+ duration=30.5,
85
+ audio_enabled=True,
86
+ status="stopped"
87
+ )
88
+
89
+ def flag(self, x, flag_dir: str = "") -> str:
90
+ """
91
+ Write the component's value to a format for flagging (CSV storage).
92
+ """
93
+ if x is None:
94
+ return ""
95
+
96
+ if isinstance(x, ScreenRecorderData) and x.video:
97
+ return f"Recording: {x.video.orig_name} ({x.duration}s) - Status: {x.status}"
98
+
99
+ if isinstance(x, dict) and "video" in x:
100
+ duration = x.get("duration", "unknown")
101
+ status = x.get("status", "unknown")
102
+ video_name = x["video"].get("orig_name", "unknown") if x["video"] else "none"
103
+ return f"Recording: {video_name} ({duration}s) - Status: {status}"
104
+
105
+ return str(x)
106
+
107
+ def preprocess(self, payload) -> Optional[ScreenRecorderData]:
108
+ """Process incoming recording data from frontend."""
109
+ if payload is None:
110
+ return None
111
+
112
+ if isinstance(payload, dict):
113
+ if payload.get("status") == "error": # Early exit for errors from frontend
114
+ raise gr.Error(f"Recording failed on frontend: {payload.get('error', 'Unknown error')}")
115
+
116
+ # If 'video' field is a string, assume it's JSON and parse it.
117
+ if "video" in payload and isinstance(payload["video"], str):
118
+ try:
119
+ video_json_string = payload["video"]
120
+ if video_json_string.strip().startswith("{") and video_json_string.strip().endswith("}"):
121
+ payload["video"] = json.loads(video_json_string)
122
+ # If it's a string but not our expected JSON (e.g. 'null', or empty string, or simple path)
123
+ # json.loads would fail or Pydantic validation later will catch it if structure is wrong.
124
+ # For 'null' string, json.loads results in None for payload["video"].
125
+ elif video_json_string.lower() == 'null':
126
+ payload["video"] = None
127
+ else:
128
+ # This case implies a string that isn't a JSON object or 'null',
129
+ # e.g. a direct file path string, which FileData might not directly accept
130
+ # if it expects a dict. Pydantic will raise error later if type is incompatible.
131
+ gr.Warning(f"Video data is a string but not a recognized JSON object or 'null': {video_json_string[:100]}")
132
+ # To be safe, if it's not a JSON object string, we might want to error or handle specifically
133
+ # For now, let Pydantic try to handle it or fail.
134
+
135
+ except json.JSONDecodeError:
136
+ raise gr.Error(f"Invalid JSON for video data: {payload['video'][:100]}")
137
+
138
+ # --- Validations from here ---
139
+ video_data = payload.get("video") # Use .get() for safety, as 'video' might be absent or None
140
+
141
+ if video_data is not None: # Only validate video_data if it exists
142
+ if not isinstance(video_data, dict):
143
+ # This can happen if payload["video"] was a string like "some_path.webm" and not parsed to dict
144
+ # Or if it was parsed to something unexpected.
145
+ raise gr.Error(f"Video data is not a dictionary after processing: {type(video_data)}. Value: {str(video_data)[:100]}")
146
+
147
+ if video_data.get("size", 0) == 0:
148
+ gr.Warning("Received recording with zero size. This might be an empty recording or an issue with data capture.")
149
+ # Depending on requirements, could raise gr.Error here.
150
+
151
+ max_size = 500 * 1024 * 1024 # 500MB
152
+ if video_data.get("size", 0) > max_size:
153
+ raise gr.Error(f"Recording file too large ({video_data.get('size', 0)} bytes). Maximum allowed: {max_size} bytes.")
154
+ # If video_data is None (e.g. 'video': null was sent, or 'video' key missing),
155
+ # ScreenRecorderData will have video=None, which is allowed by Optional[FileData].
156
+
157
+ duration = payload.get("duration") or 0  # treat a missing or None duration as 0
158
+ if duration <= 0 and video_data is not None: # Only warn about duration if there's video data
159
+ gr.Warning("Recording duration is 0 or invalid. The recording might be corrupted.")
160
+
161
+ try:
162
+ return ScreenRecorderData(**payload)
163
+ except Exception as e: # Catch Pydantic validation errors or other issues during model instantiation
164
+ # Log the payload for easier debugging if there's a Pydantic error
165
+ # Be careful with logging sensitive data in production.
166
+ # print(f"Error creating ScreenRecorderData. Payload: {payload}")
167
+ raise gr.Error(f"Error creating ScreenRecorderData from payload: {e}")
168
+
169
+ elif isinstance(payload, ScreenRecorderData): # If it's already the correct type
170
+ return payload
171
+
172
+ gr.Warning(f"Unexpected payload format: {type(payload)}. Payload: {str(payload)[:200]}")
173
+ return None
174
+
175
+ # def postprocess(self, value) -> Optional[dict]:
176
+ # """Process outgoing data to frontend."""
177
+ # if value is None:
178
+ # return {"status": "stopped"} # Ensure valid empty state
179
+
180
+ # try:
181
+ # if isinstance(value, ScreenRecorderData):
182
+ # return value.model_dump()
183
+ # elif isinstance(value, dict):
184
+ # return value
185
+ # return None
186
+ # except Exception as e:
187
+ # return {"status": "error", "error": str(e)}
188
+
189
+
190
+ def postprocess(self, value) -> Optional[dict]:
191
+ """Process outgoing data to frontend."""
192
+ print(f'value in postprocess: {value}')
193
+ if value is None:
194
+ return None
195
+
196
+ try:
197
+ # If it's already a dict, return as is
198
+ if isinstance(value, dict):
199
+ return value
200
+
201
+ # If it's a ScreenRecorderData object, convert to dict
202
+ if hasattr(value, 'model_dump'):
203
+ return value.model_dump()
204
+
205
+ # Handle string values
206
+ if isinstance(value, str):
207
+ return {"video": {"path": value}}
208
+
209
+ return None
210
+
211
+ except Exception as e:
212
+ print(f'Error in postprocess: {e}')
213
+ return None
214
+
215
+
216
+ # try:
217
+ # if isinstance(value, ScreenRecorderData):
218
+ # # Ensure video data exists before sending
219
+ # if not value.video:
220
+ # return {"status": "error", "error": "No video recorded"}
221
+
222
+ # return {
223
+ # "video": value.video,
224
+ # "duration": value.duration,
225
+ # "audio_enabled": value.audio_enabled,
226
+ # "status": value.status
227
+ # }
228
+
229
+ # # Handle raw dict format from frontend
230
+ # if isinstance(value, dict):
231
+ # return {
232
+ # "video": FileData(**value.get("video", {})),
233
+ # "duration": value.get("duration"),
234
+ # "audio_enabled": value.get("audio_enabled", True),
235
+ # "status": value.get("status", "stopped")
236
+ # }
237
+
238
+ # except Exception as e:
239
+ # return {"status": "error", "error": str(e)}
240
+
241
+ # return {"status": "stopped"}
242
+
243
+ def as_example(self, input_data):
244
+ """Handle example data display."""
245
+ if input_data is None:
246
+ return None
247
+
248
+ if isinstance(input_data, (ScreenRecorderData, dict)):
249
+ return input_data
250
+
251
+ # Convert simple video path to proper format
252
+ if isinstance(input_data, str):
253
+ return {
254
+ "video": {
255
+ "path": input_data,
256
+ "orig_name": os.path.basename(input_data),
257
+ "size": 0
258
+ },
259
+ "duration": None,
260
+ "audio_enabled": self.audio_enabled,
261
+ "status": "stopped"
262
+ }
263
+
264
+ return input_data
265
+
266
+ def update_status(self, status: Literal["recording", "stopped", "error"]):
267
+ """Update the internal status of the recorder."""
268
+ self._status = status
269
+
270
+ def get_status(self) -> str:
271
+ """Get the current status of the recorder."""
272
+ return self._status
src/backend/gradio_screenrecorder/templates/component/index.js ADDED
The diff for this file is too large to render. See raw diff
 
src/backend/gradio_screenrecorder/templates/component/style.css ADDED
The diff for this file is too large to render. See raw diff
 
src/backend/gradio_screenrecorder/templates/example/index.js ADDED
@@ -0,0 +1,149 @@
1
+ const {
2
+ SvelteComponent: U,
3
+ append_hydration: d,
4
+ attr: u,
5
+ children: p,
6
+ claim_element: _,
7
+ claim_space: O,
8
+ claim_text: R,
9
+ detach: c,
10
+ element: f,
11
+ get_svelte_dataset: $,
12
+ init: j,
13
+ insert_hydration: y,
14
+ noop: V,
15
+ safe_not_equal: z,
16
+ set_data: x,
17
+ set_style: I,
18
+ space: A,
19
+ src_url_equal: M,
20
+ text: N
21
+ } = window.__gradio__svelte__internal;
22
+ function B(r) {
23
+ let e, t = "📹 Screen Recording";
24
+ return {
25
+ c() {
26
+ e = f("div"), e.textContent = t, this.h();
27
+ },
28
+ l(a) {
29
+ e = _(a, "DIV", { class: !0, "data-svelte-h": !0 }), $(e) !== "svelte-1bxe4bf" && (e.textContent = t), this.h();
30
+ },
31
+ h() {
32
+ u(e, "class", "placeholder svelte-118e1ql");
33
+ },
34
+ m(a, s) {
35
+ y(a, e, s);
36
+ },
37
+ p: V,
38
+ d(a) {
39
+ a && c(e);
40
+ }
41
+ };
42
+ }
43
+ function F(r) {
44
+ var C, S;
45
+ let e, t, a, s, l, n, m = (
46
+ /*value*/
47
+ (r[0].duration ? P(
48
+ /*value*/
49
+ r[0].duration
50
+ ) : "Recording") + ""
51
+ ), E, D, h, g = (
52
+ /*value*/
53
+ (((S = (C = r[0].video.orig_name) == null ? void 0 : C.split(".").pop()) == null ? void 0 : S.toUpperCase()) || "VIDEO") + ""
54
+ ), b;
55
+ return {
56
+ c() {
57
+ e = f("div"), t = f("video"), s = A(), l = f("div"), n = f("span"), E = N(m), D = A(), h = f("span"), b = N(g), this.h();
58
+ },
59
+ l(i) {
60
+ e = _(i, "DIV", { class: !0 });
61
+ var o = p(e);
62
+ t = _(o, "VIDEO", { src: !0, style: !0 });
63
+ var q = p(t);
64
+ q.forEach(c), s = O(o), l = _(o, "DIV", { class: !0 });
65
+ var v = p(l);
66
+ n = _(v, "SPAN", { class: !0 });
67
+ var k = p(n);
68
+ E = R(k, m), k.forEach(c), D = O(v), h = _(v, "SPAN", { class: !0 });
69
+ var w = p(h);
70
+ b = R(w, g), w.forEach(c), v.forEach(c), o.forEach(c), this.h();
71
+ },
72
+ h() {
73
+ M(t.src, a = /*value*/
74
+ r[0].video.path) || u(t, "src", a), t.controls = !1, t.muted = !0, I(t, "width", "100%"), I(t, "height", "60px"), I(t, "object-fit", "cover"), u(n, "class", "duration svelte-118e1ql"), u(h, "class", "format svelte-118e1ql"), u(l, "class", "overlay svelte-118e1ql"), u(e, "class", "video-thumbnail svelte-118e1ql");
75
+ },
76
+ m(i, o) {
77
+ y(i, e, o), d(e, t), d(e, s), d(e, l), d(l, n), d(n, E), d(l, D), d(l, h), d(h, b);
78
+ },
79
+ p(i, o) {
80
+ var q, v;
81
+ o & /*value*/
82
+ 1 && !M(t.src, a = /*value*/
83
+ i[0].video.path) && u(t, "src", a), o & /*value*/
84
+ 1 && m !== (m = /*value*/
85
+ (i[0].duration ? P(
86
+ /*value*/
87
+ i[0].duration
88
+ ) : "Recording") + "") && x(E, m), o & /*value*/
89
+ 1 && g !== (g = /*value*/
90
+ (((v = (q = i[0].video.orig_name) == null ? void 0 : q.split(".").pop()) == null ? void 0 : v.toUpperCase()) || "VIDEO") + "") && x(b, g);
91
+ },
92
+ d(i) {
93
+ i && c(e);
94
+ }
95
+ };
96
+ }
97
+ function G(r) {
98
+ let e;
99
+ function t(l, n) {
100
+ return (
101
+ /*value*/
102
+ l[0] && /*value*/
103
+ l[0].video ? F : B
104
+ );
105
+ }
106
+ let a = t(r), s = a(r);
107
+ return {
108
+ c() {
109
+ e = f("div"), s.c(), this.h();
110
+ },
111
+ l(l) {
112
+ e = _(l, "DIV", { class: !0 });
113
+ var n = p(e);
114
+ s.l(n), n.forEach(c), this.h();
115
+ },
116
+ h() {
117
+ u(e, "class", "example-container svelte-118e1ql");
118
+ },
119
+ m(l, n) {
120
+ y(l, e, n), s.m(e, null);
121
+ },
122
+ p(l, [n]) {
123
+ a === (a = t(l)) && s ? s.p(l, n) : (s.d(1), s = a(l), s && (s.c(), s.m(e, null)));
124
+ },
125
+ i: V,
126
+ o: V,
127
+ d(l) {
128
+ l && c(e), s.d();
129
+ }
130
+ };
131
+ }
132
+ function P(r) {
133
+ const e = Math.floor(r / 60), t = Math.floor(r % 60);
134
+ return `${e}:${t.toString().padStart(2, "0")}`;
135
+ }
136
+ function H(r, e, t) {
137
+ let { value: a } = e;
138
+ return r.$$set = (s) => {
139
+ "value" in s && t(0, a = s.value);
140
+ }, [a];
141
+ }
142
+ class J extends U {
143
+ constructor(e) {
144
+ super(), j(this, e, H, G, z, { value: 0 });
145
+ }
146
+ }
147
+ export {
148
+ J as default
149
+ };
src/backend/gradio_screenrecorder/templates/example/style.css ADDED
@@ -0,0 +1 @@
1
+ .example-container.svelte-118e1ql{width:100%;height:80px;border-radius:4px;overflow:hidden;position:relative}.video-thumbnail.svelte-118e1ql{position:relative;width:100%;height:100%}.overlay.svelte-118e1ql{position:absolute;bottom:0;left:0;right:0;background:linear-gradient(transparent,#000000b3);padding:4px 8px;display:flex;justify-content:space-between;align-items:flex-end}.duration.svelte-118e1ql,.format.svelte-118e1ql{color:#fff;font-size:10px;font-weight:700}.placeholder.svelte-118e1ql{display:flex;align-items:center;justify-content:center;width:100%;height:100%;background:#f0f0f0;color:#666;font-size:12px}
src/demo/__init__.py ADDED
File without changes
src/demo/app.py ADDED
@@ -0,0 +1,56 @@
1
+ import gradio as gr
2
+ from gradio_screenrecorder import ScreenRecorder
3
+
4
+ def handle_recording(recording_data):
5
+ """Handle recorded video data"""
6
+ print(f'Received recording data: {recording_data}')
7
+
8
+ if not recording_data or not recording_data.get('video'):
9
+ return None
10
+
11
+ try:
12
+ video_info = recording_data['video']
13
+ # Return the video path that can be used by the Video component
14
+ return video_info.get('path')
15
+ except Exception as e:
16
+ print(f'Error processing recording: {e}')
17
+ return None
18
+
19
+
20
+ css = """
21
+ .screen-recorder-demo {
22
+ max-width: 800px;
23
+ margin: 0 auto;
24
+ }
25
+ """
26
+
27
+ with gr.Blocks(css=css, title="Screen Recorder Demo") as demo:
28
+ gr.HTML("""
29
+ <h1 style='text-align: center'>
30
+ Gradio Screen Recorder Component Demo
31
+ </h1>
32
+ """)
33
+
34
+ with gr.Row():
35
+ with gr.Column():
36
+ recorder = ScreenRecorder(
37
+ audio_enabled=True,
38
+ webcam_overlay=True, # show the webcam as a picture-in-picture overlay
39
+ webcam_position="top-left",
40
+ recording_format="webm",
41
+ max_duration=60,
42
+ label="Screen Recorder"
43
+ )
44
+
45
+ with gr.Column():
46
+ output_video = gr.Video(label="Recorded Video")
47
+
48
+ # Event handler
49
+ recorder.change(
50
+ fn=handle_recording,
51
+ inputs=recorder,
52
+ outputs=output_video
53
+ )
54
+
55
+ if __name__ == "__main__":
56
+ demo.launch()
src/demo/css.css ADDED
@@ -0,0 +1,157 @@
1
+ html {
2
+ font-family: Inter;
3
+ font-size: 16px;
4
+ font-weight: 400;
5
+ line-height: 1.5;
6
+ -webkit-text-size-adjust: 100%;
7
+ background: #fff;
8
+ color: #323232;
9
+ -webkit-font-smoothing: antialiased;
10
+ -moz-osx-font-smoothing: grayscale;
11
+ text-rendering: optimizeLegibility;
12
+ }
13
+
14
+ :root {
15
+ --space: 1;
16
+ --vspace: calc(var(--space) * 1rem);
17
+ --vspace-0: calc(3 * var(--space) * 1rem);
18
+ --vspace-1: calc(2 * var(--space) * 1rem);
19
+ --vspace-2: calc(1.5 * var(--space) * 1rem);
20
+ --vspace-3: calc(0.5 * var(--space) * 1rem);
21
+ }
22
+
23
+ .app {
24
+ max-width: 748px !important;
25
+ }
26
+
27
+ .prose p {
28
+ margin: var(--vspace) 0;
29
+ line-height: calc(var(--vspace) * 2);
30
+ font-size: 1rem;
31
+ }
32
+
33
+ code {
34
+ font-family: "Inconsolata", monospace;
35
+ font-size: 16px;
36
+ }
37
+
38
+ h1,
39
+ h1 code {
40
+ font-weight: 400;
41
+ line-height: calc(2.5 / var(--space) * var(--vspace));
42
+ }
43
+
44
+ h1 code {
45
+ background: none;
46
+ border: none;
47
+ letter-spacing: 0.05em;
48
+ padding-bottom: 5px;
49
+ position: relative;
50
+ padding: 0;
51
+ }
52
+
53
+ h2 {
54
+ margin: var(--vspace-1) 0 var(--vspace-2) 0;
55
+ line-height: 1em;
56
+ }
57
+
58
+ h3,
59
+ h3 code {
60
+ margin: var(--vspace-1) 0 var(--vspace-2) 0;
61
+ line-height: 1em;
62
+ }
63
+
64
+ h4,
65
+ h5,
66
+ h6 {
67
+ margin: var(--vspace-3) 0 var(--vspace-3) 0;
68
+ line-height: var(--vspace);
69
+ }
70
+
71
+ .bigtitle,
72
+ h1,
73
+ h1 code {
74
+ font-size: calc(8px * 4.5);
75
+ word-break: break-word;
76
+ }
77
+
78
+ .title,
79
+ h2,
80
+ h2 code {
81
+ font-size: calc(8px * 3.375);
82
+ font-weight: lighter;
83
+ word-break: break-word;
84
+ border: none;
85
+ background: none;
86
+ }
87
+
88
+ .subheading1,
89
+ h3,
90
+ h3 code {
91
+ font-size: calc(8px * 1.8);
92
+ font-weight: 600;
93
+ border: none;
94
+ background: none;
95
+ letter-spacing: 0.1em;
96
+ text-transform: uppercase;
97
+ }
98
+
99
+ h2 code {
100
+ padding: 0;
101
+ position: relative;
102
+ letter-spacing: 0.05em;
103
+ }
104
+
105
+ blockquote {
106
+ font-size: calc(8px * 1.1667);
107
+ font-style: italic;
108
+ line-height: calc(1.1667 * var(--vspace));
109
+ margin: var(--vspace-2) var(--vspace-2);
110
+ }
111
+
112
+ .subheading2,
113
+ h4 {
114
+ font-size: calc(8px * 1.4292);
115
+ text-transform: uppercase;
116
+ font-weight: 600;
117
+ }
118
+
119
+ .subheading3,
120
+ h5 {
121
+ font-size: calc(8px * 1.2917);
122
+ line-height: calc(1.2917 * var(--vspace));
123
+
124
+ font-weight: lighter;
125
+ text-transform: uppercase;
126
+ letter-spacing: 0.15em;
127
+ }
128
+
129
+ h6 {
130
+ font-size: calc(8px * 1.1667);
131
+ font-size: 1.1667em;
132
+ font-weight: normal;
133
+ font-style: italic;
134
+ font-family: "le-monde-livre-classic-byol", serif !important;
135
+ letter-spacing: 0px !important;
136
+ }
137
+
138
+ #start .md > *:first-child {
139
+ margin-top: 0;
140
+ }
141
+
142
+ h2 + h3 {
143
+ margin-top: 0;
144
+ }
145
+
146
+ .md hr {
147
+ border: none;
148
+ border-top: 1px solid var(--block-border-color);
149
+ margin: var(--vspace-2) 0 var(--vspace-2) 0;
150
+ }
151
+ .prose ul {
152
+ margin: var(--vspace-2) 0 var(--vspace-1) 0;
153
+ }
154
+
155
+ .gap {
156
+ gap: 0;
157
+ }
src/demo/requirements.txt ADDED
@@ -0,0 +1 @@
1
+ gradio_screenrecorder
src/demo/space.py ADDED
@@ -0,0 +1,199 @@
1
+
2
+ import gradio as gr
3
+ from app import demo as app
4
+ import os
5
+
6
+ _docs = {'ScreenRecorder': {'description': 'Custom Gradio component for comprehensive screen recording functionality.', 'members': {'__init__': {'audio_enabled': {'type': 'bool', 'default': 'True', 'description': None}, 'webcam_overlay': {'type': 'bool', 'default': 'False', 'description': None}, 'webcam_position': {'type': '"top-left" | "top-right" | "bottom-left" | "bottom-right"', 'default': '"bottom-right"', 'description': None}, 'recording_format': {'type': 'str', 'default': '"webm"', 'description': None}, 'max_duration': {'type': 'typing.Optional[int][int, None]', 'default': 'None', 'description': None}, 'interactive': {'type': 'bool', 'default': 'True', 'description': None}}, 'postprocess': {}, 'preprocess': {'return': {'type': 'typing.Optional[\n gradio_screenrecorder.screenrecorder.ScreenRecorderData\n][ScreenRecorderData, None]', 'description': None}, 'value': None}}, 'events': {'record_start': {'type': None, 'default': None, 'description': ''}, 'record_stop': {'type': None, 'default': None, 'description': ''}, 'stream_update': {'type': None, 'default': None, 'description': ''}, 'change': {'type': None, 'default': None, 'description': ''}}}, '__meta__': {'additional_interfaces': {'ScreenRecorderData': {'source': 'class ScreenRecorderData(GradioModel):\n video: Optional[FileData] = None\n duration: Optional[float] = None\n audio_enabled: bool = True\n status: Literal["recording", "stopped", "error"] = (\n "stopped"\n )\n\n class Config:\n json_encoders = {\n FileData: lambda v: v.model_dump()\n if v\n else None\n }'}}, 'user_fn_refs': {'ScreenRecorder': ['ScreenRecorderData']}}}
7
+
8
+ abs_path = os.path.join(os.path.dirname(__file__), "css.css")
9
+
10
+ with gr.Blocks(
11
+ css=abs_path,
12
+ theme=gr.themes.Default(
13
+ font_mono=[
14
+ gr.themes.GoogleFont("Inconsolata"),
15
+ "monospace",
16
+ ],
17
+ ),
18
+ ) as demo:
19
+ gr.Markdown(
20
+ """
21
+ # `gradio_screenrecorder`
22
+
23
+ <div style="display: flex; gap: 7px;">
24
+ <img alt="Static Badge" src="https://img.shields.io/badge/version%20-%200.0.1%20-%20orange">
25
+ </div>
26
+
27
+ Screen Recorder Gradio Custom Component
28
+ """, elem_classes=["md-custom"], header_links=True)
29
+ app.render()
30
+ gr.Markdown(
31
+ """
32
+ ## Installation
33
+
34
+ ```bash
35
+ pip install gradio_screenrecorder
36
+ ```
37
+
38
+ ## Usage
39
+
40
+ ```python
41
+ import gradio as gr
42
+ from gradio_screenrecorder import ScreenRecorder
43
+
44
+ def handle_recording(recording_data):
45
+ \"\"\"Handle recorded video data\"\"\"
46
+ print(f'Received recording data: {recording_data}')
47
+
48
+ if not recording_data or not recording_data.get('video'):
49
+ return None
50
+
51
+ try:
52
+ video_info = recording_data['video']
53
+ # Return the video path that can be used by the Video component
54
+ return video_info.get('path')
55
+ except Exception as e:
56
+ print(f'Error processing recording: {e}')
57
+ return None
58
+
59
+
60
+ css = \"\"\"
61
+ .screen-recorder-demo {
62
+ max-width: 800px;
63
+ margin: 0 auto;
64
+ }
65
+ \"\"\"
66
+
67
+ with gr.Blocks(css=css, title="Screen Recorder Demo") as demo:
68
+ gr.HTML(\"\"\"
69
+ <h1 style='text-align: center'>
70
+ Gradio Screen Recorder Component Demo
71
+ </h1>
72
+ \"\"\")
73
+
74
+ with gr.Row():
75
+ with gr.Column():
76
+ recorder = ScreenRecorder(
77
+ audio_enabled=True,
78
+ webcam_overlay=True, # show the webcam as a picture-in-picture overlay
79
+ webcam_position="top-left",
80
+ recording_format="webm",
81
+ max_duration=60,
82
+ label="Screen Recorder"
83
+ )
84
+
85
+ with gr.Column():
86
+ output_video = gr.Video(label="Recorded Video")
87
+
88
+ # Event handler
89
+ recorder.change(
90
+ fn=handle_recording,
91
+ inputs=recorder,
92
+ outputs=output_video
93
+ )
94
+
95
+ if __name__ == "__main__":
96
+ demo.launch()
97
+
98
+ ```
99
+ """, elem_classes=["md-custom"], header_links=True)
100
+
101
+
102
+ gr.Markdown("""
103
+ ## `ScreenRecorder`
104
+
105
+ ### Initialization
106
+ """, elem_classes=["md-custom"], header_links=True)
107
+
108
+ gr.ParamViewer(value=_docs["ScreenRecorder"]["members"]["__init__"], linkify=['ScreenRecorderData'])
109
+
110
+
111
+ gr.Markdown("### Events")
112
+ gr.ParamViewer(value=_docs["ScreenRecorder"]["events"], linkify=['Event'])
113
+
114
+
115
+
116
+
117
+ gr.Markdown("""
118
+
119
+ ### User function
120
+
121
+ The impact on the user's predict function varies depending on whether the component is used as an input or output for an event (or both).
122
+
123
+ - When used as an Input, the component only impacts the input signature of the user function.
124
+ - When used as an output, the component only impacts the return signature of the user function.
125
+
126
+ The code snippet below is accurate in cases where the component is used as both an input and an output.
127
+
128
+
129
+
130
+ ```python
131
+ def predict(
132
+ value: typing.Optional[
133
+ gradio_screenrecorder.screenrecorder.ScreenRecorderData
134
+ ]
135
+ ) -> Unknown:
136
+ return value
137
+ ```
138
+ """, elem_classes=["md-custom", "ScreenRecorder-user-fn"], header_links=True)
139
+
140
+
141
+
142
+
143
+ code_ScreenRecorderData = gr.Markdown("""
144
+ ## `ScreenRecorderData`
145
+ ```python
146
+ class ScreenRecorderData(GradioModel):
147
+ video: Optional[FileData] = None
148
+ duration: Optional[float] = None
149
+ audio_enabled: bool = True
150
+ status: Literal["recording", "stopped", "error"] = (
151
+ "stopped"
152
+ )
153
+
154
+ class Config:
155
+ json_encoders = {
156
+ FileData: lambda v: v.model_dump()
157
+ if v
158
+ else None
159
+ }
160
+ ```""", elem_classes=["md-custom", "ScreenRecorderData"], header_links=True)
161
+
162
+ demo.load(None, js=r"""function() {
163
+ const refs = {
164
+ ScreenRecorderData: [], };
165
+ const user_fn_refs = {
166
+ ScreenRecorder: ['ScreenRecorderData'], };
167
+ requestAnimationFrame(() => {
168
+
169
+ Object.entries(user_fn_refs).forEach(([key, refs]) => {
170
+ if (refs.length > 0) {
171
+ const el = document.querySelector(`.${key}-user-fn`);
172
+ if (!el) return;
173
+ refs.forEach(ref => {
174
+ el.innerHTML = el.innerHTML.replace(
175
+ new RegExp("\\b"+ref+"\\b", "g"),
176
+ `<a href="#h-${ref.toLowerCase()}">${ref}</a>`
177
+ );
178
+ })
179
+ }
180
+ })
181
+
182
+ Object.entries(refs).forEach(([key, refs]) => {
183
+ if (refs.length > 0) {
184
+ const el = document.querySelector(`.${key}`);
185
+ if (!el) return;
186
+ refs.forEach(ref => {
187
+ el.innerHTML = el.innerHTML.replace(
188
+ new RegExp("\\b"+ref+"\\b", "g"),
189
+ `<a href="#h-${ref.toLowerCase()}">${ref}</a>`
190
+ );
191
+ })
192
+ }
193
+ })
194
+ })
195
+ }
196
+
197
+ """)
198
+
199
+ demo.launch()
src/frontend/Example.svelte ADDED
@@ -0,0 +1,80 @@
1
+ <script lang="ts">
2
+ export let value: any;
3
+
4
+ function formatDuration(duration: number): string {
5
+ const minutes = Math.floor(duration / 60);
6
+ const seconds = Math.floor(duration % 60);
7
+ return `${minutes}:${seconds.toString().padStart(2, '0')}`;
8
+ }
9
+ </script>
10
+
11
+ <div class="example-container">
12
+ {#if value && value.video}
13
+ <div class="video-thumbnail">
14
+ <video
15
+ src={value.video.path}
16
+ controls={false}
17
+ muted
18
+ style="width: 100%; height: 60px; object-fit: cover;"
19
+ >
20
+ </video>
21
+ <div class="overlay">
22
+ <span class="duration">
23
+ {value.duration ? formatDuration(value.duration) : 'Recording'}
24
+ </span>
25
+ <span class="format">
26
+ {value.video.orig_name?.split('.').pop()?.toUpperCase() || 'VIDEO'}
27
+ </span>
28
+ </div>
29
+ </div>
30
+ {:else}
31
+ <div class="placeholder">
32
+ 📹 Screen Recording
33
+ </div>
34
+ {/if}
35
+ </div>
36
+
37
+ <style>
38
+ .example-container {
39
+ width: 100%;
40
+ height: 80px;
41
+ border-radius: 4px;
42
+ overflow: hidden;
43
+ position: relative;
44
+ }
45
+
46
+ .video-thumbnail {
47
+ position: relative;
48
+ width: 100%;
49
+ height: 100%;
50
+ }
51
+
52
+ .overlay {
53
+ position: absolute;
54
+ bottom: 0;
55
+ left: 0;
56
+ right: 0;
57
+ background: linear-gradient(transparent, rgba(0,0,0,0.7));
58
+ padding: 4px 8px;
59
+ display: flex;
60
+ justify-content: space-between;
61
+ align-items: flex-end;
62
+ }
63
+
64
+ .duration, .format {
65
+ color: white;
66
+ font-size: 10px;
67
+ font-weight: bold;
68
+ }
69
+
70
+ .placeholder {
71
+ display: flex;
72
+ align-items: center;
73
+ justify-content: center;
74
+ width: 100%;
75
+ height: 100%;
76
+ background: #f0f0f0;
77
+ color: #666;
78
+ font-size: 12px;
79
+ }
80
+ </style>
src/frontend/Index.svelte ADDED
@@ -0,0 +1,727 @@
1
+ <script lang="ts">
2
+ import { onMount, onDestroy, createEventDispatcher } from 'svelte';
3
+ import { Block } from '@gradio/atoms';
4
+ import { StatusTracker } from '@gradio/statustracker';
5
+ import type { LoadingStatus } from "@gradio/statustracker";
6
+ import type { Gradio } from "@gradio/utils";
7
+ import fixWebmDuration from 'fix-webm-duration';
8
+
9
+ // Type definitions
10
+ interface MediaRecorderOptions {
11
+ mimeType?: string;
12
+ audioBitsPerSecond?: number;
13
+ videoBitsPerSecond?: number;
14
+ bitsPerSecond?: number;
15
+ }
16
+
17
+ interface MediaTrackConstraints {
18
+ displaySurface?: 'browser' | 'monitor' | 'window';
19
+ cursor?: 'always' | 'motion' | 'never';
20
+ }
21
+
22
+ // Shape of the recording payload exchanged with the backend
23
+ interface RecordingData {
24
+ video: string;
25
+ duration: number;
26
+ audio_enabled?: boolean;
27
+ status?: string;
28
+ orig_name?: string;
29
+ size?: number | null;
30
+ data?: string; // Base64 encoded data for Gradio
31
+ name?: string; // Alias for orig_name for Gradio compatibility
32
+ is_file?: boolean;
33
+ type?: string; // MIME type of the recording
34
+ }
35
+
36
+ interface Position {
37
+ x: number;
38
+ y: number;
39
+ }
40
+
41
+ // Event types for the component
42
+ type EventMap = {
43
+ 'error': { message: string; error: string };
44
+ 'recording-started': void;
45
+ 'recording-stopped': RecordingData;
46
+ 'record_stop': RecordingData;
47
+ 'change': RecordingData;
48
+ 'webcam-error': { message: string; error: string };
49
+ };
50
+
51
+ // Component props with proper types and defaults
52
+ export let gradio: Gradio<any>;
53
+ export let value: Partial<RecordingData> | null = null;
54
+ export const elem_id = ''; // Marked as const since it's not modified
55
+ export let elem_classes: string[] = [];
56
+ export let scale: number | null = null;
57
+ export let min_width: number | null = null;
58
+ export let visible = true;
59
+ export let interactive = true;
60
+ export let loading_status: LoadingStatus | null = null;
61
+ export let audio_enabled = false;
62
+ export let webcam_overlay = false;
63
+ export let webcam_position: 'top-left' | 'top-right' | 'bottom-left' | 'bottom-right' = 'bottom-right';
64
+ export let recording_format: 'webm' | 'mp4' | 'gif' = 'webm';
65
+ export let max_duration: number | null = null;
66
+
67
+ // Computed styles for the container
68
+ let containerStyle = '';
69
+
70
+ // Component methods interface
71
+ interface ComponentMethods {
72
+ startRecording: () => Promise<void>;
73
+ stopRecording: () => void;
74
+ togglePause: () => void;
75
+ cleanup: () => void;
76
+ }
77
+
78
+ // Component state with explicit types and initial values
79
+ let isPaused = false;
80
+ let isRecording = false;
81
+ let recordingTime = 0;
82
+ let recordingTimer: number | null = null;
83
+ let recordedChunks: Blob[] = [];
84
+
85
+ // Media streams and elements
86
+ let screenStream: MediaStream | null = null;
87
+ let webcamStream: MediaStream | null = null;
88
+ let combinedStream: MediaStream | null = null;
89
+ let canvas: HTMLCanvasElement | null = null;
90
+ let ctx: CanvasRenderingContext2D | null = null;
91
+ let animationFrameId: number | null = null;
92
+ let previewVideo: HTMLVideoElement | null = null;
93
+ let webcamVideo: HTMLVideoElement | null = null;
94
+ let recordingStartTime = 0;
95
+ let mediaRecorder: MediaRecorder | null = null;
96
+
97
+ // Internal video elements
98
+ let webcamVideoInternal: HTMLVideoElement | null = null;
99
+ let screenVideoInternal: HTMLVideoElement | null = null;
100
+
101
+ // Bind canvas element
102
+ function bindCanvas(node: HTMLCanvasElement) {
103
+ canvas = node;
104
+ if (canvas) {
105
+ const context = canvas.getContext('2d', { willReadFrequently: true });
106
+ if (context) {
107
+ ctx = context;
108
+ // Set canvas dimensions with null checks
109
+ const width = canvas.offsetWidth;
110
+ const height = canvas.offsetHeight;
111
+ if (width && height) {
112
+ canvas.width = width;
113
+ canvas.height = height;
114
+ }
115
+ }
116
+ }
117
+ return {
118
+ destroy() {
119
+ canvas = null;
120
+ ctx = null;
121
+ }
122
+ };
123
+ }
124
+
125
+ // Canvas binding is now handled by the bindCanvas function
126
+
127
+ // Configuration
128
+ const webcam_size = 200;
129
+ const webcam_border = 10;
130
+ const webcam_radius = '50%';
131
+
132
+ // Ensure max_duration has a default value if null
133
+ $: effectiveMaxDuration = max_duration ?? 0;
134
+
135
+ // Computed styles for the container
136
+ $: containerStyle = [
137
+ scale !== null ? `--scale: ${scale};` : '',
138
+ min_width !== null ? `min-width: ${min_width}px;` : ''
139
+ ].filter(Boolean).join(' ');
140
+
141
+ onDestroy(() => {
142
+ if (isRecording) {
143
+ componentMethods.stopRecording();
144
+ }
145
+ componentMethods.cleanup();
146
+ if (animationFrameId) {
147
+ cancelAnimationFrame(animationFrameId);
148
+ animationFrameId = null;
149
+ }
150
+ });
151
+
152
+ // Component state and props are already declared above
153
+
154
+ // Event dispatcher with proper typing
155
+ const dispatch = createEventDispatcher<EventMap>();
156
+
157
+ // Type guard for error handling
158
+ function isErrorWithMessage(error: unknown): error is Error {
159
+ return error instanceof Error;
160
+ }
161
+
162
+ // Component methods implementation
163
+ const componentMethods: ComponentMethods = {
164
+ startRecording: async (): Promise<void> => {
165
+ if (isRecording) return;
166
+ isRecording = true;
167
+ recordedChunks = [];
168
+ recordingTime = 0;
169
+
170
+ try {
171
+ // Composite screen and optional webcam overlay via hidden canvas
172
+ const screenStreamCapture = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: false });
173
+ screenStream = screenStreamCapture;
174
+ // Assign to hidden video for composition
175
+ if (screenVideoInternal) {
176
+ screenVideoInternal.srcObject = screenStreamCapture;
177
+ await screenVideoInternal.play().catch(() => {});
178
+ }
179
+ let captureStream: MediaStream;
180
+ if (webcam_overlay && webcamVideoInternal && canvas && ctx) {
181
+ try {
182
+ webcamStream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
183
+ webcamVideoInternal.srcObject = webcamStream;
184
+ await webcamVideoInternal.play().catch(() => {});
185
+ // Resize canvas to match screen video
186
+ canvas.width = screenVideoInternal!.videoWidth;
187
+ canvas.height = screenVideoInternal!.videoHeight;
188
+ const overlaySize = Math.min(canvas.width, canvas.height) / 4;
189
+ const posMap: Record<string, [number, number]> = {
190
+ 'top-left': [10, 10],
191
+ 'top-right': [canvas.width - overlaySize - 10, 10],
192
+ 'bottom-left': [10, canvas.height - overlaySize - 10],
193
+ 'bottom-right': [canvas.width - overlaySize - 10, canvas.height - overlaySize - 10]
194
+ };
195
+ const [ox, oy] = posMap[webcam_position];
196
+ function draw() {
197
+ ctx!.drawImage(screenVideoInternal!, 0, 0, canvas.width, canvas.height);
198
+ ctx!.drawImage(webcamVideoInternal!, ox, oy, overlaySize, overlaySize);
199
+ animationFrameId = requestAnimationFrame(draw);
200
+ }
201
+ draw();
202
+ const canvasStream = canvas.captureStream(30);
203
+ const audioTracks = audio_enabled
204
+ ? (await navigator.mediaDevices.getUserMedia({ audio: true })).getAudioTracks()
205
+ : screenStreamCapture.getAudioTracks();
206
+ combinedStream = new MediaStream([...canvasStream.getVideoTracks(), ...audioTracks]);
207
+ captureStream = combinedStream;
208
+ } catch (err) {
209
+ console.warn('Webcam overlay failed, falling back to screen only', err);
210
+ captureStream = screenStreamCapture;
211
+ }
212
+ } else {
213
+ // No overlay: combine audio if enabled with screen
214
+ const audioTracks = audio_enabled
215
+ ? (await navigator.mediaDevices.getUserMedia({ audio: true })).getAudioTracks()
216
+ : screenStreamCapture.getAudioTracks();
217
+ combinedStream = new MediaStream([...screenStreamCapture.getVideoTracks(), ...audioTracks]);
218
+ captureStream = combinedStream;
219
+ }
220
+
221
+ // Handle track ended event
222
+ screenStreamCapture.getVideoTracks()[0].onended = () => {
223
+ if (isRecording) {
224
+ componentMethods.stopRecording();
225
+ }
226
+ };
227
+
228
+ // Start recording
229
+ const options: MediaRecorderOptions = {
230
+ mimeType: recording_format === 'webm' ? 'video/webm;codecs=vp9' : 'video/mp4'
231
+ };
232
+
233
+ mediaRecorder = new MediaRecorder(captureStream, options);
234
+ mediaRecorder.ondataavailable = handleDataAvailable;
235
+ mediaRecorder.onstop = handleRecordingStop;
236
+ mediaRecorder.start();
237
+
238
+ recordingStartTime = Date.now();
239
+ updateRecordingTime();
240
+
241
+ dispatch('recording-started');
242
+ } catch (error) {
243
+ isRecording = false;
244
+ if (isErrorWithMessage(error)) {
245
+ dispatch('error', {
246
+ message: 'Failed to start recording',
247
+ error: error.message
248
+ });
249
+ }
250
+ }
251
+ },
252
+
253
+ stopRecording: (): void => {
254
+ if (!isRecording || !mediaRecorder) return;
255
+
256
+ try {
257
+ mediaRecorder.stop();
258
+ isRecording = false;
259
+
260
+ // Stop all tracks
261
+ [screenStream, webcamStream, combinedStream].forEach(stream => {
262
+ if (stream) {
263
+ stream.getTracks().forEach(track => track.stop());
264
+ }
265
+ });
266
+
267
+ if (recordingTimer) {
268
+ clearTimeout(recordingTimer);
269
+ recordingTimer = null;
270
+ }
271
+
272
+ const recordingData: RecordingData = {
273
+ video: '',
274
+ duration: recordingTime, // recordingTime is already tracked in seconds
275
+ audio_enabled: audio_enabled,
276
+ status: 'completed'
277
+ };
278
+
279
+ dispatch('recording-stopped', recordingData);
280
+ dispatch('record_stop', recordingData);
281
+ dispatch('change', recordingData);
282
+ } catch (error) {
283
+ isRecording = false;
284
+ if (isErrorWithMessage(error)) {
285
+ dispatch('error', {
286
+ message: 'Error stopping recording',
287
+ error: error.message
288
+ });
289
+ }
290
+ }
291
+ },
292
+
293
+ togglePause: (): void => {
294
+ if (!mediaRecorder) return;
295
+
296
+ isPaused = !isPaused;
297
+
298
+ if (isPaused) {
299
+ mediaRecorder.pause();
300
+ if (recordingTimer) {
301
+ clearTimeout(recordingTimer);
302
+ recordingTimer = null;
303
+ }
304
+ } else {
305
+ mediaRecorder.resume();
306
+ updateRecordingTime();
307
+ }
308
+ if (isPaused) {
309
+ // Pause logic
310
+ } else {
311
+ // Resume logic
312
+ }
313
+ },
314
+
315
+ cleanup: (): void => {
316
+ // Stop all media streams
317
+ [screenStream, webcamStream, combinedStream].forEach(stream => {
318
+ if (stream) {
319
+ stream.getTracks().forEach(track => track.stop());
320
+ }
321
+ });
322
+
323
+ // Clear media recorder
324
+ if (mediaRecorder) {
325
+ if (mediaRecorder.state !== 'inactive') {
326
+ mediaRecorder.stop();
327
+ }
328
+ mediaRecorder = null;
329
+ }
330
+
331
+ // Clear canvas
332
+ if (ctx) {
333
+ ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);
334
+ }
335
+
336
+ // Reset state
337
+ isRecording = false;
338
+ isPaused = false;
339
+ recordingTime = 0;
340
+ recordedChunks = [];
341
+
342
+ // Clear timers
343
+ if (recordingTimer) {
344
+ clearTimeout(recordingTimer); // recordingTimer is created with setTimeout
345
+ recordingTimer = null;
346
+ }
347
+
348
+ if (animationFrameId) {
349
+ cancelAnimationFrame(animationFrameId);
350
+ animationFrameId = null;
351
+ }
352
+ }
353
+ };
354
+
355
+ // Handle data available event
356
+ function handleDataAvailable(event: BlobEvent): void {
357
+ if (event.data && event.data.size > 0) {
358
+ recordedChunks.push(event.data);
359
+ }
360
+ }
361
+
362
+ // Handle recording stop
363
+ function handleRecordingStop(): void {
364
+ if (recordedChunks.length === 0) {
365
+ console.warn('No recording data available');
366
+ return;
367
+ }
368
+
369
+ const mimeType = recording_format === 'webm' ? 'video/webm' : 'video/mp4';
370
+ const blob = new Blob(recordedChunks, { type: mimeType });
371
+ const url = URL.createObjectURL(blob);
372
+
373
+ console.log('Recording stopped. Blob size:', blob.size, 'bytes');
374
+
375
+ // Create a file reader to read the blob as base64
376
+ const reader = new FileReader();
377
+ reader.onload = (e) => {
378
+ const base64data = e.target?.result as string;
379
+ // Extract the base64 data (remove the data URL prefix)
380
+ const base64Content = base64data.split(',')[1];
381
+ const fileName = `recording_${Date.now()}.${recording_format}`;
382
+
383
+ // Dispatch event with recording data
384
+ const recordingData: RecordingData = {
385
+ video: url,
386
+ duration: recordingTime,
387
+ audio_enabled: audio_enabled,
388
+ status: 'completed',
389
+ size: blob.size > 0 ? blob.size : undefined,
390
+ orig_name: fileName,
391
+ name: fileName, // Alias for Gradio compatibility
392
+ is_file: true,
393
+ type: mimeType,
394
+ data: base64Content
395
+ };
396
+
397
+ console.log('Dispatching recording-stopped event');
398
+ dispatch('recording-stopped', recordingData);
399
+ dispatch('record_stop', recordingData);
400
+ dispatch('change', recordingData);
401
+
402
+ // Update the value prop to trigger re-render
403
+ value = { ...value, ...recordingData };
404
+ };
405
+
406
+ reader.onerror = (error) => {
407
+ console.error('Error reading blob:', error);
408
+ dispatch('error', {
409
+ message: 'Failed to process recording',
410
+ error: 'Could not read recording data'
411
+ });
412
+ };
413
+
414
+ // Read the blob as data URL
415
+ reader.readAsDataURL(blob);
416
+ }
417
+
418
+ // Update recording time
419
+ function updateRecordingTime(): void {
420
+ if (!isRecording) return;
421
+
422
+ recordingTime = Math.floor((Date.now() - recordingStartTime) / 1000);
423
+
424
+ // Check if max duration has been reached
425
+ if (max_duration !== null && max_duration > 0 && recordingTime >= max_duration) {
426
+ console.log('Max duration reached, stopping');
427
+ componentMethods.stopRecording();
428
+ return;
429
+ }
430
+
431
+ // Schedule the next update
432
+ recordingTimer = window.setTimeout(updateRecordingTime, 1000);
433
+ }
434
+
435
+ function stopTimer(): void {
436
+ if (recordingTimer) {
437
+ clearTimeout(recordingTimer);
438
+ recordingTimer = null;
439
+ }
440
+ }
441
+
442
+ // Format time as MM:SS
443
+ function formatTime(seconds: number): string {
444
+ const mins = Math.floor(seconds / 60);
445
+ const secs = Math.floor(seconds % 60);
446
+ return `${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`;
447
+ }
448
+
449
+ // Format file size in human-readable format
450
+ function formatFileSize(bytes: number | string | null | undefined): string {
451
+ if (bytes === null || bytes === undefined) return '0 B';
452
+ const numBytes = Number(bytes);
453
+ if (isNaN(numBytes) || numBytes === 0) return '0 B';
454
+ const k = 1024;
455
+ const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
456
+ const i = Math.floor(Math.log(numBytes) / Math.log(k));
457
+ return parseFloat((numBytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
458
+ }
459
+ </script>
460
+
461
+ <div class="screen-recorder-container {!visible ? 'invisible' : ''} {elem_classes.join(' ')}" style="{containerStyle}">
462
+ {#if loading_status}
463
+ <StatusTracker
464
+ autoscroll={gradio.autoscroll}
465
+ i18n={gradio.i18n}
466
+ {...loading_status}
467
+ />
468
+ {/if}
469
+
470
+ <div class="screen-recorder">
471
+ <div class="controls">
472
+ {#if !isRecording}
473
+ <button
474
+ class="record-btn start"
475
+ on:click={componentMethods.startRecording}
476
+ disabled={!interactive}
477
+ >
478
+ <span class="recording-icon">●</span> Start Recording
479
+ </button>
480
+ {:else}
481
+ <button
482
+ class="record-btn stop"
483
+ on:click={componentMethods.stopRecording}
484
+ >
485
+ <span class="stop-icon">■</span> Stop Recording
486
+ </button>
487
+ <span class="recording-time">
488
+ {formatTime(recordingTime)}
489
+ </span>
490
+ {#if max_duration}
491
+ <span class="max-duration">/ {formatTime(max_duration)}</span>
492
+ {/if}
493
+ {/if}
494
+ </div>
495
+
496
+ <!-- Live Preview - Always show when recording -->
497
+ {#if isRecording}
498
+ <div class="preview-container">
499
+ <video
500
+ bind:this={previewVideo}
501
+ class="preview-video"
502
+ autoplay
503
+ muted
504
+ playsinline
505
+ aria-label="Live preview"
506
+ on:loadedmetadata={() => {
507
+ if (previewVideo) {
508
+ previewVideo.play().catch(console.warn);
509
+ }
510
+ }}
511
+ >
512
+ <track kind="captions" />
513
+ </video>
514
+ {#if webcam_overlay}
515
+ <video
516
+ bind:this={webcamVideo}
517
+ class="webcam-overlay {webcam_position}"
518
+ style="width: 200px; height: 200px;"
519
+ autoplay
520
+ muted
521
+ playsinline
522
+ aria-label="Webcam overlay"
523
+ >
524
+ <track kind="captions" />
525
+ </video>
526
+ {/if}
527
+ <div class="recording-indicator">
528
+ <span class="pulse">●</span> RECORDING
529
+ </div>
530
+ </div>
531
+ {/if}
532
+
533
+ {#if value?.video}
534
+ <div class="recording-preview" style="position: relative;">
535
+ {#if audio_enabled}
536
+ <div class="speaker-overlay">🔊</div>
537
+ {/if}
538
+ <video
539
+ src={value.video}
540
+ controls
541
+ class="preview-video"
542
+ aria-label="Recording preview"
543
+ on:loadedmetadata
544
+ on:loadeddata
545
+ on:error={(e) => console.error('Video error:', e)}
546
+ >
547
+ <track kind="captions" />
548
+ </video>
549
+ <div class="recording-info">
550
+ <div>Duration: {value.duration ? value.duration.toFixed(1) : '0.0'}s</div>
551
+ {#if value.size}
552
+ <div>Size: {formatFileSize(value.size)}</div>
553
+ {/if}
554
+ </div>
555
+ </div>
556
+ {/if}
557
+
558
+ <!-- Configuration Display -->
559
+ <div class="config-info">
560
+ <span>Audio: {audio_enabled ? '🔊' : '🔇'}</span>
561
+ <span>Format: {recording_format.toUpperCase()}</span>
562
+ {#if max_duration}
563
+ <span>Max: {formatTime(max_duration)}</span>
564
+ {/if}
565
+ </div>
566
+
567
+ <!-- Debug info -->
568
+ {#if value}
569
+ <div class="debug-info">
570
+ <small>Last recording: {value.orig_name} ({formatFileSize(value.size)})</small>
571
+ </div>
572
+ {/if}
573
+ </div>
574
+ <video bind:this={screenVideoInternal} hidden muted playsinline style="display:none"></video>
575
+ {#if webcam_overlay}
576
+ <video bind:this={webcamVideoInternal} hidden muted playsinline style="display:none"></video>
577
+ {/if}
578
+ <canvas bind:this={canvas} use:bindCanvas hidden style="display:none"></canvas>
579
+ </div>
580
+
581
+ <style>
582
+ .screen-recorder-container {
583
+ display: block;
584
+ width: 100%;
585
+ box-sizing: border-box;
586
+ }
587
+
588
+ .screen-recorder-container.invisible {
589
+ display: none;
590
+ }
591
+
592
+ .screen-recorder {
593
+ border: 2px solid #e0e0e0;
594
+ border-radius: 8px;
595
+ padding: 16px;
596
+ background: #f9f9f9;
597
+ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
598
+ }
599
+
600
+ .controls {
601
+ display: flex;
602
+ align-items: center;
603
+ gap: 12px;
604
+ margin-bottom: 12px;
605
+ flex-wrap: wrap;
606
+ }
607
+
608
+ .record-btn {
609
+ padding: 10px 20px;
610
+ border: none;
611
+ border-radius: 6px;
612
+ font-size: 14px;
613
+ font-weight: 500;
614
+ cursor: pointer;
615
+ transition: all 0.2s;
616
+ box-shadow: 0 2px 4px rgba(0,0,0,0.1);
617
+ }
618
+
619
+ .record-btn.start {
620
+ background: #4CAF50;
621
+ color: white;
622
+ }
623
+
624
+ .record-btn.start:hover {
625
+ background: #45a049;
626
+ }
627
+
628
+ .record-btn.stop {
629
+ background: #f44336;
630
+ color: white;
631
+ }
632
+
633
+ .record-btn.stop:hover {
634
+ background: #da190b;
635
+ }
636
+
637
+ .record-btn:disabled {
638
+ opacity: 0.5;
639
+ cursor: not-allowed;
640
+ }
641
+
642
+ .recording-time {
643
+ font-family: 'Courier New', monospace;
644
+ font-size: 18px;
645
+ font-weight: bold;
646
+ color: #f44336;
647
+ }
648
+
649
+ .max-duration {
650
+ font-family: 'Courier New', monospace;
651
+ font-size: 14px;
652
+ color: #666;
653
+ }
654
+
655
+
656
+ .preview-container {
657
+ position: relative;
658
+ margin: 12px 0;
659
+ border-radius: 6px;
660
+ overflow: hidden;
661
+ background: black;
662
+ min-height: 200px;
663
+ }
664
+
665
+ .preview-video {
666
+ width: 100%;
667
+ max-height: 400px;
668
+ display: block;
669
+ object-fit: contain;
670
+ }
671
+
672
+
673
+ .recording-indicator {
674
+ position: absolute;
675
+ top: 10px;
676
+ left: 10px;
677
+ background: rgba(244, 67, 54, 0.9);
678
+ color: white;
679
+ padding: 6px 12px;
680
+ border-radius: 4px;
681
+ font-size: 12px;
682
+ font-weight: bold;
683
+ animation: pulse 1s infinite;
684
+ box-shadow: 0 2px 4px rgba(0,0,0,0.3);
685
+ }
686
+
687
+ @keyframes pulse {
688
+ 0%, 100% { opacity: 1; }
689
+ 50% { opacity: 0.7; }
690
+ }
691
+
692
+ .config-info {
693
+ display: flex;
694
+ gap: 8px;
695
+ font-size: 12px;
696
+ color: #666;
697
+ margin-top: 8px;
698
+ flex-wrap: wrap;
699
+ }
700
+
701
+ .config-info span {
702
+ padding: 4px 8px;
703
+ background: #e8e8e8;
704
+ border-radius: 4px;
705
+ border: 1px solid #ddd;
706
+ }
707
+
708
+ .debug-info {
709
+ margin-top: 8px;
710
+ padding: 8px;
711
+ background: #e8f5e8;
712
+ border-radius: 4px;
713
+ border: 1px solid #c8e6c8;
714
+ }
715
+
716
+ .speaker-overlay {
717
+ position: absolute;
718
+ top: 8px;
719
+ right: 8px;
720
+ background: rgba(0,0,0,0.5);
721
+ color: white;
722
+ padding: 4px;
723
+ border-radius: 4px;
724
+ font-size: 14px;
725
+ pointer-events: none;
726
+ }
727
+ </style>
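
`Index.svelte` imports `fix-webm-duration` (and `package.json` below lists it as a dependency), but `handleRecordingStop` never applies it, so WebM blobs produced by `MediaRecorder` can report an unknown duration. A minimal sketch of how the patch could be wired in before the object URL and base64 conversion are built is shown below; it assumes the library's promise-based form and reuses the component's `recordedChunks` / `recordingTime` state, so treat it as illustrative rather than part of the shipped component.

```typescript
import fixWebmDuration from 'fix-webm-duration';

// Sketch: patch the duration metadata of a freshly recorded WebM blob.
// Assumes the promise form of fix-webm-duration (no callback argument) and that
// recordedChunks / recordingTime match the state used in Index.svelte.
async function buildRecordingBlob(
  recordedChunks: Blob[],
  recordingTime: number // seconds, as tracked by updateRecordingTime()
): Promise<Blob> {
  const raw = new Blob(recordedChunks, { type: 'video/webm' });
  // fix-webm-duration expects the duration in milliseconds.
  return fixWebmDuration(raw, recordingTime * 1000);
}
```

The returned blob could then stand in for `blob` inside `handleRecordingStop` before `URL.createObjectURL` and the `FileReader` step.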
src/frontend/gradio.config.js ADDED
@@ -0,0 +1,9 @@
1
+ export default {
2
+ plugins: [],
3
+ svelte: {
4
+ preprocess: [],
5
+ },
6
+ build: {
7
+ target: "modules",
8
+ },
9
+ };
src/frontend/package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
src/frontend/package.json ADDED
@@ -0,0 +1,56 @@
1
+ {
2
+ "name": "gradio_screenrecorder",
3
+ "version": "0.3.21",
4
+ "description": "Gradio Screen Recorder Component",
5
+ "type": "module",
6
+ "author": "",
7
+ "license": "ISC",
8
+ "private": false,
9
+ "main_changeset": true,
10
+ "scripts": {
11
+ "build": "vite build",
12
+ "dev": "vite",
13
+ "preview": "vite preview"
14
+ },
15
+ "exports": {
16
+ ".": {
17
+ "gradio": "./Index.svelte",
18
+ "svelte": "./dist/Index.svelte",
19
+ "types": "./dist/Index.svelte.d.ts"
20
+ },
21
+ "./example": {
22
+ "gradio": "./Example.svelte",
23
+ "svelte": "./dist/Example.svelte",
24
+ "types": "./dist/Example.svelte.d.ts"
25
+ },
26
+ "./package.json": "./package.json"
27
+ },
28
+ "dependencies": {
29
+ "@gradio/atoms": "0.16.1",
30
+ "@gradio/icons": "0.12.0",
31
+ "@gradio/statustracker": "0.10.11",
32
+ "@gradio/utils": "0.10.2",
33
+ "fix-webm-duration": "^1.0.6",
34
+ "svelte": "^4.2.7"
35
+ },
36
+ "devDependencies": {
37
+ "@gradio/preview": "0.13.0",
38
+ "@sveltejs/vite-plugin-svelte": "^3.0.0",
39
+ "@tsconfig/svelte": "^5.0.4",
40
+ "svelte-preprocess": "^6.0.3",
41
+ "typescript": "^5.8.3",
42
+ "vite": "^5.0.0",
43
+ "vite-plugin-svelte": "^3.0.0"
44
+ },
45
+ "peerDependencies": {
46
+ "svelte": "^4.0.0"
47
+ },
48
+ "repository": {
49
+ "type": "git",
50
+ "url": "git+https://github.com/your-username/your-repo.git",
51
+ "directory": "screenrecorder"
52
+ },
53
+ "files": [
54
+ "dist"
55
+ ]
56
+ }
src/frontend/tsconfig.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "compilerOptions": {
3
+ "target": "ESNext",
4
+ "module": "ESNext",
5
+ "moduleResolution": "node",
6
+ "esModuleInterop": true,
7
+ "resolveJsonModule": true,
8
+ "strict": true,
9
+ "skipLibCheck": true,
10
+ "forceConsistentCasingInFileNames": true,
11
+ "isolatedModules": true,
12
+ "verbatimModuleSyntax": true,
13
+ "jsx": "preserve",
14
+ "lib": ["DOM", "DOM.Iterable", "ESNext"],
15
+ "types": ["svelte"],
16
+ "paths": {
17
+ "@/*": ["./*"]
18
+ }
19
+ },
20
+ "include": ["**/*.d.ts", "**/*.ts", "**/*.js", "**/*.svelte"],
21
+ "exclude": ["node_modules", "**/node_modules/*"]
22
+ }
src/frontend/types.d.ts ADDED
@@ -0,0 +1,23 @@
1
+ // Extend the Window interface
2
+ declare global {
3
+ interface Window {
4
+ requestAnimationFrame(callback: FrameRequestCallback): number;
5
+ cancelAnimationFrame(handle: number): void;
6
+ MediaRecorder: typeof MediaRecorder;
7
+ }
8
+
9
+ interface MediaRecorderOptions {
10
+ mimeType?: string;
11
+ audioBitsPerSecond?: number;
12
+ videoBitsPerSecond?: number;
13
+ bitsPerSecond?: number;
14
+ }
15
+
16
+ interface MediaTrackConstraints {
17
+ displaySurface?: 'browser' | 'monitor' | 'window';
18
+ cursor?: 'always' | 'motion' | 'never';
19
+ }
20
+ }
21
+
22
+ // Export the types
23
+ export {};
src/frontend/vite.config.js ADDED
@@ -0,0 +1,6 @@
1
+ import { defineConfig } from 'vite';
2
+ import { svelte } from '@sveltejs/vite-plugin-svelte';
3
+
4
+ export default defineConfig({
5
+ plugins: [svelte()],
6
+ });
src/manifest.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "name": "Screen Recorder Component",
3
+ "short_name": "ScreenRecorder",
4
+ "description": "Gradio Screen Recording Component",
5
+ "start_url": "/",
6
+ "display": "standalone"
7
+ }
src/pyproject.toml ADDED
@@ -0,0 +1,51 @@
1
+ [build-system]
2
+ requires = [
3
+ "hatchling",
4
+ "hatch-requirements-txt",
5
+ "hatch-fancy-pypi-readme>=22.5.0",
6
+ ]
7
+ build-backend = "hatchling.build"
8
+
9
+ [project]
10
+ name = "gradio_screenrecorder"
11
+ version = "0.0.1"
12
+ description = "Screen Recorder Gradio Custom Component"
13
+ readme = "README.md"
14
+ license = "apache-2.0"
15
+ requires-python = ">=3.10"
16
+ authors = [{ name = "YOUR NAME", email = "YOUREMAIL@domain.com" }]
17
+ keywords = ["gradio-custom-component", "custom-component-track", "gradio", "screen-recorder"]
18
+ # Add dependencies here
19
+ dependencies = ["gradio>=4.0,<6.0"]
20
+ classifiers = [
21
+ 'Development Status :: 3 - Alpha',
22
+ 'Operating System :: OS Independent',
23
+ 'Programming Language :: Python :: 3',
24
+ 'Programming Language :: Python :: 3 :: Only',
25
+ 'Programming Language :: Python :: 3.8',
26
+ 'Programming Language :: Python :: 3.9',
27
+ 'Programming Language :: Python :: 3.10',
28
+ 'Programming Language :: Python :: 3.11',
29
+ 'Topic :: Scientific/Engineering',
30
+ 'Topic :: Scientific/Engineering :: Artificial Intelligence',
31
+ 'Topic :: Scientific/Engineering :: Visualization',
32
+ ]
33
+
34
+ # The repository and space URLs are optional, but recommended.
35
+ # Adding a repository URL will create a badge in the auto-generated README that links to the repository.
36
+ # Adding a space URL will create a badge in the auto-generated README that links to the space.
37
+ # This will make it easy for people to find your deployed demo or source code when they
38
+ # encounter your project in the wild.
39
+
40
+ # [project.urls]
41
+ # repository = "your github repository"
42
+ # space = "your space url"
43
+
44
+ [project.optional-dependencies]
45
+ dev = ["build", "twine"]
46
+
47
+ [tool.hatch.build]
48
+ artifacts = ["/backend/gradio_screenrecorder/templates", "*.pyi"]
49
+
50
+ [tool.hatch.build.targets.wheel]
51
+ packages = ["/backend/gradio_screenrecorder"]
src/requirements.txt ADDED
@@ -0,0 +1,2 @@
1
+
2
+ gradio