Spaces:
Running
Running
Add 2 files
Browse files- index.html +268 -443
- prompts.txt +2 -1
index.html
CHANGED
@@ -3,513 +3,338 @@
|
|
3 |
<head>
|
4 |
<meta charset="UTF-8">
|
5 |
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
6 |
-
<title>
|
7 |
<script src="https://cdn.tailwindcss.com"></script>
|
8 |
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
|
9 |
<style>
|
10 |
-
.
|
11 |
-
background: linear-gradient(135deg, #6e8efb 0%, #a777e3 100%);
|
12 |
-
}
|
13 |
-
.prediction-box {
|
14 |
-
position: absolute;
|
15 |
-
border: 2px solid #4ade80;
|
16 |
-
background-color: rgba(74, 222, 128, 0.2);
|
17 |
-
}
|
18 |
-
.confidence-meter {
|
19 |
-
height: 6px;
|
20 |
-
background: linear-gradient(90deg, #ef4444 0%, #f59e0b 50%, #10b981 100%);
|
21 |
-
}
|
22 |
-
.upload-area {
|
23 |
-
border: 2px dashed #a5b4fc;
|
24 |
-
transition: all 0.3s ease;
|
25 |
-
}
|
26 |
-
.upload-area:hover {
|
27 |
-
border-color: #818cf8;
|
28 |
-
background-color: rgba(129, 140, 248, 0.05);
|
29 |
-
}
|
30 |
-
.model-card {
|
31 |
-
transition: transform 0.3s ease, box-shadow 0.3s ease;
|
32 |
-
}
|
33 |
-
.model-card:hover {
|
34 |
transform: translateY(-5px);
|
35 |
-
box-shadow: 0
|
36 |
}
|
37 |
-
|
38 |
-
0%, 100%
|
39 |
-
opacity: 1;
|
40 |
-
}
|
41 |
-
50% {
|
42 |
-
opacity: 0.5;
|
43 |
-
}
|
44 |
-
}
|
45 |
-
.animate-pulse {
|
46 |
-
animation: pulse 2s cubic-bezier(0.4, 0, 0.6, 1) infinite;
|
47 |
}
|
48 |
</style>
|
49 |
</head>
|
50 |
<body class="bg-gray-50 font-sans">
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
</div>
|
58 |
-
<div class="hidden md:flex space-x-6">
|
59 |
-
<a href="#home" class="hover:text-gray-200 transition">Home</a>
|
60 |
-
<a href="#model" class="hover:text-gray-200 transition">Model</a>
|
61 |
-
<a href="#api" class="hover:text-gray-200 transition">API</a>
|
62 |
-
<a href="https://huggingface.co/docs" target="_blank" class="hover:text-gray-200 transition">Docs</a>
|
63 |
-
<a href="#contact" class="hover:text-gray-200 transition">Contact</a>
|
64 |
-
</div>
|
65 |
-
<button class="md:hidden text-xl" id="mobile-menu-button">
|
66 |
-
<i class="fas fa-bars"></i>
|
67 |
-
</button>
|
68 |
-
</div>
|
69 |
-
<!-- Mobile menu -->
|
70 |
-
<div id="mobile-menu" class="hidden md:hidden bg-indigo-700 px-4 pb-3">
|
71 |
-
<a href="#home" class="block py-2 hover:text-gray-200">Home</a>
|
72 |
-
<a href="#model" class="block py-2 hover:text-gray-200">Model</a>
|
73 |
-
<a href="#api" class="block py-2 hover:text-gray-200">API</a>
|
74 |
-
<a href="https://huggingface.co/docs" target="_blank" class="block py-2 hover:text-gray-200">Docs</a>
|
75 |
-
<a href="#contact" class="block py-2 hover:text-gray-200">Contact</a>
|
76 |
-
</div>
|
77 |
-
</nav>
|
78 |
-
|
79 |
-
<!-- Hero Section -->
|
80 |
-
<section id="home" class="gradient-bg text-white py-16">
|
81 |
-
<div class="container mx-auto px-4 flex flex-col md:flex-row items-center">
|
82 |
-
<div class="md:w-1/2 mb-10 md:mb-0">
|
83 |
-
<h1 class="text-4xl md:text-5xl font-bold mb-4">99.99% Accurate Object Detection</h1>
|
84 |
-
<p class="text-xl mb-6">Our cutting-edge computer vision model delivers near-perfect object detection for your applications.</p>
|
85 |
-
<div class="flex flex-col sm:flex-row space-y-3 sm:space-y-0 sm:space-x-4">
|
86 |
-
<a href="#demo" class="bg-white text-indigo-600 px-6 py-3 rounded-lg font-semibold hover:bg-gray-100 transition text-center">
|
87 |
-
Try Demo <i class="fas fa-arrow-right ml-2"></i>
|
88 |
-
</a>
|
89 |
-
<a href="https://huggingface.co/models" target="_blank" class="border border-white text-white px-6 py-3 rounded-lg font-semibold hover:bg-white hover:text-indigo-600 transition text-center">
|
90 |
-
View on Hugging Face
|
91 |
-
</a>
|
92 |
</div>
|
93 |
-
|
94 |
-
|
95 |
-
<div class="relative w-full max-w-md">
|
96 |
-
<div class="bg-white rounded-xl shadow-2xl overflow-hidden">
|
97 |
-
<img src="https://images.unsplash.com/photo-1507146426996-ef05306b995a?ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D&auto=format&fit=crop&w=1170&q=80"
|
98 |
-
alt="Object detection example" class="w-full h-auto">
|
99 |
-
<div class="prediction-box" style="top: 30%; left: 40%; width: 25%; height: 20%;">
|
100 |
-
<div class="absolute -top-6 left-0 bg-green-500 text-white text-xs px-2 py-1 rounded">Dog 99.99%</div>
|
101 |
-
</div>
|
102 |
-
<div class="prediction-box" style="top: 60%; left: 20%; width: 15%; height: 15%;">
|
103 |
-
<div class="absolute -top-6 left-0 bg-green-500 text-white text-xs px-2 py-1 rounded">Human 99.98%</div>
|
104 |
-
</div>
|
105 |
-
<div class="prediction-box" style="top: 50%; left: 70%; width: 20%; height: 25%;">
|
106 |
-
<div class="absolute -top-6 left-0 bg-green-500 text-white text-xs px-2 py-1 rounded">Grass 99.97%</div>
|
107 |
-
</div>
|
108 |
-
<div class="prediction-box" style="top: 10%; left: 10%; width: 15%; height: 15%;">
|
109 |
-
<div class="absolute -top-6 left-0 bg-green-500 text-white text-xs px-2 py-1 rounded">Sky 99.96%</div>
|
110 |
-
</div>
|
111 |
-
<div class="prediction-box" style="top: 75%; left: 60%; width: 10%; height: 10%;">
|
112 |
-
<div class="absolute -top-6 left-0 bg-green-500 text-white text-xs px-2 py-1 rounded">Leash 99.95%</div>
|
113 |
-
</div>
|
114 |
-
</div>
|
115 |
</div>
|
116 |
</div>
|
117 |
</div>
|
118 |
-
</
|
119 |
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
<div class="flex flex-col lg:flex-row gap-8">
|
126 |
-
<!-- Upload Area -->
|
127 |
-
<div class="lg:w-1/2">
|
128 |
-
<div class="upload-area rounded-xl p-8 text-center cursor-pointer mb-6">
|
129 |
-
<input type="file" id="image-upload" class="hidden" accept="image/*">
|
130 |
-
<div class="flex flex-col items-center justify-center py-12">
|
131 |
-
<i class="fas fa-cloud-upload-alt text-4xl text-indigo-500 mb-4"></i>
|
132 |
-
<h3 class="text-xl font-semibold text-gray-700 mb-2">Upload an Image</h3>
|
133 |
-
<p class="text-gray-500 mb-4">or drag and drop</p>
|
134 |
-
<p class="text-sm text-gray-400">PNG, JPG, JPEG up to 10MB</p>
|
135 |
-
</div>
|
136 |
-
</div>
|
137 |
-
<div class="flex justify-center">
|
138 |
-
<button id="sample-image-btn" class="bg-indigo-600 text-white px-6 py-3 rounded-lg font-semibold hover:bg-indigo-700 transition">
|
139 |
-
Use Sample Image
|
140 |
-
</button>
|
141 |
-
</div>
|
142 |
-
</div>
|
143 |
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
<div id="placeholder-text" class="text-center text-gray-500">
|
149 |
-
<i class="fas fa-image text-4xl mb-4"></i>
|
150 |
-
<p>Your detected objects will appear here</p>
|
151 |
-
</div>
|
152 |
-
<canvas id="result-canvas" class="hidden w-full h-auto rounded-lg"></canvas>
|
153 |
</div>
|
|
|
|
|
154 |
</div>
|
155 |
|
156 |
-
<div
|
157 |
-
<div class="
|
158 |
-
<
|
159 |
-
<span id="confidence-value" class="text-sm font-medium text-green-600">99.99%</span>
|
160 |
</div>
|
161 |
-
<
|
162 |
-
<
|
163 |
</div>
|
164 |
|
165 |
-
<div
|
166 |
-
<
|
167 |
-
|
168 |
-
<!-- Detection items will be added here by JavaScript -->
|
169 |
</div>
|
|
|
|
|
170 |
</div>
|
171 |
</div>
|
172 |
</div>
|
173 |
-
</
|
174 |
-
</section>
|
175 |
|
176 |
-
|
177 |
-
|
178 |
-
|
179 |
-
|
180 |
-
|
181 |
-
|
182 |
-
|
183 |
-
|
184 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
185 |
</div>
|
186 |
-
<h3 class="text-xl font-semibold mb-2 text-gray-800">Unmatched Accuracy</h3>
|
187 |
-
<p class="text-gray-600">With 99.99% precision, our model outperforms all existing solutions in object detection benchmarks.</p>
|
188 |
</div>
|
189 |
|
190 |
-
<div class="
|
191 |
-
<
|
192 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
193 |
</div>
|
194 |
-
<h3 class="text-xl font-semibold mb-2 text-gray-800">Real-Time Performance</h3>
|
195 |
-
<p class="text-gray-600">Optimized for speed without compromising accuracy, perfect for live applications.</p>
|
196 |
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
197 |
|
198 |
-
<div class="
|
199 |
-
<div class="
|
200 |
-
<
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
201 |
</div>
|
202 |
-
<h3 class="text-xl font-semibold mb-2 text-gray-800">1000+ Classes</h3>
|
203 |
-
<p class="text-gray-600">Comprehensive detection across a vast range of objects, from everyday items to specialized equipment.</p>
|
204 |
</div>
|
205 |
|
206 |
-
<div class="
|
207 |
-
<
|
208 |
-
|
209 |
-
|
210 |
-
|
211 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
212 |
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
213 |
|
214 |
-
<div class="
|
215 |
-
<div class="
|
216 |
-
<
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
217 |
</div>
|
218 |
-
<h3 class="text-xl font-semibold mb-2 text-gray-800">Edge Compatible</h3>
|
219 |
-
<p class="text-gray-600">Lightweight versions available for mobile and edge device deployment.</p>
|
220 |
</div>
|
221 |
|
222 |
-
<div class="
|
223 |
-
<
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
|
|
|
|
|
|
|
228 |
</div>
|
229 |
</div>
|
230 |
-
</
|
231 |
-
</section>
|
232 |
|
233 |
-
|
234 |
-
|
235 |
-
|
236 |
-
|
237 |
-
|
238 |
-
|
239 |
-
|
240 |
-
|
241 |
-
|
242 |
-
|
243 |
-
|
244 |
-
|
245 |
-
<th class="py-3 px-4 text-left text-gray-700 font-semibold">Recall</th>
|
246 |
-
<th class="py-3 px-4 text-left text-gray-700 font-semibold">FPS</th>
|
247 |
-
</tr>
|
248 |
-
</thead>
|
249 |
-
<tbody class="divide-y divide-gray-200">
|
250 |
-
<tr class="hover:bg-gray-50">
|
251 |
-
<td class="py-4 px-4 font-medium text-gray-900">PrecisionVision (Ours)</td>
|
252 |
-
<td class="py-4 px-4 text-green-600 font-semibold">99.99%</td>
|
253 |
-
<td class="py-4 px-4 text-green-600 font-semibold">99.99%</td>
|
254 |
-
<td class="py-4 px-4 text-green-600 font-semibold">99.98%</td>
|
255 |
-
<td class="py-4 px-4">62</td>
|
256 |
-
</tr>
|
257 |
-
<tr class="hover:bg-gray-50">
|
258 |
-
<td class="py-4 px-4">YOLOv8</td>
|
259 |
-
<td class="py-4 px-4">53.9%</td>
|
260 |
-
<td class="py-4 px-4">66.2%</td>
|
261 |
-
<td class="py-4 px-4">57.9%</td>
|
262 |
-
<td class="py-4 px-4">78</td>
|
263 |
-
</tr>
|
264 |
-
<tr class="hover:bg-gray-50">
|
265 |
-
<td class="py-4 px-4">Faster R-CNN</td>
|
266 |
-
<td class="py-4 px-4">55.2%</td>
|
267 |
-
<td class="py-4 px-4">68.1%</td>
|
268 |
-
<td class="py-4 px-4">59.3%</td>
|
269 |
-
<td class="py-4 px-4">26</td>
|
270 |
-
</tr>
|
271 |
-
<tr class="hover:bg-gray-50">
|
272 |
-
<td class="py-4 px-4">EfficientDet</td>
|
273 |
-
<td class="py-4 px-4">52.2%</td>
|
274 |
-
<td class="py-4 px-4">64.8%</td>
|
275 |
-
<td class="py-4 px-4">56.1%</td>
|
276 |
-
<td class="py-4 px-4">56</td>
|
277 |
-
</tr>
|
278 |
-
</tbody>
|
279 |
-
</table>
|
280 |
-
</div>
|
281 |
-
|
282 |
-
<div class="mt-8 text-center">
|
283 |
-
<p class="text-gray-600 mb-4">Tested on COCO 2017 validation set with RTX 4090 GPU</p>
|
284 |
-
<a href="https://huggingface.co/spaces" target="_blank" class="bg-indigo-600 text-white px-6 py-3 rounded-lg font-semibold hover:bg-indigo-700 transition inline-block">
|
285 |
-
View Full Benchmark Details
|
286 |
-
</a>
|
287 |
</div>
|
288 |
-
</
|
289 |
-
</
|
290 |
|
291 |
-
|
292 |
-
<section class="gradient-bg text-white py-16">
|
293 |
-
<div class="container mx-auto px-4 text-center">
|
294 |
-
<h2 class="text-3xl md:text-4xl font-bold mb-6">Ready to Integrate 99.99% Accurate Vision?</h2>
|
295 |
-
<p class="text-xl mb-8 max-w-3xl mx-auto">Join hundreds of developers using PrecisionVision for their computer vision applications.</p>
|
296 |
-
<div class="flex flex-col sm:flex-row justify-center space-y-4 sm:space-y-0 sm:space-x-6">
|
297 |
-
<a href="https://huggingface.co/settings/tokens" target="_blank" class="bg-white text-indigo-600 px-8 py-4 rounded-lg font-semibold hover:bg-gray-100 transition text-lg inline-block">
|
298 |
-
Get API Key <i class="fas fa-key ml-2"></i>
|
299 |
-
</a>
|
300 |
-
<a href="https://huggingface.co/docs" target="_blank" class="border border-white text-white px-8 py-4 rounded-lg font-semibold hover:bg-white hover:text-indigo-600 transition text-lg inline-block">
|
301 |
-
View Documentation <i class="fas fa-book ml-2"></i>
|
302 |
-
</a>
|
303 |
-
</div>
|
304 |
-
</div>
|
305 |
-
</section>
|
306 |
-
|
307 |
-
<!-- Footer -->
|
308 |
-
<footer id="contact" class="bg-gray-900 text-white py-12">
|
309 |
<div class="container mx-auto px-4">
|
310 |
-
<div class="grid grid-cols-1 md:grid-cols-
|
311 |
<div>
|
312 |
-
<
|
313 |
-
|
314 |
-
<span class="text-xl font-bold">PrecisionVision</span>
|
315 |
-
</div>
|
316 |
-
<p class="text-gray-400">The most accurate object detection model available today.</p>
|
317 |
-
<div class="flex space-x-4 mt-4">
|
318 |
-
<a href="https://github.com" target="_blank" class="text-gray-400 hover:text-white"><i class="fab fa-github"></i></a>
|
319 |
-
<a href="https://twitter.com" target="_blank" class="text-gray-400 hover:text-white"><i class="fab fa-twitter"></i></a>
|
320 |
-
<a href="https://linkedin.com" target="_blank" class="text-gray-400 hover:text-white"><i class="fab fa-linkedin"></i></a>
|
321 |
-
</div>
|
322 |
</div>
|
323 |
<div>
|
324 |
-
<
|
325 |
<ul class="space-y-2">
|
326 |
-
<li><a href="
|
327 |
-
<li><a href="https://
|
328 |
-
<li><a href="
|
329 |
-
<li><a href="https://huggingface.co/integrations" target="_blank" class="text-gray-400 hover:text-white">Integrations</a></li>
|
330 |
</ul>
|
331 |
</div>
|
332 |
-
<div>
|
333 |
-
<
|
334 |
<ul class="space-y-2">
|
335 |
-
<li
|
336 |
-
|
337 |
-
|
338 |
-
|
339 |
-
|
340 |
-
|
341 |
-
|
342 |
-
|
343 |
-
<ul class="space-y-2">
|
344 |
-
<li><a href="#about" class="text-gray-400 hover:text-white">About</a></li>
|
345 |
-
<li><a href="https://huggingface.co/careers" target="_blank" class="text-gray-400 hover:text-white">Careers</a></li>
|
346 |
-
<li><a href="#contact" class="text-gray-400 hover:text-white">Contact</a></li>
|
347 |
-
<li><a href="https://huggingface.co/legal" target="_blank" class="text-gray-400 hover:text-white">Legal</a></li>
|
348 |
</ul>
|
349 |
</div>
|
350 |
</div>
|
351 |
-
<div class="border-t border-gray-
|
352 |
-
<p
|
353 |
</div>
|
354 |
</div>
|
355 |
</footer>
|
356 |
|
357 |
<script>
|
358 |
-
//
|
359 |
-
document.
|
360 |
-
|
361 |
-
const mobileMenuButton = document.getElementById('mobile-menu-button');
|
362 |
-
const mobileMenu = document.getElementById('mobile-menu');
|
363 |
-
|
364 |
-
mobileMenuButton.addEventListener('click', function() {
|
365 |
-
mobileMenu.classList.toggle('hidden');
|
366 |
-
});
|
367 |
-
|
368 |
-
// Image detection functionality
|
369 |
-
const uploadArea = document.querySelector('.upload-area');
|
370 |
-
const fileInput = document.getElementById('image-upload');
|
371 |
-
const placeholderText = document.getElementById('placeholder-text');
|
372 |
-
const resultCanvas = document.getElementById('result-canvas');
|
373 |
-
const confidenceDisplay = document.getElementById('confidence-display');
|
374 |
-
const detectionsContainer = document.getElementById('detections-container');
|
375 |
-
const sampleImageBtn = document.getElementById('sample-image-btn');
|
376 |
-
|
377 |
-
// Handle drag and drop
|
378 |
-
uploadArea.addEventListener('click', function() {
|
379 |
-
fileInput.click();
|
380 |
-
});
|
381 |
-
|
382 |
-
uploadArea.addEventListener('dragover', function(e) {
|
383 |
e.preventDefault();
|
384 |
-
this.
|
385 |
-
|
386 |
-
|
387 |
-
uploadArea.addEventListener('dragleave', function() {
|
388 |
-
this.classList.remove('border-indigo-500', 'bg-indigo-50');
|
389 |
-
});
|
390 |
-
|
391 |
-
uploadArea.addEventListener('drop', function(e) {
|
392 |
-
e.preventDefault();
|
393 |
-
this.classList.remove('border-indigo-500', 'bg-indigo-50');
|
394 |
-
|
395 |
-
if (e.dataTransfer.files.length) {
|
396 |
-
fileInput.files = e.dataTransfer.files;
|
397 |
-
handleImageUpload(e.dataTransfer.files[0]);
|
398 |
-
}
|
399 |
-
});
|
400 |
-
|
401 |
-
fileInput.addEventListener('change', function() {
|
402 |
-
if (this.files.length) {
|
403 |
-
handleImageUpload(this.files[0]);
|
404 |
-
}
|
405 |
});
|
406 |
-
|
407 |
-
|
408 |
-
|
409 |
-
|
|
|
|
|
|
|
410 |
});
|
411 |
-
|
412 |
-
function handleImageUpload(file) {
|
413 |
-
if (!file.type.match('image.*')) {
|
414 |
-
alert('Please upload an image file');
|
415 |
-
return;
|
416 |
-
}
|
417 |
-
|
418 |
-
const reader = new FileReader();
|
419 |
-
reader.onload = function(e) {
|
420 |
-
displayImageWithDetections(e.target.result);
|
421 |
-
};
|
422 |
-
reader.readAsDataURL(file);
|
423 |
-
}
|
424 |
-
|
425 |
-
function displayImageWithDetections(imageSrc) {
|
426 |
-
const img = new Image();
|
427 |
-
img.onload = function() {
|
428 |
-
// Set canvas dimensions
|
429 |
-
const maxWidth = 800;
|
430 |
-
const scale = Math.min(maxWidth / img.width, 1);
|
431 |
-
resultCanvas.width = img.width * scale;
|
432 |
-
resultCanvas.height = img.height * scale;
|
433 |
-
|
434 |
-
const ctx = resultCanvas.getContext('2d');
|
435 |
-
|
436 |
-
// Draw image
|
437 |
-
ctx.drawImage(img, 0, 0, resultCanvas.width, resultCanvas.height);
|
438 |
-
|
439 |
-
// Simulate detections (in a real app, this would come from your model)
|
440 |
-
simulateDetections(ctx, img.width * scale, img.height * scale);
|
441 |
-
|
442 |
-
// Show results
|
443 |
-
placeholderText.classList.add('hidden');
|
444 |
-
resultCanvas.classList.remove('hidden');
|
445 |
-
confidenceDisplay.classList.remove('hidden');
|
446 |
-
|
447 |
-
// Populate detections list
|
448 |
-
populateDetectionsList();
|
449 |
-
};
|
450 |
-
img.src = imageSrc;
|
451 |
-
}
|
452 |
-
|
453 |
-
function simulateDetections(ctx, imgWidth, imgHeight) {
|
454 |
-
// These would be replaced with actual model predictions
|
455 |
-
const simulatedDetections = [
|
456 |
-
{ class: 'dog', confidence: 0.9999, x: 0.4, y: 0.3, width: 0.25, height: 0.2 },
|
457 |
-
{ class: 'human', confidence: 0.9998, x: 0.2, y: 0.6, width: 0.15, height: 0.15 },
|
458 |
-
{ class: 'grass', confidence: 0.9997, x: 0.7, y: 0.5, width: 0.2, height: 0.25 },
|
459 |
-
{ class: 'sky', confidence: 0.9996, x: 0.1, y: 0.1, width: 0.15, height: 0.15 },
|
460 |
-
{ class: 'leash', confidence: 0.9995, x: 0.6, y: 0.75, width: 0.1, height: 0.1 },
|
461 |
-
{ class: 'collar', confidence: 0.9994, x: 0.45, y: 0.35, width: 0.05, height: 0.05 },
|
462 |
-
{ class: 'fur', confidence: 0.9993, x: 0.35, y: 0.4, width: 0.3, height: 0.25 }
|
463 |
-
];
|
464 |
-
|
465 |
-
simulatedDetections.forEach(det => {
|
466 |
-
const x = det.x * imgWidth;
|
467 |
-
const y = det.y * imgHeight;
|
468 |
-
const width = det.width * imgWidth;
|
469 |
-
const height = det.height * imgHeight;
|
470 |
-
|
471 |
-
// Draw bounding box
|
472 |
-
ctx.strokeStyle = '#4ade80';
|
473 |
-
ctx.lineWidth = 2;
|
474 |
-
ctx.strokeRect(x, y, width, height);
|
475 |
-
|
476 |
-
// Draw background for label
|
477 |
-
ctx.fillStyle = 'rgba(74, 222, 128, 0.8)';
|
478 |
-
const text = `${det.class} ${(det.confidence * 100).toFixed(2)}%`;
|
479 |
-
const textWidth = ctx.measureText(text).width + 10;
|
480 |
-
ctx.fillRect(x, y - 25, textWidth, 25);
|
481 |
-
|
482 |
-
// Draw label text
|
483 |
-
ctx.fillStyle = 'white';
|
484 |
-
ctx.font = 'bold 12px sans-serif';
|
485 |
-
ctx.fillText(text, x + 5, y - 8);
|
486 |
-
});
|
487 |
-
}
|
488 |
-
|
489 |
-
function populateDetectionsList() {
|
490 |
-
// Simulated data - replace with actual model output
|
491 |
-
const simulatedDetections = [
|
492 |
-
{ class: 'Dog', confidence: 99.99, color: 'bg-green-500' },
|
493 |
-
{ class: 'Human', confidence: 99.98, color: 'bg-blue-500' },
|
494 |
-
{ class: 'Grass', confidence: 99.97, color: 'bg-purple-500' },
|
495 |
-
{ class: 'Sky', confidence: 99.96, color: 'bg-indigo-500' },
|
496 |
-
{ class: 'Leash', confidence: 99.95, color: 'bg-yellow-500' },
|
497 |
-
{ class: 'Collar', confidence: 99.94, color: 'bg-red-500' },
|
498 |
-
{ class: 'Fur', confidence: 99.93, color: 'bg-pink-500' }
|
499 |
-
];
|
500 |
-
|
501 |
-
detectionsContainer.innerHTML = simulatedDetections.map(det => `
|
502 |
-
<div class="flex items-center justify-between p-3 bg-gray-100 rounded-lg">
|
503 |
-
<div class="flex items-center">
|
504 |
-
<span class="w-3 h-3 rounded-full ${det.color} mr-2"></span>
|
505 |
-
<span class="font-medium">${det.class}</span>
|
506 |
-
</div>
|
507 |
-
<span class="font-semibold text-green-600">${det.confidence.toFixed(2)}%</span>
|
508 |
-
</div>
|
509 |
-
`).join('');
|
510 |
-
|
511 |
-
document.getElementById('detections-list').classList.remove('hidden');
|
512 |
-
}
|
513 |
});
|
514 |
</script>
|
515 |
<p style="border-radius: 8px; text-align: center; font-size: 12px; color: #fff; margin-top: 16px;position: fixed; left: 8px; bottom: 8px; z-index: 10; background: rgba(0, 0, 0, 0.8); padding: 4px 8px;">Made with <img src="https://enzostvs-deepsite.hf.space/logo.svg" alt="DeepSite Logo" style="width: 16px; height: 16px; vertical-align: middle;display:inline-block;margin-right:3px;filter:brightness(0) invert(1);"><a href="https://enzostvs-deepsite.hf.space" style="color: #fff;text-decoration: underline;" target="_blank" >DeepSite</a> - 🧬 <a href="https://enzostvs-deepsite.hf.space?remix=Jobwengi/object-detection-model" style="color: #fff;text-decoration: underline;" target="_blank" >Remix</a></p></body>
|
|
|
3 |
<head>
|
4 |
<meta charset="UTF-8">
|
5 |
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
6 |
+
<title>LayoutLMv3 Invoice Annotation Guide</title>
|
7 |
<script src="https://cdn.tailwindcss.com"></script>
|
8 |
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
|
9 |
<style>
|
10 |
+
.annotation-card:hover {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
transform: translateY(-5px);
|
12 |
+
box-shadow: 0 10px 25px rgba(0, 0, 0, 0.1);
|
13 |
}
|
14 |
+
.diagram-container {
|
15 |
+
background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
}
|
17 |
</style>
|
18 |
</head>
|
19 |
<body class="bg-gray-50 font-sans">
|
20 |
+
<header class="bg-blue-600 text-white shadow-lg">
|
21 |
+
<div class="container mx-auto px-4 py-6">
|
22 |
+
<div class="flex justify-between items-center">
|
23 |
+
<div>
|
24 |
+
<h1 class="text-3xl font-bold">Invoice Annotation for LayoutLMv3</h1>
|
25 |
+
<p class="mt-2">A comprehensive guide to preparing your dataset</p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
26 |
</div>
|
27 |
+
<div class="hidden md:block">
|
28 |
+
<img src="https://via.placeholder.com/80" alt="AI Icon" class="h-16 w-16 rounded-full border-2 border-white">
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
</div>
|
30 |
</div>
|
31 |
</div>
|
32 |
+
</header>
|
33 |
|
34 |
+
<main class="container mx-auto px-4 py-8">
|
35 |
+
<section class="mb-12">
|
36 |
+
<div class="bg-white rounded-xl shadow-md p-6 mb-8">
|
37 |
+
<h2 class="text-2xl font-semibold text-blue-700 mb-4">Getting Started with Annotation</h2>
|
38 |
+
<p class="text-gray-700 mb-4">Proper annotation is crucial for training LayoutLMv3 to understand invoices. This guide walks you through the process of creating a high-quality dataset.</p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
39 |
|
40 |
+
<div class="grid grid-cols-1 md:grid-cols-3 gap-6 mt-6">
|
41 |
+
<div class="annotation-card bg-white border border-gray-200 rounded-lg p-6 transition-all duration-300">
|
42 |
+
<div class="text-blue-500 mb-4">
|
43 |
+
<i class="fas fa-file-invoice-dollar text-4xl"></i>
|
|
|
|
|
|
|
|
|
|
|
44 |
</div>
|
45 |
+
<h3 class="text-xl font-semibold mb-2">1. Collect Samples</h3>
|
46 |
+
<p class="text-gray-600">Gather 50 diverse invoice samples representing different layouts, languages, and formats you expect to process.</p>
|
47 |
</div>
|
48 |
|
49 |
+
<div class="annotation-card bg-white border border-gray-200 rounded-lg p-6 transition-all duration-300">
|
50 |
+
<div class="text-green-500 mb-4">
|
51 |
+
<i class="fas fa-tags text-4xl"></i>
|
|
|
52 |
</div>
|
53 |
+
<h3 class="text-xl font-semibold mb-2">2. Define Labels</h3>
|
54 |
+
<p class="text-gray-600">Create a consistent label schema (e.g., vendor_name, invoice_date, total_amount) that covers all relevant fields.</p>
|
55 |
</div>
|
56 |
|
57 |
+
<div class="annotation-card bg-white border border-gray-200 rounded-lg p-6 transition-all duration-300">
|
58 |
+
<div class="text-purple-500 mb-4">
|
59 |
+
<i class="fas fa-mouse-pointer text-4xl"></i>
|
|
|
60 |
</div>
|
61 |
+
<h3 class="text-xl font-semibold mb-2">3. Choose Tools</h3>
|
62 |
+
<p class="text-gray-600">Select annotation tools like Label Studio, VGG Image Annotator, or custom solutions that support bounding boxes and text.</p>
|
63 |
</div>
|
64 |
</div>
|
65 |
</div>
|
66 |
+
</section>
|
|
|
67 |
|
68 |
+
<section class="mb-12">
|
69 |
+
<div class="bg-white rounded-xl shadow-md p-6">
|
70 |
+
<h2 class="text-2xl font-semibold text-blue-700 mb-4">Annotation Process Details</h2>
|
71 |
+
|
72 |
+
<div class="diagram-container rounded-lg p-6 mb-6">
|
73 |
+
<div class="flex flex-col md:flex-row items-center">
|
74 |
+
<div class="md:w-1/2 mb-6 md:mb-0 md:pr-6">
|
75 |
+
<h3 class="text-xl font-semibold mb-3">Key Annotation Steps</h3>
|
76 |
+
<ol class="list-decimal list-inside space-y-3 text-gray-700">
|
77 |
+
<li class="font-medium">Draw bounding boxes around each relevant text element</li>
|
78 |
+
<li>Assign the appropriate label to each box</li>
|
79 |
+
<li>Include the OCR-extracted text content</li>
|
80 |
+
<li>Mark relationships between elements (optional)</li>
|
81 |
+
<li>Validate annotations for consistency</li>
|
82 |
+
</ol>
|
83 |
+
</div>
|
84 |
+
<div class="md:w-1/2">
|
85 |
+
<img src="https://via.placeholder.com/500x300" alt="Annotation Example" class="w-full rounded-lg border border-gray-200">
|
86 |
+
</div>
|
87 |
</div>
|
|
|
|
|
88 |
</div>
|
89 |
|
90 |
+
<div class="mt-6">
|
91 |
+
<h3 class="text-xl font-semibold mb-3">Common Invoice Fields to Annotate</h3>
|
92 |
+
<div class="grid grid-cols-2 md:grid-cols-4 gap-4">
|
93 |
+
<div class="bg-blue-50 p-3 rounded-lg">
|
94 |
+
<h4 class="font-medium text-blue-700">Header Fields</h4>
|
95 |
+
<ul class="mt-2 text-sm text-gray-600">
|
96 |
+
<li>Invoice Number</li>
|
97 |
+
<li>Invoice Date</li>
|
98 |
+
<li>Due Date</li>
|
99 |
+
<li>Vendor Name</li>
|
100 |
+
</ul>
|
101 |
+
</div>
|
102 |
+
<div class="bg-green-50 p-3 rounded-lg">
|
103 |
+
<h4 class="font-medium text-green-700">Customer Info</h4>
|
104 |
+
<ul class="mt-2 text-sm text-gray-600">
|
105 |
+
<li>Customer Name</li>
|
106 |
+
<li>Customer Address</li>
|
107 |
+
<li>Customer ID</li>
|
108 |
+
<li>Tax ID</li>
|
109 |
+
</ul>
|
110 |
+
</div>
|
111 |
+
<div class="bg-yellow-50 p-3 rounded-lg">
|
112 |
+
<h4 class="font-medium text-yellow-700">Line Items</h4>
|
113 |
+
<ul class="mt-2 text-sm text-gray-600">
|
114 |
+
<li>Description</li>
|
115 |
+
<li>Quantity</li>
|
116 |
+
<li>Unit Price</li>
|
117 |
+
<li>Line Total</li>
|
118 |
+
</ul>
|
119 |
+
</div>
|
120 |
+
<div class="bg-purple-50 p-3 rounded-lg">
|
121 |
+
<h4 class="font-medium text-purple-700">Totals</h4>
|
122 |
+
<ul class="mt-2 text-sm text-gray-600">
|
123 |
+
<li>Subtotal</li>
|
124 |
+
<li>Tax Amount</li>
|
125 |
+
<li>Discount</li>
|
126 |
+
<li>Total Amount</li>
|
127 |
+
</ul>
|
128 |
+
</div>
|
129 |
</div>
|
|
|
|
|
130 |
</div>
|
131 |
+
</div>
|
132 |
+
</section>
|
133 |
+
|
134 |
+
<section class="mb-12">
|
135 |
+
<div class="bg-white rounded-xl shadow-md p-6">
|
136 |
+
<h2 class="text-2xl font-semibold text-blue-700 mb-4">Tools & Resources</h2>
|
137 |
|
138 |
+
<div class="grid grid-cols-1 md:grid-cols-2 gap-6">
|
139 |
+
<div class="border border-gray-200 rounded-lg p-5">
|
140 |
+
<h3 class="text-xl font-semibold mb-3 flex items-center">
|
141 |
+
<i class="fas fa-tools text-blue-500 mr-2"></i>
|
142 |
+
Annotation Tools
|
143 |
+
</h3>
|
144 |
+
<ul class="space-y-2">
|
145 |
+
<li>
|
146 |
+
<a href="https://labelstud.io/" target="_blank" class="text-blue-600 hover:underline flex items-center">
|
147 |
+
<i class="fas fa-external-link-alt mr-2 text-sm"></i>
|
148 |
+
Label Studio - Open source annotation tool
|
149 |
+
</a>
|
150 |
+
</li>
|
151 |
+
<li>
|
152 |
+
<a href="https://www.robots.ox.ac.uk/~vgg/software/via/" target="_blank" class="text-blue-600 hover:underline flex items-center">
|
153 |
+
<i class="fas fa-external-link-alt mr-2 text-sm"></i>
|
154 |
+
VGG Image Annotator (VIA)
|
155 |
+
</a>
|
156 |
+
</li>
|
157 |
+
<li>
|
158 |
+
<a href="https://docs.microsoft.com/en-us/azure/cognitive-services/form-recognizer/label-tool" target="_blank" class="text-blue-600 hover:underline flex items-center">
|
159 |
+
<i class="fas fa-external-link-alt mr-2 text-sm"></i>
|
160 |
+
Azure Form Recognizer Labeling Tool
|
161 |
+
</a>
|
162 |
+
</li>
|
163 |
+
</ul>
|
164 |
+
</div>
|
165 |
+
|
166 |
+
<div class="border border-gray-200 rounded-lg p-5">
|
167 |
+
<h3 class="text-xl font-semibold mb-3 flex items-center">
|
168 |
+
<i class="fas fa-book text-green-500 mr-2"></i>
|
169 |
+
Documentation
|
170 |
+
</h3>
|
171 |
+
<ul class="space-y-2">
|
172 |
+
<li>
|
173 |
+
<a href="https://huggingface.co/docs/transformers/model_doc/layoutlmv3" target="_blank" class="text-blue-600 hover:underline flex items-center">
|
174 |
+
<i class="fas fa-external-link-alt mr-2 text-sm"></i>
|
175 |
+
LayoutLMv3 Official Documentation
|
176 |
+
</a>
|
177 |
+
</li>
|
178 |
+
<li>
|
179 |
+
<a href="https://arxiv.org/abs/2204.08387" target="_blank" class="text-blue-600 hover:underline flex items-center">
|
180 |
+
<i class="fas fa-external-link-alt mr-2 text-sm"></i>
|
181 |
+
LayoutLMv3 Research Paper
|
182 |
+
</a>
|
183 |
+
</li>
|
184 |
+
<li>
|
185 |
+
<a href="https://github.com/microsoft/unilm/tree/master/layoutlmv3" target="_blank" class="text-blue-600 hover:underline flex items-center">
|
186 |
+
<i class="fas fa-external-link-alt mr-2 text-sm"></i>
|
187 |
+
GitHub Repository
|
188 |
+
</a>
|
189 |
+
</li>
|
190 |
+
</ul>
|
191 |
</div>
|
|
|
|
|
192 |
</div>
|
193 |
|
194 |
+
<div class="mt-6 bg-blue-50 rounded-lg p-5">
|
195 |
+
<h3 class="text-xl font-semibold mb-3 text-blue-700">Sample Annotation Format</h3>
|
196 |
+
<pre class="bg-white p-4 rounded-md overflow-x-auto text-sm">
|
197 |
+
{
|
198 |
+
"image_path": "invoice_001.jpg",
|
199 |
+
"width": 2480,
|
200 |
+
"height": 3508,
|
201 |
+
"annotations": [
|
202 |
+
{
|
203 |
+
"label": "vendor_name",
|
204 |
+
"bbox": [320, 120, 800, 160],
|
205 |
+
"text": "ACME Corporation"
|
206 |
+
},
|
207 |
+
{
|
208 |
+
"label": "invoice_number",
|
209 |
+
"bbox": [1600, 120, 2000, 160],
|
210 |
+
"text": "INV-2023-0042"
|
211 |
+
},
|
212 |
+
{
|
213 |
+
"label": "invoice_date",
|
214 |
+
"bbox": [1600, 180, 2000, 220],
|
215 |
+
"text": "2023-06-15"
|
216 |
+
}
|
217 |
+
]
|
218 |
+
}</pre>
|
219 |
+
<p class="mt-3 text-sm text-gray-600">This JSON structure shows how annotated data should be formatted for LayoutLMv3 training.</p>
|
220 |
</div>
|
221 |
+
</div>
|
222 |
+
</section>
|
223 |
+
|
224 |
+
<section class="mb-12">
|
225 |
+
<div class="bg-white rounded-xl shadow-md p-6">
|
226 |
+
<h2 class="text-2xl font-semibold text-blue-700 mb-4">Next Steps After Annotation</h2>
|
227 |
|
228 |
+
<div class="grid grid-cols-1 md:grid-cols-2 gap-6">
|
229 |
+
<div class="bg-gradient-to-r from-blue-50 to-purple-50 rounded-lg p-6">
|
230 |
+
<h3 class="text-xl font-semibold mb-3 flex items-center">
|
231 |
+
<i class="fas fa-cogs text-purple-500 mr-2"></i>
|
232 |
+
Training Process
|
233 |
+
</h3>
|
234 |
+
<ol class="list-decimal list-inside space-y-2 text-gray-700">
|
235 |
+
<li>Split your dataset (70% train, 15% validation, 15% test)</li>
|
236 |
+
<li>Configure LayoutLMv3 model parameters</li>
|
237 |
+
<li>Start with a small learning rate (e.g., 5e-5)</li>
|
238 |
+
<li>Monitor loss and accuracy metrics</li>
|
239 |
+
<li>Adjust hyperparameters as needed</li>
|
240 |
+
</ol>
|
241 |
+
</div>
|
242 |
+
|
243 |
+
<div class="bg-gradient-to-r from-green-50 to-blue-50 rounded-lg p-6">
|
244 |
+
<h3 class="text-xl font-semibold mb-3 flex items-center">
|
245 |
+
<i class="fas fa-chart-line text-green-500 mr-2"></i>
|
246 |
+
Evaluation Metrics
|
247 |
+
</h3>
|
248 |
+
<ul class="space-y-2 text-gray-700">
|
249 |
+
<li><span class="font-medium">Field-level F1 score:</span> Precision and recall for each field type</li>
|
250 |
+
<li><span class="font-medium">Exact match accuracy:</span> Percentage of perfectly extracted fields</li>
|
251 |
+
<li><span class="font-medium">Partial match accuracy:</span> For numeric fields with small deviations</li>
|
252 |
+
<li><span class="font-medium">OCR quality impact:</span> Compare with ground truth OCR</li>
|
253 |
+
</ul>
|
254 |
</div>
|
|
|
|
|
255 |
</div>
|
256 |
|
257 |
+
<div class="mt-6 bg-yellow-50 rounded-lg p-5">
|
258 |
+
<h3 class="text-xl font-semibold mb-3 text-yellow-700">Scaling Up</h3>
|
259 |
+
<p class="text-gray-700">Once you validate the approach with your 50-sample dataset, consider:</p>
|
260 |
+
<ul class="list-disc list-inside mt-2 space-y-1 text-gray-700">
|
261 |
+
<li>Creating annotation guidelines for your team</li>
|
262 |
+
<li>Implementing quality control processes</li>
|
263 |
+
<li>Exploring semi-supervised learning techniques</li>
|
264 |
+
<li>Using data augmentation for layout variations</li>
|
265 |
+
</ul>
|
266 |
</div>
|
267 |
</div>
|
268 |
+
</section>
|
|
|
269 |
|
270 |
+
<section class="bg-blue-600 text-white rounded-xl shadow-lg p-8">
|
271 |
+
<div class="max-w-3xl mx-auto text-center">
|
272 |
+
<h2 class="text-3xl font-bold mb-4">Ready to Start Annotating?</h2>
|
273 |
+
<p class="text-xl mb-6">Get started with your invoice annotation project today and unlock the power of LayoutLMv3 for document understanding.</p>
|
274 |
+
<div class="flex flex-col sm:flex-row justify-center gap-4">
|
275 |
+
<a href="#contact" class="bg-white text-blue-600 font-semibold px-6 py-3 rounded-lg hover:bg-blue-50 transition duration-300">
|
276 |
+
<i class="fas fa-envelope mr-2"></i> Contact for Help
|
277 |
+
</a>
|
278 |
+
<a href="https://github.com/microsoft/unilm/tree/master/layoutlmv3" target="_blank" class="bg-blue-800 text-white font-semibold px-6 py-3 rounded-lg hover:bg-blue-700 transition duration-300">
|
279 |
+
<i class="fab fa-github mr-2"></i> View GitHub Repo
|
280 |
+
</a>
|
281 |
+
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
282 |
</div>
|
283 |
+
</section>
|
284 |
+
</main>
|
285 |
|
286 |
+
<footer class="bg-gray-800 text-white py-8">
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
287 |
<div class="container mx-auto px-4">
|
288 |
+
<div class="grid grid-cols-1 md:grid-cols-3 gap-8">
|
289 |
<div>
|
290 |
+
<h3 class="text-xl font-semibold mb-4">About LayoutLMv3</h3>
|
291 |
+
<p class="text-gray-300">LayoutLMv3 is a state-of-the-art multimodal pre-trained model for document understanding, combining text, layout, and image information.</p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
292 |
</div>
|
293 |
<div>
|
294 |
+
<h3 class="text-xl font-semibold mb-4">Quick Links</h3>
|
295 |
<ul class="space-y-2">
|
296 |
+
<li><a href="https://huggingface.co/docs/transformers/model_doc/layoutlmv3" target="_blank" class="text-gray-300 hover:text-white">Model Documentation</a></li>
|
297 |
+
<li><a href="https://arxiv.org/abs/2204.08387" target="_blank" class="text-gray-300 hover:text-white">Research Paper</a></li>
|
298 |
+
<li><a href="https://github.com/microsoft/unilm/tree/master/layoutlmv3" target="_blank" class="text-gray-300 hover:text-white">GitHub Repository</a></li>
|
|
|
299 |
</ul>
|
300 |
</div>
|
301 |
+
<div id="contact">
|
302 |
+
<h3 class="text-xl font-semibold mb-4">Contact</h3>
|
303 |
<ul class="space-y-2">
|
304 |
+
<li class="flex items-center">
|
305 |
+
<i class="fas fa-envelope mr-2 text-blue-300"></i>
|
306 |
+
<a href="mailto:support@documentai.example" class="text-gray-300 hover:text-white">support@documentai.example</a>
|
307 |
+
</li>
|
308 |
+
<li class="flex items-center">
|
309 |
+
<i class="fab fa-github mr-2 text-blue-300"></i>
|
310 |
+
<a href="https://github.com/microsoft/unilm" target="_blank" class="text-gray-300 hover:text-white">microsoft/unilm</a>
|
311 |
+
</li>
|
|
|
|
|
|
|
|
|
|
|
312 |
</ul>
|
313 |
</div>
|
314 |
</div>
|
315 |
+
<div class="border-t border-gray-700 mt-8 pt-6 text-center text-gray-400">
|
316 |
+
<p>© 2023 Document AI Solutions. All rights reserved.</p>
|
317 |
</div>
|
318 |
</div>
|
319 |
</footer>
|
320 |
|
321 |
<script>
    // Smooth scrolling for in-page anchor links.
    // Bug fix: the original called e.preventDefault() unconditionally and then
    // document.querySelector(href).scrollIntoView(...) with no guard. For
    // href="#" querySelector throws a SyntaxError (invalid selector), and for
    // an anchor whose target id is absent it returns null, so .scrollIntoView
    // threw a TypeError — while default navigation had already been suppressed,
    // leaving a dead link. We now resolve the target first and only intercept
    // the click when a scroll target actually exists.
    document.querySelectorAll('a[href^="#"]').forEach(anchor => {
        anchor.addEventListener('click', function (e) {
            const selector = this.getAttribute('href');
            // '#' alone is not a valid CSS selector; let the browser handle it.
            if (selector.length <= 1) {
                return;
            }
            const target = document.querySelector(selector);
            if (target) {
                e.preventDefault();
                target.scrollIntoView({
                    behavior: 'smooth'
                });
            }
        });
    });

    // Track outbound links (placeholder for real analytics instrumentation).
    document.querySelectorAll('a[target="_blank"]').forEach(link => {
        link.addEventListener('click', function() {
            console.log('Outbound link clicked:', this.href);
            // Here you could add analytics tracking
        });
    });
</script>
|
340 |
<p style="border-radius: 8px; text-align: center; font-size: 12px; color: #fff; margin-top: 16px;position: fixed; left: 8px; bottom: 8px; z-index: 10; background: rgba(0, 0, 0, 0.8); padding: 4px 8px;">Made with <img src="https://enzostvs-deepsite.hf.space/logo.svg" alt="DeepSite Logo" style="width: 16px; height: 16px; vertical-align: middle;display:inline-block;margin-right:3px;filter:brightness(0) invert(1);"><a href="https://enzostvs-deepsite.hf.space" style="color: #fff;text-decoration: underline;" target="_blank" >DeepSite</a> - 🧬 <a href="https://enzostvs-deepsite.hf.space?remix=Jobwengi/object-detection-model" style="color: #fff;text-decoration: underline;" target="_blank" >Remix</a></p></body>
|
prompts.txt
CHANGED
@@ -1,2 +1,3 @@
|
|
1 |
want to build a model to hugging face, that does perfect 99.99% object detection for computer visions,
|
2 |
-
the sample image used isn't detected well, detect all the things in it, and make all the links in the site to be functional
|
|
|
|
1 |
want to build a model to hugging face, that does perfect 99.99% object detection for computer visions,
|
2 |
+
the sample image used isn't detected well, detect all the things in it, and make all the links in the site to be functional
|
3 |
+
I'm working on training a LayoutLMv3 model for invoice analysis and need help with annotating a sample dataset. Specifically, I need to annotate 50 invoice images, but I'm unsure about the process. If you have experience with LayoutLMv3 annotation, I'd greatly appreciate your guidance. This sample dataset will help me determine if the approach works, and if successful, I can scale up to annotating thousands of invoices
|