Update app.py

app.py (CHANGED)
@@ -11,9 +11,10 @@ import io
 import zipfile
 import uuid
 import traceback
-from huggingface_hub import snapshot_download
 from flask_cors import CORS
 import numpy as np
+import subprocess
+import sys
 
 app = Flask(__name__)
 CORS(app)  # Enable CORS for all routes
@@ -33,7 +34,7 @@ os.makedirs(CACHE_DIR, exist_ok=True)
 os.environ['HF_HOME'] = CACHE_DIR
 os.environ['TRANSFORMERS_CACHE'] = os.path.join(CACHE_DIR, 'transformers')
 os.environ['HF_DATASETS_CACHE'] = os.path.join(CACHE_DIR, 'datasets')
-os.environ['NUMBA_THREADING_LAYER'] = 'omp'
+os.environ['NUMBA_THREADING_LAYER'] = 'workqueue'  # Changed from 'omp'
 
 app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
 app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # 16MB max
@@ -48,8 +49,8 @@ model_loaded = False
 model_loading = False
 
 # Configuration for processing
-TIMEOUT_SECONDS =
-MAX_DIMENSION =
+TIMEOUT_SECONDS = 480  # 8 minutes max for processing (increased for CPU)
+MAX_DIMENSION = 384  # Max image dimension to process (reduced for CPU)
 
 # TimeoutError for handling timeouts
 class TimeoutError(Exception):
@@ -58,22 +59,40 @@ class TimeoutError(Exception):
 # Install necessary dependencies
 def install_dependencies():
     try:
-        #
+        # Install core dependencies in smaller batches to avoid memory issues
+        # First batch: essential packages
         subprocess.check_call([
-            "pip", "install",
-            "torch
+            "pip", "install", "--no-cache-dir",
+            "torch==2.0.1+cpu",
+            "torchvision==0.15.2+cpu",
+            "--extra-index-url",
+            "https://download.pytorch.org/whl/cpu"
+        ])
+
+        # Second batch: OpenLRM dependencies 1
+        subprocess.check_call([
+            "pip", "install", "--no-cache-dir",
             "lpips",
             "omegaconf",
             "transformers",
-            "safetensors"
+            "safetensors"
+        ])
+
+        # Third batch: OpenLRM dependencies 2
+        subprocess.check_call([
+            "pip", "install", "--no-cache-dir",
             "accelerate",
             "imageio[ffmpeg]",
             "PyMCubes",
-            "trimesh"
+            "trimesh"
+        ])
+
+        # Fourth batch: Image processing
+        subprocess.check_call([
+            "pip", "install", "--no-cache-dir",
+            "opencv-python-headless",  # Use headless variant
+            "rembg[cpu]",  # CPU version instead of GPU
+            "httpx",
            "tensorboard"
         ])
 
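Side note on the pip calls above: a bare "pip" resolves from PATH, which in some images is not the interpreter running app.py. Since sys is now imported anyway, a variant pinned to the current interpreter is a safer pattern. This is only a suggestion, not what the diff does; shown for the first batch:

import subprocess
import sys

# Same first install batch, but invoked through the interpreter running app.py
subprocess.check_call([
    sys.executable, "-m", "pip", "install", "--no-cache-dir",
    "torch==2.0.1+cpu",
    "torchvision==0.15.2+cpu",
    "--extra-index-url", "https://download.pytorch.org/whl/cpu",
])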
@@ -84,12 +103,14 @@ def install_dependencies():
         # Add OpenLRM to python path
         if not "OpenLRM" in os.getenv("PYTHONPATH", ""):
             os.environ["PYTHONPATH"] = f"{os.getenv('PYTHONPATH', '')}:OpenLRM"
+            sys.path.append("OpenLRM")  # Add to sys.path directly
 
         print("Successfully installed dependencies")
+        return True
     except Exception as e:
         print(f"Error installing dependencies: {str(e)}")
         print(traceback.format_exc())
+        return False
 
 # Thread-safe timeout implementation
 def process_with_timeout(function, args, timeout):
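The body of process_with_timeout sits outside the hunks shown here; the call site later in this diff uses it as (result, error) = process_with_timeout(func, args, timeout). A minimal thread-based sketch that matches that contract (the actual implementation in app.py may differ):

import threading

def process_with_timeout(function, args, timeout):
    # Run function(*args) in a daemon thread and wait at most `timeout` seconds.
    result = [None]
    error = [None]

    def worker():
        try:
            result[0] = function(*args)
        except Exception as e:
            error[0] = e

    thread = threading.Thread(target=worker, daemon=True)
    thread.start()
    thread.join(timeout)
    if thread.is_alive():
        # Worker is still running; report a timeout instead of blocking forever
        return None, TimeoutError(f"Processing timed out after {timeout} seconds")
    return result[0], error[0]

Here TimeoutError refers to the custom exception class defined earlier in app.py.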
@@ -127,7 +148,14 @@ def allowed_file(filename):
 # Image preprocessing with automatic background removal
 def preprocess_image(image_path):
     try:
+        # Try to use rembg for bg removal
+        try:
+            from rembg import remove
+            use_rembg = True
+        except ImportError:
+            use_rembg = False
+            print("Rembg not available, skipping background removal")
+
         with Image.open(image_path) as img:
             img = img.convert("RGBA")
 
@@ -144,8 +172,12 @@ def preprocess_image(image_path):
             # Use high-quality Lanczos resampling for better detail preservation
             img = img.resize((new_width, new_height), Image.LANCZOS)
 
-            # Remove background automatically
+            # Remove background automatically if rembg is available
+            if use_rembg:
+                img_no_bg = remove(img)
+            else:
+                # Create a fallback "no bg" version by just using original
+                img_no_bg = img.copy()
 
             # Save both versions for flexibility
             img_path = image_path.replace(".jpg", ".png").replace(".jpeg", ".png")
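For context, rembg's remove() accepts and returns PIL images (among other input types), which is why the code above can stay in PIL space end to end. A standalone sketch, assuming rembg is installed; the file names are placeholders:

from PIL import Image
from rembg import remove

img = Image.open("input.jpg").convert("RGBA")
img_no_bg = remove(img)       # background pixels become transparent
img_no_bg.save("output.png")  # PNG preserves the alpha channel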
@@ -158,10 +190,10 @@ def preprocess_image(image_path):
     except Exception as e:
         print(f"Error in image preprocessing: {str(e)}")
         print(traceback.format_exc())
-        # Return original if
+        # Return original if preprocessing fails
         return image_path, image_path
 
-# Initialize OpenLRM model
+# Initialize OpenLRM model with special handling for CPU-only environments
 def load_model():
     global openlrm_model, openlrm_processor, model_loaded, model_loading
 
@@ -178,59 +210,55 @@ def load_model():
         model_loading = True
         print("Starting OpenLRM model loading...")
 
-        #
+        # First check if dependencies are installed, if not install them
+        try:
+            from openlrm.utils.preprocess import Preprocessor
+        except ImportError:
+            success = install_dependencies()
+            if not success:
+                raise ImportError("Failed to install OpenLRM dependencies")
+
+        # Now try importing needed components
         try:
             from openlrm.utils.preprocess import Preprocessor
             from openlrm.utils.config import load_config
             from openlrm.models.registry import get_model
             from openlrm.pipelines.inference import InferencePipeline
 
-            # Use the
-            model_name = "zxhezexin/openlrm-mix-
+            # Use the tiny model variant for CPU environment
+            model_name = "zxhezexin/openlrm-mix-tiny-1.0"  # Tiniest model for CPU
 
             # Load configuration for inference
-            config_path = "OpenLRM/configs/infer-
+            config_path = "OpenLRM/configs/infer-xs.yaml"  # Extra small model config
             config = load_config(config_path)
             config.model_name = model_name
 
+            # CPU optimizations
+            config.half_precision = False  # Disable half precision on CPU
+            config.mesh_resolution = 64  # Lower mesh resolution
+
             # Initialize preprocessor
             openlrm_processor = Preprocessor()
 
-            # Initialize model and inference pipeline
-            device = "
+            # Initialize model and inference pipeline - force CPU
+            device = "cpu"  # Force CPU
+            torch.set_num_threads(4)  # Limit CPU threads to avoid OOM
+
+            print(f"Using device: {device}")
             openlrm_model = InferencePipeline(config, device)
 
             print(f"OpenLRM model loaded successfully on {device}")
             model_loaded = True
 
-            #
-            torch.cuda.empty_cache()
+            # Clean up memory
+            gc.collect()
 
             return openlrm_model, openlrm_processor
 
-        except
-            print(f"
-            print(
-
-            # Try loading again after installing dependencies
-            from openlrm.utils.preprocess import Preprocessor
-            from openlrm.utils.config import load_config
-            from openlrm.models.registry import get_model
-            from openlrm.pipelines.inference import InferencePipeline
-
-            model_name = "zxhezexin/openlrm-mix-small-1.1"
-            config_path = "OpenLRM/configs/infer-s.yaml"
-            config = load_config(config_path)
-            config.model_name = model_name
-
-            openlrm_processor = Preprocessor()
-            device = "cuda" if torch.cuda.is_available() else "cpu"
-            openlrm_model = InferencePipeline(config, device)
-
-            model_loaded = True
-            print(f"OpenLRM model loaded successfully on {device} after installing dependencies")
-            return openlrm_model, openlrm_processor
+        except Exception as e:
+            print(f"Error importing OpenLRM components: {str(e)}")
+            print(traceback.format_exc())
+            raise
 
     except Exception as e:
         print(f"Error loading OpenLRM model: {str(e)}")
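One thing to verify before merging: the new code calls gc.collect(), but this diff only adds imports for subprocess and sys. Unless gc is already imported elsewhere in app.py, the import block needs one more line:

import gc  # needed for the gc.collect() calls introduced in this change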
@@ -239,25 +267,73 @@ def load_model():
     finally:
         model_loading = False
 
-#
-def depth_based_fallback(image_path, output_dir, detail_level='
+# Simple depth-based fallback implementation if OpenLRM fails
+def depth_based_fallback(image_path, output_dir, detail_level='medium'):
     try:
+        import numpy as np
+        from PIL import Image
+        import trimesh
+
+        print("Using simple depth-based fallback implementation")
+
+        # Create a simple mesh using a heightmap approach
+        img = Image.open(image_path).convert('L')  # Convert to grayscale
+
+        # Resize to manageable size based on detail level
+        if detail_level == 'low':
+            size = 32
+        elif detail_level == 'medium':
+            size = 64
+        else:  # high
+            size = 96
+
+        img = img.resize((size, size))
+
+        # Create a heightmap from image intensity
+        heightmap = np.array(img) / 255.0
+
+        # Create a 3D mesh grid
+        x, y = np.meshgrid(np.linspace(-1, 1, size), np.linspace(-1, 1, size))
+        z = heightmap * 0.5  # Scale the height
+
+        # Create vertices for the mesh
+        vertices = np.zeros((size * size, 3))
+        vertices[:, 0] = x.flatten()
+        vertices[:, 1] = y.flatten()
+        vertices[:, 2] = z.flatten()
+
+        # Create face indices
+        faces = []
+        for i in range(size - 1):
+            for j in range(size - 1):
+                v0 = i * size + j
+                v1 = i * size + (j + 1)
+                v2 = (i + 1) * size + j
+                v3 = (i + 1) * size + (j + 1)
+                faces.append([v0, v1, v2])
+                faces.append([v1, v3, v2])
+
+        faces = np.array(faces)
+
+        # Create a mesh using trimesh
+        mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
+
+        # Export as PLY
+        mesh_path = os.path.join(output_dir, "output.ply")
+        mesh.export(mesh_path)
+
+        return True
     except Exception as e:
         print(f"Fallback also failed: {str(e)}")
+        print(traceback.format_exc())
         return False
-        return True
 
 @app.route('/health', methods=['GET'])
 def health_check():
     return jsonify({
         "status": "healthy",
-        "model": "OpenLRM Image-to-3D Model Generator",
-        "device": "
+        "model": "OpenLRM Image-to-3D Model Generator (CPU Edition)",
+        "device": "cpu"
     }), 200
 
 @app.route('/progress/<job_id>', methods=['GET'])
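The fallback triangulates the heightmap grid with two triangles per 2x2 vertex block ([v0, v1, v2] and [v1, v3, v2]), so a size-n grid yields n*n vertices and 2*(n-1)^2 faces. A quick sanity check of the exported mesh with trimesh; the path is illustrative:

import trimesh

mesh = trimesh.load("outputs/some-job-id/output.ply")
print(mesh.vertices.shape)  # (4096, 3) for size=64
print(mesh.faces.shape)     # (7938, 3), i.e. 2 * 63 * 63
print(mesh.is_watertight)   # False: a heightmap sheet is an open surface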
@@ -315,7 +391,7 @@ def convert_image_to_3d():
     # Get optional parameters with defaults
     try:
         output_format = request.form.get('output_format', 'obj').lower()
-        detail_level = request.form.get('detail_level', '
+        detail_level = request.form.get('detail_level', 'low').lower()  # Default to low for CPU
         source_cam_dist = float(request.form.get('source_cam_dist', 2.0))
         remove_bg = request.form.get('remove_bg', 'true').lower() == 'true'
     except ValueError:
@@ -357,150 +433,151 @@ def convert_image_to_3d():
             img_path, img_no_bg_path = preprocess_image(filepath) if remove_bg else (filepath, filepath)
             processing_jobs[job_id]['progress'] = 10
 
-            #
+            # Try to run with OpenLRM
+            use_fallback = False
             try:
+                # Load model
                 openlrm_model, openlrm_processor = load_model()
                 processing_jobs[job_id]['progress'] = 30
             except Exception as e:
-                processing_jobs[job_id]['status'] = '
-                processing_jobs[job_id]['error'] = f"
+                processing_jobs[job_id]['status'] = 'processing'  # Keep processing, we'll use fallback
+                processing_jobs[job_id]['error'] = f"OpenLRM model failed to load: {str(e)}. Using fallback."
+                use_fallback = True
 
-            # Process image with
-            dump_mesh_path = os.path.join(output_dir, "output.ply")  # OpenLRM uses .ply format
-            # Try fallback implementation if OpenLRM fails
-            processing_jobs[job_id]['error'] = f"Primary method failed: {str(error)}. Trying fallback..."
-            # Use fallback depth-based implementation
-            processing_jobs[job_id]['error'] = None  # Clear error if fallback succeeded
-            raise Exception(f"Both primary and fallback 3D generation methods failed: {str(error)}")
+            # Process image with appropriate method
+            if not use_fallback:
+                try:
+                    def generate_3d():
+                        # Use OpenLRM
+                        image_to_use = img_no_bg_path if remove_bg else img_path
+
+                        # Configure export paths
+                        dump_video_path = os.path.join(output_dir, "output.mp4")
+                        dump_mesh_path = os.path.join(output_dir, "output.ply")
+
+                        # Process with OpenLRM
+                        openlrm_model.infer_single(
+                            image_path=image_to_use,
+                            source_cam_dist=source_cam_dist,
+                            export_video=True,
+                            export_mesh=True,
+                            dump_video_path=dump_video_path,
+                            dump_mesh_path=dump_mesh_path,
+                        )
+
+                        return dump_video_path, dump_mesh_path
+
+                    # Run with timeout
+                    (video_path, mesh_path), error = process_with_timeout(generate_3d, [], TIMEOUT_SECONDS)
+
+                    if error:
+                        if isinstance(error, TimeoutError):
+                            use_fallback = True
+                            processing_jobs[job_id]['error'] = f"OpenLRM processing timed out after {TIMEOUT_SECONDS} seconds. Using fallback."
+                        else:
+                            use_fallback = True
+                            processing_jobs[job_id]['error'] = f"OpenLRM processing failed: {str(error)}. Using fallback."
+
+                except Exception as e:
+                    use_fallback = True
+                    processing_jobs[job_id]['error'] = f"Error during OpenLRM processing: {str(e)}. Using fallback."
+
+            # Use fallback method if needed
+            if use_fallback:
+                processing_jobs[job_id]['progress'] = 35
+                print("Using fallback method for 3D generation")
+
+                # Use our simple depth-based implementation
+                if depth_based_fallback(img_path, output_dir, detail_level):
+                    processing_jobs[job_id]['progress'] = 60
+                else:
+                    raise Exception("Both primary and fallback 3D generation methods failed")
+            else:
                 processing_jobs[job_id]['progress'] = 60
 
+            # Check if the mesh was created
+            mesh_path_orig = os.path.join(output_dir, "output.ply")
+            if not os.path.exists(mesh_path_orig):
+                raise Exception("3D model generation failed to produce a mesh")
 
+            # Convert to requested format
+            if output_format == 'obj':
+                # Convert PLY to OBJ
+                import trimesh
+                mesh = trimesh.load(mesh_path_orig)
+                obj_path = os.path.join(output_dir, "model.obj")
+                mesh.export(obj_path, file_type='obj')
+
+                # Create a zip file with OBJ and MTL
+                zip_path = os.path.join(output_dir, "model.zip")
+                with zipfile.ZipFile(zip_path, 'w') as zipf:
+                    zipf.write(obj_path, arcname="model.obj")
+                    mtl_path = os.path.join(output_dir, "model.mtl")
+                    if os.path.exists(mtl_path):
+                        zipf.write(mtl_path, arcname="model.mtl")
+
+                    # Include texture file if it exists
+                    texture_path = os.path.join(output_dir, "model.png")
+                    if os.path.exists(texture_path):
+                        zipf.write(texture_path, arcname="model.png")
+
+                processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
+                processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"
+
+            elif output_format == 'glb':
+                # Convert PLY to GLB
+                import trimesh
+                mesh = trimesh.load(mesh_path_orig)
+                glb_path = os.path.join(output_dir, "model.glb")
+                mesh.export(glb_path, file_type='glb')
+
+                processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
+                processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"
+
+            else:  # Keep as PLY format
+                import shutil
+                ply_path = os.path.join(output_dir, "model.ply")
+                shutil.copy(mesh_path_orig, ply_path)
+
+                processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
+                processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"
+
+            processing_jobs[job_id]['progress'] = 90
+
+            # Create or check for video preview
+            video_path_orig = os.path.join(output_dir, "output.mp4")
+            if os.path.exists(video_path_orig):
+                preview_path = os.path.join(output_dir, "preview.mp4")
+                import shutil
+                shutil.copy(video_path_orig, preview_path)
+                processing_jobs[job_id]['preview_video'] = f"/preview-video/{job_id}"
+
+            # Update job status
+            processing_jobs[job_id]['status'] = 'completed'
+            processing_jobs[job_id]['progress'] = 100
+            processing_jobs[job_id]['completed_at'] = time.time()
+            print(f"Job {job_id} completed successfully")
 
         except Exception as e:
-            # Handle errors
             error_details = traceback.format_exc()
             processing_jobs[job_id]['status'] = 'error'
-            processing_jobs[job_id]['error'] = f"{str(e)}
+            processing_jobs[job_id]['error'] = f"Error during processing: {str(e)}"
             print(f"Error processing job {job_id}: {str(e)}")
             print(error_details)
+            return
+
+        finally:
+            # Clean up temporary file
             if os.path.exists(filepath):
-                os.remove(filepath)
+                try:
+                    os.remove(filepath)
+                except:
+                    pass
+
+            # Force garbage collection to free memory
+            gc.collect()
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
 
     # Start processing thread
     processing_thread = threading.Thread(target=process_image)
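For reference, a client round-trip against these endpoints might look like the sketch below. The upload field name ("image") and the job_id response key are assumptions here, since the upload-handling part of convert_image_to_3d is outside this diff:

import requests

base = "https://your-space.hf.space"  # placeholder URL
with open("chair.jpg", "rb") as f:
    resp = requests.post(f"{base}/convert", files={"image": f},  # assumed field name
                         data={"output_format": "glb", "detail_level": "low"})
job_id = resp.json()["job_id"]  # assumed response field

print(requests.get(f"{base}/progress/{job_id}").json())
with open("model.glb", "wb") as out:
    out.write(requests.get(f"{base}/download/{job_id}").content)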
@@ -652,21 +729,26 @@ def model_info(job_id):
         "preview_url": job['preview_url'],
         "preview_video": job.get('preview_video'),
         "model_stats": model_stats,
-        "completed_at": job.get('completed_at')
+        "created_at": job.get('created_at'),
+        "completed_at": job.get('completed_at'),
+        "cpu_only": True,  # Flag to indicate this was processed on CPU
+        "used_fallback": job.get('error') and "Using fallback" in job.get('error')
     }), 200
 
 @app.route('/', methods=['GET'])
 def index():
     return jsonify({
-        "message": "OpenLRM Image-to-3D Model Generator API",
+        "message": "OpenLRM Image-to-3D Model Generator API (CPU Edition)",
+        "status": "running",
+        "device": "cpu",
         "endpoints": [
             "/convert",
             "/progress/<job_id>",
             "/download/<job_id>",
             "/preview/<job_id>",
             "/preview-video/<job_id>",
-            "/model-info/<job_id>"
+            "/model-info/<job_id>",
+            "/health"
         ],
         "parameters": {
             "output_format": "obj, glb, or ply",
@@ -674,13 +756,67 @@ def index():
             "source_cam_dist": "Camera distance from object (1.0-3.5, default 2.0)",
             "remove_bg": "true/false - automatically remove background"
         },
-        "
+        "notes": "This CPU-only version has reduced capabilities compared to the GPU version. It uses a smaller model and may fall back to a simpler 3D generation method when needed.",
+        "description": "Creates 3D models from 2D images with structural completion"
     }), 200
 
+# Add a status endpoint for monitoring compute resources
+@app.route('/status', methods=['GET'])
+def system_status():
+    # Collect system information
+    import psutil
+
+    try:
+        memory = psutil.virtual_memory()
+        disk = psutil.disk_usage('/')
+
+        # Count active and completed jobs
+        active_jobs = sum(1 for j in processing_jobs.values() if j['status'] == 'processing')
+        completed_jobs = sum(1 for j in processing_jobs.values() if j['status'] == 'completed')
+        failed_jobs = sum(1 for j in processing_jobs.values() if j['status'] == 'error')
+
+        return jsonify({
+            "status": "operational",
+            "system": {
+                "memory_used_percent": memory.percent,
+                "memory_available_mb": memory.available // (1024 * 1024),
+                "disk_used_percent": disk.percent,
+                "disk_free_gb": disk.free // (1024 * 1024 * 1024),
+                "cpu_percent": psutil.cpu_percent(interval=0.1)
+            },
+            "jobs": {
+                "active": active_jobs,
+                "completed": completed_jobs,
+                "failed": failed_jobs,
+                "total": len(processing_jobs)
+            },
+            "model_loaded": model_loaded,
+            "model_loading": model_loading
+        }), 200
+    except Exception as e:
+        return jsonify({
+            "status": "operational",
+            "error": str(e),
+            "jobs": {
+                "total": len(processing_jobs)
+            },
+            "model_loaded": model_loaded
+        }), 200
+
 if __name__ == '__main__':
     # Start the cleanup thread
     cleanup_old_jobs()
 
+    # Try installing dependencies at startup
+    try:
+        # Check if we need to install psutil
+        try:
+            import psutil
+        except ImportError:
+            subprocess.check_call(["pip", "install", "--no-cache-dir", "psutil"])
+    except:
+        print("Warning: Could not install psutil. Status endpoint may have limited functionality.")
+
     # Use port 7860 which is standard for Hugging Face Spaces
     port = int(os.environ.get('PORT', 7860))
     app.run(host='0.0.0.0', port=port)
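The /status endpoint leans on psutil's named tuples; the fields used above can be checked in isolation (the printed values are illustrative):

import psutil

mem = psutil.virtual_memory()
print(mem.percent, mem.available // (1024 * 1024))  # percent used, MB available
disk = psutil.disk_usage('/')
print(disk.percent, disk.free // (1024 ** 3))       # percent used, GiB free
print(psutil.cpu_percent(interval=0.1))             # CPU % over a 0.1 s window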