mac9087 committed
Commit 9904f06 · verified · Parent: 2326ba3

Update app.py

Files changed (1): app.py (+322 −186)

app.py CHANGED
@@ -11,9 +11,10 @@ import io
 import zipfile
 import uuid
 import traceback
-from huggingface_hub import snapshot_download
 from flask_cors import CORS
 import numpy as np
+import subprocess
+import sys

 app = Flask(__name__)
 CORS(app)  # Enable CORS for all routes
@@ -33,7 +34,7 @@ os.makedirs(CACHE_DIR, exist_ok=True)
 os.environ['HF_HOME'] = CACHE_DIR
 os.environ['TRANSFORMERS_CACHE'] = os.path.join(CACHE_DIR, 'transformers')
 os.environ['HF_DATASETS_CACHE'] = os.path.join(CACHE_DIR, 'datasets')
-os.environ['NUMBA_THREADING_LAYER'] = 'omp'
+os.environ['NUMBA_THREADING_LAYER'] = 'workqueue'  # Changed from 'omp'

 app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
 app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # 16MB max
@@ -48,8 +49,8 @@ model_loaded = False
 model_loading = False

 # Configuration for processing
-TIMEOUT_SECONDS = 240  # 4 minutes max for processing
-MAX_DIMENSION = 512  # Max image dimension to process
+TIMEOUT_SECONDS = 480  # 8 minutes max for processing (increased for CPU)
+MAX_DIMENSION = 384  # Max image dimension to process (reduced for CPU)

 # TimeoutError for handling timeouts
 class TimeoutError(Exception):
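
Note: TIMEOUT_SECONDS and MAX_DIMENSION are hard-coded, so retuning them means another commit. A minimal sketch of an environment-driven alternative; the variable names here are hypothetical, not part of this app:

    import os

    def _int_env(name, default):
        # Fall back to the CPU-friendly defaults from this commit when the
        # variable is unset or not a valid integer.
        try:
            return int(os.environ.get(name, default))
        except (TypeError, ValueError):
            return default

    TIMEOUT_SECONDS = _int_env('TIMEOUT_SECONDS', 480)
    MAX_DIMENSION = _int_env('MAX_DIMENSION', 384)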
@@ -58,22 +59,40 @@ class TimeoutError(Exception):
 # Install necessary dependencies
 def install_dependencies():
     try:
-        import subprocess
-        # Install core dependencies
+        # Install core dependencies in smaller batches to avoid memory issues
+        # First batch: essential packages
         subprocess.check_call([
-            "pip", "install",
-            "torch>=2.0.0",
+            "pip", "install", "--no-cache-dir",
+            "torch==2.0.1+cpu",
+            "torchvision==0.15.2+cpu",
+            "--extra-index-url",
+            "https://download.pytorch.org/whl/cpu"
+        ])
+
+        # Second batch: OpenLRM dependencies 1
+        subprocess.check_call([
+            "pip", "install", "--no-cache-dir",
             "lpips",
             "omegaconf",
             "transformers",
-            "safetensors",
+            "safetensors"
+        ])
+
+        # Third batch: OpenLRM dependencies 2
+        subprocess.check_call([
+            "pip", "install", "--no-cache-dir",
             "accelerate",
             "imageio[ffmpeg]",
            "PyMCubes",
-            "trimesh",
-            "opencv-python",
-            "rembg[gpu,cli]",
-            "httpx[socks]",
+            "trimesh"
+        ])
+
+        # Fourth batch: Image processing
+        subprocess.check_call([
+            "pip", "install", "--no-cache-dir",
+            "opencv-python-headless",  # Use headless variant
+            "rembg[cpu]",  # CPU version instead of GPU
+            "httpx",
             "tensorboard"
         ])
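
One caveat with these batches: calling the bare "pip" executable assumes it is on PATH and belongs to the same interpreter that runs Flask, which is not guaranteed in every container image. The commonly recommended invocation goes through the current interpreter; a sketch of the first batch rewritten that way:

    import subprocess
    import sys

    # Installing via sys.executable guarantees the packages land in the
    # environment that actually runs this app.
    subprocess.check_call([
        sys.executable, "-m", "pip", "install", "--no-cache-dir",
        "torch==2.0.1+cpu",
        "torchvision==0.15.2+cpu",
        "--extra-index-url", "https://download.pytorch.org/whl/cpu",
    ])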
@@ -84,12 +103,14 @@ def install_dependencies():
         # Add OpenLRM to python path
         if not "OpenLRM" in os.getenv("PYTHONPATH", ""):
             os.environ["PYTHONPATH"] = f"{os.getenv('PYTHONPATH', '')}:OpenLRM"
+            sys.path.append("OpenLRM")  # Add to sys.path directly

         print("Successfully installed dependencies")
+        return True
     except Exception as e:
         print(f"Error installing dependencies: {str(e)}")
         print(traceback.format_exc())
-        raise
+        return False

 # Thread-safe timeout implementation
 def process_with_timeout(function, args, timeout):
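
The body of process_with_timeout is unchanged by this commit and not shown in the diff. For orientation, a minimal sketch of how such a thread-based helper is typically written to match the (result, error) call sites later in the file; this is an illustration, not the file's actual implementation:

    import threading

    def process_with_timeout(function, args, timeout):
        result = [None]
        error = [None]

        def target():
            try:
                result[0] = function(*args)
            except Exception as e:
                error[0] = e

        thread = threading.Thread(target=target, daemon=True)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            # A Python thread cannot be forcibly killed; it keeps running,
            # we merely stop waiting and report the timeout.
            return None, TimeoutError(f"Timed out after {timeout} seconds")
        return result[0], error[0]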
@@ -127,7 +148,14 @@ def allowed_file(filename):
 # Image preprocessing with automatic background removal
 def preprocess_image(image_path):
     try:
-        from rembg import remove
+        # Try to use rembg for bg removal
+        try:
+            from rembg import remove
+            use_rembg = True
+        except ImportError:
+            use_rembg = False
+            print("Rembg not available, skipping background removal")
+
         with Image.open(image_path) as img:
             img = img.convert("RGBA")
@@ -144,8 +172,12 @@ def preprocess_image(image_path):
             # Use high-quality Lanczos resampling for better detail preservation
             img = img.resize((new_width, new_height), Image.LANCZOS)

-            # Remove background automatically
-            img_no_bg = remove(img)
+            # Remove background automatically if rembg is available
+            if use_rembg:
+                img_no_bg = remove(img)
+            else:
+                # Create a fallback "no bg" version by just using original
+                img_no_bg = img.copy()

             # Save both versions for flexibility
             img_path = image_path.replace(".jpg", ".png").replace(".jpeg", ".png")
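
A CPU-relevant aside: rembg loads an ONNX segmentation model under the hood, and paying that load cost on every call is slow. If the installed rembg version exposes new_session, the session can be created once and reused across requests; a sketch (the "u2netp" model name is just an example of rembg's smaller variants):

    from rembg import new_session, remove

    # Load the segmentation model once at import time, not per request.
    _REMBG_SESSION = new_session("u2netp")

    def remove_background(img):
        return remove(img, session=_REMBG_SESSION)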
@@ -158,10 +190,10 @@ def preprocess_image(image_path):
     except Exception as e:
         print(f"Error in image preprocessing: {str(e)}")
         print(traceback.format_exc())
-        # Return original if rembg fails
+        # Return original if preprocessing fails
         return image_path, image_path

-# Initialize OpenLRM model
+# Initialize OpenLRM model with special handling for CPU-only environments
 def load_model():
     global openlrm_model, openlrm_processor, model_loaded, model_loading
@@ -178,59 +210,55 @@ def load_model():
         model_loading = True
         print("Starting OpenLRM model loading...")

-        # Import OpenLRM components
+        # First check if dependencies are installed, if not install them
+        try:
+            from openlrm.utils.preprocess import Preprocessor
+        except ImportError:
+            success = install_dependencies()
+            if not success:
+                raise ImportError("Failed to install OpenLRM dependencies")
+
+        # Now try importing needed components
         try:
             from openlrm.utils.preprocess import Preprocessor
             from openlrm.utils.config import load_config
             from openlrm.models.registry import get_model
             from openlrm.pipelines.inference import InferencePipeline

-            # Use the small model variant for HF free tier
-            model_name = "zxhezexin/openlrm-mix-small-1.1"  # Smallest model for HF free tier
+            # Use the tiny model variant for CPU environment
+            model_name = "zxhezexin/openlrm-mix-tiny-1.0"  # Tiniest model for CPU

             # Load configuration for inference
-            config_path = "OpenLRM/configs/infer-s.yaml"  # Small model config
+            config_path = "OpenLRM/configs/infer-xs.yaml"  # Extra small model config
             config = load_config(config_path)
             config.model_name = model_name

+            # CPU optimizations
+            config.half_precision = False  # Disable half precision on CPU
+            config.mesh_resolution = 64  # Lower mesh resolution
+
             # Initialize preprocessor
             openlrm_processor = Preprocessor()

-            # Initialize model and inference pipeline
-            device = "cuda" if torch.cuda.is_available() else "cpu"
+            # Initialize model and inference pipeline - force CPU
+            device = "cpu"  # Force CPU
+            torch.set_num_threads(4)  # Limit CPU threads to avoid OOM
+
+            print(f"Using device: {device}")
             openlrm_model = InferencePipeline(config, device)

             print(f"OpenLRM model loaded successfully on {device}")
             model_loaded = True

-            # Optimize memory usage
-            if device == "cuda":
-                torch.cuda.empty_cache()
+            # Clean up memory
+            gc.collect()

             return openlrm_model, openlrm_processor

-        except ImportError as e:
-            print(f"ImportError: {str(e)}")
-            print("Installing OpenLRM dependencies...")
-            install_dependencies()
-            # Try loading again after installing dependencies
-            from openlrm.utils.preprocess import Preprocessor
-            from openlrm.utils.config import load_config
-            from openlrm.models.registry import get_model
-            from openlrm.pipelines.inference import InferencePipeline
-
-            model_name = "zxhezexin/openlrm-mix-small-1.1"
-            config_path = "OpenLRM/configs/infer-s.yaml"
-            config = load_config(config_path)
-            config.model_name = model_name
-
-            openlrm_processor = Preprocessor()
-            device = "cuda" if torch.cuda.is_available() else "cpu"
-            openlrm_model = InferencePipeline(config, device)
-
-            model_loaded = True
-            print(f"OpenLRM model loaded successfully on {device} after installing dependencies")
-            return openlrm_model, openlrm_processor
+        except Exception as e:
+            print(f"Error importing OpenLRM components: {str(e)}")
+            print(traceback.format_exc())
+            raise

     except Exception as e:
         print(f"Error loading OpenLRM model: {str(e)}")
@@ -239,25 +267,73 @@ def load_model():
     finally:
         model_loading = False

-# Fallback to original depth-based implementation if OpenLRM fails
-def depth_based_fallback(image_path, output_dir, detail_level='high'):
+# Simple depth-based fallback implementation if OpenLRM fails
+def depth_based_fallback(image_path, output_dir, detail_level='medium'):
     try:
-        # This uses your original depth estimation implementation as a fallback
-        # [Implementation would go here]
-        print("Using depth-based fallback implementation")
-        # Your original implementation could be added here
-        pass
+        import numpy as np
+        from PIL import Image
+        import trimesh
+
+        print("Using simple depth-based fallback implementation")
+
+        # Create a simple mesh using a heightmap approach
+        img = Image.open(image_path).convert('L')  # Convert to grayscale
+
+        # Resize to manageable size based on detail level
+        if detail_level == 'low':
+            size = 32
+        elif detail_level == 'medium':
+            size = 64
+        else:  # high
+            size = 96
+
+        img = img.resize((size, size))
+
+        # Create a heightmap from image intensity
+        heightmap = np.array(img) / 255.0
+
+        # Create a 3D mesh grid
+        x, y = np.meshgrid(np.linspace(-1, 1, size), np.linspace(-1, 1, size))
+        z = heightmap * 0.5  # Scale the height
+
+        # Create vertices for the mesh
+        vertices = np.zeros((size * size, 3))
+        vertices[:, 0] = x.flatten()
+        vertices[:, 1] = y.flatten()
+        vertices[:, 2] = z.flatten()
+
+        # Create face indices
+        faces = []
+        for i in range(size - 1):
+            for j in range(size - 1):
+                v0 = i * size + j
+                v1 = i * size + (j + 1)
+                v2 = (i + 1) * size + j
+                v3 = (i + 1) * size + (j + 1)
+                faces.append([v0, v1, v2])
+                faces.append([v1, v3, v2])
+
+        faces = np.array(faces)
+
+        # Create a mesh using trimesh
+        mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
+
+        # Export as PLY
+        mesh_path = os.path.join(output_dir, "output.ply")
+        mesh.export(mesh_path)
+
+        return True
     except Exception as e:
         print(f"Fallback also failed: {str(e)}")
+        print(traceback.format_exc())
         return False
-    return True

 @app.route('/health', methods=['GET'])
 def health_check():
     return jsonify({
         "status": "healthy",
-        "model": "OpenLRM Image-to-3D Model Generator",
-        "device": "cuda" if torch.cuda.is_available() else "cpu"
+        "model": "OpenLRM Image-to-3D Model Generator (CPU Edition)",
+        "device": "cpu"
     }), 200

 @app.route('/progress/<job_id>', methods=['GET'])
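
The fallback's index arithmetic (size² vertices, two triangles per grid cell) is easy to get wrong, so here is a standalone sanity check of the counts it should produce, on a tiny grid:

    import numpy as np
    import trimesh

    size = 4  # tiny grid, purely for the arithmetic check
    x, y = np.meshgrid(np.linspace(-1, 1, size), np.linspace(-1, 1, size))
    vertices = np.column_stack([x.ravel(), y.ravel(), np.zeros(size * size)])

    faces = []
    for i in range(size - 1):
        for j in range(size - 1):
            v0, v1 = i * size + j, i * size + (j + 1)
            v2, v3 = (i + 1) * size + j, (i + 1) * size + (j + 1)
            faces.extend([[v0, v1, v2], [v1, v3, v2]])

    # process=False keeps trimesh from merging or reordering anything.
    mesh = trimesh.Trimesh(vertices=vertices, faces=np.array(faces), process=False)
    assert len(mesh.vertices) == size * size       # 16 vertices
    assert len(mesh.faces) == 2 * (size - 1) ** 2  # 18 triangles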
@@ -315,7 +391,7 @@ def convert_image_to_3d():
     # Get optional parameters with defaults
     try:
         output_format = request.form.get('output_format', 'obj').lower()
-        detail_level = request.form.get('detail_level', 'medium').lower()
+        detail_level = request.form.get('detail_level', 'low').lower()  # Default to low for CPU
         source_cam_dist = float(request.form.get('source_cam_dist', 2.0))
         remove_bg = request.form.get('remove_bg', 'true').lower() == 'true'
     except ValueError:
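
Worth noting: detail_level is never validated, and in depth_based_fallback any unrecognized value falls through to the most expensive "high" branch (size 96). A small sketch of a hypothetical normalizing helper:

    def normalize_detail_level(value, allowed=('low', 'medium', 'high'), default='low'):
        # Unknown or empty values collapse to the CPU-friendly default
        # instead of falling through to the costliest branch.
        value = (value or default).lower()
        return value if value in allowed else default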
@@ -357,150 +433,151 @@ def convert_image_to_3d():
         img_path, img_no_bg_path = preprocess_image(filepath) if remove_bg else (filepath, filepath)
         processing_jobs[job_id]['progress'] = 10

-        # Load model
+        # Try to run with OpenLRM
+        use_fallback = False
         try:
+            # Load model
             openlrm_model, openlrm_processor = load_model()
             processing_jobs[job_id]['progress'] = 30
         except Exception as e:
-            processing_jobs[job_id]['status'] = 'error'
-            processing_jobs[job_id]['error'] = f"Error loading model: {str(e)}"
-            return
+            processing_jobs[job_id]['status'] = 'processing'  # Keep processing, we'll use fallback
+            processing_jobs[job_id]['error'] = f"OpenLRM model failed to load: {str(e)}. Using fallback."
+            use_fallback = True

-        # Process image with thread-safe timeout
-        try:
-            def generate_3d():
-                # Import here to ensure it's within the thread
-                import os
-                from openlrm.pipelines.inference import InferencePipeline
-
-                # Process with OpenLRM
-                image_to_use = img_no_bg_path if remove_bg else img_path
-
-                # Configure export paths
-                dump_video_path = os.path.join(output_dir, "output.mp4")
-                dump_mesh_path = os.path.join(output_dir, "output.ply")  # OpenLRM uses .ply format
-
-                # Process with OpenLRM
-                openlrm_model.infer_single(
-                    image_path=image_to_use,
-                    source_cam_dist=source_cam_dist,
-                    export_video=True,
-                    export_mesh=True,
-                    dump_video_path=dump_video_path,
-                    dump_mesh_path=dump_mesh_path,
-                )
-
-                return dump_video_path, dump_mesh_path
-
-            (video_path, mesh_path), error = process_with_timeout(generate_3d, [], TIMEOUT_SECONDS)
-
-            if error:
-                if isinstance(error, TimeoutError):
-                    processing_jobs[job_id]['status'] = 'error'
-                    processing_jobs[job_id]['error'] = f"Processing timed out after {TIMEOUT_SECONDS} seconds"
-                    return
-                else:
-                    # Try fallback implementation if OpenLRM fails
-                    processing_jobs[job_id]['progress'] = 35
-                    processing_jobs[job_id]['error'] = f"Primary method failed: {str(error)}. Trying fallback..."
-
-                    # Use fallback depth-based implementation
-                    if depth_based_fallback(img_path, output_dir, detail_level):
-                        processing_jobs[job_id]['progress'] = 60
-                        processing_jobs[job_id]['error'] = None  # Clear error if fallback succeeded
-                    else:
-                        raise Exception(f"Both primary and fallback 3D generation methods failed: {str(error)}")
-
+        # Process image with appropriate method
+        if not use_fallback:
+            try:
+                def generate_3d():
+                    # Use OpenLRM
+                    image_to_use = img_no_bg_path if remove_bg else img_path
+
+                    # Configure export paths
+                    dump_video_path = os.path.join(output_dir, "output.mp4")
+                    dump_mesh_path = os.path.join(output_dir, "output.ply")
+
+                    # Process with OpenLRM
+                    openlrm_model.infer_single(
+                        image_path=image_to_use,
+                        source_cam_dist=source_cam_dist,
+                        export_video=True,
+                        export_mesh=True,
+                        dump_video_path=dump_video_path,
+                        dump_mesh_path=dump_mesh_path,
+                    )
+
+                    return dump_video_path, dump_mesh_path
+
+                # Run with timeout
+                (video_path, mesh_path), error = process_with_timeout(generate_3d, [], TIMEOUT_SECONDS)
+
+                if error:
+                    if isinstance(error, TimeoutError):
+                        use_fallback = True
+                        processing_jobs[job_id]['error'] = f"OpenLRM processing timed out after {TIMEOUT_SECONDS} seconds. Using fallback."
+                    else:
+                        use_fallback = True
+                        processing_jobs[job_id]['error'] = f"OpenLRM processing failed: {str(error)}. Using fallback."
+
+            except Exception as e:
+                use_fallback = True
+                processing_jobs[job_id]['error'] = f"Error during OpenLRM processing: {str(e)}. Using fallback."
+
+        # Use fallback method if needed
+        if use_fallback:
+            processing_jobs[job_id]['progress'] = 35
+            print("Using fallback method for 3D generation")
+
+            # Use our simple depth-based implementation
+            if depth_based_fallback(img_path, output_dir, detail_level):
+                processing_jobs[job_id]['progress'] = 60
+            else:
+                raise Exception("Both primary and fallback 3D generation methods failed")
+        else:
             processing_jobs[job_id]['progress'] = 60

-            # Convert PLY to requested format if needed
-            mesh_path_orig = os.path.join(output_dir, "output.ply")
-            if os.path.exists(mesh_path_orig):
-                if output_format == 'obj':
-                    # Convert PLY to OBJ
-                    import trimesh
-                    mesh = trimesh.load(mesh_path_orig)
-                    obj_path = os.path.join(output_dir, "model.obj")
-                    mesh.export(obj_path, file_type='obj')
-
-                    # Create a zip file with OBJ and MTL
-                    zip_path = os.path.join(output_dir, "model.zip")
-                    with zipfile.ZipFile(zip_path, 'w') as zipf:
-                        zipf.write(obj_path, arcname="model.obj")
-                        mtl_path = os.path.join(output_dir, "model.mtl")
-                        if os.path.exists(mtl_path):
-                            zipf.write(mtl_path, arcname="model.mtl")
-
-                        # Include texture file if it exists
-                        texture_path = os.path.join(output_dir, "model.png")
-                        if os.path.exists(texture_path):
-                            zipf.write(texture_path, arcname="model.png")
-
-                    processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
-                    processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"
-
-                elif output_format == 'glb':
-                    # Convert PLY to GLB
-                    import trimesh
-                    mesh = trimesh.load(mesh_path_orig)
-                    glb_path = os.path.join(output_dir, "model.glb")
-                    mesh.export(glb_path, file_type='glb')
-
-                    processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
-                    processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"
-
-                else:  # Keep as PLY format
-                    import shutil
-                    ply_path = os.path.join(output_dir, "model.ply")
-                    shutil.copy(mesh_path_orig, ply_path)
-
-                    processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
-                    processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"
-
-            processing_jobs[job_id]['progress'] = 90
-
-            # Also save the video preview
-            video_path_orig = os.path.join(output_dir, "output.mp4")
-            if os.path.exists(video_path_orig):
-                preview_path = os.path.join(output_dir, "preview.mp4")
-                import shutil
-                shutil.copy(video_path_orig, preview_path)
-                processing_jobs[job_id]['preview_video'] = f"/preview-video/{job_id}"
-
-            # Update job status
-            processing_jobs[job_id]['status'] = 'completed'
-            processing_jobs[job_id]['progress'] = 100
-            processing_jobs[job_id]['completed_at'] = time.time()
-            print(f"Job {job_id} completed successfully")
-
-        except Exception as e:
-            error_details = traceback.format_exc()
-            processing_jobs[job_id]['status'] = 'error'
-            processing_jobs[job_id]['error'] = f"Error during processing: {str(e)}"
-            print(f"Error processing job {job_id}: {str(e)}")
-            print(error_details)
-            return
-
-        # Clean up temporary file
-        if os.path.exists(filepath):
-            os.remove(filepath)
-
-        # Force garbage collection to free memory
-        gc.collect()
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
+        # Check if the mesh was created
+        mesh_path_orig = os.path.join(output_dir, "output.ply")
+        if not os.path.exists(mesh_path_orig):
+            raise Exception("3D model generation failed to produce a mesh")
+
+        # Convert to requested format
+        if output_format == 'obj':
+            # Convert PLY to OBJ
+            import trimesh
+            mesh = trimesh.load(mesh_path_orig)
+            obj_path = os.path.join(output_dir, "model.obj")
+            mesh.export(obj_path, file_type='obj')
+
+            # Create a zip file with OBJ and MTL
+            zip_path = os.path.join(output_dir, "model.zip")
+            with zipfile.ZipFile(zip_path, 'w') as zipf:
+                zipf.write(obj_path, arcname="model.obj")
+                mtl_path = os.path.join(output_dir, "model.mtl")
+                if os.path.exists(mtl_path):
+                    zipf.write(mtl_path, arcname="model.mtl")
+
+                # Include texture file if it exists
+                texture_path = os.path.join(output_dir, "model.png")
+                if os.path.exists(texture_path):
+                    zipf.write(texture_path, arcname="model.png")
+
+            processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
+            processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"
+
+        elif output_format == 'glb':
+            # Convert PLY to GLB
+            import trimesh
+            mesh = trimesh.load(mesh_path_orig)
+            glb_path = os.path.join(output_dir, "model.glb")
+            mesh.export(glb_path, file_type='glb')
+
+            processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
+            processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"
+
+        else:  # Keep as PLY format
+            import shutil
+            ply_path = os.path.join(output_dir, "model.ply")
+            shutil.copy(mesh_path_orig, ply_path)
+
+            processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
+            processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"
+
+        processing_jobs[job_id]['progress'] = 90
+
+        # Create or check for video preview
+        video_path_orig = os.path.join(output_dir, "output.mp4")
+        if os.path.exists(video_path_orig):
+            preview_path = os.path.join(output_dir, "preview.mp4")
+            import shutil
+            shutil.copy(video_path_orig, preview_path)
+            processing_jobs[job_id]['preview_video'] = f"/preview-video/{job_id}"
+
+        # Update job status
+        processing_jobs[job_id]['status'] = 'completed'
+        processing_jobs[job_id]['progress'] = 100
+        processing_jobs[job_id]['completed_at'] = time.time()
+        print(f"Job {job_id} completed successfully")

     except Exception as e:
-        # Handle errors
         error_details = traceback.format_exc()
         processing_jobs[job_id]['status'] = 'error'
-        processing_jobs[job_id]['error'] = f"{str(e)}\n{error_details}"
+        processing_jobs[job_id]['error'] = f"Error during processing: {str(e)}"
         print(f"Error processing job {job_id}: {str(e)}")
         print(error_details)
-
-        # Clean up on error
+        return
+
+    finally:
+        # Clean up temporary file
         if os.path.exists(filepath):
-            os.remove(filepath)
+            try:
+                os.remove(filepath)
+            except:
+                pass
+
+        # Force garbage collection to free memory
+        gc.collect()
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()

 # Start processing thread
 processing_thread = threading.Thread(target=process_image)
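
One sharp edge this rewrite keeps: (video_path, mesh_path), error = process_with_timeout(...) destructures the result before checking error, so if the helper returns (None, error) on timeout, the unpacking raises TypeError and the timeout is reported as a generic OpenLRM failure rather than hitting the TimeoutError branch. A defensive variant, assuming that (result, error) contract:

    # Unpack the pair first; only destructure the result when it exists.
    result, error = process_with_timeout(generate_3d, [], TIMEOUT_SECONDS)
    if error is None and result is not None:
        video_path, mesh_path = result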
@@ -652,21 +729,26 @@ def model_info(job_id):
         "preview_url": job['preview_url'],
         "preview_video": job.get('preview_video'),
         "model_stats": model_stats,
-        "created_at": job.get('created_at'),
-        "completed_at": job.get('completed_at')
+        "created_at": job.get('created_at'),
+        "completed_at": job.get('completed_at'),
+        "cpu_only": True,  # Flag to indicate this was processed on CPU
+        "used_fallback": job.get('error') and "Using fallback" in job.get('error')
     }), 200

 @app.route('/', methods=['GET'])
 def index():
     return jsonify({
-        "message": "OpenLRM Image-to-3D Model Generator API",
+        "message": "OpenLRM Image-to-3D Model Generator API (CPU Edition)",
+        "status": "running",
+        "device": "cpu",
         "endpoints": [
             "/convert",
             "/progress/<job_id>",
             "/download/<job_id>",
             "/preview/<job_id>",
             "/preview-video/<job_id>",
-            "/model-info/<job_id>"
+            "/model-info/<job_id>",
+            "/health"
         ],
         "parameters": {
             "output_format": "obj, glb, or ply",
@@ -674,13 +756,67 @@ def index():
         "source_cam_dist": "Camera distance from object (1.0-3.5, default 2.0)",
         "remove_bg": "true/false - automatically remove background"
     },
-    "description": "This API creates high-quality 3D models from 2D images with full structural completion from all angles"
+    "notes": "This CPU-only version has reduced capabilities compared to the GPU version. It uses a smaller model and may fall back to a simpler 3D generation method when needed.",
+    "description": "Creates 3D models from 2D images with structural completion"
     }), 200

+# Add a status endpoint for monitoring compute resources
+@app.route('/status', methods=['GET'])
+def system_status():
+    # Collect system information
+    import psutil
+
+    try:
+        memory = psutil.virtual_memory()
+        disk = psutil.disk_usage('/')
+
+        # Count active and completed jobs
+        active_jobs = sum(1 for j in processing_jobs.values() if j['status'] == 'processing')
+        completed_jobs = sum(1 for j in processing_jobs.values() if j['status'] == 'completed')
+        failed_jobs = sum(1 for j in processing_jobs.values() if j['status'] == 'error')
+
+        return jsonify({
+            "status": "operational",
+            "system": {
+                "memory_used_percent": memory.percent,
+                "memory_available_mb": memory.available // (1024 * 1024),
+                "disk_used_percent": disk.percent,
+                "disk_free_gb": disk.free // (1024 * 1024 * 1024),
+                "cpu_percent": psutil.cpu_percent(interval=0.1)
+            },
+            "jobs": {
+                "active": active_jobs,
+                "completed": completed_jobs,
+                "failed": failed_jobs,
+                "total": len(processing_jobs)
+            },
+            "model_loaded": model_loaded,
+            "model_loading": model_loading
+        }), 200
+    except Exception as e:
+        return jsonify({
+            "status": "operational",
+            "error": str(e),
+            "jobs": {
+                "total": len(processing_jobs)
+            },
+            "model_loaded": model_loaded
+        }), 200
+
 if __name__ == '__main__':
     # Start the cleanup thread
     cleanup_old_jobs()

+    # Try installing dependencies at startup
+    try:
+        # Check if we need to install psutil
+        try:
+            import psutil
+        except ImportError:
+            subprocess.check_call(["pip", "install", "--no-cache-dir", "psutil"])
+    except:
+        print("Warning: Could not install psutil. Status endpoint may have limited functionality.")
+
     # Use port 7860 which is standard for Hugging Face Spaces
     port = int(os.environ.get('PORT', 7860))
-    app.run(host='0.0.0.0', port=port)
+    app.run(host='0.0.0.0', port=port)
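
The new /status endpoint makes long CPU jobs observable from outside the Space; a minimal polling client, with the base URL as a placeholder:

    import time
    import requests

    BASE = "http://localhost:7860"  # placeholder; substitute the Space URL

    # Watch memory pressure and job counts while a conversion runs.
    for _ in range(10):
        status = requests.get(f"{BASE}/status", timeout=10).json()
        print(status["jobs"], status.get("system", {}).get("memory_used_percent"))
        time.sleep(30)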
 