mac9087 committed
Commit 13afc6c · verified · 1 Parent(s): 251affc

Update app.py

Files changed (1):
  1. app.py +91 -169

app.py CHANGED
@@ -1,3 +1,4 @@
  import os
  import torch
  import time
@@ -16,11 +17,11 @@ from flask_cors import CORS
  import numpy as np
  import trimesh
  import cv2
- from tsr.system import TSR # Updated import
- import torchvision.transforms as T

  app = Flask(__name__)
- CORS(app)

  # Configure directories
  UPLOAD_FOLDER = '/tmp/uploads'
@@ -28,12 +29,12 @@ RESULTS_FOLDER = '/tmp/results'
  CACHE_DIR = '/tmp/huggingface'
  ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}

- # Create directories
  os.makedirs(UPLOAD_FOLDER, exist_ok=True)
  os.makedirs(RESULTS_FOLDER, exist_ok=True)
  os.makedirs(CACHE_DIR, exist_ok=True)

- # Set Hugging Face cache
  os.environ['HF_HOME'] = CACHE_DIR
  os.environ['TRANSFORMERS_CACHE'] = os.path.join(CACHE_DIR, 'transformers')
  os.environ['HF_DATASETS_CACHE'] = os.path.join(CACHE_DIR, 'datasets')
@@ -41,22 +42,23 @@ os.environ['HF_DATASETS_CACHE'] = os.path.join(CACHE_DIR, 'datasets')
  app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
  app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16MB max

- # Job tracking
  processing_jobs = {}

  # Global model variables
- u2net_model = None
- triposr_model = None
  model_loaded = False
  model_loading = False

- # Configuration
- TIMEOUT_SECONDS = 240 # 4 minutes max
- MAX_DIMENSION = 512 # Max image dimension

  class TimeoutError(Exception):
      pass

  def process_with_timeout(function, args, timeout):
      result = [None]
      error = [None]
@@ -89,11 +91,12 @@ def process_with_timeout(function, args, timeout):
  def allowed_file(filename):
      return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

  def preprocess_image(image_path):
      with Image.open(image_path) as img:
          img = img.convert("RGB")

-         # Resize if too large
          if img.width > MAX_DIMENSION or img.height > MAX_DIMENSION:
              if img.width > img.height:
                  new_width = MAX_DIMENSION
@@ -103,62 +106,28 @@ def preprocess_image(image_path):
                  new_width = int(img.width * (MAX_DIMENSION / img.height))
              img = img.resize((new_width, new_height), Image.LANCZOS)

-         # Apply adaptive histogram equalization
-         img_array = np.array(img)
-         if len(img_array.shape) == 3 and img_array.shape[2] == 3:
-             lab = cv2.cvtColor(img_array, cv2.COLOR_RGB2LAB)
-             l, a, b = cv2.split(lab)
-             clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
-             cl = clahe.apply(l)
-             enhanced_lab = cv2.merge((cl, a, b))
-             img_array = cv2.cvtColor(enhanced_lab, cv2.COLOR_LAB2RGB)
-             img = Image.fromarray(img_array)
-
          return img

- def remove_background(image):
-     global u2net_model
-     if u2net_model is None:
-         # Dynamically import U2NET to avoid circular import issues
-         from u2net import U2NET
-         u2net_model = U2NET()
-         u2net_model.load_state_dict(torch.load('u2net.pth', map_location='cpu'))
-         u2net_model.eval()
-         u2net_model.to('cpu')
-
-     img_array = np.array(image)
-     img_tensor = T.ToTensor()(image.resize((320, 320))).unsqueeze(0)
-
-     with torch.no_grad():
-         d1, *_ = u2net_model(img_tensor)
-         pred = d1[:, 0, :, :]
-         pred = (pred - pred.min()) / (pred.max() - pred.min())
-         mask = (pred > 0.5).float().squeeze().numpy()
-
-     mask_img = Image.fromarray((mask * 255).astype('uint8')).resize(image.size)
-     mask_array = np.array(mask_img)[:, :, np.newaxis] / 255
-     result = img_array * mask_array + (1 - mask_array) * 255 # White background
-     return Image.fromarray(result.astype('uint8'))
-
  def load_model():
-     global triposr_model, model_loaded, model_loading

      if model_loaded:
-         return triposr_model

      if model_loading:
          while model_loading and not model_loaded:
              time.sleep(0.5)
-         return triposr_model

      try:
          model_loading = True
-         print("Loading TripoSR model...")

-         model_name = "stabilityai/TripoSR"
          max_retries = 3
          retry_delay = 5
-
          for attempt in range(max_retries):
              try:
                  snapshot_download(
@@ -169,23 +138,27 @@ def load_model():
                  break
              except Exception as e:
                  if attempt < max_retries - 1:
-                     print(f"Download attempt {attempt+1} failed: {str(e)}. Retrying...")
                      time.sleep(retry_delay)
                      retry_delay *= 2
                  else:
                      raise

-         # Initialize TSR model
-         triposr_model = TSR.from_pretrained(
              model_name,
-             torch_dtype=torch.float32,
-             device="cpu",
              cache_dir=CACHE_DIR
          )

          model_loaded = True
-         print("TripoSR model loaded successfully on CPU")
-         return triposr_model

      except Exception as e:
          print(f"Error loading model: {str(e)}")
@@ -194,27 +167,20 @@ def load_model():
      finally:
          model_loading = False

- def optimize_mesh(mesh, detail_level='medium'):
-     # Simplify mesh based on detail level
-     if detail_level == 'high':
-         target_faces = 50000
-     elif detail_level == 'medium':
-         target_faces = 30000
-     else:
-         target_faces = 15000
-
-     if len(mesh.faces) > target_faces:
-         mesh = mesh.simplify_quadric_decimation(target_faces)
-
-     # Fix normals
-     mesh.fix_normals()
-     return mesh

  @app.route('/health', methods=['GET'])
  def health_check():
      return jsonify({
          "status": "healthy",
-         "model": "TripoSR 3D Model Generator",
          "device": "cpu"
      }), 200
@@ -226,6 +192,7 @@ def progress(job_id):
              return

          job = processing_jobs[job_id]
          yield f"data: {json.dumps({'status': 'processing', 'progress': job['progress']})}\n\n"

          last_progress = job['progress']
@@ -234,8 +201,10 @@
              if job['progress'] != last_progress:
                  yield f"data: {json.dumps({'status': 'processing', 'progress': job['progress']})}\n\n"
                  last_progress = job['progress']
              time.sleep(0.5)
              check_count += 1
              if check_count > 60:
                  if 'thread_alive' in job and not job['thread_alive']():
                      job['status'] = 'error'
@@ -260,17 +229,16 @@ def convert_image_to_3d():
          return jsonify({"error": "No image selected"}), 400

      if not allowed_file(file.filename):
-         return jsonify({"error": f"File type not allowed: {', '.join(ALLOWED_EXTENSIONS)}"}), 400

      try:
          output_format = request.form.get('output_format', 'glb').lower()
          detail_level = request.form.get('detail_level', 'medium').lower()
-         texture_quality = request.form.get('texture_quality', 'medium').lower()
      except ValueError:
          return jsonify({"error": "Invalid parameter values"}), 400

-     if output_format not in ['obj', 'glb']:
-         return jsonify({"error": "Unsupported output format: 'obj' or 'glb'"}), 400

      job_id = str(uuid.uuid4())
      output_dir = os.path.join(RESULTS_FOLDER, job_id)
@@ -295,30 +263,32 @@ def convert_image_to_3d():
          processing_jobs[job_id]['thread_alive'] = lambda: thread.is_alive()

          try:
-             # Preprocess image
              processing_jobs[job_id]['progress'] = 5
              image = preprocess_image(filepath)
              processing_jobs[job_id]['progress'] = 10

-             # Remove background
-             processing_jobs[job_id]['progress'] = 20
-             clean_image = remove_background(image)
-             processing_jobs[job_id]['progress'] = 30
-
-             # Load TripoSR model
              try:
                  model = load_model()
-                 processing_jobs[job_id]['progress'] = 40
              except Exception as e:
                  processing_jobs[job_id]['status'] = 'error'
                  processing_jobs[job_id]['error'] = f"Error loading model: {str(e)}"
                  return

-             # Generate 3D model
              try:
                  def generate_3d():
-                     # TSR expects a PIL image
-                     mesh = model(clean_image)
                      return mesh

                  mesh, error = process_with_timeout(generate_3d, [], TIMEOUT_SECONDS)
@@ -330,49 +300,18 @@ def convert_image_to_3d():
                          return
                      else:
                          raise error
-
-                 processing_jobs[job_id]['progress'] = 70
-
-                 # Optimize mesh
-                 mesh = optimize_mesh(mesh, detail_level)
                  processing_jobs[job_id]['progress'] = 80

-             except Exception as e:
-                 error_details = traceback.format_exc()
-                 processing_jobs[job_id]['status'] = 'error'
-                 processing_jobs[job_id]['error'] = f"Error during processing: {str(e)}"
-                 print(f"Error processing job {job_id}: {str(e)}")
-                 print(error_details)
-                 return
-
-             # Export model
-             try:
-                 if output_format == 'obj':
-                     obj_path = os.path.join(output_dir, "model.obj")
-                     mesh.export(
-                         obj_path,
-                         file_type='obj',
-                         include_normals=True,
-                         include_texture=True
-                     )
-                     zip_path = os.path.join(output_dir, "model.zip")
-                     with zipfile.ZipFile(zip_path, 'w') as zipf:
-                         zipf.write(obj_path, arcname="model.obj")
-                         mtl_path = os.path.join(output_dir, "model.mtl")
-                         if os.path.exists(mtl_path):
-                             zipf.write(mtl_path, arcname="model.mtl")
-                         texture_path = os.path.join(output_dir, "model.png")
-                         if os.path.exists(texture_path):
-                             zipf.write(texture_path, arcname="model.png")
-
-                     processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
-                     processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"

-                 elif output_format == 'glb':
-                     glb_path = os.path.join(output_dir, "model.glb")
-                     mesh.export(glb_path, file_type='glb')
-                     processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
-                     processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"

                  processing_jobs[job_id]['status'] = 'completed'
                  processing_jobs[job_id]['progress'] = 100
@@ -381,9 +320,10 @@
              except Exception as e:
                  error_details = traceback.format_exc()
                  processing_jobs[job_id]['status'] = 'error'
-                 processing_jobs[job_id]['error'] = f"Error exporting model: {str(e)}"
-                 print(f"Error exporting model for job {job_id}: {str(e)}")
                  print(error_details)

              if os.path.exists(filepath):
                  os.remove(filepath)
@@ -396,6 +336,7 @@ def convert_image_to_3d():
              processing_jobs[job_id]['error'] = f"{str(e)}\n{error_details}"
              print(f"Error processing job {job_id}: {str(e)}")
              print(error_details)
              if os.path.exists(filepath):
                  os.remove(filepath)

@@ -411,16 +352,10 @@ def download_model(job_id):
          return jsonify({"error": "Model not found or processing not complete"}), 404

      output_dir = os.path.join(RESULTS_FOLDER, job_id)
-     output_format = processing_jobs[job_id].get('output_format', 'glb')

-     if output_format == 'obj':
-         zip_path = os.path.join(output_dir, "model.zip")
-         if os.path.exists(zip_path):
-             return send_file(zip_path, as_attachment=True, download_name="model.zip")
-     else:
-         glb_path = os.path.join(output_dir, "model.glb")
-         if os.path.exists(glb_path):
-             return send_file(glb_path, as_attachment=True, download_name="model.glb")

      return jsonify({"error": "File not found"}), 404

@@ -430,16 +365,10 @@ def preview_model(job_id):
          return jsonify({"error": "Model not found or processing not complete"}), 404

      output_dir = os.path.join(RESULTS_FOLDER, job_id)
-     output_format = processing_jobs[job_id].get('output_format', 'glb')
-
-     if output_format == 'obj':
-         obj_path = os.path.join(output_dir, "model.obj")
-         if os.path.exists(obj_path):
-             return send_file(obj_path, mimetype='model/obj')
-     else:
-         glb_path = os.path.join(output_dir, "model.glb")
-         if os.path.exists(glb_path):
-             return send_file(glb_path, mimetype='model/gltf-binary')

      return jsonify({"error": "Model file not found"}), 404
@@ -461,6 +390,7 @@ def cleanup_old_jobs():
                  shutil.rmtree(output_dir)
          except Exception as e:
              print(f"Error cleaning up job {job_id}: {str(e)}")
          if job_id in processing_jobs:
              del processing_jobs[job_id]

@@ -483,17 +413,9 @@ def model_info(job_id):
      output_dir = os.path.join(RESULTS_FOLDER, job_id)
      model_stats = {}

-     if job['output_format'] == 'obj':
-         obj_path = os.path.join(output_dir, "model.obj")
-         zip_path = os.path.join(output_dir, "model.zip")
-         if os.path.exists(obj_path):
-             model_stats['obj_size'] = os.path.getsize(obj_path)
-         if os.path.exists(zip_path):
-             model_stats['package_size'] = os.path.getsize(zip_path)
-     else:
-         glb_path = os.path.join(output_dir, "model.glb")
-         if os.path.exists(glb_path):
-             model_stats['model_size'] = os.path.getsize(glb_path)

      return jsonify({
          "status": job['status'],
@@ -508,7 +430,7 @@ def model_info(job_id):
  @app.route('/', methods=['GET'])
  def index():
      return jsonify({
-         "message": "TripoSR Image to 3D API",
          "endpoints": [
              "/convert",
              "/progress/<job_id>",
@@ -517,14 +439,14 @@ def index():
              "/model-info/<job_id>"
          ],
          "parameters": {
-             "output_format": "obj or glb",
-             "detail_level": "low, medium, or high - controls mesh density",
-             "texture_quality": "low, medium, or high - controls texture quality"
          },
-         "description": "Creates full 3D models from 2D images with background removal"
      }), 200

  if __name__ == '__main__':
      cleanup_old_jobs()
      port = int(os.environ.get('PORT', 7860))
-     app.run(host='0.0.0.0', port=port)
 
 
+ ```python
  import os
  import torch
  import time

  import numpy as np
  import trimesh
  import cv2
+ import pymeshlab
+ from hy3dgen.shapegen import Hunyuan3DDiTFlowMatchingPipeline

  app = Flask(__name__)
+ CORS(app) # Enable CORS for all routes

  # Configure directories
  UPLOAD_FOLDER = '/tmp/uploads'

  CACHE_DIR = '/tmp/huggingface'
  ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}

+ # Create necessary directories
  os.makedirs(UPLOAD_FOLDER, exist_ok=True)
  os.makedirs(RESULTS_FOLDER, exist_ok=True)
  os.makedirs(CACHE_DIR, exist_ok=True)

+ # Set Hugging Face cache environment variables
  os.environ['HF_HOME'] = CACHE_DIR
  os.environ['TRANSFORMERS_CACHE'] = os.path.join(CACHE_DIR, 'transformers')
  os.environ['HF_DATASETS_CACHE'] = os.path.join(CACHE_DIR, 'datasets')

  app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
  app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16MB max

+ # Job tracking dictionary
  processing_jobs = {}

  # Global model variables
+ hunyuan_pipeline = None
  model_loaded = False
  model_loading = False

+ # Configuration for processing
+ TIMEOUT_SECONDS = 600 # 10 minutes max for Hunyuan3D-2mini on CPU
+ MAX_DIMENSION = 256 # Reduced for CPU memory constraints

+ # TimeoutError for handling timeouts
  class TimeoutError(Exception):
      pass

+ # Thread-safe timeout implementation
  def process_with_timeout(function, args, timeout):
      result = [None]
      error = [None]

  def allowed_file(filename):
      return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

+ # Simplified image preprocessing for Hunyuan3D-2mini
  def preprocess_image(image_path):
      with Image.open(image_path) as img:
          img = img.convert("RGB")

+         # Resize to smaller dimensions for CPU
          if img.width > MAX_DIMENSION or img.height > MAX_DIMENSION:
              if img.width > img.height:
                  new_width = MAX_DIMENSION
 
                  new_width = int(img.width * (MAX_DIMENSION / img.height))
              img = img.resize((new_width, new_height), Image.LANCZOS)

          return img

  def load_model():
+     global hunyuan_pipeline, model_loaded, model_loading

      if model_loaded:
+         return hunyuan_pipeline

      if model_loading:
          while model_loading and not model_loaded:
              time.sleep(0.5)
+         return hunyuan_pipeline

      try:
          model_loading = True
+         print("Starting model loading...")

+         model_name = "tencent/Hunyuan3D-2mini"
+
+         # Download model with retry mechanism
          max_retries = 3
          retry_delay = 5
          for attempt in range(max_retries):
              try:
                  snapshot_download(

                  break
              except Exception as e:
                  if attempt < max_retries - 1:
+                     print(f"Download attempt {attempt+1} failed: {str(e)}. Retrying in {retry_delay} seconds...")
                      time.sleep(retry_delay)
                      retry_delay *= 2
                  else:
                      raise

+         # Load Hunyuan3D-2mini pipeline
+         hunyuan_pipeline = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained(
              model_name,
+             subfolder="hunyuan3d-dit-v2-mini",
+             use_safetensors=True,
+             torch_dtype=torch.float16,
              cache_dir=CACHE_DIR
          )

+         # Move to CPU
+         hunyuan_pipeline.to("cpu")
+
          model_loaded = True
+         print("Model loaded successfully on CPU")
+         return hunyuan_pipeline

      except Exception as e:
          print(f"Error loading model: {str(e)}")
 
      finally:
          model_loading = False

+ # Optimize mesh for Unity
+ def optimize_mesh(mesh_path, target_faces=10000):
+     ms = pymeshlab.MeshSet()
+     ms.load_new_mesh(mesh_path)
+     ms.meshing_decimation_quadric_edge_collapse(targetfacenum=target_faces)
+     optimized_path = mesh_path.replace(".glb", "_optimized.glb")
+     ms.save_current_mesh(optimized_path)
+     return optimized_path

  @app.route('/health', methods=['GET'])
  def health_check():
      return jsonify({
          "status": "healthy",
+         "model": "Hunyuan3D-2mini 3D Generator",
          "device": "cpu"
      }), 200

              return

          job = processing_jobs[job_id]
+
          yield f"data: {json.dumps({'status': 'processing', 'progress': job['progress']})}\n\n"

          last_progress = job['progress']

              if job['progress'] != last_progress:
                  yield f"data: {json.dumps({'status': 'processing', 'progress': job['progress']})}\n\n"
                  last_progress = job['progress']
+
              time.sleep(0.5)
              check_count += 1
+
              if check_count > 60:
                  if 'thread_alive' in job and not job['thread_alive']():
                      job['status'] = 'error'
 
          return jsonify({"error": "No image selected"}), 400

      if not allowed_file(file.filename):
+         return jsonify({"error": f"File type not allowed. Supported types: {', '.join(ALLOWED_EXTENSIONS)}"}), 400

      try:
          output_format = request.form.get('output_format', 'glb').lower()
          detail_level = request.form.get('detail_level', 'medium').lower()
      except ValueError:
          return jsonify({"error": "Invalid parameter values"}), 400

+     if output_format not in ['glb']:
+         return jsonify({"error": "Only GLB format is supported with Hunyuan3D-2mini"}), 400

      job_id = str(uuid.uuid4())
      output_dir = os.path.join(RESULTS_FOLDER, job_id)

          processing_jobs[job_id]['thread_alive'] = lambda: thread.is_alive()

          try:
              processing_jobs[job_id]['progress'] = 5
              image = preprocess_image(filepath)
              processing_jobs[job_id]['progress'] = 10

              try:
                  model = load_model()
+                 processing_jobs[job_id]['progress'] = 30
              except Exception as e:
                  processing_jobs[job_id]['status'] = 'error'
                  processing_jobs[job_id]['error'] = f"Error loading model: {str(e)}"
                  return

              try:
                  def generate_3d():
+                     # Adjust settings based on detail level
+                     steps = {'low': 20, 'medium': 30, 'high': 40}
+                     resolution = {'low': 200, 'medium': 256, 'high': 300}
+
+                     mesh = model(
+                         image=image,
+                         num_inference_steps=steps[detail_level],
+                         octree_resolution=resolution[detail_level],
+                         num_chunks=10000,
+                         generator=torch.manual_seed(12345),
+                         output_type="trimesh"
+                     )[0]
                      return mesh

                  mesh, error = process_with_timeout(generate_3d, [], TIMEOUT_SECONDS)

                          return
                      else:
                          raise error
+
                  processing_jobs[job_id]['progress'] = 80

+                 # Export and optimize
+                 glb_path = os.path.join(output_dir, "model.glb")
+                 mesh.export(glb_path, file_type='glb')
+
+                 # Optimize for Unity
+                 optimized_path = optimize_mesh(glb_path)

+                 processing_jobs[job_id]['result_url'] = f"/download/{job_id}"
+                 processing_jobs[job_id]['preview_url'] = f"/preview/{job_id}"

                  processing_jobs[job_id]['status'] = 'completed'
                  processing_jobs[job_id]['progress'] = 100

              except Exception as e:
                  error_details = traceback.format_exc()
                  processing_jobs[job_id]['status'] = 'error'
+                 processing_jobs[job_id]['error'] = f"Error during processing: {str(e)}"
+                 print(f"Error processing job {job_id}: {str(e)}")
                  print(error_details)
+                 return

              if os.path.exists(filepath):
                  os.remove(filepath)
 
              processing_jobs[job_id]['error'] = f"{str(e)}\n{error_details}"
              print(f"Error processing job {job_id}: {str(e)}")
              print(error_details)
+
              if os.path.exists(filepath):
                  os.remove(filepath)
 
 
          return jsonify({"error": "Model not found or processing not complete"}), 404

      output_dir = os.path.join(RESULTS_FOLDER, job_id)
+     glb_path = os.path.join(output_dir, "model_optimized.glb")

+     if os.path.exists(glb_path):
+         return send_file(glb_path, as_attachment=True, download_name="model.glb")

      return jsonify({"error": "File not found"}), 404

          return jsonify({"error": "Model not found or processing not complete"}), 404

      output_dir = os.path.join(RESULTS_FOLDER, job_id)
+     glb_path = os.path.join(output_dir, "model_optimized.glb")
+
+     if os.path.exists(glb_path):
+         return send_file(glb_path, mimetype='model/gltf-binary')

      return jsonify({"error": "Model file not found"}), 404
 
 
                  shutil.rmtree(output_dir)
          except Exception as e:
              print(f"Error cleaning up job {job_id}: {str(e)}")
+
          if job_id in processing_jobs:
              del processing_jobs[job_id]
 
 
      output_dir = os.path.join(RESULTS_FOLDER, job_id)
      model_stats = {}

+     glb_path = os.path.join(output_dir, "model_optimized.glb")
+     if os.path.exists(glb_path):
+         model_stats['model_size'] = os.path.getsize(glb_path)

      return jsonify({
          "status": job['status'],
 
  @app.route('/', methods=['GET'])
  def index():
      return jsonify({
+         "message": "Image to 3D API (Hunyuan3D-2mini)",
          "endpoints": [
              "/convert",
              "/progress/<job_id>",

              "/model-info/<job_id>"
          ],
          "parameters": {
+             "output_format": "glb",
+             "detail_level": "low, medium, or high - controls mesh detail"
          },
+         "description": "This API creates full 3D models from 2D images using Hunyuan3D-2mini"
      }), 200

  if __name__ == '__main__':
      cleanup_old_jobs()
      port = int(os.environ.get('PORT', 7860))
+     app.run(host='0.0.0.0', port=port)
+ ```
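
For orientation, here is a minimal client sketch for the API as it stands after this commit. The endpoint paths and the output_format/detail_level form fields are taken from the diff above; the multipart field name "image", the "job_id" key in the /convert response, and a terminal "completed"/"error" event on the progress stream are not visible in the diff and are assumptions, as is the requests dependency.

# Hypothetical client for the /convert -> /progress -> /download flow in app.py.
# Assumes the Flask app from this commit is reachable at BASE_URL (default port 7860).
import json
import requests

BASE_URL = "http://localhost:7860"

# Submit an image; after this commit only GLB output is accepted.
with open("input.jpg", "rb") as f:
    resp = requests.post(
        f"{BASE_URL}/convert",
        files={"image": f},  # assumed field name
        data={"output_format": "glb", "detail_level": "medium"},
    )
resp.raise_for_status()
job_id = resp.json()["job_id"]  # assumed response key

# Follow the Server-Sent Events progress stream until the job finishes.
with requests.get(f"{BASE_URL}/progress/{job_id}", stream=True) as stream:
    for line in stream.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        event = json.loads(line[len("data: "):])
        print(event)
        if event.get("status") in ("completed", "error"):
            break

# Download the decimated GLB written by optimize_mesh().
model = requests.get(f"{BASE_URL}/download/{job_id}")
model.raise_for_status()
with open("model.glb", "wb") as out:
    out.write(model.content)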