Update app.py
app.py
CHANGED
@@ -14,7 +14,7 @@ from huggingface_hub import snapshot_download
 from flask_cors import CORS
 import numpy as np
 import trimesh
-from
+from lgm.pipeline import LGMPipeline

 # Force CPU usage
 os.environ["CUDA_VISIBLE_DEVICES"] = ""
@@ -46,12 +46,12 @@ app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
 processing_jobs = {}

 # Global model variables
-
+lgm_pipeline = None
 model_loaded = False
 model_loading = False

 # Configuration for processing
-TIMEOUT_SECONDS =
+TIMEOUT_SECONDS = 240  # 4 minutes max for LGM on CPU
 MAX_DIMENSION = 256

 # TimeoutError for handling timeouts
@@ -106,21 +106,21 @@ def preprocess_image(image_path):
     return img

 def load_model():
-    global
+    global lgm_pipeline, model_loaded, model_loading

     if model_loaded:
-        return
+        return lgm_pipeline

     if model_loading:
         while model_loading and not model_loaded:
             time.sleep(0.5)
-        return
+        return lgm_pipeline

     try:
         model_loading = True
         print("Starting model loading...")

-        model_name = "
+        model_name = "open-mmlab/LGM"

         # Download model with retry mechanism
         max_retries = 3
@@ -141,18 +141,18 @@ def load_model():
             else:
                 raise

-        # Load
-
+        # Load LGM pipeline
+        lgm_pipeline = LGMPipeline.from_pretrained(
             model_name,
             use_safetensors=True,
             torch_dtype=torch.float16,
             cache_dir=CACHE_DIR,
-
+            device_map="cpu"
         )

         model_loaded = True
         print("Model loaded successfully on CPU")
-        return
+        return lgm_pipeline

     except Exception as e:
         print(f"Error loading model: {str(e)}")
@@ -165,7 +165,7 @@ def load_model():
 def health_check():
     return jsonify({
         "status": "healthy",
-        "model": "
+        "model": "LGM 3D Generator",
         "device": "cpu"
     }), 200

@@ -263,8 +263,8 @@ def convert_image_to_3d():
     try:
         def generate_3d():
             # Adjust settings based on detail level
-            steps = {'low':
-
+            steps = {'low': 40, 'medium': 60, 'high': 80}
+            resolution = {'low': 512, 'medium': 1024, 'high': 2048}

             # Convert PIL image to numpy
             img_array = np.array(image)
@@ -272,9 +272,10 @@ def convert_image_to_3d():
             # Generate mesh
             mesh = model(
                 image=img_array,
-
-                texture_resolution=
-
+                num_inference_steps=steps[detail_level],
+                texture_resolution=resolution[detail_level],
+                generator=torch.manual_seed(12345),
+                output_type="trimesh"
             )
             return mesh

@@ -414,7 +415,7 @@ def model_info(job_id):
 @app.route('/', methods=['GET'])
 def index():
     return jsonify({
-        "message": "Image to 3D API (
+        "message": "Image to 3D API (LGM)",
         "endpoints": [
             "/convert",
             "/progress/<job_id>",
@@ -426,7 +427,7 @@ def index():
             "output_format": "glb",
             "detail_level": "low, medium, or high - controls mesh detail"
         },
-        "description": "This API creates full 3D models from 2D images using
+        "description": "This API creates full 3D models from 2D images using LGM"
     }), 200

 if __name__ == '__main__':
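For reference, a minimal usage sketch of the patched code path. It mirrors the calls introduced in this diff (load_model(), then the pipeline call with num_inference_steps, texture_resolution, generator, and output_type="trimesh"); it assumes lgm.pipeline.LGMPipeline accepts exactly that call signature, that app.py is importable as a module, and that the file names are placeholders.

import numpy as np
import torch
from PIL import Image

from app import load_model  # returns the cached LGM pipeline, loading it on first call

# Load the pipeline on CPU, as configured in app.py
pipeline = load_model()

# Roughly what the /convert route does: pass the image as an RGB numpy array
img_array = np.array(Image.open("input.jpg").convert("RGB"))

# Same keyword arguments as the generate_3d() helper, using the 'low' detail preset
mesh = pipeline(
    image=img_array,
    num_inference_steps=40,
    texture_resolution=512,
    generator=torch.manual_seed(12345),
    output_type="trimesh",
)

# With output_type="trimesh", the result can be exported straight to GLB
mesh.export("output.glb")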