Ashrafb committed
Commit 8b9baab · verified · 1 Parent(s): 8f49770

Update main.py

Files changed (1): main.py (+13, -2)
main.py CHANGED

@@ -19,6 +19,11 @@ def load_model():
     model = Model(device='cuda' if torch.cuda.is_available() else 'cpu')
     model.load_model('cartoon4')
 
+import logging
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+
 @app.post("/upload/")
 async def process_image(file: UploadFile = File(...), top: int = Form(...), bottom: int = Form(...), left: int = Form(...), right: int = Form(...)):
     global model
@@ -31,10 +36,18 @@ async def process_image(file: UploadFile = File(...), top: int = Form(...), bott
     # Convert the uploaded image to numpy array
     nparr = np.frombuffer(contents, np.uint8)
     frame_rgb = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+    logging.info(f"Uploaded image shape: {frame_rgb.shape}")
 
     # Process the uploaded image
     aligned_face, instyle, message = model.detect_and_align_image(frame_rgb, top, bottom, left, right)
+    if aligned_face is None or instyle is None:
+        logging.error("Failed to process the image: No face detected or alignment failed.")
+        return {"error": message}
+
     processed_image, message = model.image_toonify(aligned_face, instyle, model.exstyle, style_degree=0.5, style_type='cartoon4')
+    if processed_image is None:
+        logging.error("Failed to toonify the image.")
+        return {"error": message}
 
     # Convert BGR to RGB
     processed_image_rgb = cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB)
@@ -44,8 +57,6 @@ async def process_image(file: UploadFile = File(...), top: int = Form(...), bott
 
     # Return the processed image as a streaming response
     return StreamingResponse(BytesIO(encoded_image.tobytes()), media_type="image/jpeg")
-
-
 # Mount static files directory
 app.mount("/", StaticFiles(directory="AB", html=True), name="static")
 
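For context, a client can exercise the /upload/ endpoint and the error path added in this commit roughly as sketched below. This is a minimal sketch, not part of the commit: the base URL http://localhost:7860, the local test.jpg input, and the zero crop values are all assumptions.

# Minimal client sketch for POSTing an image to /upload/ and handling both
# outcomes. Assumptions (not from the commit): the app is served at
# http://localhost:7860 and a local test.jpg exists.
import requests

url = "http://localhost:7860/upload/"
with open("test.jpg", "rb") as f:
    resp = requests.post(
        url,
        files={"file": ("test.jpg", f, "image/jpeg")},
        data={"top": 0, "bottom": 0, "left": 0, "right": 0},  # crop form fields expected by the endpoint
    )

# On success the endpoint streams JPEG bytes; with this commit it instead
# returns a JSON body like {"error": "..."} when alignment or toonification fails.
if resp.headers.get("content-type", "").startswith("image/jpeg"):
    with open("toonified.jpg", "wb") as out:
        out.write(resp.content)
else:
    print("Server reported:", resp.json())

Because the error path returns a plain dict, FastAPI serializes it as a JSON response with the default 200 status, so checking the Content-Type rather than the status code is the reliable way for a client to tell the two cases apart.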