danhtran2mind committed on
Commit 904172a · verified · 1 Parent(s): a8d6bff

Update app.py

Files changed (1)
  1. app.py +14 -16
app.py CHANGED
@@ -31,27 +31,25 @@ loaded_autoencoder = tf.keras.models.load_model(
 def process_image(input_img):
     # Convert PIL Image to grayscale and resize
     img = input_img.convert("L")  # Convert to grayscale (single channel)
-    img = img.resize((WIDTH, HEIGHT))  # Adjust size as needed
-    img_array = tf.keras.preprocessing.image.img_to_array(img) / 255.0
-    img_array = img_array[None, ..., 0:1]  # Add batch dimension and keep single channel
-    # print("img_array shape: ", img_array.shape)
+    img = img.resize((WIDTH, HEIGHT))  # Resize to 512x512
+    img_array = tf.keras.preprocessing.image.img_to_array(img) / 255.0  # Normalize to [0, 1]
+    img_array = img_array[None, ..., 0:1]  # Add batch dimension, shape: (1, 512, 512, 1)
 
-    # Run inference
-    output_array = loaded_autoencoder.predict(img_array)
+    # Run inference (assuming loaded_autoencoder predicts a*b* channels)
+    output_array = loaded_autoencoder.predict(img_array)  # Shape: (1, 512, 512, 2) for a*b*
     print("output_array shape: ", output_array.shape)
 
-    # Assuming output_array has shape (1, 512, 512, 2) for U and V channels
-    # Extract Y (grayscale input) and UV (model output)
-    y_channel = img_array[0, :, :, 0]  # Grayscale input (Y channel)
-    uv_channels = output_array[0]  # Model output (U and V channels)
+    # Extract L* (grayscale input) and a*b* (model output)
+    L_channel = img_array[0, :, :, 0] * 100.0  # Denormalize L* to [0, 100]
+    ab_channels = output_array[0] * 128.0  # Denormalize a*b* to [-128, 128]
 
-    # Combine Y, U, V into a 3-channel YUV image
-    yuv_image = np.stack([y_channel, uv_channels[:, :, 0], uv_channels[:, :, 1]], axis=-1)
+    # Combine L*, a*, b* into a 3-channel L*a*b* image
+    lab_image = np.stack([L_channel, ab_channels[:, :, 0], ab_channels[:, :, 1]], axis=-1)  # Shape: (512, 512, 3)
 
-    # Convert YUV to RGB
-    yuv_image = yuv_image * 255.0  # Denormalize
-    rgb_image = Image.fromarray(yuv_image.astype(np.uint8), mode="YCbCr")  # Use YCbCr (alias for YUV in PIL)
-    rgb_image = rgb_image.convert("RGB")  # Convert to RGB
+    # Convert L*a*b* to RGB
+    rgb_array = lab2rgb(lab_image)  # Convert to RGB, output in [0, 1]
+    rgb_array = np.clip(rgb_array, 0, 1) * 255.0  # Scale to [0, 255]
+    rgb_image = Image.fromarray(rgb_array.astype(np.uint8), mode="RGB")  # Create RGB PIL image
 
     return rgb_image
 
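Note on the new code path: lab2rgb is not defined anywhere in this hunk; it presumably comes from skimage.color, since the corresponding import change is not shown here. As a sanity check of the L*a*b* post-processing this commit switches to, below is a minimal self-contained sketch under that assumption. The helper name lab_to_rgb_image and its arguments are illustrative only and do not appear in app.py; it assumes the model's two output channels are a*/b* values scaled to roughly [-1, 1], matching the * 128.0 denormalization above.

import numpy as np
from PIL import Image
from skimage.color import lab2rgb  # assumed source of the lab2rgb used in app.py

def lab_to_rgb_image(gray_01, ab_pred):
    # gray_01: (H, W) grayscale input scaled to [0, 1]
    # ab_pred: (H, W, 2) predicted a*/b* channels scaled to roughly [-1, 1]
    L = gray_01 * 100.0                                # L* in [0, 100]
    ab = ab_pred * 128.0                               # a*, b* in about [-128, 128]
    lab = np.concatenate([L[..., None], ab], axis=-1)  # (H, W, 3) L*a*b* image
    rgb = np.clip(lab2rgb(lab), 0.0, 1.0)              # sRGB floats in [0, 1]
    return Image.fromarray((rgb * 255.0).astype(np.uint8), mode="RGB")

# Dummy-data example mirroring the 512x512 shapes used in app.py
gray = np.random.rand(512, 512)
ab = (np.random.rand(512, 512, 2) - 0.5) * 2.0
lab_to_rgb_image(gray, ab).save("colorized_demo.png")

Compared with the removed YUV path, which reinterpreted the normalized stack as full-range YCbCr bytes, going through CIELAB with an explicit colorimetric conversion keeps each channel's range well defined before the final uint8 cast.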