amirfekrazad committed
Commit 2b9840c · 1 Parent(s): de673d6

Add the model and the app
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ model/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,103 @@
+ import gradio as gr
+ import tensorflow as tf
+ import cv2
+ import mtcnn
+ import numpy as np
+
+ model = tf.keras.models.load_model('./model')
+
+ def load_and_preprocess_image(im_path, detector, maxWidth=512):
+
+     desiredLeftEye = (0.36, 0.43)
+
+     # Gradio passes None when no image is provided
+     if im_path is None:
+         return None
+
+     # Load the image; cv2.imread returns None (rather than raising) on failure
+     image = cv2.imread(im_path)
+     if image is None:
+         return None
+
+     # Convert from OpenCV's BGR channel order to RGB
+     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+     # Detect the face in the image
+     result = detector.detect_faces(image)
+     if not result:
+         return None
+
+     # Get the bounding box for the face
+     x, y, w, h = result[0]['box']
+
+     desiredFaceWidth = 224
+     desiredFaceHeight = 224
+
+     # Get the landmarks for the face
+     landmarks = result[0]['keypoints']
+
+     # Calculate the angle between the eyes
+     eye_1 = landmarks['left_eye']
+     eye_2 = landmarks['right_eye']
+     dy = eye_2[1] - eye_1[1]
+     dx = eye_2[0] - eye_1[0]
+     angle = np.arctan2(dy, dx) * 180 / np.pi
+
+     desiredRightEyeX = 1.0 - desiredLeftEye[0]
+
+     # Scale so the inter-eye distance matches the desired proportion of the output
+     dist = np.sqrt((dx ** 2) + (dy ** 2))
+     desiredDist = (desiredRightEyeX - desiredLeftEye[0]) * desiredFaceWidth
+     scale = desiredDist / dist
+
+     eyesCenter = ((eye_1[0] + eye_2[0]) // 2, (eye_1[1] + eye_2[1]) // 2)
+
+     # Grab the rotation matrix for rotating and scaling the face
+     M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)
+     # Update the translation component of the matrix
+     tX = desiredFaceWidth * 0.5
+     tY = desiredFaceHeight * desiredLeftEye[1]
+     M[0, 2] += (tX - eyesCenter[0])
+     M[1, 2] += (tY - eyesCenter[1])
+
+     (w, h) = (desiredFaceWidth, desiredFaceHeight)
+     output = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC)
+
+     output = np.array(output)
+
+     # Scale to [0, 1] floats, collapse to grayscale, then replicate to 3 channels
+     output = tf.image.convert_image_dtype(output, dtype=tf.float32)
+     output = tf.image.rgb_to_grayscale(output)
+     output = tf.tile(output, [1, 1, 3])
+     output = tf.clip_by_value(output, 0, 1)
+
+     return output
+
+ def predict_remaining_life(img_path):
+     detector = mtcnn.MTCNN()
+     # Align the target face and add a batch dimension
+     img = load_and_preprocess_image(img_path, detector)
+     if img is None:
+         raise ValueError("Could not read the image or detect a face in it.")
+     img = np.expand_dims(img, axis=0)
+
+     # Run the Keras model in inference mode
+     pred = model.predict(img)
+     pred = round(pred[0][0] * 100, 1)
+
+     # Return the predicted remaining life in years
+     return pred
+
+
+ # Create title, description and article strings
+ title = "Remaining Life Predictor"
+ description = "A convolutional neural network that predicts how many years a person has left to live from a facial image"
+ article = "https://arxiv.org/abs/2301.08229"
+
+ # Create the Gradio demo
+ demo = gr.Interface(fn=predict_remaining_life,         # mapping function from input to output
+                     inputs=gr.Image(type="filepath"),  # the input is an image file path
+                     outputs=gr.Number(label="Remaining Life (Years)"),
+                     title=title,
+                     description=description,
+                     article=article)
+
+ # Launch the demo!
+ demo.launch(debug=False,  # print errors locally?
+             share=False)  # generate a publicly shareable URL?
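For a quick sanity check that the committed SavedModel loads and yields the single score app.py indexes as pred[0][0], a minimal local sketch could look like the following. The ./model path and the 224x224x3 input shape come from app.py; the random tensor is only a hypothetical stand-in for an aligned face, and the check assumes the repository was cloned with Git LFS so the weights resolve.

# Minimal sketch: load the committed SavedModel and run one dummy forward pass.
# Assumption: ./model contains the real LFS weights, not pointer files.
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model("./model")
dummy = np.random.rand(1, 224, 224, 3).astype("float32")  # stand-in for an aligned face batch
pred = model.predict(dummy)
print(pred.shape)                  # expected (1, 1), given how app.py indexes pred[0][0]
print(round(pred[0][0] * 100, 1))  # same scaling app.py applies to report years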
model/.DS_Store ADDED
Binary file (6.15 kB).
 
model/keras_metadata.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12267191038e5ed3282719d4ad76f6d4d9081c7577ab407b70bb7fddb1519334
+ size 57879
model/saved_model.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43c1adf18e88d5eb74bf50fede2446a05246f409febd3572413f9f1ca862fb9f
+ size 342554
model/variables/variables.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e80346bfc2f1fc28f3a549352900464d5106015a637716b808380012d1e4ef4
+ size 165845415
model/variables/variables.index ADDED
Binary file (2.26 kB).
 
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ torch==1.12.0
+ torchvision==0.13.0
+ gradio==3.1.4
+ tensorflow==2.11.0
+ numpy==1.24.0
+ mtcnn==0.1.0
+ opencv-python==4.6.0.66