Padzong committed (verified)
Commit 9f29ff6 · Parent(s): d31b16f

Labeling bug fix, components reposition and styling

Files changed (1): app.py (+74 -25)

app.py CHANGED
@@ -28,7 +28,6 @@ for user in usersIndexes:
     for notUser in notUsersImages:
         examples.append([userImage, notUser, 1])
 
-#%%
 def predict(input1, input2, label=None):
     img1_PIL = Image.open(f'data/{input1}')
     img2_PIL = Image.open(f'data/{input2}')
@@ -36,11 +35,11 @@ def predict(input1, input2, label=None):
     img2 = transforms.ToTensor()(img2_PIL).unsqueeze(0)
 
     for el in examples:
-        if input1 == input2:
-            label = 0
+        if input1 == el[0] and input2 == el[1] and el[2] == 0:
+            label = 'Scans of the same finger'
             break
-        if input1 in el and input2 in el:
-            label = el[2]
+        if input1 == el[0] and input2 == el[1] and el[2] == 1:
+            label = 'Scans of different fingers'
 
     with torch.no_grad():
         out1, out2 = model(img1, img2)
@@ -49,33 +48,83 @@ def predict(input1, input2, label=None):
         decision = f'Access granted, confidence: {pred.item():4f}'
     else:
         decision = f'Access denied, confidence: {pred.item():4f}'
-    return img1_PIL, img2_PIL, decision, label
-
+
+    return img1_PIL, img2_PIL, decision, label
 #%%
+css = """
+.gradio-container {
+    height: 100vh;
+    max-width: 1024px !important;
+}
+
+.my_img {
+    max-height: 288px !important;
+    object-fit: cover !important;
+}
+
+#res div h2 { color: #07ef03; }
+"""
+
+js = """
+() => {
+    label = document.querySelector("#res div h2");
+    txt = label.textContent.split(",")[0]
+    if (txt === 'Access granted') {
+        label.style.color = "#07ef03";
+    }
+    if (txt === 'Access denied') {
+        label.style.color = "red";
+    }
+}
+"""
 
 img_PIL = Image.open(f'data/{file_list[0]}')
 
-with gr.Blocks() as demo:
-    drop1 = gr.Dropdown(
-        value=file_list[0],
-        choices=file_list,
-        label='First image',
-        scale=0
-    )
-    drop2 = gr.Dropdown(
-        value=file_list[0],
-        choices=file_list,
-        label='Second image',
-        scale=0
-    )
+with gr.Blocks(css=css, elem_classes=['container']) as demo:
     with gr.Row():
-        img1 = gr.Image(value=img_PIL, height=153, width=136, interactive=False, scale=0, label='image1')
-        img2 = gr.Image(value=img_PIL, height=153, width=136, interactive=False, scale=0, label='image2')
-    label = gr.Label(value=0, label='0 means images represent the same fingerprint')
-    output = gr.Label(value=predict(*examples[0])[2], label='Prediction, the closer to 0, the more similar')
+
+        with gr.Row():
+            drop1 = gr.Dropdown(value=None,
+                                choices=file_list,
+                                label='Select first image',
+                                scale=1,
+
+                                )
+
+            drop2 = gr.Dropdown(value=None,
+                                choices=file_list,
+                                label='Select second image',
+                                scale=1,
+
+                                )
+        label = gr.Label(value='Scans of the same finger', show_label=False)
+
+
+
+    with gr.Row():
+        img1 = gr.Image(value=img_PIL,
+                        height=288,
+                        width=256,
+                        interactive=False,
+                        scale=1,
+                        label='first image',
+                        show_download_button=False,
+                        elem_classes=['my-img'])
+
+        img2 = gr.Image(value=img_PIL,
+                        height=288,
+                        width=256,
+                        interactive=False,
+                        scale=1,
+                        label='second image',
+                        show_download_button=False,
+                        elem_classes=['my-img'])
+
+    output = gr.Label(value=predict(*examples[0])[2], elem_id='res', show_label=False)
 
     drop1.change(fn=predict, inputs=[drop1, drop2], outputs=[img1, img2, output, label])
    drop2.change(fn=predict, inputs=[drop1, drop2], outputs=[img1, img2, output, label])
-demo.launch()
+    output.change(fn=None, inputs=None, js=js)
+demo.launch()
 
 # %%
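
Note on the labeling fix: the old lookup only set label 0 when the two filenames were literally identical, and its fallback test `input1 in el and input2 in el` is order-insensitive and never breaks, so the last matching triple silently won. The new loop compares `el[0]` and `el[1]` positionally and maps the stored flag to a readable string. A minimal standalone sketch of the corrected lookup, with hypothetical filenames (the real `examples` list is built from the dataset earlier in app.py):

# Sketch of the corrected ground-truth lookup; filenames are made up.
examples = [
    ['u1_f1_a.png', 'u1_f1_b.png', 0],  # flag 0: scans of the same finger
    ['u1_f1_a.png', 'u2_f1_a.png', 1],  # flag 1: scans of different fingers
]

def lookup_label(input1, input2):
    # Positional match on el[0]/el[1]; the pre-fix membership test matched
    # the pair in either order and kept looping after a hit.
    for img1, img2, flag in examples:
        if input1 == img1 and input2 == img2:
            return 'Scans of the same finger' if flag == 0 else 'Scans of different fingers'
    return None

print(lookup_label('u1_f1_a.png', 'u1_f1_b.png'))  # -> Scans of the same finger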
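Note on the styling changes: `elem_id='res'` makes the prediction label addressable from the new `css` and `js` strings, and `output.change(fn=None, inputs=None, js=js)` runs the arrow function in the browser each time the prediction updates, recoloring the heading green for "Access granted" and red for "Access denied". One caveat as committed: the CSS rule targets `.my_img` while the images are tagged `elem_classes=['my-img']` (underscore vs. hyphen), so that rule likely never matches. A reduced, self-contained sketch of the same pattern, assuming a Gradio version whose event listeners accept a `js=` argument, as this diff does:

# Sketch: client-side recoloring of a Label via elem_id + a js callback.
import gradio as gr

css = "#res div h2 { color: #07ef03; }"  # default: green heading

js = """
() => {
    const label = document.querySelector("#res div h2");
    if (label.textContent.startsWith("Access denied")) {
        label.style.color = "red";
    } else {
        label.style.color = "#07ef03";
    }
}
"""

with gr.Blocks(css=css) as demo:
    deny = gr.Checkbox(label='Deny access')
    out = gr.Label(value='Access granted', elem_id='res', show_label=False)
    # fn updates the label server-side; js then runs client-side on change.
    deny.change(fn=lambda d: 'Access denied' if d else 'Access granted',
                inputs=deny, outputs=out)
    out.change(fn=None, inputs=None, js=js)

demo.launch()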