Update Aug 22 12:58
app.py CHANGED
@@ -50,7 +50,7 @@ def s(input_string):
 
 def optimize(objective_function, iteration_input, progress=gr.Progress()):
 
-    print(objective_function)
+    # print(objective_function)
 
     # Variable setup
     Current_BEST = torch.tensor( -1e10 ) # Some arbitrary very small number
@@ -153,8 +153,8 @@ def optimize(objective_function, iteration_input, progress=gr.Progress()):
 
     # (ii) Convergence tracking (assuming the best Y is to be maximized)
     # if Current_BEST != -1e10:
-    print(Current_BEST)
-    print(convergence)
+    # print(Current_BEST)
+    # print(convergence)
     convergence.append(Current_BEST.abs())
     time_conv.append(time.time() - START_TIME)
 
@@ -258,7 +258,7 @@ def create_convergence_plot(objective_function, iteration_input, time_conv, conv
 
 
     ax.set_xlabel('Time (seconds)')
-    ax.set_ylabel('Objective Value')
+    ax.set_ylabel('Objective Value (Minimization)')
    ax.set_title('Convergence Plot for {t} iterations'.format(t=iteration_input))
    # ax.legend()
 
@@ -284,7 +284,7 @@ def create_convergence_plot(objective_function, iteration_input, time_conv, conv
        ax.axhline(y=2650, color='red', linestyle='--', label='Optimal Value')
 
    elif objective_function=="WeldedBeam.png":
-        ax.axhline(y=
+        ax.axhline(y=3.3, color='red', linestyle='--', label='Optimal Value')
 
    elif objective_function=="Car.png":
        ax.axhline(y=25, color='red', linestyle='--', label='Optimal Value')
@@ -367,7 +367,7 @@ def submit_action(objective_function_choices, iteration_input):
 def clear_output():
     # print(gallery.selected_index)
 
-    return gr.update(value=[], selected=None), None, 15, gr.Markdown(""), '
+    return gr.update(value=[], selected=None), None, 15, gr.Markdown(""), 'Formulation_default.png'
 
 def reset_gallery():
     return gr.update(value=image_paths)
@@ -378,14 +378,32 @@ with gr.Blocks() as demo:
     gr.HTML(
         """
         <div style="text-align: center;">
-        <
-
+        <p style="text-align: center; font-size:30px;"><b>
+        Constrained Bayesian Optimization with Pre-trained Transformers
+        </b></p>
+
+        <p style="text-align: center; font-size:18px;"><b>
+        Paper: <a href="https://arxiv.org/abs/2404.04495">
         Fast and Accurate Bayesian Optimization with Pre-trained Transformers for Constrained Engineering Problems</a>
-        </
-        <p style="text-align: left;">This is a demo for Bayesian Optimization using PFN (Prior-Data Fitted Networks).
-        Select your objective function by clicking on one of the check boxes below, then enter the iteration number to run the optimization process.
-        The results will be visualized in the radar chart and convergence plot.</p>
+        </b></p>
+
+        <p style="text-align: left;font-size:18px;">
+        Explore our interactive demo that uses PFN (Prior-Data Fitted Networks) for solving constrained Bayesian optimization problems!
+        </p>
+
+        <p style="text-align: left;font-size:24px;"><b>
+        Get Started:
+        </b> </p>
+
+
+        <p style="text-align: left;font-size:18px;">
+        <ol style="text-align: left;font-size:18px;text-indent: 30px;">
+        <li> <b>Select a Problem:</b> Click on an image from the problem gallery to choose your objective function. </li>
+        <li> <b>Set Iterations:</b> Adjust the slider to set the number of iterations for the optimization process. </li>
+        <li> <b>Run Optimization:</b> Click "Submit" to start the optimization. Use "Clear" if you need to reselect your parameters. </li>
+        </ol>
+        </p>
 
 
 
 
@@ -393,6 +411,26 @@ with gr.Blocks() as demo:
         </div>
         """
     )
+
+    gr.HTML(
+
+        """
+        <p style="text-align: left;font-size:24px;"><b>
+        Result Display:
+        </b> </p>
+
+        <p style="text-align: left;font-size:18px;">
+        <ol style="text-align: left;font-size:18px;text-indent: 30px;">
+        <li> <b>Panel Display:</b> Shows the problem formulation and the optimization results. </li>
+        <li> <b>Convergence Plot:</b> Visualizes the best observed objective against the algorithm's runtime over the chosen iterations. </li>
+        <ul>
+        <li> <b>PFN-CBO:</b> Displays results from real-time optimization. </li>
+        <li> <b>GP-CBO:</b> Provides pre-computed data from our past experiments, as GP real-time runs are impractical for a demo. </li>
+        </ul>
+        </ol>
+        </p>
+        """
+    )
 
 
     with gr.Row():
@@ -405,7 +443,7 @@ with gr.Blocks() as demo:
             gr.Markdown("## Select a problem (objective): ")
             img_key = gr.Markdown(value="", visible=False)
 
-            gallery = gr.Gallery(value=image_paths, label="
+            gallery = gr.Gallery(value=image_paths, label="Objectives",
                                  # height = 450,
                                  object_fit='contain',
                                  columns=3, rows=3, elem_id="gallery")
@@ -421,20 +459,54 @@ with gr.Blocks() as demo:
 
         with gr.Column():
             # gr.Markdown("# Outputs: ")
-            gr.Markdown("
-
-
-
+            gr.Markdown("""
+            ## Convergence Plot:
+            """)
+
             convergence_plot = gr.Plot(label="Convergence Plot")
+
+
+            gr.Markdown("")
+            gr.Markdown("## Problem formulation: ")
+            formulation = gr.Image(value='Formulation_default.png', label="Eq")
+
+
 
 
 
     def handle_select(evt: gr.SelectData):
         selected_image = evt.value
         key = evt.value['image']['orig_name']
-
-
-
+
+        if key=="CantileverBeam.png":
+            formulation = 'Cantilever_formulation.png'
+
+        elif key=="CompressionSpring.png":
+            formulation = 'Compressed_Formulation.png'
+
+        elif key=="HeatExchanger.png":
+            formulation = 'Heat_Formulation.png'
+
+        elif key=="Reinforcement.png":
+            formulation = 'Reinforce_Formulation.png'
+
+        elif key=="PressureVessel.png":
+            formulation = 'Pressure_Formulation.png'
+
+        elif key=="SpeedReducer.png":
+            formulation = 'Speed_Formulation.png'
+
+        elif key=="WeldedBeam.png":
+            formulation = 'Welded_Formulation.png'
+
+        elif key=="Car.png":
+            formulation = 'Car_Formulation_2.png'
+
+
+
+        # formulation = 'Test_formulation.png'
+        # print('here')
+        # print(key)
 
         return key, formulation
 
@@ -465,4 +537,4 @@ with gr.Blocks() as demo:
 
 
 
-demo.launch()
+demo.launch(share=True, server_port=7959)
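
Note on wiring (not part of this commit): handle_select returns (key, formulation) and clear_output returns five values, so each callback has to be attached to a matching list of output components. The sketch below is a minimal, self-contained reconstruction of that wiring under stated assumptions: the slider and button names, the placeholder image_paths, and the stubbed callbacks are illustrative rather than the Space's actual code, and the referenced .png assets are assumed to exist locally.

# Hypothetical wiring sketch; component names and assets are assumptions.
import gradio as gr

image_paths = ["CantileverBeam.png", "WeldedBeam.png", "Car.png"]  # placeholder list

def handle_select(evt: gr.SelectData):
    # Mirrors the diff: map the selected image's file name to its formulation image.
    key = evt.value['image']['orig_name']
    formulation = 'Welded_Formulation.png' if key == "WeldedBeam.png" else 'Formulation_default.png'
    return key, formulation

def clear_output():
    # Five return values, one per output component (gallery, plot, slider, markdown, image),
    # exactly as in the updated app.py.
    return gr.update(value=[], selected=None), None, 15, gr.Markdown(""), 'Formulation_default.png'

with gr.Blocks() as demo:
    gallery = gr.Gallery(value=image_paths, label="Objectives", columns=3, rows=3)
    img_key = gr.Markdown(value="", visible=False)
    iteration_input = gr.Slider(1, 50, value=15, label="Iterations")  # assumed name
    formulation = gr.Image(value='Formulation_default.png', label="Eq")
    convergence_plot = gr.Plot(label="Convergence Plot")
    clear_btn = gr.Button("Clear")                                    # assumed name

    # Selecting a gallery item feeds gr.SelectData into handle_select; its two return
    # values land in the hidden markdown (key) and the formulation image.
    gallery.select(handle_select, None, [img_key, formulation])

    # Clearing resets all five outputs in the order returned by clear_output.
    clear_btn.click(clear_output, None,
                    [gallery, convergence_plot, iteration_input, img_key, formulation])

demo.launch()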
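
Note on the convergence plot changes (illustrative only): the create_convergence_plot hunks add a '(Minimization)' y-axis label and an optimal-value reference line at y=3.3 for the welded-beam problem. The minimal sketch below shows that plotting pattern with made-up time_conv / convergence values standing in for the lists tracked in optimize(); only the labels and the axhline usage mirror the diff.

# Minimal plotting sketch with dummy data; not the Space's create_convergence_plot.
import matplotlib.pyplot as plt

time_conv = [0.5, 1.1, 1.8, 2.6, 3.5]    # seconds since START_TIME (dummy)
convergence = [9.4, 6.2, 4.8, 3.9, 3.5]  # best |objective| so far (dummy)

fig, ax = plt.subplots()
ax.plot(time_conv, convergence, marker='o', label='PFN-CBO')
ax.axhline(y=3.3, color='red', linestyle='--', label='Optimal Value')  # WeldedBeam reference
ax.set_xlabel('Time (seconds)')
ax.set_ylabel('Objective Value (Minimization)')
ax.set_title('Convergence Plot for {t} iterations'.format(t=len(convergence)))
ax.legend()
plt.show()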