|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import math |
|
from pathlib import Path |
|
|
|
import matplotlib |
|
import numpy as np |
|
import pandas as pd |
|
import seaborn as sns |
|
import seml |
|
from matplotlib import pyplot as plt |
|
|
|
# ---- Global plotting / display configuration ----
# Styles are applied in order, so "seaborn-talk" overrides any overlapping
# settings from "fivethirtyeight" (same effect as two sequential use() calls).
matplotlib.style.use(["fivethirtyeight", "seaborn-talk"])
matplotlib.rcParams["font.family"] = "monospace"
plt.rcParams["savefig.facecolor"] = "white"
sns.set_context("poster")
pd.set_option("display.max_columns", 100)
|
|
|
|
|
# Fetch every COMPLETED run of the "sciplex_hparam" sweep from the SEML
# experiment database as one flat DataFrame (one row per run).
requested_fields = ["config", "result", "seml", "config_hash"]
results = seml.get_results(
    "sciplex_hparam",
    to_data_frame=True,
    fields=requested_fields,
    states=["COMPLETED"],
)

# Quick look: how many runs per embedding model?
results["config.model.embedding.model"].value_counts()

# Quick look: all disentanglement-related result columns.
disentanglement_cols = [c for c in results.columns if "disentanglement" in c]
results.loc[:, disentanglement_cols]
|
|
|
|
|
|
|
|
|
|
|
# Hyperparameters varied in this sweep; all live under the
# `model.hparams` namespace of the run config.
sweeped_params = [
    "model.hparams." + name
    for name in (
        "dosers_lr",
        "dosers_wd",
        "autoencoder_lr",
        "autoencoder_wd",
        "adversary_width",
        "adversary_depth",
        "adversary_lr",
        "adversary_wd",
        "adversary_steps",
        "reg_adversary",
        "penalty_adversary",
        "batch_size",
        "step_size_lr",
    )
]
|
|
|
|
|
|
|
# Split off runs whose reconstruction loss ever became NaN (diverged runs).
# Compute the row-wise NaN mask once instead of running the same
# `.apply` twice, as the original code did.
nan_mask = results["result.loss_reconstruction"].apply(
    lambda x: math.isnan(sum(x))
)
nan_results = results[nan_mask]
results_clean = results[~nan_mask].copy()
# Fraction of runs discarded because they diverged.
print(len(nan_results) / len(results))

# Keep only runs whose first recorded training metric (x[0][0], presumably
# an R^2 score — TODO confirm) exceeds 0.6, i.e. drop runs that never
# learned anything.
results_clean = results_clean[
    results_clean["result.training"].apply(lambda x: x[0][0]) > 0.6
]

# Remaining run counts per embedding model after cleaning.
results_clean["config.model.embedding.model"].value_counts()
|
|
|
|
|
|
|
def get_mean(history):
    """Final-epoch value of the first metric column in `history`.

    `history` is a per-epoch list of metric tuples; despite the name this
    is the *last* entry, not an average (name kept for compatibility).
    """
    return np.array(history)[-1, 0]


def get_mean_de(history):
    """Final-epoch value of the second (DE-genes) metric column in `history`."""
    return np.array(history)[-1, 1]


# Collapse each run's metric history into scalar summary columns.
# Note the naming shift: the "test" result field is used as validation
# and "ood" as the held-out test set. Column creation order matches the
# original code exactly.
for src, dst in [
    ("result.training", "result.training_mean"),
    ("result.test", "result.val_mean"),
    ("result.ood", "result.test_mean"),
]:
    results_clean[dst] = results_clean[src].apply(get_mean)
    results_clean[dst + "_de"] = results_clean[src].apply(get_mean_de)

# The disentanglement metric is stored as a one-element list; unwrap it.
results_clean["result.perturbation disentanglement"] = results_clean[
    "result.perturbation disentanglement"
].apply(lambda x: x[0])

results_clean.head(3)
|
|
|
|
|
|
|
|
|
|
|
# Distribution of the total number of training epochs per run.
total_epochs = results_clean["result.epoch"].apply(max)
ax = sns.histplot(data=total_epochs)
ax.set_title("Total epochs before final stopping (min 125)")
|
|
|
|
|
|
|
|
|
|
|
# Which columns mention pretraining?
pretrain_cols = [c for c in results_clean.columns if "pretrain" in c]
pretrain_cols

# Embedding model vs. pretrained flag, per run.
results_clean[["config.model.embedding.model", "config.model.load_pretrained"]]
|
|
|
|
|
|
|
|
|
|
|
# DE-gene metric across train/val/test splits, one violin per embedding model.
rows, cols = 1, 3
fig, ax = plt.subplots(rows, cols, figsize=(7 * cols, 7 * rows), sharex=True)

de_metrics = ("result.training_mean_de", "result.val_mean_de", "result.test_mean_de")
for axis, metric in zip(ax, de_metrics):
    sns.violinplot(
        data=results_clean,
        x="config.model.embedding.model",
        y=metric,
        inner="point",
        ax=axis,
        scale="width",
    )
    axis.set_ylim([0.0, 1])
    axis.set_xticklabels(axis.get_xticklabels(), rotation=75, ha="right")
    axis.set_xlabel("")
    axis.set_ylabel(metric.split(".")[-1])
plt.tight_layout()
|
|
|
|
|
|
|
|
|
|
|
# All-genes metric across train/val/test splits, per embedding model.
rows, cols = 1, 3
fig, ax = plt.subplots(rows, cols, figsize=(10 * cols, 7 * rows), sharex=True)

mean_metrics = ("result.training_mean", "result.val_mean", "result.test_mean")
for axis, metric in zip(ax, mean_metrics):
    sns.violinplot(
        data=results_clean,
        x="config.model.embedding.model",
        y=metric,
        inner="point",
        ax=axis,
        scale="width",
    )
    axis.set_xticklabels(axis.get_xticklabels(), rotation=75, ha="right")
    axis.set_xlabel("")
    axis.set_ylabel(metric.split(".")[-1])
plt.tight_layout()
|
|
|
|
|
|
|
|
|
|
|
# Perturbation disentanglement per embedding model. The orange line marks
# the 0.18 threshold used later when selecting acceptable runs.
# (The original iterated a single-element list; unrolled here.)
rows, cols = 1, 1
fig, ax = plt.subplots(rows, cols, figsize=(10 * cols, 7 * rows), sharex=True)

y = "result.perturbation disentanglement"
sns.violinplot(
    data=results_clean,
    x="config.model.embedding.model",
    y=y,
    inner="point",
    ax=ax,
    scale="width",
)
ax.set_xticklabels(ax.get_xticklabels(), rotation=75, ha="right")
ax.axhline(0.18, color="orange")
ax.set_xlabel("")
ax.set_ylabel(y.split(".")[-1])
plt.tight_layout()
|
|
|
|
|
|
|
|
|
|
|
# Number of top runs to keep per (embedding, pretrained) combination.
n_top = 5


def performance_condition(emb, pretrained, max_entangle):
    """Boolean mask over `results_clean`.

    Selects runs with embedding model `emb` and pretrained flag
    `pretrained` whose perturbation disentanglement stays below
    `max_entangle`. (Was a multi-line lambda assignment; PEP 8 E731.)
    """
    return (
        (results_clean["config.model.embedding.model"] == emb)
        & (results_clean["result.perturbation disentanglement"] < max_entangle)
        & (results_clean["config.model.load_pretrained"] == pretrained)
    )


# For every (embedding, pretrained) combination, keep the n_top runs with
# the best validation DE-gene score among sufficiently disentangled runs.
best = []
for embedding in list(results_clean["config.model.embedding.model"].unique()):
    for pretrained in [True, False]:
        df = results_clean[performance_condition(embedding, pretrained, 0.18)]
        # Log how many candidate runs each combination has.
        print(embedding, pretrained, len(df))
        best.append(
            df.sort_values(by="result.val_mean_de", ascending=False).head(n_top)
        )

best = pd.concat(best)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Test-set performance and disentanglement of the selected best runs,
# split by embedding model and colored by pretraining.
rows, cols = 1, 3
fig, ax = plt.subplots(rows, cols, figsize=(10 * cols, 6 * rows))

test_metrics = [
    "result.test_mean",
    "result.test_mean_de",
    "result.perturbation disentanglement",
]
for axis, metric in zip(ax, test_metrics):
    sns.violinplot(
        data=best,
        x="config.model.embedding.model",
        y=metric,
        hue="config.model.load_pretrained",
        inner="points",
        ax=axis,
        scale="width",
    )
    axis.set_xticklabels(axis.get_xticklabels(), rotation=75, ha="right")
    axis.set_xlabel("")
    axis.set_ylabel(metric.split(".")[-1])
    axis.legend(title="Pretrained", loc="lower right", fontsize=18, title_fontsize=24)
plt.tight_layout()
|
|
|
|
|
|
|
# Training-set performance and disentanglement of the selected best runs.
rows, cols = 1, 3
fig, ax = plt.subplots(rows, cols, figsize=(10 * cols, 6 * rows))

train_metrics = [
    "result.training_mean",
    "result.training_mean_de",
    "result.perturbation disentanglement",
]
for axis, metric in zip(ax, train_metrics):
    sns.violinplot(
        data=best,
        x="config.model.embedding.model",
        y=metric,
        hue="config.model.load_pretrained",
        inner="points",
        ax=axis,
        scale="width",
    )
    axis.set_xticklabels(axis.get_xticklabels(), rotation=75, ha="right")
    axis.set_xlabel("")
    axis.set_ylabel(metric.split(".")[-1])
    axis.legend(title="Pretrained", loc="best", fontsize=18, title_fontsize=24)
plt.tight_layout()
|
|
|
|
|
|
|
|
|
|
|
# Hyperparameter settings of the selected runs next to their headline metrics.
headline_metrics = [
    "result.perturbation disentanglement",
    "result.test_mean",
    "result.test_mean_de",
]
best[[f"config.{col}" for col in sweeped_params] + headline_metrics]
|
|
|
|
|
|