"""Streamlit quickstart app for the ClimSim dataset.

Reproduces the ClimSim quickstart notebook: loads the subsampled
low-resolution data, trains two baseline models (constant prediction
and multiple linear regression), evaluates them on the validation and
scoring splits, and renders per-variable error metrics as bar charts.
"""

import streamlit as st
from data_utils import *
import xarray as xr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import glob, os
import re
import tensorflow as tf
import netCDF4
import copy
import string
import h5py
from tqdm import tqdm


def _add_bias_column(features):
    """Return *features* with a trailing column of ones (bias/intercept unit).

    Parameters
    ----------
    features : np.ndarray
        2-D design matrix of shape (n_samples, n_features).

    Returns
    -------
    np.ndarray of shape (n_samples, n_features + 1).
    """
    bias_vector = np.ones((features.shape[0], 1))
    return np.concatenate((features, bias_vector), axis=1)


def _plot_metrics(data, dict_var):
    """Build the stacked bar-chart figure of per-variable metrics.

    Parameters
    ----------
    data : data_utils
        Object whose ``metrics_names``, ``model_names`` and
        ``var_short_names`` attributes drive the plot.
    dict_var : dict
        Mapping model name -> {metric name -> per-variable values}
        (``data.metrics_var_val`` or ``data.metrics_var_scoring``).

    Returns
    -------
    matplotlib.figure.Figure
        One bar-chart row per metric, labelled (a), (b), ...
    """
    letters = string.ascii_lowercase
    # One DataFrame per metric: rows = output variables, columns = models.
    plot_df_byvar = {}
    for metric in data.metrics_names:
        plot_df_byvar[metric] = pd.DataFrame(
            [dict_var[model][metric] for model in data.model_names],
            index=data.model_names)
        plot_df_byvar[metric] = plot_df_byvar[metric].rename(
            columns=data.var_short_names).transpose()

    fig, axes = plt.subplots(nrows=len(data.metrics_names), sharex=True)
    for i, metric in enumerate(data.metrics_names):
        plot_df_byvar[metric].plot.bar(legend=False, ax=axes[i])
        if metric != 'R2':
            axes[i].set_ylabel('$W/m^2$')
        else:
            # R2 is dimensionless; clamp the axis to [0, 1].
            axes[i].set_ylim(0, 1)
        axes[i].set_title(f'({letters[i]}) {metric}')
        axes[i].set_xlabel('Output variable')
        axes[i].set_xticklabels(plot_df_byvar[metric].index,
                                rotation=0, ha='center')
    axes[0].legend(columnspacing=.9, labelspacing=.3, handleheight=.07,
                   handlelength=1.5, handletextpad=.2, borderpad=.2,
                   ncol=3, loc='upper right')
    fig.set_size_inches(7, 8)
    fig.tight_layout()
    return fig


st.title('A _Quickstart Notebook_ for :blue[ClimSim]:')
st.link_button("ClimSim",
               "https://huggingface.co/datasets/LEAP/subsampled_low_res/tree/main",
               use_container_width=True)

st.header('**Step 1:** Import data_utils')
st.code('''from data_utils import *''', language='python')

st.header('**Step 2:** Instantiate class')
st.code('''#Change the path to your own
grid_info = xr.open_dataset('ClimSim_low-res_grid-info.nc')
input_mean = xr.open_dataset('input_mean.nc')
input_max = xr.open_dataset('input_max.nc')
input_min = xr.open_dataset('input_min.nc')
output_scale = xr.open_dataset('output_scale.nc')
data = data_utils(grid_info = grid_info, input_mean = input_mean, input_max = input_max, input_min = input_min, output_scale = output_scale)
data.set_to_v1_vars()''', language='python')

# Change the path to your own
grid_info = xr.open_dataset('ClimSim_low-res_grid-info.nc')
input_mean = xr.open_dataset('input_mean.nc')
input_max = xr.open_dataset('input_max.nc')
input_min = xr.open_dataset('input_min.nc')
output_scale = xr.open_dataset('output_scale.nc')
data = data_utils(grid_info=grid_info,
                  input_mean=input_mean,
                  input_max=input_max,
                  input_min=input_min,
                  output_scale=output_scale)
data.set_to_v1_vars()

st.header('**Step 3:** Load training and validation data')
st.code('''data.input_train = data.load_npy_file('train_input_small.npy')
data.target_train = data.load_npy_file('train_target_small.npy')
data.input_val = data.load_npy_file('val_input_small.npy')
data.target_val = data.load_npy_file('val_target_small.npy')''', language='python')

data.input_train = data.load_npy_file('train_input_small.npy')
data.target_train = data.load_npy_file('train_target_small.npy')
data.input_val = data.load_npy_file('val_input_small.npy')
data.target_val = data.load_npy_file('val_target_small.npy')

st.header('**Step 4:** Train models')

st.subheader('Train constant prediction model')
st.code('''const_model = data.target_train.mean(axis = 0)''', language='python')
# Baseline: predict the training-set mean for every sample.
const_model = data.target_train.mean(axis=0)

st.subheader('Train multiple linear regression model')
st.text('adding bias unit')
st.code('''X = data.input_train
bias_vector = np.ones((X.shape[0], 1))
X = np.concatenate((X, bias_vector), axis=1)''', language='python')
X = _add_bias_column(data.input_train)

st.text('create model')
st.code('''mlr_weights = np.linalg.inv(X.transpose()@X)@X.transpose()@data.target_train''', language='python')
# Ordinary least squares via the normal equations.
# NOTE(review): np.linalg.lstsq would be numerically safer than an
# explicit inverse if X'X is ill-conditioned — kept as-is to match
# the notebook the app demonstrates.
mlr_weights = np.linalg.inv(X.transpose() @ X) @ X.transpose() @ data.target_train

st.subheader('Train your models here')
st.code('''###
# train your model here
###''', language='python')
###
# train your model here
###
st.link_button("Go to Original Dataset",
               "https://huggingface.co/datasets/LEAP/subsampled_low_res/tree/main",
               use_container_width=True)

st.header('**Step 5:** Evaluate on validation data')
data.set_pressure_grid(data_split='val')

# Constant prediction: tile the training mean over every validation sample.
const_pred_val = np.repeat(const_model[np.newaxis, :],
                           data.target_val.shape[0], axis=0)
print(const_pred_val.shape)

# Multiple linear regression.
X_val = _add_bias_column(data.input_val)
mlr_pred_val = X_val @ mlr_weights
print(mlr_pred_val.shape)

# Load your prediction here

# Load predictions into data_utils object.
data.model_names = ['const', 'mlr']  # add names of your models here
preds = [const_pred_val, mlr_pred_val]  # add your custom predictions here
data.preds_val = dict(zip(data.model_names, preds))

data.reweight_target(data_split='val')
data.reweight_preds(data_split='val')

data.metrics_names = ['MAE', 'RMSE', 'R2', 'bias']
data.create_metrics_df(data_split='val')

st.pyplot(_plot_metrics(data, data.metrics_var_val))

# path to target input
data.input_scoring = np.load('scoring_input_small.npy')
# path to target output
data.target_scoring = np.load('scoring_target_small.npy')
data.set_pressure_grid(data_split='scoring')

# Constant prediction.
const_pred_scoring = np.repeat(const_model[np.newaxis, :],
                               data.target_scoring.shape[0], axis=0)
print(const_pred_scoring.shape)

# Multiple linear regression.
X_scoring = _add_bias_column(data.input_scoring)
mlr_pred_scoring = X_scoring @ mlr_weights
print(mlr_pred_scoring.shape)

# Your model prediction here

# Load predictions into object.
data.model_names = ['const', 'mlr']  # model name here
preds = [const_pred_scoring, mlr_pred_scoring]  # add prediction here
data.preds_scoring = dict(zip(data.model_names, preds))

# Weight predictions and target.
data.reweight_target(data_split='scoring')
data.reweight_preds(data_split='scoring')

# Set and calculate metrics.
data.metrics_names = ['MAE', 'RMSE', 'R2', 'bias']
data.create_metrics_df(data_split='scoring')

st.pyplot(_plot_metrics(data, data.metrics_var_scoring))

st.markdown('Streamlit p')