Refactor code for improved readability and consistency

- Reorganized import statements in `ml_surrogate_train.py`, `rbf_model.py`, `rbf_optimization_de.py`, and `rbf_surrogate_train.py` for clarity.
- Simplified dictionary and list comprehensions for better readability.
- Updated print statements for consistent formatting.
- Enhanced function definitions with clearer parameter formatting.
- Added a new `pyproject.toml` file for Ruff configuration to enforce code style.
- Removed unnecessary comments and improved inline documentation.
parent 66a1c42f
output,model,cv_rmse,std_rmse,cv_rmse_dispersion,cv_mae,cv_r2,BEST_PARAMS,fit_time_sec,gpr_kernel
exymax_tw1,GradientBoosting,0.006493821737049297,0.006697384051332851,1.031347074577389,0.006493821737049297,,"{""learning_rate"": 0.025898788320641172, ""max_depth"": 1, ""max_features"": 0.6746420943668207, ""n_estimators"": 259, ""subsample"": 0.7527625863364908}",29.02,
exymax_tw1,SVR,0.006585985009931142,0.0064982791163432545,0.9866829497097801,0.006585985009931142,,"{""svr__C"": 1245.9545122809943, ""svr__epsilon"": 0.0012027645559314329, ""svr__gamma"": 0.005025785483194675}",17.74,
exymax_tw1,FlexibleMLP,0.006937396980222642,0.0068157807075165016,0.9824694661336453,0.006937396980222642,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.0009379840497457353, ""mlp__learning_rate_init"": 0.004342197085610553, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 176}",254.69,
exymax_tw1,GaussianProcess,0.007006391573544815,0.007763545618619573,1.1080661902959676,0.007006391573544815,,"{""gpr__amplitude"": 12.794825054001086, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 0.13888023693241583, ""gpr__n_restarts_optimizer"": 8, ""gpr__noise"": 3.350098334788488e-05, ""gpr__rq_alpha"": 97.05779613825874}",25.22,"1.43**2 * Matern(length_scale=2.95, nu=1.5) + WhiteKernel(noise_level=3.35e-05)"
exymax_tw1,RandomForest,0.010079281536114666,0.009245291985595667,0.9172570438150016,0.010079281536114666,,"{""max_depth"": 4, ""max_features"": 0.6684634201495349, ""min_samples_leaf"": 2, ""min_samples_split"": 3, ""n_estimators"": 701}",44.92,
exymax_tw1,XGBoost,0.011286929222419112,0.010014957220104181,0.8873057518790474,0.011286929222419112,,"{""colsample_bytree"": 0.7772026979143375, ""learning_rate"": 0.010072838879230094, ""max_depth"": 1, ""min_child_weight"": 3, ""n_estimators"": 1060, ""subsample"": 0.7629481902087631}",23.54,
exymax_tw2,SVR,0.0074343683500841745,0.004806222418653051,0.6464869901958292,0.0074343683500841745,,"{""svr__C"": 1578.3879853890564, ""svr__epsilon"": 0.002061045404501547, ""svr__gamma"": 0.003800674800490907}",18.36,
exymax_tw2,FlexibleMLP,0.007658610586748594,0.006297387105883743,0.8222623457027408,0.007658610586748594,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 1.0929824065574703e-05, ""mlp__learning_rate_init"": 0.006624003188358499, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 220}",219.18,
exymax_tw2,GradientBoosting,0.009069898715006377,0.0077238690282264485,0.8515937466255397,0.009069898715006377,,"{""learning_rate"": 0.013130700245599175, ""max_depth"": 3, ""max_features"": 0.6725567287580294, ""n_estimators"": 979, ""subsample"": 0.7716737130174645}",32.52,
exymax_tw2,GaussianProcess,0.009939632428247051,0.005790324831213219,0.5825491911308432,0.009939632428247051,,"{""gpr__amplitude"": 72.5040766704692, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 6.0343367820082765, ""gpr__n_restarts_optimizer"": 3, ""gpr__noise"": 2.855728226541178e-08, ""gpr__rq_alpha"": 0.022351050575570935}",25.32,"1.59**2 * Matern(length_scale=3.64, nu=1.5) + WhiteKernel(noise_level=2.86e-08)"
exymax_tw2,RandomForest,0.01190016183112282,0.014154848905866877,1.189466925470485,0.01190016183112282,,"{""max_depth"": 3, ""max_features"": 0.6668808751451677, ""min_samples_leaf"": 2, ""min_samples_split"": 4, ""n_estimators"": 745}",40.66,
exymax_tw2,XGBoost,0.01344777279254198,0.014678885541959797,1.091547705959204,0.01344777279254198,,"{""colsample_bytree"": 0.7473810557459762, ""learning_rate"": 0.18866984873972462, ""max_depth"": 4, ""min_child_weight"": 3, ""n_estimators"": 614, ""subsample"": 0.729683700209585}",21.82,
exymax_tw3,SVR,0.006837992343800181,0.004553580884860971,0.6659236594480216,0.006837992343800181,,"{""svr__C"": 448.7880306471758, ""svr__epsilon"": 0.00013403355081127117, ""svr__gamma"": 0.005847007418075073}",17.24,
exymax_tw3,GradientBoosting,0.008563949492028333,0.005947970551815772,0.6945359214638505,0.008563949492028333,,"{""learning_rate"": 0.06607013348294709, ""max_depth"": 2, ""max_features"": 0.7517846219435218, ""n_estimators"": 396, ""subsample"": 0.7057609458244425}",31.62,
exymax_tw3,GaussianProcess,0.009885086821969896,0.007845400559850292,0.7936602582400852,0.009885086821969896,,"{""gpr__amplitude"": 91.04050193518773, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 16.603129904873665, ""gpr__n_restarts_optimizer"": 2, ""gpr__noise"": 5.027538398209319e-09, ""gpr__rq_alpha"": 8.130666345000174}",24.0,"2.35**2 * Matern(length_scale=6.54, nu=1.5) + WhiteKernel(noise_level=5.03e-09)"
exymax_tw3,FlexibleMLP,0.010403826869819305,0.008525822888958094,0.8194891164222315,0.010403826869819305,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.0016236756337057437, ""mlp__learning_rate_init"": 0.007091056527634161, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 114}",133.62,
exymax_tw3,XGBoost,0.012591324124972523,0.01203655924174753,0.9559407034781418,0.012591324124972523,,"{""colsample_bytree"": 0.9504936269160258, ""learning_rate"": 0.022379620569679962, ""max_depth"": 4, ""min_child_weight"": 3, ""n_estimators"": 1171, ""subsample"": 0.7713136631361498}",23.35,
exymax_tw3,RandomForest,0.019219946982543132,0.014028424014223698,0.7298887987029969,0.019219946982543132,,"{""max_depth"": 3, ""max_features"": 0.666678136009284, ""min_samples_leaf"": 2, ""min_samples_split"": 4, ""n_estimators"": 202}",42.04,
tfmmax_frame,SVR,2.619870006205032,2.179555686783801,0.8319327606414179,2.619870006205032,,"{""svr__C"": 5387.889822738673, ""svr__epsilon"": 0.00023269836732785153, ""svr__gamma"": 0.07097468738318204}",16.09,
tfmmax_frame,GaussianProcess,2.6319634234770533,2.6761700837879463,1.0167960769958164,2.6319634234770533,,"{""gpr__amplitude"": 0.4369339947510315, ""gpr__kernel_type"": ""Matern52"", ""gpr__length_scale"": 53.88550972627239, ""gpr__n_restarts_optimizer"": 10, ""gpr__noise"": 3.3825346762279944e-05, ""gpr__rq_alpha"": 0.014523514317963207}",25.88,"2.04**2 * Matern(length_scale=4.89, nu=2.5) + WhiteKernel(noise_level=2.91e-12)"
tfmmax_frame,FlexibleMLP,9.118847413164623,7.156604842870021,0.7848146282761823,9.118847413164623,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.008499951197596679, ""mlp__learning_rate_init"": 0.0018647216179401307, ""mlp__n_layers"": 2, ""mlp__n_neurons"": 189}",230.53,
tfmmax_frame,GradientBoosting,10.342653453427404,9.274970333998832,0.8967689361113846,10.342653453427404,,"{""learning_rate"": 0.02100426043099579, ""max_depth"": 3, ""max_features"": 0.847182811955193, ""n_estimators"": 1273, ""subsample"": 0.8200678637420077}",37.04,
tfmmax_frame,XGBoost,11.273763220596315,13.025226181092236,1.155357437106372,11.273763220596315,,"{""colsample_bytree"": 0.9663842030420524, ""learning_rate"": 0.0795550204255391, ""max_depth"": 2, ""min_child_weight"": 3, ""n_estimators"": 695, ""subsample"": 0.8013266265338792}",22.69,
tfmmax_frame,RandomForest,14.753822626899181,12.767048890410846,0.8653383745534499,14.753822626899181,,"{""max_depth"": 4, ""max_features"": 0.680638315287123, ""min_samples_leaf"": 2, ""min_samples_split"": 3, ""n_estimators"": 718}",47.55,
tfmmax_tw1,FlexibleMLP,9.347145297566009,11.923942423962007,1.275677444221071,9.347145297566009,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 1.9379088771032204e-05, ""mlp__learning_rate_init"": 0.0004224115725246676, ""mlp__n_layers"": 4, ""mlp__n_neurons"": 489}",1060.19,
tfmmax_tw1,GaussianProcess,9.62698068906717,12.597108758969705,1.3085212452202741,9.62698068906717,,"{""gpr__amplitude"": 7.502493970046291, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 0.8380582958252333, ""gpr__n_restarts_optimizer"": 5, ""gpr__noise"": 2.9263795924381803e-05, ""gpr__rq_alpha"": 0.015640337252370295}",24.59,"1.98**2 * Matern(length_scale=4.66, nu=1.5) + WhiteKernel(noise_level=5.82e-08)"
tfmmax_tw1,GradientBoosting,10.25379604927948,11.822295469586088,1.1529676826775606,10.25379604927948,,"{""learning_rate"": 0.05944360886648676, ""max_depth"": 3, ""max_features"": 0.6749510567763413, ""n_estimators"": 827, ""subsample"": 0.7051508254376537}",32.47,
tfmmax_tw1,SVR,10.519809880015345,10.720543782316861,1.0190815142660377,10.519809880015345,,"{""svr__C"": 2821.889984424592, ""svr__epsilon"": 0.0036809826285089673, ""svr__gamma"": 0.05831076296072628}",16.97,
tfmmax_tw1,RandomForest,20.16973352251655,17.615419387667444,0.8733590539509315,20.16973352251655,,"{""max_depth"": 4, ""max_features"": 0.7045883364886902, ""min_samples_leaf"": 2, ""min_samples_split"": 3, ""n_estimators"": 779}",52.93,
tfmmax_tw1,XGBoost,20.503049638521674,21.422135827603316,1.044826804074787,20.503049638521674,,"{""colsample_bytree"": 0.8185601354408085, ""learning_rate"": 0.011521005232891813, ""max_depth"": 3, ""min_child_weight"": 3, ""n_estimators"": 402, ""subsample"": 0.7088635494200846}",22.99,
tfmmax_tw2,GaussianProcess,7.603551373657773,7.080381390450335,0.9311939963975333,7.603551373657773,,"{""gpr__amplitude"": 17.76576664980768, ""gpr__kernel_type"": ""RBF"", ""gpr__length_scale"": 2.467108843522573, ""gpr__n_restarts_optimizer"": 4, ""gpr__noise"": 2.3000512910597964e-12, ""gpr__rq_alpha"": 0.024337729656929853}",27.63,1.91**2 * RBF(length_scale=2.5) + WhiteKernel(noise_level=2.3e-12)
tfmmax_tw2,FlexibleMLP,7.610330906438847,9.279426886803238,1.219319764263105,7.610330906438847,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.005038476075373874, ""mlp__learning_rate_init"": 0.00014182668289381992, ""mlp__n_layers"": 4, ""mlp__n_neurons"": 308}",441.2,
tfmmax_tw2,SVR,9.027869677167802,9.88485278689751,1.094926393531919,9.027869677167802,,"{""svr__C"": 716.9454024181599, ""svr__epsilon"": 0.019504725037331867, ""svr__gamma"": 0.037685008549082605}",17.12,
tfmmax_tw2,GradientBoosting,10.969325006134355,11.03206565345827,1.0057196452187194,10.969325006134355,,"{""learning_rate"": 0.0794000458723623, ""max_depth"": 2, ""max_features"": 0.9523780968960629, ""n_estimators"": 1497, ""subsample"": 0.8095580047092626}",34.65,
tfmmax_tw2,XGBoost,17.299430461883546,18.49480242483776,1.0690989200822547,17.299430461883546,,"{""colsample_bytree"": 0.7419500396501548, ""learning_rate"": 0.01426193159307184, ""max_depth"": 3, ""min_child_weight"": 3, ""n_estimators"": 1200, ""subsample"": 0.8866577275892942}",22.58,
tfmmax_tw2,RandomForest,18.388992385201867,19.1309842775827,1.0403497851778947,18.388992385201867,,"{""max_depth"": 4, ""max_features"": 0.7471645822804288, ""min_samples_leaf"": 2, ""min_samples_split"": 2, ""n_estimators"": 799}",50.53,
tfmmax_tw3,SVR,5.238414599827629,6.046768431344908,1.1543126868086917,5.238414599827629,,"{""svr__C"": 5330.328717919815, ""svr__epsilon"": 0.05582902448723831, ""svr__gamma"": 0.04143240634749279}",16.48,
tfmmax_tw3,GaussianProcess,6.373824735317225,6.927116943988003,1.0868069380077237,6.373824735317225,,"{""gpr__amplitude"": 5.1006870662557064, ""gpr__kernel_type"": ""RQ"", ""gpr__length_scale"": 0.6011915845322179, ""gpr__n_restarts_optimizer"": 10, ""gpr__noise"": 6.28714484104137e-07, ""gpr__rq_alpha"": 0.010399730643601359}",26.39,"2.47**2 * RationalQuadratic(alpha=1e+03, length_scale=3.84) + WhiteKernel(noise_level=0.00044)"
tfmmax_tw3,FlexibleMLP,11.19864054900149,10.147086281186624,0.9060998285270772,11.19864054900149,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.003513807659187882, ""mlp__learning_rate_init"": 0.004434665898161324, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 362}",354.79,
tfmmax_tw3,GradientBoosting,14.737353298887765,16.680981563055898,1.1318844859554815,14.737353298887765,,"{""learning_rate"": 0.012453723661255066, ""max_depth"": 2, ""max_features"": 0.8668530221055493, ""n_estimators"": 911, ""subsample"": 0.7069743934701856}",36.21,
tfmmax_tw3,XGBoost,21.51369373817444,23.19103567169123,1.0779662457749164,21.51369373817444,,"{""colsample_bytree"": 0.7959588335760514, ""learning_rate"": 0.06303081692903291, ""max_depth"": 4, ""min_child_weight"": 3, ""n_estimators"": 505, ""subsample"": 0.7984053532645751}",21.97,
tfmmax_tw3,RandomForest,27.9838762364769,26.699362474101843,0.9540980759234242,27.9838762364769,,"{""max_depth"": 4, ""max_features"": 0.6773578522514452, ""min_samples_leaf"": 2, ""min_samples_split"": 4, ""n_estimators"": 727}",47.34,
output,best_model,cv_rmse,std_rmse,cv_rmse_dispersion,cv_mae,cv_r2,BEST_PARAMS,model_path,train_time_sec,gpr_kernel,selected_by
exymax_tw1,SVR,0.006585985009931142,0.0064982791163432545,0.9866829497097801,0.006585985009931142,,"{""svr__C"": 1245.9545122809943, ""svr__epsilon"": 0.0012027645559314329, ""svr__gamma"": 0.005025785483194675}",../../models/width_optimization/3W/ml_models/per_output_models_B29_H45/it0/best_model_exymax_tw1.joblib,395.13,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
exymax_tw3,SVR,0.006837992343800181,0.004553580884860971,0.6659236594480216,0.006837992343800181,,"{""svr__C"": 448.7880306471758, ""svr__epsilon"": 0.00013403355081127117, ""svr__gamma"": 0.005847007418075073}",../../models/width_optimization/3W/ml_models/per_output_models_B29_H45/it0/best_model_exymax_tw3.joblib,271.87,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
exymax_tw2,SVR,0.0074343683500841745,0.004806222418653051,0.6464869901958292,0.0074343683500841745,,"{""svr__C"": 1578.3879853890564, ""svr__epsilon"": 0.002061045404501547, ""svr__gamma"": 0.003800674800490907}",../../models/width_optimization/3W/ml_models/per_output_models_B29_H45/it0/best_model_exymax_tw2.joblib,357.86,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_frame,SVR,2.619870006205032,2.179555686783801,0.8319327606414179,2.619870006205032,,"{""svr__C"": 5387.889822738673, ""svr__epsilon"": 0.00023269836732785153, ""svr__gamma"": 0.07097468738318204}",../../models/width_optimization/3W/ml_models/per_output_models_B29_H45/it0/best_model_tfmmax_frame.joblib,379.78,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_tw3,SVR,5.238414599827629,6.046768431344908,1.1543126868086917,5.238414599827629,,"{""svr__C"": 5330.328717919815, ""svr__epsilon"": 0.05582902448723831, ""svr__gamma"": 0.04143240634749279}",../../models/width_optimization/3W/ml_models/per_output_models_B29_H45/it0/best_model_tfmmax_tw3.joblib,503.18,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_tw2,GaussianProcess,7.603551373657773,7.080381390450335,0.9311939963975333,7.603551373657773,,"{""gpr__amplitude"": 17.76576664980768, ""gpr__kernel_type"": ""RBF"", ""gpr__length_scale"": 2.467108843522573, ""gpr__n_restarts_optimizer"": 4, ""gpr__noise"": 2.3000512910597964e-12, ""gpr__rq_alpha"": 0.024337729656929853}",../../models/width_optimization/3W/ml_models/per_output_models_B29_H45/it0/best_model_tfmmax_tw2.joblib,593.72,1.91**2 * RBF(length_scale=2.5) + WhiteKernel(noise_level=2.3e-12),lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_tw1,FlexibleMLP,9.347145297566009,11.923942423962007,1.275677444221071,9.347145297566009,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 1.9379088771032204e-05, ""mlp__learning_rate_init"": 0.0004224115725246676, ""mlp__n_layers"": 4, ""mlp__n_neurons"": 489}",../../models/width_optimization/3W/ml_models/per_output_models_B29_H45/it0/best_model_tfmmax_tw1.joblib,1210.15,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
output,model,cv_rmse,std_rmse,cv_rmse_dispersion,cv_mae,cv_r2,BEST_PARAMS,fit_time_sec,gpr_kernel
exymax_tw1,GradientBoosting,0.007300943573175188,0.007955575607590385,1.089664031484974,0.007300943573175188,,"{""learning_rate"": 0.016754195403105226, ""max_depth"": 3, ""max_features"": 0.6676595772974903, ""n_estimators"": 1169, ""subsample"": 0.7322018725625201}",31.99,
exymax_tw1,FlexibleMLP,0.008928697714601926,0.007240983086215549,0.8109786351456046,0.008928697714601926,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 6.650761242433353e-05, ""mlp__learning_rate_init"": 0.005585797004935133, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 295}",281.06,
exymax_tw1,GaussianProcess,0.008959708606366402,0.010679991441770292,1.192002096383082,0.008959708606366402,,"{""gpr__amplitude"": 15.783879853890564, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 1.2778531518898433, ""gpr__n_restarts_optimizer"": 4, ""gpr__noise"": 3.279409439150647e-05, ""gpr__rq_alpha"": 3.0182479639711155}",26.1,"1.42**2 * Matern(length_scale=2.82, nu=1.5) + WhiteKernel(noise_level=3.28e-05)"
exymax_tw1,SVR,0.010633655193556206,0.012138674766547789,1.1415336067981212,0.010633655193556206,,"{""svr__C"": 43.69339947510315, ""svr__epsilon"": 0.015246748254295629, ""svr__gamma"": 0.06289324408100376}",16.38,
exymax_tw1,RandomForest,0.012373070162229391,0.011292798959624569,0.9126917419491802,0.012373070162229391,,"{""max_depth"": 3, ""max_features"": 0.666678136009284, ""min_samples_leaf"": 2, ""min_samples_split"": 4, ""n_estimators"": 202}",46.88,
exymax_tw1,XGBoost,0.013111848823930695,0.014855790703068427,1.1330050325134033,0.013111848823930695,,"{""colsample_bytree"": 0.708286329625656, ""learning_rate"": 0.015800483357585358, ""max_depth"": 2, ""min_child_weight"": 3, ""n_estimators"": 320, ""subsample"": 0.7740358836160686}",23.14,
exymax_tw2,SVR,0.008460412231077893,0.011444731445050746,1.3527392203196034,0.008460412231077893,,"{""svr__C"": 9808.35589449746, ""svr__epsilon"": 0.00016322664292135178, ""svr__gamma"": 0.00038258041488189597}",18.57,
exymax_tw2,GradientBoosting,0.01106798696171253,0.012449959186724643,1.1248621117636628,0.01106798696171253,,"{""learning_rate"": 0.016861891001283144, ""max_depth"": 2, ""max_features"": 0.7511271397145509, ""n_estimators"": 779, ""subsample"": 0.7964378889439548}",29.47,
exymax_tw2,GaussianProcess,0.012796596734800235,0.010452820628629228,0.8168437941162022,0.012796596734800235,,"{""gpr__amplitude"": 17.76576664980768, ""gpr__kernel_type"": ""RBF"", ""gpr__length_scale"": 2.467108843522573, ""gpr__n_restarts_optimizer"": 4, ""gpr__noise"": 2.3000512910597964e-12, ""gpr__rq_alpha"": 0.024337729656929853}",25.24,1**2 * RBF(length_scale=1.22) + WhiteKernel(noise_level=2.3e-12)
exymax_tw2,FlexibleMLP,0.01381939739925558,0.013384123990448868,0.968502721484067,0.01381939739925558,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.00017623384441354837, ""mlp__learning_rate_init"": 0.008737146055928138, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 126}",237.54,
exymax_tw2,XGBoost,0.014129863824865222,0.01420304484433387,1.0051791737256424,0.014129863824865222,,"{""colsample_bytree"": 0.8121544546042689, ""learning_rate"": 0.1003870683312305, ""max_depth"": 3, ""min_child_weight"": 3, ""n_estimators"": 911, ""subsample"": 0.9460263275434964}",21.93,
exymax_tw2,RandomForest,0.018540453772891104,0.014945734506514891,0.8061148173389248,0.018540453772891104,,"{""max_depth"": 4, ""max_features"": 0.7958367795416341, ""min_samples_leaf"": 2, ""min_samples_split"": 6, ""n_estimators"": 499}",49.66,
exymax_tw3,SVR,0.007202212127447048,0.004481301562810276,0.6222118265209654,0.007202212127447048,,"{""svr__C"": 1117.125712746634, ""svr__epsilon"": 0.0006465958496706061, ""svr__gamma"": 0.004670356278081838}",18.35,
exymax_tw3,FlexibleMLP,0.009574224078496153,0.005453227306471528,0.5695738121190996,0.009574224078496153,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.0007343773636396239, ""mlp__learning_rate_init"": 0.00434210374366876, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 508}",747.41,
exymax_tw3,GradientBoosting,0.009793759507230378,0.005505517160119351,0.5621454310834186,0.009793759507230378,,"{""learning_rate"": 0.07501271280636532, ""max_depth"": 2, ""max_features"": 0.6737510461367449, ""n_estimators"": 300, ""subsample"": 0.70198384555758}",29.61,
exymax_tw3,GaussianProcess,0.010158138393528571,0.00819461526699299,0.8067044324000863,0.010158138393528571,,"{""gpr__amplitude"": 15.783879853890564, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 1.2778531518898433, ""gpr__n_restarts_optimizer"": 4, ""gpr__noise"": 3.279409439150647e-05, ""gpr__rq_alpha"": 3.0182479639711155}",24.47,"2.56**2 * Matern(length_scale=7.45, nu=1.5) + WhiteKernel(noise_level=6.29e-10)"
exymax_tw3,XGBoost,0.013058955349829794,0.01244028993222156,0.9526251984913712,0.013058955349829794,,"{""colsample_bytree"": 0.9282786779000243, ""learning_rate"": 0.02077844213385322, ""max_depth"": 2, ""min_child_weight"": 3, ""n_estimators"": 900, ""subsample"": 0.8099741288300227}",22.27,
exymax_tw3,RandomForest,0.02117151423509203,0.013589018174188896,0.6418538619058688,0.02117151423509203,,"{""max_depth"": 3, ""max_features"": 0.666678136009284, ""min_samples_leaf"": 2, ""min_samples_split"": 4, ""n_estimators"": 202}",47.29,
tfmmax_frame,SVR,1.7183858166563564,1.8940457119290446,1.1022237809285973,1.7183858166563564,,"{""svr__C"": 2338.7077018101695, ""svr__epsilon"": 0.0015121721150378067, ""svr__gamma"": 0.08059242367780602}",16.4,
tfmmax_frame,GaussianProcess,2.329849041969559,2.4714209836044256,1.0607644268296401,2.329849041969559,,"{""gpr__amplitude"": 2.9397976202716882, ""gpr__kernel_type"": ""RQ"", ""gpr__length_scale"": 0.2729554233267, ""gpr__n_restarts_optimizer"": 3, ""gpr__noise"": 2.408569608205542e-07, ""gpr__rq_alpha"": 0.3551763619275992}",27.42,"1.35**2 * RationalQuadratic(alpha=1e+03, length_scale=2.53) + WhiteKernel(noise_level=1.28e-09)"
tfmmax_frame,FlexibleMLP,7.460456828007423,5.449732908585638,0.7304824669887112,7.460456828007423,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.00022168437108307395, ""mlp__learning_rate_init"": 0.003864323485545105, ""mlp__n_layers"": 1, ""mlp__n_neurons"": 124}",183.75,
tfmmax_frame,GradientBoosting,9.730948350079762,6.340066644812742,0.6515363576830385,9.730948350079762,,"{""learning_rate"": 0.032360453236975235, ""max_depth"": 3, ""max_features"": 0.9412562993996781, ""n_estimators"": 1301, ""subsample"": 0.7680003531759929}",35.79,
tfmmax_frame,XGBoost,11.570496972656251,12.073343353555009,1.043459358927028,11.570496972656251,,"{""colsample_bytree"": 0.8051273114064031, ""learning_rate"": 0.16654560462450568, ""max_depth"": 4, ""min_child_weight"": 3, ""n_estimators"": 995, ""subsample"": 0.8461821341122017}",22.59,
tfmmax_frame,RandomForest,14.609734471256301,10.883909707755192,0.7449765585520103,14.609734471256301,,"{""max_depth"": 4, ""max_features"": 0.719322933940416, ""min_samples_leaf"": 2, ""min_samples_split"": 4, ""n_estimators"": 760}",48.66,
tfmmax_tw1,GaussianProcess,12.729753975937573,16.8283131798545,1.3219668826015203,12.729753975937573,,"{""gpr__amplitude"": 42.64486576023699, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 1.5803508571081364, ""gpr__n_restarts_optimizer"": 10, ""gpr__noise"": 2.537051332183834e-05, ""gpr__rq_alpha"": 0.01469218297101831}",25.21,"1.98**2 * Matern(length_scale=4.65, nu=1.5) + WhiteKernel(noise_level=2.91e-12)"
tfmmax_tw1,GradientBoosting,13.384466644077264,14.304868082659462,1.0687663889087045,13.384466644077264,,"{""learning_rate"": 0.07340186622500515, ""max_depth"": 3, ""max_features"": 0.6696830878373136, ""n_estimators"": 507, ""subsample"": 0.704234291170677}",29.61,
tfmmax_tw1,SVR,13.974163760769088,12.878401245425776,0.9215865411267368,13.974163760769088,,"{""svr__C"": 8433.04724917327, ""svr__epsilon"": 0.01770333417501503, ""svr__gamma"": 0.05616380261763679}",16.51,
tfmmax_tw1,FlexibleMLP,14.849951752421243,14.884573269939603,1.0023314228958835,14.849951752421243,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.000258401605854622, ""mlp__learning_rate_init"": 0.0011945412191786166, ""mlp__n_layers"": 4, ""mlp__n_neurons"": 487}",733.48,
tfmmax_tw1,XGBoost,26.365377836850286,29.98372363054691,1.137238533659068,26.365377836850286,,"{""colsample_bytree"": 0.9446720303070671, ""learning_rate"": 0.012626475442678873, ""max_depth"": 3, ""min_child_weight"": 3, ""n_estimators"": 539, ""subsample"": 0.7364163367585075}",21.95,
tfmmax_tw1,RandomForest,27.148631867407566,20.85365954502796,0.7681292982598936,27.148631867407566,,"{""max_depth"": 4, ""max_features"": 0.6795241654927764, ""min_samples_leaf"": 2, ""min_samples_split"": 3, ""n_estimators"": 408}",45.5,
tfmmax_tw2,SVR,6.570230475951083,8.231729016702195,1.252882839777489,6.570230475951083,,"{""svr__C"": 9598.629415551462, ""svr__epsilon"": 0.008177764525277858, ""svr__gamma"": 0.06450806413377351}",16.8,
tfmmax_tw2,GaussianProcess,6.814424692851277,8.667563468615072,1.2719435402533459,6.814424692851277,,"{""gpr__amplitude"": 17.76576664980768, ""gpr__kernel_type"": ""RBF"", ""gpr__length_scale"": 2.467108843522573, ""gpr__n_restarts_optimizer"": 4, ""gpr__noise"": 2.3000512910597964e-12, ""gpr__rq_alpha"": 0.024337729656929853}",26.44,1.82**2 * RBF(length_scale=2.56) + WhiteKernel(noise_level=2.3e-12)
tfmmax_tw2,FlexibleMLP,7.5119021354997395,6.04287293683089,0.8044397847348792,7.5119021354997395,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.0025840928147925525, ""mlp__learning_rate_init"": 0.008441124517063102, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 352}",346.2,
tfmmax_tw2,GradientBoosting,12.532922022225186,11.89682426712509,0.9492458539220083,12.532922022225186,,"{""learning_rate"": 0.01041358366647463, ""max_depth"": 3, ""max_features"": 0.8294173131088899, ""n_estimators"": 1485, ""subsample"": 0.8189020562940738}",37.93,
tfmmax_tw2,XGBoost,16.198008113098147,18.628541664530946,1.1500513849889606,16.198008113098147,,"{""colsample_bytree"": 0.7259514529374792, ""learning_rate"": 0.01979862368399526, ""max_depth"": 4, ""min_child_weight"": 3, ""n_estimators"": 984, ""subsample"": 0.919298911582289}",22.4,
tfmmax_tw2,RandomForest,19.59401232944863,20.329426317379788,1.0375325877909076,19.59401232944863,,"{""max_depth"": 4, ""max_features"": 0.668958029907144, ""min_samples_leaf"": 2, ""min_samples_split"": 3, ""n_estimators"": 368}",47.41,
tfmmax_tw3,SVR,6.867884256625402,7.773194090294852,1.131817864110931,6.867884256625402,,"{""svr__C"": 4770.813296275575, ""svr__epsilon"": 0.0156951512570451, ""svr__gamma"": 0.04171961411779374}",16.47,
tfmmax_tw3,GaussianProcess,8.194171429706405,9.348848543814851,1.1409144443722992,8.194171429706405,,"{""gpr__amplitude"": 0.07795874834087786, ""gpr__kernel_type"": ""RQ"", ""gpr__length_scale"": 0.16829001502791593, ""gpr__n_restarts_optimizer"": 10, ""gpr__noise"": 9.714431097498268e-05, ""gpr__rq_alpha"": 0.24852278170635883}",28.09,"2.33**2 * RationalQuadratic(alpha=1e+03, length_scale=3.63) + WhiteKernel(noise_level=0.00128)"
tfmmax_tw3,FlexibleMLP,11.992162856019233,13.307461520261965,1.1096798534204815,11.992162856019233,,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 1.812989003121976e-05, ""mlp__learning_rate_init"": 0.008319488950703918, ""mlp__n_layers"": 4, ""mlp__n_neurons"": 214}",352.51,
tfmmax_tw3,GradientBoosting,15.684447590276225,13.95037456132023,0.8894399679061024,15.684447590276225,,"{""learning_rate"": 0.019208317545957237, ""max_depth"": 1, ""max_features"": 0.9136923107411692, ""n_estimators"": 839, ""subsample"": 0.7137859045145266}",38.19,
tfmmax_tw3,XGBoost,25.492465800094607,25.384851476736458,0.995778583201718,25.492465800094607,,"{""colsample_bytree"": 0.8873818110686668, ""learning_rate"": 0.15463320758542579, ""max_depth"": 2, ""min_child_weight"": 3, ""n_estimators"": 200, ""subsample"": 0.8571759162038339}",21.17,
tfmmax_tw3,RandomForest,29.11847171376,26.327520770414292,0.9041518740824973,29.11847171376,,"{""max_depth"": 4, ""max_features"": 0.7379149040053794, ""min_samples_leaf"": 2, ""min_samples_split"": 3, ""n_estimators"": 753}",48.75,
output,best_model,cv_rmse,std_rmse,cv_rmse_dispersion,cv_mae,cv_r2,BEST_PARAMS,model_path,train_time_sec,gpr_kernel,selected_by
exymax_tw3,SVR,0.007202212127447048,0.004481301562810276,0.6222118265209654,0.007202212127447048,,"{""svr__C"": 1117.125712746634, ""svr__epsilon"": 0.0006465958496706061, ""svr__gamma"": 0.004670356278081838}",../../models/width_optimization/3W/ml_models/per_output_models_B34_H45/it0/best_model_exymax_tw3.joblib,889.41,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
exymax_tw1,GradientBoosting,0.007300943573175188,0.007955575607590385,1.089664031484974,0.007300943573175188,,"{""learning_rate"": 0.016754195403105226, ""max_depth"": 3, ""max_features"": 0.6676595772974903, ""n_estimators"": 1169, ""subsample"": 0.7322018725625201}",../../models/width_optimization/3W/ml_models/per_output_models_B34_H45/it0/best_model_exymax_tw1.joblib,425.58,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
exymax_tw2,SVR,0.008460412231077893,0.011444731445050746,1.3527392203196034,0.008460412231077893,,"{""svr__C"": 9808.35589449746, ""svr__epsilon"": 0.00016322664292135178, ""svr__gamma"": 0.00038258041488189597}",../../models/width_optimization/3W/ml_models/per_output_models_B34_H45/it0/best_model_exymax_tw2.joblib,382.41,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_frame,SVR,1.7183858166563564,1.8940457119290446,1.1022237809285973,1.7183858166563564,,"{""svr__C"": 2338.7077018101695, ""svr__epsilon"": 0.0015121721150378067, ""svr__gamma"": 0.08059242367780602}",../../models/width_optimization/3W/ml_models/per_output_models_B34_H45/it0/best_model_tfmmax_frame.joblib,334.62,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_tw2,SVR,6.570230475951083,8.231729016702195,1.252882839777489,6.570230475951083,,"{""svr__C"": 9598.629415551462, ""svr__epsilon"": 0.008177764525277858, ""svr__gamma"": 0.06450806413377351}",../../models/width_optimization/3W/ml_models/per_output_models_B34_H45/it0/best_model_tfmmax_tw2.joblib,497.18,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_tw3,SVR,6.867884256625402,7.773194090294852,1.131817864110931,6.867884256625402,,"{""svr__C"": 4770.813296275575, ""svr__epsilon"": 0.0156951512570451, ""svr__gamma"": 0.04171961411779374}",../../models/width_optimization/3W/ml_models/per_output_models_B34_H45/it0/best_model_tfmmax_tw3.joblib,505.18,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_tw1,GaussianProcess,12.729753975937573,16.8283131798545,1.3219668826015203,12.729753975937573,,"{""gpr__amplitude"": 42.64486576023699, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 1.5803508571081364, ""gpr__n_restarts_optimizer"": 10, ""gpr__noise"": 2.537051332183834e-05, ""gpr__rq_alpha"": 0.01469218297101831}",../../models/width_optimization/3W/ml_models/per_output_models_B34_H45/it0/best_model_tfmmax_tw1.joblib,872.27,"1.98**2 * Matern(length_scale=4.65, nu=1.5) + WhiteKernel(noise_level=2.91e-12)",lowest_cv_rmse_dispersion_within_5pct_rmse_band
output,model,cv_rmse,std_rmse,cv_rmse_dispersion,cv_mae,cv_r2,BEST_PARAMS,fit_time_sec,gpr_kernel
exymax_tw1,SVR,0.0065461931973238765,0.0016275998090011273,0.2486330237956466,0.004822458246245301,0.941070262320398,"{""svr__C"": 1535.8841058246376, ""svr__epsilon"": 0.00014352665858615663, ""svr__gamma"": 0.0018620005892167522}",19.16,
exymax_tw1,GaussianProcess,0.00749884982878555,0.002124109287726401,0.28325801105826437,0.005145875779916589,0.9183993762293116,"{""gpr__amplitude"": 5.673220380627782, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 9.737001278914109, ""gpr__n_restarts_optimizer"": 1, ""gpr__noise"": 3.459534964192888e-09, ""gpr__rq_alpha"": 0.10932986492923848}",29.33,"6.55**2 * Matern(length_scale=16.8, nu=1.5) + WhiteKernel(noise_level=3.46e-09)"
exymax_tw1,FlexibleMLP,0.0077553185406522415,0.002573995807558598,0.3319007200111884,0.005507160895955526,0.9221534453054219,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 7.102645437739196e-05, ""mlp__learning_rate_init"": 0.009549558624787907, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 378}",1021.83,
exymax_tw1,GradientBoosting,0.009053911707924104,0.003631373354987036,0.40108336287494495,0.006315024333392476,0.894435389515042,"{""learning_rate"": 0.002145417334433661, ""max_depth"": 2, ""max_features"": 0.9792949667948585, ""n_estimators"": 2286, ""subsample"": 0.87911101680119}",60.81,
exymax_tw1,XGBoost,0.009514362738044506,0.003991583787681193,0.4195324371773517,0.0065393182630018795,0.8677141025216223,"{""colsample_bytree"": 0.9198213766428692, ""learning_rate"": 0.012163299704553493, ""max_depth"": 3, ""min_child_weight"": 4, ""n_estimators"": 665, ""subsample"": 0.8479754916341041}",27.84,
exymax_tw1,RandomForest,0.011294553400999906,0.004569533786505622,0.4045785277442737,0.0078783263055379,0.8317550556014917,"{""max_depth"": 5, ""max_features"": 0.9665000854566961, ""min_samples_leaf"": 3, ""min_samples_split"": 3, ""n_estimators"": 321}",76.55,
exymax_tw2,GaussianProcess,0.005176366137444404,0.0013486668112550314,0.2605431639580415,0.003913884402047848,0.9629242548153287,"{""gpr__amplitude"": 0.4369339947510315, ""gpr__kernel_type"": ""Matern52"", ""gpr__length_scale"": 53.88550972627239, ""gpr__n_restarts_optimizer"": 10, ""gpr__noise"": 3.3825346762279944e-05, ""gpr__rq_alpha"": 0.014523514317963207}",31.51,"3.2**2 * Matern(length_scale=6.89, nu=2.5) + WhiteKernel(noise_level=2.91e-12)"
exymax_tw2,SVR,0.00549328576512069,0.0014846473861939078,0.2702658207990185,0.004158446872757603,0.9550249949908676,"{""svr__C"": 2723.8860738228227, ""svr__epsilon"": 0.0010350188816504358, ""svr__gamma"": 0.011634592871513411}",19.09,
exymax_tw2,GradientBoosting,0.0063022250631444045,0.0015571698771346557,0.2470825560072474,0.004638463929771039,0.9410252168535983,"{""learning_rate"": 0.005066255152555779, ""max_depth"": 2, ""max_features"": 0.9984065004465011, ""n_estimators"": 2251, ""subsample"": 0.700330206507103}",65.56,
exymax_tw2,FlexibleMLP,0.0066043251926731815,0.0016937887963903565,0.25646659529567695,0.005015983010561082,0.9325832185908456,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.0020105391304935157, ""mlp__learning_rate_init"": 0.00935569473287281, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 383}",1239.21,
exymax_tw2,XGBoost,0.006789516218779616,0.0018698261752040921,0.27539902917268305,0.005291296283855478,0.9325615571874495,"{""colsample_bytree"": 0.9198213766428692, ""learning_rate"": 0.012163299704553493, ""max_depth"": 3, ""min_child_weight"": 4, ""n_estimators"": 665, ""subsample"": 0.8479754916341041}",25.29,
exymax_tw2,RandomForest,0.008062958938387018,0.002286588341755693,0.28359171356677176,0.006136914117403004,0.9088458568945392,"{""max_depth"": 5, ""max_features"": 0.9665000854566961, ""min_samples_leaf"": 3, ""min_samples_split"": 3, ""n_estimators"": 321}",71.82,
exymax_tw3,GradientBoosting,0.010948528227998437,0.0034107014322390613,0.31152145395368736,0.0076872759336242625,0.8867045503204167,"{""learning_rate"": 0.08360332720304438, ""max_depth"": 2, ""max_features"": 0.8403383090951897, ""n_estimators"": 1466, ""subsample"": 0.6122911956826925}",60.29,
exymax_tw3,XGBoost,0.011296370910068609,0.0028544467636858533,0.25268706086320575,0.008438430960753036,0.8887122017660922,"{""colsample_bytree"": 0.922975236986968, ""learning_rate"": 0.09616759998521371, ""max_depth"": 3, ""min_child_weight"": 2, ""n_estimators"": 645, ""subsample"": 0.8928281167713635}",26.28,
exymax_tw3,FlexibleMLP,0.01151668139179081,0.003845638166528552,0.3339189507551851,0.008120067846947111,0.8855996777096025,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.0007404074025879882, ""mlp__learning_rate_init"": 0.006249997583817419, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 192}",1036.2,
exymax_tw3,SVR,0.012610704429209361,0.0038269782849864506,0.3034706194621664,0.009007333618508175,0.8647818655975721,"{""svr__C"": 5.433787499314771, ""svr__epsilon"": 0.003670363910649628, ""svr__gamma"": 0.013128648920919089}",16.92,
exymax_tw3,GaussianProcess,0.012744626202480885,0.00371691556337156,0.2916457104601483,0.009110429131315295,0.8598605675679357,"{""gpr__amplitude"": 66.75461353250009, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 11.666937146554956, ""gpr__n_restarts_optimizer"": 8, ""gpr__noise"": 8.412432184601446e-06, ""gpr__rq_alpha"": 26.624403652640854}",30.48,"4.32**2 * Matern(length_scale=10.5, nu=1.5) + WhiteKernel(noise_level=8.41e-06)"
exymax_tw3,RandomForest,0.013194278019754065,0.0039006578806040255,0.295632536677193,0.009291727464552445,0.8476890171029957,"{""max_depth"": 4, ""max_features"": 0.9546529537518855, ""min_samples_leaf"": 1, ""min_samples_split"": 2, ""n_estimators"": 1170}",93.18,
exymax_tw4,GaussianProcess,0.006603966975088371,0.001775601440057033,0.2688689157221706,0.004692998066122281,0.9507369331408887,"{""gpr__amplitude"": 50.805556206537666, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 21.652164558122173, ""gpr__n_restarts_optimizer"": 1, ""gpr__noise"": 2.9548225428033293e-06, ""gpr__rq_alpha"": 2.558711165466693}",25.83,"4.2**2 * Matern(length_scale=13.2, nu=1.5) + WhiteKernel(noise_level=2.95e-06)"
exymax_tw4,SVR,0.006944793389536292,0.0020802129746267006,0.2995356172526246,0.004944986439052567,0.9459928860276304,"{""svr__C"": 4.947369496373032, ""svr__epsilon"": 0.0009752049888774252, ""svr__gamma"": 0.011256410405167115}",18.14,
exymax_tw4,FlexibleMLP,0.0073994984229403945,0.0023977562171306394,0.3240430742841925,0.0052617354527009705,0.9334408521924865,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.0030542759652763133, ""mlp__learning_rate_init"": 0.009766498482813756, ""mlp__n_layers"": 2, ""mlp__n_neurons"": 429}",1809.57,
exymax_tw4,XGBoost,0.010703389935528081,0.0026171367839142473,0.24451475650972102,0.008220086835396278,0.8711063796787495,"{""colsample_bytree"": 0.9198213766428692, ""learning_rate"": 0.012163299704553493, ""max_depth"": 3, ""min_child_weight"": 4, ""n_estimators"": 665, ""subsample"": 0.8479754916341041}",24.56,
exymax_tw4,GradientBoosting,0.010906042544367736,0.003097049554106855,0.28397556139245766,0.008104854920936865,0.868598240677122,"{""learning_rate"": 0.007756486208064788, ""max_depth"": 4, ""max_features"": 0.7604472678335725, ""n_estimators"": 1840, ""subsample"": 0.6539583062766647}",50.31,
exymax_tw4,RandomForest,0.013520117479280901,0.0031381653373753733,0.2321108039323253,0.010193579890787105,0.800611849850438,"{""max_depth"": 5, ""max_features"": 0.9704984365963154, ""min_samples_leaf"": 1, ""min_samples_split"": 3, ""n_estimators"": 311}",72.8,
exymax_tw5,GaussianProcess,0.0037397014620559475,0.0018524494485466028,0.4953468792474669,0.002564404130278725,0.9436289596508454,"{""gpr__amplitude"": 66.2941072502412, ""gpr__kernel_type"": ""Matern52"", ""gpr__length_scale"": 9.110383232654469, ""gpr__n_restarts_optimizer"": 2, ""gpr__noise"": 1.1864143515274533e-09, ""gpr__rq_alpha"": 0.09863897962847874}",29.63,"2.7**2 * Matern(length_scale=5.93, nu=2.5) + WhiteKernel(noise_level=1.19e-09)"
exymax_tw5,SVR,0.004008059254430852,0.00135753355198164,0.3387009686747785,0.002962861360615533,0.9338683318929641,"{""svr__C"": 1379.5670789472415, ""svr__epsilon"": 0.0010892195261973932, ""svr__gamma"": 0.005000109344508642}",18.57,
exymax_tw5,FlexibleMLP,0.0052527684413940165,0.002231428198428414,0.42480993086309016,0.003914652263130728,0.8923105272276857,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.0015246748254295628, ""mlp__learning_rate_init"": 0.007340675018434775, ""mlp__n_layers"": 2, ""mlp__n_neurons"": 421}",1022.04,
exymax_tw5,XGBoost,0.005735151965795607,0.002404284355408967,0.41921894480705946,0.004136565703399065,0.8616132458237553,"{""colsample_bytree"": 0.9934273434104941, ""learning_rate"": 0.012434270187284806, ""max_depth"": 2, ""min_child_weight"": 3, ""n_estimators"": 1435, ""subsample"": 0.6063688430058806}",28.2,
exymax_tw5,GradientBoosting,0.006198754253837943,0.0024142948453431392,0.3894806515112831,0.004553801978282849,0.8493913387310847,"{""learning_rate"": 0.029380287142759306, ""max_depth"": 4, ""max_features"": 0.9665000854566961, ""n_estimators"": 470, ""subsample"": 0.6753701167061721}",49.04,
exymax_tw5,RandomForest,0.007191370010921679,0.0025718611568131956,0.35763159911216613,0.00550905206878697,0.7931602311915849,"{""max_depth"": 9, ""max_features"": 0.8048972959164513, ""min_samples_leaf"": 1, ""min_samples_split"": 2, ""n_estimators"": 1181}",98.37,
tfmmax_frame,SVR,7.362379567538453,2.0424917941910397,0.2774227782545486,5.5591589929609775,0.6082838664236161,"{""svr__C"": 193.7932286428093, ""svr__epsilon"": 0.00012005001235516034, ""svr__gamma"": 0.04170927843815279}",17.7,
tfmmax_frame,GradientBoosting,7.9028829840688,2.0138482257395443,0.25482450262761125,5.7831225978282035,0.5572197324054536,"{""learning_rate"": 0.016798918500195253, ""max_depth"": 4, ""max_features"": 0.8772977752245934, ""n_estimators"": 2247, ""subsample"": 0.6108529594746946}",74.83,
tfmmax_frame,GaussianProcess,8.021676666879692,2.3996981717237436,0.29915169501056305,5.962860907674642,0.5436433276748588,"{""gpr__amplitude"": 15.783879853890564, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 1.2778531518898433, ""gpr__n_restarts_optimizer"": 4, ""gpr__noise"": 3.279409439150647e-05, ""gpr__rq_alpha"": 3.0182479639711155}",31.44,"1.59**2 * Matern(length_scale=2.99, nu=1.5) + WhiteKernel(noise_level=3.28e-05)"
tfmmax_frame,XGBoost,8.092871073261252,1.7516271378620083,0.21644075656281775,6.034590880499425,0.5304369319667841,"{""colsample_bytree"": 0.9966144852196204, ""learning_rate"": 0.026746408337655674, ""max_depth"": 2, ""min_child_weight"": 1, ""n_estimators"": 1161, ""subsample"": 0.7372589228703577}",26.92,
tfmmax_frame,RandomForest,8.810489382272454,2.387344604202706,0.27096617459255684,6.603476260646821,0.47768613174931285,"{""max_depth"": 8, ""max_features"": 0.6854299480021521, ""min_samples_leaf"": 1, ""min_samples_split"": 2, ""n_estimators"": 447}",68.38,
tfmmax_frame,FlexibleMLP,9.076271891818163,1.6919611535406944,0.18641587357755543,7.309952877726658,0.32165102205195667,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 2.241228922472407e-05, ""mlp__learning_rate_init"": 0.005294194789748615, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 476}",2639.64,
tfmmax_tw1,SVR,9.729020339610338,3.3410734652444916,0.3434131442445218,6.791137893865481,0.9561686375984351,"{""svr__C"": 3513.591055460674, ""svr__epsilon"": 0.08923278628428069, ""svr__gamma"": 0.013747294826820433}",17.11,
tfmmax_tw1,FlexibleMLP,10.567567105873762,3.7757055132054744,0.3572918416677787,7.1753393860424275,0.946573782890216,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.005703843027403089, ""mlp__learning_rate_init"": 0.0001620758534278906, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 433}",546.13,
tfmmax_tw1,GradientBoosting,11.207290798080374,4.516131013193322,0.40296366843330783,7.8746560763995,0.9388041512624029,"{""learning_rate"": 0.013323956500939154, ""max_depth"": 2, ""max_features"": 0.9956546741382792, ""n_estimators"": 920, ""subsample"": 0.6347747313437349}",62.42,
tfmmax_tw1,GaussianProcess,11.868198703169591,3.8240634270807425,0.32221093720477273,7.906010741512257,0.9260054209704935,"{""gpr__amplitude"": 15.783879853890564, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 1.2778531518898433, ""gpr__n_restarts_optimizer"": 4, ""gpr__noise"": 3.279409439150647e-05, ""gpr__rq_alpha"": 3.0182479639711155}",35.23,"6.29**2 * Matern(length_scale=16, nu=1.5) + WhiteKernel(noise_level=0.00139)"
tfmmax_tw1,XGBoost,12.536663388899767,4.7709836503623135,0.38056247522659387,9.255978965788058,0.9177799306991768,"{""colsample_bytree"": 0.9433420481789779, ""learning_rate"": 0.18454614358110935, ""max_depth"": 2, ""min_child_weight"": 3, ""n_estimators"": 353, ""subsample"": 0.8431861732256097}",25.22,
tfmmax_tw1,RandomForest,14.03434718137902,5.30733298651704,0.3781674286609418,9.702457263827213,0.9063437740722035,"{""max_depth"": 8, ""max_features"": 0.991481967933623, ""min_samples_leaf"": 2, ""min_samples_split"": 3, ""n_estimators"": 1029}",98.92,
tfmmax_tw2,GaussianProcess,6.576362420451899,1.5033765740006209,0.22860306015453988,5.3484335100709925,0.9804048469267181,"{""gpr__amplitude"": 0.4369339947510315, ""gpr__kernel_type"": ""Matern52"", ""gpr__length_scale"": 53.88550972627239, ""gpr__n_restarts_optimizer"": 10, ""gpr__noise"": 3.3825346762279944e-05, ""gpr__rq_alpha"": 0.014523514317963207}",36.2,"4.15**2 * Matern(length_scale=8.2, nu=2.5) + WhiteKernel(noise_level=2.91e-12)"
tfmmax_tw2,SVR,7.657427940207296,2.100703718164527,0.2743354210536206,5.994081001208826,0.9735165176039114,"{""svr__C"": 1340.5232650579583, ""svr__epsilon"": 0.05796108747979427, ""svr__gamma"": 0.03668314998666572}",16.99,
tfmmax_tw2,FlexibleMLP,7.980507993935166,1.5962158661667167,0.20001431830903124,6.298943789905389,0.9684612136280244,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.007238640020408304, ""mlp__learning_rate_init"": 0.0016036116681595535, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 494}",1917.49,
tfmmax_tw2,GradientBoosting,9.899875277894829,2.9764513737774863,0.30065544163203006,7.440744907780339,0.9503831468717436,"{""learning_rate"": 0.016985683043962967, ""max_depth"": 4, ""max_features"": 0.9936774842576587, ""n_estimators"": 2060, ""subsample"": 0.6437674543355264}",50.69,
tfmmax_tw2,XGBoost,10.099192649582342,3.2860485230368246,0.3253773481757199,8.079716894763653,0.9531558811928433,"{""colsample_bytree"": 0.8986667435645168, ""learning_rate"": 0.11485403227868564, ""max_depth"": 3, ""min_child_weight"": 5, ""n_estimators"": 1571, ""subsample"": 0.6831596161312871}",26.53,
tfmmax_tw2,RandomForest,11.56584321669535,3.193576413881085,0.2761213647848124,8.772567095328059,0.933910373168995,"{""max_depth"": 10, ""max_features"": 0.9935052496112765, ""min_samples_leaf"": 1, ""min_samples_split"": 2, ""n_estimators"": 826}",85.91,
tfmmax_tw3,SVR,8.374586577761166,2.0976818461201883,0.2504818389113813,6.383639717046682,0.9825331553971159,"{""svr__C"": 9970.084750880415, ""svr__epsilon"": 0.019654089504284208, ""svr__gamma"": 0.01125560227044352}",16.98,
tfmmax_tw3,GaussianProcess,8.477766506568951,2.0065315944971487,0.23668162987768046,6.501673146641316,0.9810053418137713,"{""gpr__amplitude"": 1.102823280779196, ""gpr__kernel_type"": ""RBF"", ""gpr__length_scale"": 0.4345689693488521, ""gpr__n_restarts_optimizer"": 6, ""gpr__noise"": 7.227269699156097e-05, ""gpr__rq_alpha"": 0.05336277986968177}",31.09,4.53**2 * RBF(length_scale=4.85) + WhiteKernel(noise_level=0.000709)
tfmmax_tw3,FlexibleMLP,13.014447486213895,3.6063784770366003,0.2771057688662396,10.17427795401947,0.9554205860351951,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.005703843027403089, ""mlp__learning_rate_init"": 0.0001620758534278906, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 433}",1809.77,
tfmmax_tw3,XGBoost,13.039189957431233,4.523022791116308,0.3468791240776861,9.995221141379332,0.956517590954632,"{""colsample_bytree"": 0.9325025971934403, ""learning_rate"": 0.03425800709860654, ""max_depth"": 2, ""min_child_weight"": 2, ""n_estimators"": 1262, ""subsample"": 0.913661131163698}",27.91,
tfmmax_tw3,GradientBoosting,13.277164535005829,4.691886341213357,0.3533801459523223,9.559163877812399,0.9556581865303098,"{""learning_rate"": 0.02570355997614, ""max_depth"": 3, ""max_features"": 0.9738949229184465, ""n_estimators"": 2276, ""subsample"": 0.6065425733285567}",66.72,
tfmmax_tw3,RandomForest,17.517518266622535,6.803347411952449,0.3883739299370611,12.796251760563694,0.922046477931469,"{""max_depth"": 10, ""max_features"": 0.9971054319205748, ""min_samples_leaf"": 1, ""min_samples_split"": 2, ""n_estimators"": 1140}",87.75,
tfmmax_tw4,GaussianProcess,5.133123529769486,1.3469976699248127,0.26241286852204443,3.8771940588901117,0.978992912143702,"{""gpr__amplitude"": 0.4369339947510315, ""gpr__kernel_type"": ""Matern52"", ""gpr__length_scale"": 53.88550972627239, ""gpr__n_restarts_optimizer"": 10, ""gpr__noise"": 3.3825346762279944e-05, ""gpr__rq_alpha"": 0.014523514317963207}",30.93,"3.2**2 * Matern(length_scale=7.86, nu=2.5) + WhiteKernel(noise_level=2.91e-12)"
tfmmax_tw4,SVR,5.396268226578247,1.818368886473659,0.33696784706098265,3.8988745279127617,0.9768361851687005,"{""svr__C"": 8750.778649832111, ""svr__epsilon"": 0.018154902235300346, ""svr__gamma"": 0.008119739295931886}",18.47,
tfmmax_tw4,XGBoost,9.02429561599758,2.144891751451963,0.23767968634024614,6.99760355872208,0.934237921517701,"{""colsample_bytree"": 0.8100911172745991, ""learning_rate"": 0.23274257366941636, ""max_depth"": 2, ""min_child_weight"": 5, ""n_estimators"": 568, ""subsample"": 0.8778631710421243}",23.31,
tfmmax_tw4,FlexibleMLP,9.215545751583612,2.389229595942364,0.25926078176453043,7.146514997658536,0.9333790198168344,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 1.4579672374408887e-05, ""mlp__learning_rate_init"": 0.008684626796549477, ""mlp__n_layers"": 5, ""mlp__n_neurons"": 242}",886.92,
tfmmax_tw4,GradientBoosting,9.290922908114966,1.7138744181518322,0.18446761802908554,7.067732091869783,0.9337325703679988,"{""learning_rate"": 0.026352038001792536, ""max_depth"": 1, ""max_features"": 0.9215130182716909, ""n_estimators"": 1659, ""subsample"": 0.6047796193478406}",66.38,
tfmmax_tw4,RandomForest,12.067342775008084,2.191102486552252,0.18157290527042183,9.092633100542361,0.891210164350204,"{""max_depth"": 10, ""max_features"": 0.9004474709212577, ""min_samples_leaf"": 1, ""min_samples_split"": 2, ""n_estimators"": 999}",91.44,
tfmmax_tw5,GaussianProcess,3.7384297568091562,1.3274899260227968,0.3550929166463309,2.7368501751524046,0.9723615024471302,"{""gpr__amplitude"": 0.4369339947510315, ""gpr__kernel_type"": ""Matern52"", ""gpr__length_scale"": 53.88550972627239, ""gpr__n_restarts_optimizer"": 10, ""gpr__noise"": 3.3825346762279944e-05, ""gpr__rq_alpha"": 0.014523514317963207}",32.35,"3.38**2 * Matern(length_scale=7.88, nu=2.5) + WhiteKernel(noise_level=2.91e-12)"
tfmmax_tw5,SVR,3.928763766305908,1.2485542394446676,0.3177982474162969,3.0071502757796598,0.9707197647455166,"{""svr__C"": 8704.145709738918, ""svr__epsilon"": 0.01715568612874355, ""svr__gamma"": 0.008745502889591729}",19.18,
tfmmax_tw5,FlexibleMLP,6.824447039581629,1.8606618102153465,0.27264653083591284,5.4177063336344125,0.9000039332587478,"{""mlp__activation"": ""relu"", ""mlp__alpha"": 0.0035276931766295796, ""mlp__learning_rate_init"": 0.0035728368942260346, ""mlp__n_layers"": 4, ""mlp__n_neurons"": 446}",1550.06,
tfmmax_tw5,GradientBoosting,8.288650919529477,2.5383297612272155,0.3062416050417176,6.480972296773851,0.8595808521301442,"{""learning_rate"": 0.016680603387731113, ""max_depth"": 2, ""max_features"": 0.6196325519809496, ""n_estimators"": 2496, ""subsample"": 0.6282288764493943}",78.57,
tfmmax_tw5,XGBoost,8.817898824785466,2.224511432578745,0.2522722790066562,6.942297413636819,0.841110236661875,"{""colsample_bytree"": 0.7606729661729963, ""learning_rate"": 0.013344479664965542, ""max_depth"": 3, ""min_child_weight"": 7, ""n_estimators"": 1282, ""subsample"": 0.609690494793553}",27.91,
tfmmax_tw5,RandomForest,10.882512731029662,3.019489833328643,0.2774625592414033,8.687589892726612,0.7586123331793203,"{""max_depth"": 10, ""max_features"": 0.9048675595752709, ""min_samples_leaf"": 1, ""min_samples_split"": 2, ""n_estimators"": 1183}",102.6,
output,best_model,cv_rmse,std_rmse,cv_rmse_dispersion,cv_mae,cv_r2,BEST_PARAMS,model_path,train_time_sec,gpr_kernel,selected_by
exymax_tw5,GaussianProcess,0.0037397014620559475,0.0018524494485466028,0.4953468792474669,0.002564404130278725,0.9436289596508454,"{""gpr__amplitude"": 66.2941072502412, ""gpr__kernel_type"": ""Matern52"", ""gpr__length_scale"": 9.110383232654469, ""gpr__n_restarts_optimizer"": 2, ""gpr__noise"": 1.1864143515274533e-09, ""gpr__rq_alpha"": 0.09863897962847874}",../../models/width_optimization/5W/ml_models/per_output_models_B34_H60/it0/best_model_exymax_tw5.joblib,1245.86,"2.7**2 * Matern(length_scale=5.93, nu=2.5) + WhiteKernel(noise_level=1.19e-09)",lowest_cv_rmse_dispersion_within_5pct_rmse_band
exymax_tw2,GaussianProcess,0.005176366137444404,0.0013486668112550314,0.2605431639580415,0.003913884402047848,0.9629242548153287,"{""gpr__amplitude"": 0.4369339947510315, ""gpr__kernel_type"": ""Matern52"", ""gpr__length_scale"": 53.88550972627239, ""gpr__n_restarts_optimizer"": 10, ""gpr__noise"": 3.3825346762279944e-05, ""gpr__rq_alpha"": 0.014523514317963207}",../../models/width_optimization/5W/ml_models/per_output_models_B34_H60/it0/best_model_exymax_tw2.joblib,1452.49,"3.2**2 * Matern(length_scale=6.89, nu=2.5) + WhiteKernel(noise_level=2.91e-12)",lowest_cv_rmse_dispersion_within_5pct_rmse_band
exymax_tw1,SVR,0.0065461931973238765,0.0016275998090011273,0.2486330237956466,0.004822458246245301,0.941070262320398,"{""svr__C"": 1535.8841058246376, ""svr__epsilon"": 0.00014352665858615663, ""svr__gamma"": 0.0018620005892167522}",../../models/width_optimization/5W/ml_models/per_output_models_B34_H60/it0/best_model_exymax_tw1.joblib,1235.52,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
exymax_tw4,GaussianProcess,0.006603966975088371,0.001775601440057033,0.2688689157221706,0.004692998066122281,0.9507369331408887,"{""gpr__amplitude"": 50.805556206537666, ""gpr__kernel_type"": ""Matern32"", ""gpr__length_scale"": 21.652164558122173, ""gpr__n_restarts_optimizer"": 1, ""gpr__noise"": 2.9548225428033293e-06, ""gpr__rq_alpha"": 2.558711165466693}",../../models/width_optimization/5W/ml_models/per_output_models_B34_H60/it0/best_model_exymax_tw4.joblib,2001.22,"4.2**2 * Matern(length_scale=13.2, nu=1.5) + WhiteKernel(noise_level=2.95e-06)",lowest_cv_rmse_dispersion_within_5pct_rmse_band
exymax_tw3,XGBoost,0.011296370910068609,0.0028544467636858533,0.25268706086320575,0.008438430960753036,0.8887122017660922,"{""colsample_bytree"": 0.922975236986968, ""learning_rate"": 0.09616759998521371, ""max_depth"": 3, ""min_child_weight"": 2, ""n_estimators"": 645, ""subsample"": 0.8928281167713635}",../../models/width_optimization/5W/ml_models/per_output_models_B34_H60/it0/best_model_exymax_tw3.joblib,1263.38,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_tw5,GaussianProcess,3.7384297568091562,1.3274899260227968,0.3550929166463309,2.7368501751524046,0.9723615024471302,"{""gpr__amplitude"": 0.4369339947510315, ""gpr__kernel_type"": ""Matern52"", ""gpr__length_scale"": 53.88550972627239, ""gpr__n_restarts_optimizer"": 10, ""gpr__noise"": 3.3825346762279944e-05, ""gpr__rq_alpha"": 0.014523514317963207}",../../models/width_optimization/5W/ml_models/per_output_models_B34_H60/it0/best_model_tfmmax_tw5.joblib,1810.68,"3.38**2 * Matern(length_scale=7.88, nu=2.5) + WhiteKernel(noise_level=2.91e-12)",lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_tw4,GaussianProcess,5.133123529769486,1.3469976699248127,0.26241286852204443,3.8771940588901117,0.978992912143702,"{""gpr__amplitude"": 0.4369339947510315, ""gpr__kernel_type"": ""Matern52"", ""gpr__length_scale"": 53.88550972627239, ""gpr__n_restarts_optimizer"": 10, ""gpr__noise"": 3.3825346762279944e-05, ""gpr__rq_alpha"": 0.014523514317963207}",../../models/width_optimization/5W/ml_models/per_output_models_B34_H60/it0/best_model_tfmmax_tw4.joblib,1117.45,"3.2**2 * Matern(length_scale=7.86, nu=2.5) + WhiteKernel(noise_level=2.91e-12)",lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_tw2,GaussianProcess,6.576362420451899,1.5033765740006209,0.22860306015453988,5.3484335100709925,0.9804048469267181,"{""gpr__amplitude"": 0.4369339947510315, ""gpr__kernel_type"": ""Matern52"", ""gpr__length_scale"": 53.88550972627239, ""gpr__n_restarts_optimizer"": 10, ""gpr__noise"": 3.3825346762279944e-05, ""gpr__rq_alpha"": 0.014523514317963207}",../../models/width_optimization/5W/ml_models/per_output_models_B34_H60/it0/best_model_tfmmax_tw2.joblib,2133.81,"4.15**2 * Matern(length_scale=8.2, nu=2.5) + WhiteKernel(noise_level=2.91e-12)",lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_frame,SVR,7.362379567538453,2.0424917941910397,0.2774227782545486,5.5591589929609775,0.6082838664236161,"{""svr__C"": 193.7932286428093, ""svr__epsilon"": 0.00012005001235516034, ""svr__gamma"": 0.04170927843815279}",../../models/width_optimization/5W/ml_models/per_output_models_B34_H60/it0/best_model_tfmmax_frame.joblib,2858.91,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_tw3,GaussianProcess,8.477766506568951,2.0065315944971487,0.23668162987768046,6.501673146641316,0.9810053418137713,"{""gpr__amplitude"": 1.102823280779196, ""gpr__kernel_type"": ""RBF"", ""gpr__length_scale"": 0.4345689693488521, ""gpr__n_restarts_optimizer"": 6, ""gpr__noise"": 7.227269699156097e-05, ""gpr__rq_alpha"": 0.05336277986968177}",../../models/width_optimization/5W/ml_models/per_output_models_B34_H60/it0/best_model_tfmmax_tw3.joblib,2040.23,4.53**2 * RBF(length_scale=4.85) + WhiteKernel(noise_level=0.000709),lowest_cv_rmse_dispersion_within_5pct_rmse_band
tfmmax_tw1,SVR,9.729020339610338,3.3410734652444916,0.3434131442445218,6.791137893865481,0.9561686375984351,"{""svr__C"": 3513.591055460674, ""svr__epsilon"": 0.08923278628428069, ""svr__gamma"": 0.013747294826820433}",../../models/width_optimization/5W/ml_models/per_output_models_B34_H60/it0/best_model_tfmmax_tw1.joblib,785.03,,lowest_cv_rmse_dispersion_within_5pct_rmse_band
Parameter,Value
Configuration_W,3.0
Configuration_B,29.0
Configuration_H,45.0
Configuration_TFD_W,90.0
Iteration,0.0
tw1_optimal,5.525014015514342
tw2_optimal,8.471896290298302
tw3_optimal,9.333863643334162
Objective_score,-1.1926307429844958e-06
Exy_tw1,0.062156306095972946
Exy_tw2,0.04245576123008932
Exy_tw3,0.04710451673468341
TFM_tw1,90.00000000228151
TFM_tw2,90.00000000327702
TFM_tw3,90.00000000384767
TFM_frame,70.8310257119319
Parameter,Value
Configuration_W,3.0
Configuration_B,34.0
Configuration_H,45.0
Configuration_TFD_W,90.0
Iteration,0.0
tw1_optimal,7.333816578195384
tw2_optimal,9.279818580001832
tw3_optimal,10.121143978945401
Objective_score,-1.0932680460256248e-06
Exy_tw1,0.030541286356165126
Exy_tw2,0.038966386800767694
Exy_tw3,0.04736176051237062
TFM_tw1,90.00000000888014
TFM_tw2,90.0000000363739
TFM_tw3,90.00000004969976
TFM_frame,71.65152648580627
Parameter,Value
Configuration_W,5.0
Configuration_B,34.0
Configuration_H,60.0
Configuration_TFD_W,90.0
Iteration,0.0
tw1_optimal,5.694179976812822
tw2_optimal,7.254866605325219
tw3_optimal,8.140135383492023
tw4_optimal,6.653343430447909
tw5_optimal,5.0
Objective_score,17.106344387688598
Exy_tw1,0.06127584151539267
Exy_tw2,0.05514946520902724
Exy_tw3,0.06789993494749069
Exy_tw4,0.06857663218038944
Exy_tw5,0.053409088461247346
TFM_tw1,90.00000005473717
TFM_tw2,90.00000072572061
TFM_tw3,90.0000006322008
TFM_tw4,90.00031542767577
TFM_tw5,72.89365290337983
TFM_frame,73.03210418991345
......@@ -29,7 +29,7 @@ THICKNESS_FILE = os.path.join(BASE_DIR, f"widthsH{H}_B{B}.txt")
# === Read window thickness data ===
thickness_data = []
with open(THICKNESS_FILE, "r", encoding="utf-8") as f:
with open(THICKNESS_FILE, encoding="utf-8") as f:
for line in f:
parts = line.strip().split()
if parts:
......@@ -130,7 +130,7 @@ for folder_name in case_folders_sorted:
df_curve["Case"] = i
# thickness correcto por case
trow = thickness_by_case.get(i, None)
trow = thickness_by_case.get(i)
if trow is None:
print(f"⚠️ No thickness data for case {i}, skipping.")
continue
......@@ -290,10 +290,7 @@ for (case, cycle), g in df.groupby(["Case", "CycleNum"]):
peak_displ = df.loc[idx_peak, "Displ"]
peak_force = df.loc[idx_peak, "Force"]
if peak_displ != 0:
K_sec = peak_force / peak_displ
else:
K_sec = np.nan
K_sec = peak_force / peak_displ if peak_displ != 0 else np.nan
row["SecantStiffness"] = K_sec
cycle_rows.append(row)
......
import os
import pandas as pd
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_theme(style="whitegrid", context="talk")
......@@ -29,6 +30,7 @@ os.makedirs(PLOT_DIR, exist_ok=True)
output_file_cycles = os.path.join(BASE_DIR, "merged_dataset_cycles.csv")
df_cycles = pd.read_csv(output_file_cycles)
def plot_envelope_per_case(df_cycles, case_number, outdir=PLOT_DIR):
g = df_cycles[df_cycles["Case"] == case_number]
......@@ -51,6 +53,7 @@ def plot_envelope_per_case(df_cycles, case_number, outdir=PLOT_DIR):
print(f"Saved: {fname}")
def plot_energy_degradation(df_cycles, case_number, outdir=PLOT_DIR):
g = df_cycles[df_cycles["Case"] == case_number]
......@@ -78,6 +81,7 @@ def plot_energy_degradation(df_cycles, case_number, outdir=PLOT_DIR):
print(f"Saved: {fname}")
def plot_stiffness_degradation(df_cycles, case_number, outdir=PLOT_DIR):
g = df_cycles[df_cycles["Case"] == case_number]
......@@ -118,7 +122,7 @@ def plot_all_envelopes(df_cycles, outdir=PLOT_DIR):
g["PosEnvForce"],
marker="o",
linestyle="-",
label=f"Case {case} (pos)"
label=f"Case {case} (pos)",
)
# Plot negative envelope
......@@ -127,7 +131,7 @@ def plot_all_envelopes(df_cycles, outdir=PLOT_DIR):
g["NegEnvForce"],
marker="o",
linestyle="--",
label=f"Case {case} (neg)"
label=f"Case {case} (neg)",
)
plt.axhline(0, color="black", linewidth=0.8)
......@@ -139,7 +143,7 @@ def plot_all_envelopes(df_cycles, outdir=PLOT_DIR):
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
plt.tight_layout()
fname = os.path.join(outdir, f"envelopes_all_cases.png")
fname = os.path.join(outdir, "envelopes_all_cases.png")
plt.savefig(fname)
plt.close()
......
......@@ -27,9 +27,10 @@ Stability fix:
from __future__ import annotations
import argparse
import contextlib
import gc
import os
from typing import Dict, List, Optional, Tuple, cast
from typing import cast
import joblib
import numpy as np
......@@ -86,8 +87,8 @@ def _make_grad_scaler(use_amp: bool):
def make_windows_for_case_fast(
df_case: pd.DataFrame, feature_cols: List[str], target_col: str, window_size: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
df_case: pd.DataFrame, feature_cols: list[str], target_col: str, window_size: int
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Vectorized sliding windows.
xw: (n_windows, window_size, n_features) float32
......@@ -124,18 +125,18 @@ def make_windows_for_case_fast(
# -------------------------
# Window cache per window_size
# -------------------------
CaseWindows = Dict[
int, Tuple[np.ndarray, np.ndarray, np.ndarray]
CaseWindows = dict[
int, tuple[np.ndarray, np.ndarray, np.ndarray]
] # case -> (x, y, idx)
WINDOW_CACHE: Dict[
Tuple[str, int], CaseWindows
WINDOW_CACHE: dict[
tuple[str, int], CaseWindows
] = {} # (target_col, window_size) -> cache
def precompute_windows_for_cases(
df: pd.DataFrame,
cases: List[int],
feature_cols: List[str],
cases: list[int],
feature_cols: list[str],
target_col: str,
window_size: int,
) -> CaseWindows:
......@@ -151,8 +152,8 @@ def precompute_windows_for_cases(
def get_cache_for_window_size(
df: pd.DataFrame,
cases_to_cache: List[int],
feature_cols: List[str],
cases_to_cache: list[int],
feature_cols: list[str],
target_col: str,
window_size: int,
) -> CaseWindows:
......@@ -169,8 +170,8 @@ def get_cache_for_window_size(
def build_from_cache(
cache_ws: CaseWindows, cases: List[int]
) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
cache_ws: CaseWindows, cases: list[int]
) -> tuple[np.ndarray | None, np.ndarray | None]:
xs, ys = [], []
for c in cases:
x, y, _ = cache_ws[int(c)]
......@@ -229,17 +230,15 @@ class LSTMRegressor(nn.Module):
def maybe_compile(model: nn.Module, device: str, enable: bool) -> nn.Module:
if not enable or device != "cuda":
return model
try:
with contextlib.suppress(Exception):
model = torch.compile(model) # type: ignore
except Exception:
pass
return model
# -------------------------
# CV folds by CASE
# -------------------------
def make_case_folds(train_pool: List[int], seed=123, n_splits=5):
def make_case_folds(train_pool: list[int], seed=123, n_splits=5):
tp = np.array(sorted(train_pool), dtype=int)
if len(tp) < 2:
......@@ -291,8 +290,8 @@ def make_loader(
def train_one_fold_cached( # type: ignore
cache_ws: CaseWindows,
feature_dim: int,
train_cases: List[int],
val_cases: List[int],
train_cases: list[int],
val_cases: list[int],
hidden_dim,
dense_dim,
num_layers,
......@@ -449,7 +448,7 @@ def cv_score_for_params_cached(
grad_clip,
device,
compile_model: bool,
trial: Optional[object] = None,
trial: object | None = None,
):
fold_rmses = []
......@@ -491,12 +490,12 @@ def cv_score_for_params_cached(
def strip_compile_prefix(state_dict: dict) -> dict:
# torch.compile suele guardar keys como "_orig_mod.xxx"
if not any(k.startswith("_orig_mod.") for k in state_dict.keys()):
if not any(k.startswith("_orig_mod.") for k in state_dict):
return state_dict
return {k.replace("_orig_mod.", "", 1): v for k, v in state_dict.items()}
def target_sort_key(col_name: str) -> Tuple[int, int]:
def target_sort_key(col_name: str) -> tuple[int, int]:
if col_name == "Force":
return (0, 0)
if col_name.startswith("TFDMapW"):
......@@ -520,7 +519,7 @@ def target_prediction_col(target_col: str) -> str:
def train_final_full_trainpool_cached( # type: ignore
cache_ws: CaseWindows,
feature_dim: int,
train_cases: List[int],
train_cases: list[int],
hidden_dim,
dense_dim,
num_layers,
......@@ -713,9 +712,7 @@ def main():
predict_case = 0
if w_val == 2:
n_train_case = 8
if b_val == 29:
predict_case = 12
elif b_val == 34:
if b_val == 29 or b_val == 34:
predict_case = 12
else:
print(f"Warning: No predict_case set for W={w_val}, B={b_val}")
......@@ -818,7 +815,7 @@ def main():
direction="minimize", sampler=sampler, pruner=pruner
)
def objective(trial) -> float:
def objective(trial, target_col=target_col) -> float:
params = suggest_params(trial, device=device)
ws = int(params["window_size"])
......@@ -1041,7 +1038,7 @@ def main():
pred_col = target_prediction_col(target_col)
df_test_pred[pred_col] = np.nan
for idx, pred in zip(test_indices, y_pred):
for idx, pred in zip(test_indices, y_pred, strict=False):
df_test_pred.loc[idx, pred_col] = pred
del final_model, test_loader, test_ds
......
......@@ -418,7 +418,7 @@ print(f"Test RMSE on Case {PREDICT_CASE}: {test_rmse:.4f}")
df_test_pred = df_test.copy()
df_test_pred["Force_TCN"] = np.nan
for idx, pred in zip(test_indices, y_pred):
for idx, pred in zip(test_indices, y_pred, strict=False):
df_test_pred.loc[idx, "Force_TCN"] = pred
OUT_FILE = f"../../data/hysteretic_curves/{W}W/H{H}_B{B}/case_{PREDICT_CASE}_with_tcn_preds.csv"
......
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 08:23:00 2023.
@author: jig
"""
from pyDOE import lhs
import pandas as pd
from pyDOE import lhs
class GenerateLHSCases(object):
class GenerateLHSCases:
"""
Class for generating a number of cases by varying a set of geometric features.
......@@ -22,14 +22,20 @@ class GenerateLHSCases(object):
"""Define the desired parameters for this case."""
# Info available in: https://github.com/tisimst/pyDOE/blob/master/pyDOE/doe_lhs.py
self.n_calculations = 64 # Number of desired calculations
self.criterion = 'maximin' # 'center', 'maximin', 'centermaximin', 'correlation'
self.iterations = 5 # Number of iterations for maximin and correlation (Default: 5)
self.ranges = [[4.0, 14.0],
self.criterion = (
"maximin" # 'center', 'maximin', 'centermaximin', 'correlation'
)
self.iterations = (
5 # Number of iterations for maximin and correlation (Default: 5)
)
self.ranges = [
[4.0, 14.0],
[4.0, 14.0],
[4.0, 14.0],
[4.0, 14.0],
# [4.0, 14.0],
[4.0, 14.0]]
[4.0, 14.0],
]
self.lhs_parameters = None
def run(self):
......@@ -39,14 +45,15 @@ class GenerateLHSCases(object):
def create_lhs_design(self):
"""Create the Latin Hypercube Sampling design."""
self.lhs_parameters = lhs(len(self.ranges),
samples=self.n_calculations,
criterion=self.criterion)
self.lhs_parameters = lhs(
len(self.ranges), samples=self.n_calculations, criterion=self.criterion
)
def generate_features_dataframe(self):
"""Generate the DataFrame from the Latin Hypercube Sampling design."""
features = [
[f'ml{ii + 1}'] + [
[f"ml{ii + 1}"]
+ [
round(r[0] + (r[1] - r[0]) * self.lhs_parameters[ii][i], 2)
for i, r in enumerate(self.ranges)
]
......@@ -54,14 +61,21 @@ class GenerateLHSCases(object):
]
# Define the column names for the DataFrame
column_names = ['Model', 'tw1', 'tw2', 'tw3', 'tw4', 'tw5',] # 'tf']
column_names = [
"Model",
"tw1",
"tw2",
"tw3",
"tw4",
"tw5",
] # 'tf']
# Create the DataFrame
features_df = pd.DataFrame(features, columns=column_names)
# Save as CSV and Excel
features_df.to_csv('ml_features.csv', index=False)
features_df.to_excel('ml_features.xlsx', index=False)
features_df.to_csv("ml_features.csv", index=False)
features_df.to_excel("ml_features.xlsx", index=False)
print("DataFrame saved as 'ml_features.csv' and 'ml_features.xlsx'.")
......
......@@ -10,7 +10,11 @@ of kernels during Bayesian optimization.
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (
RBF, Matern, RationalQuadratic, WhiteKernel, ConstantKernel
RBF,
ConstantKernel,
Matern,
RationalQuadratic,
WhiteKernel,
)
......@@ -69,30 +73,27 @@ class FlexibleGPR(BaseEstimator, RegressorMixin):
if self.kernel_type == "RBF":
base = RBF(length_scale=ls, length_scale_bounds=(1e-6, 1e3))
elif self.kernel_type == "Matern32":
base = Matern(
length_scale=ls, nu=1.5, length_scale_bounds=(1e-6, 1e3)
)
base = Matern(length_scale=ls, nu=1.5, length_scale_bounds=(1e-6, 1e3))
elif self.kernel_type == "Matern52":
base = Matern(
length_scale=ls, nu=2.5, length_scale_bounds=(1e-6, 1e3)
)
base = Matern(length_scale=ls, nu=2.5, length_scale_bounds=(1e-6, 1e3))
elif self.kernel_type == "RQ":
base = RationalQuadratic(
length_scale=ls,
alpha=float(self.rq_alpha),
length_scale_bounds=(1e-6, 1e3),
alpha_bounds=(1e-6, 1e3)
alpha_bounds=(1e-6, 1e3),
)
else:
raise ValueError(f"Unknown kernel_type: {self.kernel_type}")
k = ConstantKernel(
constant_value=float(self.amplitude),
constant_value_bounds=(1e-6, 1e3)
) * base
k = (
ConstantKernel(
constant_value=float(self.amplitude), constant_value_bounds=(1e-6, 1e3)
)
* base
)
k += WhiteKernel(
noise_level=float(self.noise),
noise_level_bounds=(1e-12, 1e-2)
noise_level=float(self.noise), noise_level_bounds=(1e-12, 1e-2)
)
return k
......
......@@ -6,8 +6,8 @@ This module provides a scikit-learn compatible wrapper for MLPRegressor
that allows dynamic configuration of hidden layer architecture.
"""
from sklearn.neural_network import MLPRegressor
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.neural_network import MLPRegressor
class FlexibleMLP(BaseEstimator, RegressorMixin):
......@@ -17,9 +17,16 @@ class FlexibleMLP(BaseEstimator, RegressorMixin):
optimization.
"""
def __init__(self, n_layers=2, n_neurons=128, activation='relu',
alpha=1e-4, learning_rate_init=1e-3,
random_state=42, max_iter=5000):
def __init__(
self,
n_layers=2,
n_neurons=128,
activation="relu",
alpha=1e-4,
learning_rate_init=1e-3,
random_state=42,
max_iter=5000,
):
"""
Parameters
----------
......@@ -76,7 +83,7 @@ class FlexibleMLP(BaseEstimator, RegressorMixin):
n_iter_no_change=100,
validation_fraction=0.2,
max_iter=self.max_iter,
random_state=self.random_state
random_state=self.random_state,
)
self.model_.fit(x_data, y)
......
......@@ -10,13 +10,15 @@ Author: Joaquín Irazábal González
Date: 5th September 2025
"""
import os
import argparse
import contextlib
import os
import joblib
import numpy as np
import pandas as pd
from scipy.optimize import differential_evolution
import plotly.graph_objects as go
from scipy.optimize import differential_evolution
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
......@@ -31,7 +33,7 @@ def save_optimization_results(
b_val,
h_val,
tfd_w_val,
it_val
it_val,
):
"""
Save optimization results to CSV file.
......@@ -69,40 +71,42 @@ def save_optimization_results(
frame_pred = model_frame.predict(inp_best)[0]
# Create results dictionary
results_dict = {
'Parameter': [],
'Value': []
}
results_dict = {"Parameter": [], "Value": []}
# Add configuration
results_dict['Parameter'].extend([
'Configuration_W', 'Configuration_B', 'Configuration_H',
'Configuration_TFD_W', 'Iteration'
])
results_dict['Value'].extend([w_val, b_val, h_val, tfd_w_val, it_val])
results_dict["Parameter"].extend(
[
"Configuration_W",
"Configuration_B",
"Configuration_H",
"Configuration_TFD_W",
"Iteration",
]
)
results_dict["Value"].extend([w_val, b_val, h_val, tfd_w_val, it_val])
# Add optimal widths
for window_num in range(w_val):
results_dict['Parameter'].append(f'tw{window_num+1}_optimal')
results_dict['Value'].append(result.x[window_num])
results_dict["Parameter"].append(f"tw{window_num + 1}_optimal")
results_dict["Value"].append(result.x[window_num])
# Add objective score
results_dict['Parameter'].append('Objective_score')
results_dict['Value'].append(result.fun)
results_dict["Parameter"].append("Objective_score")
results_dict["Value"].append(result.fun)
# Add Exy predictions
for window_num, exy_val in enumerate(exy_preds):
results_dict['Parameter'].append(f'Exy_tw{window_num+1}')
results_dict['Value'].append(exy_val)
results_dict["Parameter"].append(f"Exy_tw{window_num + 1}")
results_dict["Value"].append(exy_val)
# Add TFM predictions
for window_num, tfm_val in enumerate(tfm_preds):
results_dict['Parameter'].append(f'TFM_tw{window_num+1}')
results_dict['Value'].append(tfm_val)
results_dict["Parameter"].append(f"TFM_tw{window_num + 1}")
results_dict["Value"].append(tfm_val)
# Add frame prediction
results_dict['Parameter'].append('TFM_frame')
results_dict['Value'].append(frame_pred)
results_dict["Parameter"].append("TFM_frame")
results_dict["Value"].append(frame_pred)
# Create DataFrame
df_results = pd.DataFrame(results_dict)
......@@ -114,7 +118,7 @@ def save_optimization_results(
# Save to CSV
csv_path = os.path.join(
it_dir,
f'optimization_results_{w_val}W_B{b_val}_H{h_val}_TFD_W{int(tfd_w_val)}.csv'
f"optimization_results_{w_val}W_B{b_val}_H{h_val}_TFD_W{int(tfd_w_val)}.csv",
)
df_results.to_csv(csv_path, index=False)
......@@ -137,7 +141,7 @@ def make_input_df(x, w_val):
pd.DataFrame: DataFrame with the appropriate number of tw columns
"""
# Create column names based on number of windows
columns = [f"tw{i+1}" for i in range(w_val)]
columns = [f"tw{i + 1}" for i in range(w_val)]
# Verify input length matches number of windows
if len(x) != w_val:
......@@ -220,16 +224,13 @@ def objective(x, models_exy, models_tfm, model_frame, w_val, b_val, tfd_w_val):
raw_vals = []
# --- 1. Equivalent strain maximization (negative sign) ---
for model_i, tw_i, i in zip(models_exy, x, range(w_val)):
for model_i, tw_i, i in zip(models_exy, x, range(w_val), strict=False):
# Calculate predicted equivalent strain squared multiplied by volume
pred_i = model_i.predict(inp)[0]
v_factor = get_window_v_factor(w_val, b_val, i)
if v_factor == 1.0 and w_val not in [2, 3, 5]:
print(
"Warning: Not considering window area for W not equal to "
"2, 3, or 5"
)
print("Warning: Not considering window area for W not equal to 2, 3, or 5")
raw_vals.append(pred_i * pred_i * tw_i)
else:
raw_vals.append(pred_i * pred_i * tw_i * 1e-3 * v_factor)
......@@ -245,10 +246,7 @@ def objective(x, models_exy, models_tfm, model_frame, w_val, b_val, tfd_w_val):
# --- 3. Penalize frame failure exceeding the limit (TFD_MAX) ---
frame_pred = model_frame.predict(inp)[0]
penalty_frame = (
(frame_pred - tfd_max) ** 3 if frame_pred > tfd_max
else 0.0
)
penalty_frame = (frame_pred - tfd_max) ** 3 if frame_pred > tfd_max else 0.0
# --- Combine all objective terms ---
total_score = score_exy + penalty_windows + penalty_frame
......@@ -264,7 +262,7 @@ def plot_3d_tw_pairs_at_optimum(
h_val,
tfd_w_val,
it_val,
output_dir="figures_3D"
output_dir="figures_3D",
):
"""
Generate a 3D surface plot for every pair (tw_i, tw_j), fixing the
......@@ -296,7 +294,6 @@ def plot_3d_tw_pairs_at_optimum(
for i in range(w_val):
for j in range(i + 1, w_val):
# Define variable ranges
tw_i_vals = np.linspace(bounds[i][0], bounds[i][1], 50)
tw_j_vals = np.linspace(bounds[j][0], bounds[j][1], 50)
......@@ -326,7 +323,7 @@ def plot_3d_tw_pairs_at_optimum(
z=z_vals,
colorscale="Viridis",
showscale=True,
name="Objective surface"
name="Objective surface",
)
]
)
......@@ -339,35 +336,36 @@ def plot_3d_tw_pairs_at_optimum(
z=[opt_z],
mode="markers",
marker=dict(size=6, color="red"),
name="Optimum"
name="Optimum",
)
)
fig.update_layout(
title=(
f"RBF 3D Objective Surface – (tw{i+1}, tw{j+1}) "
f"RBF 3D Objective Surface – (tw{i + 1}, tw{j + 1}) "
f" at optimum for other windows width"
),
scene=dict(
xaxis_title=f"tw{i+1} (mm)",
yaxis_title=f"tw{j+1} (mm)",
zaxis_title="Objective score"
xaxis_title=f"tw{i + 1} (mm)",
yaxis_title=f"tw{j + 1} (mm)",
zaxis_title="Objective score",
),
width=950,
height=850
height=850,
)
# Save HTML
fname_base = f"surface_tw{i+1}_tw{j+1}_B{b_val}_H{h_val}_TFD_W{int(tfd_w_val)}"
fname_base = (
f"surface_tw{i + 1}_tw{j + 1}_B{b_val}_H{h_val}_TFD_W{int(tfd_w_val)}"
)
html_path = f"{it_dir}/{fname_base}.html"
fig.write_html(html_path)
# Save PNG (optional if Kaleido installed)
try:
fig.write_image(html_path.replace(".html", ".png"),
width=1200, height=1000)
except Exception: # pylint: disable=broad-except
pass
with contextlib.suppress(Exception):
fig.write_image(
html_path.replace(".html", ".png"), width=1200, height=1000
)
print(f"Saved 3D RBF surface: {html_path}")
......@@ -405,7 +403,11 @@ def main():
bounds_vals = [(5, 14), (5, 14), (5, 14)] # tw1, tw2, tw3 between 5 and 14 mm
elif w_val == 5:
bounds_vals = [
(5, 12), (5, 12), (5, 12), (5, 12), (5, 12)
(5, 12),
(5, 12),
(5, 12),
(5, 12),
(5, 12),
] # tw1 to tw5 between 5 and 12 mm
else:
# General case for any W
......@@ -415,17 +417,24 @@ def main():
# Path to trained models
input_models_dir = os.path.join(
BASE_DIR,
"..", "..", "models", "width_optimization",
f"{w_val}W", "ml_models",
f"per_output_models_B{b_val}_H{h_val}"
"..",
"..",
"models",
"width_optimization",
f"{w_val}W",
"ml_models",
f"per_output_models_B{b_val}_H{h_val}",
)
# Output directory for optimization cruves
output_graphs_dir = os.path.join(
BASE_DIR,
"..", "..", "reports", "width_optimization",
"..",
"..",
"reports",
"width_optimization",
f"{w_val}W",
"ml_optimization"
"ml_optimization",
)
os.makedirs(output_graphs_dir, exist_ok=True)
......@@ -440,10 +449,11 @@ def main():
# Check Exy and TFM models for each window
for window_idx in range(w_val):
exy_name = f"best_model_exymax_tw{window_idx+1}.joblib"
tfm_name = f"best_model_tfmmax_tw{window_idx+1}.joblib"
if not os.path.exists(os.path.join(input_models_dir_it, exy_name)) or \
not os.path.exists(os.path.join(input_models_dir_it, tfm_name)):
exy_name = f"best_model_exymax_tw{window_idx + 1}.joblib"
tfm_name = f"best_model_tfmmax_tw{window_idx + 1}.joblib"
if not os.path.exists(
os.path.join(input_models_dir_it, exy_name)
) or not os.path.exists(os.path.join(input_models_dir_it, tfm_name)):
all_models_exist = False
break
......@@ -462,12 +472,12 @@ def main():
for window_idx in range(w_val):
# Load Exy models
exy_model_name = f"best_model_exymax_tw{window_idx+1}.joblib"
exy_model_name = f"best_model_exymax_tw{window_idx + 1}.joblib"
exy_model_path = os.path.join(input_models_dir_it, exy_model_name)
models_exy.append(joblib.load(exy_model_path))
# Load TFM models
tfm_model_name = f"best_model_tfmmax_tw{window_idx+1}.joblib"
tfm_model_name = f"best_model_tfmmax_tw{window_idx + 1}.joblib"
tfm_model_path = os.path.join(input_models_dir_it, tfm_model_name)
models_tfm.append(joblib.load(tfm_model_path))
......@@ -490,7 +500,7 @@ def main():
maxiter=500, # Maximum number of iterations
popsize=25, # Population size
tol=1e-6, # Tolerance for convergence
seed=42 # Random seed for reproducibility
seed=42, # Random seed for reproducibility
)
# Save results to CSV
......@@ -504,7 +514,7 @@ def main():
b_val,
h_val,
tfd_w_val,
it
it,
)
# Extract optimum for plotting
......@@ -514,15 +524,13 @@ def main():
print(f"\nResults for {w_val} windows optimization (Iteration {it}):")
print("Best solution found:")
for window_num, tw in enumerate(optimum):
print(f"tw{window_num+1} = {tw:.2f}")
print(f"tw{window_num + 1} = {tw:.2f}")
print(f"Objective score = {result.fun:.4f}")
# Calculate predictions for optimal solution
inp_best = make_input_df(optimum, w_val)
print("\nPredictions:")
print(
"Exy:", [m.predict(inp_best)[0] for m in models_exy]
)
print("Exy:", [m.predict(inp_best)[0] for m in models_exy])
print("TFM tw:", [m.predict(inp_best)[0] for m in models_tfm])
print("TFM frame:", model_frame.predict(inp_best)[0])
......@@ -536,7 +544,7 @@ def main():
h_val,
tfd_w_val,
it,
output_dir=output_graphs_dir
output_dir=output_graphs_dir,
)
......
......@@ -33,51 +33,40 @@ Author: Joaquín Irazábal González
Date: 5th September 2025
"""
import argparse
import itertools
import json
import os
import time
import json
import itertools
import warnings
import argparse
import joblib
import numpy as np
import pandas as pd
from flexible_gpr import FlexibleGPR
from flexible_mlp import FlexibleMLP
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import make_scorer, mean_absolute_error, root_mean_squared_error
from sklearn.model_selection import KFold, LeaveOneOut, RepeatedKFold
from sklearn.metrics import (
make_scorer, mean_absolute_error, root_mean_squared_error
)
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from skopt import BayesSearchCV
from skopt.space import Real, Integer, Categorical
from skopt.space import Categorical, Integer, Real
from xgboost import XGBRegressor
from flexible_mlp import FlexibleMLP
from flexible_gpr import FlexibleGPR
# Silence convergence warnings
warnings.filterwarnings("ignore", category=ConvergenceWarning)
# --- Custom scorers ---
rmse_scorer = make_scorer(
root_mean_squared_error,
greater_is_better=False
)
rmse_scorer = make_scorer(root_mean_squared_error, greater_is_better=False)
mae_scorer = make_scorer(mean_absolute_error, greater_is_better=False)
SCORING = {
"rmse": rmse_scorer,
"mae": mae_scorer,
"r2": "r2"
}
SCORING = {"rmse": rmse_scorer, "mae": mae_scorer, "r2": "r2"}
DEFAULT_RMSE_BAND_DELTA = 0.05
DISPERSION_SELECTION_CRITERION = (
"lowest_cv_rmse_dispersion_within_5pct_rmse_band"
)
DISPERSION_SELECTION_CRITERION = "lowest_cv_rmse_dispersion_within_5pct_rmse_band"
def param_product(grid_dict):
......@@ -96,7 +85,7 @@ def param_product(grid_dict):
"""
keys = list(grid_dict.keys())
for values in itertools.product(*[grid_dict[k] for k in keys]):
yield dict(zip(keys, values))
yield dict(zip(keys, values, strict=False))
def get_scoring_for_cv(cv, base_scoring):
......@@ -165,11 +154,8 @@ def extract_best_fold_rmse(cv_results, best_index):
"""
fold_rmse = []
split_keys = sorted(
[
k for k in cv_results
if k.startswith("split") and k.endswith("_test_rmse")
],
key=lambda k: int(k.split("_", 1)[0].replace("split", ""))
[k for k in cv_results if k.startswith("split") and k.endswith("_test_rmse")],
key=lambda k: int(k.split("_", 1)[0].replace("split", "")),
)
for key in split_keys:
......@@ -204,8 +190,7 @@ def compute_rmse_dispersion(fold_rmse):
return mean_rmse, std_rmse, cv_rmse_dispersion
def select_best_model_by_rmse_band(candidate_models,
delta=DEFAULT_RMSE_BAND_DELTA):
def select_best_model_by_rmse_band(candidate_models, delta=DEFAULT_RMSE_BAND_DELTA):
"""
Select best model using RMSE competitiveness plus dispersion stability.
......@@ -219,20 +204,15 @@ def select_best_model_by_rmse_band(candidate_models,
best_mean_rmse = min(m["cv_rmse"] for m in candidate_models)
threshold = (1.0 + delta) * best_mean_rmse
competitive = [
m for m in candidate_models
if m["cv_rmse"] <= threshold
]
competitive = [m for m in candidate_models if m["cv_rmse"] <= threshold]
stable_candidates = [
m for m in competitive
if np.isfinite(m.get("cv_rmse_dispersion", float("nan")))
m for m in competitive if np.isfinite(m.get("cv_rmse_dispersion", float("nan")))
]
if stable_candidates and len(stable_candidates) == len(competitive):
selected = min(
stable_candidates,
key=lambda m: (m["cv_rmse_dispersion"], m["cv_rmse"])
stable_candidates, key=lambda m: (m["cv_rmse_dispersion"], m["cv_rmse"])
)
return selected, DISPERSION_SELECTION_CRITERION
......@@ -258,8 +238,9 @@ def extract_gpr_kernel(estimator):
try:
if hasattr(estimator, "named_steps") and "gpr" in estimator.named_steps:
gpr = estimator.named_steps["gpr"]
if (getattr(gpr, "model_", None) is not None and
hasattr(gpr.model_, "kernel_")):
if getattr(gpr, "model_", None) is not None and hasattr(
gpr.model_, "kernel_"
):
return str(gpr.model_.kernel_)
except (AttributeError, KeyError):
pass
......@@ -292,59 +273,71 @@ def adjust_tree_search_space(model_name, grid, n_samples):
if model_name == "RandomForest":
if very_small:
g.update({
g.update(
{
"n_estimators": Integer(200, 800),
"max_depth": Integer(1, 4),
"min_samples_split": Integer(2, 8),
"min_samples_leaf": Integer(2, 4),
"max_features": Real(0.5, 1.0),
})
}
)
elif small:
g.update({
g.update(
{
"n_estimators": Integer(300, 1200),
"max_depth": Integer(2, 10),
"min_samples_split": Integer(2, 10),
"min_samples_leaf": Integer(1, 6),
"max_features": Real(0.5, 1.0),
})
}
)
elif model_name == "GradientBoosting":
if very_small:
g.update({
g.update(
{
"n_estimators": Integer(200, 1500),
"learning_rate": Real(0.01, 0.08, prior="log-uniform"),
"max_depth": Integer(1, 3),
"subsample": Real(0.7, 0.9),
"max_features": Real(0.6, 1.0),
})
}
)
elif small:
g.update({
g.update(
{
"n_estimators": Integer(300, 2500),
"learning_rate": Real(1e-3, 0.1, prior="log-uniform"),
"max_depth": Integer(1, 4),
"subsample": Real(0.6, 1.0),
"max_features": Real(0.5, 1.0),
})
}
)
elif model_name == "XGBoost":
if very_small:
g.update({
g.update(
{
"n_estimators": Integer(200, 1200),
"max_depth": Integer(1, 4),
"learning_rate": Real(0.01, 0.2, prior="log-uniform"),
"subsample": Real(0.7, 0.95),
"colsample_bytree": Real(0.7, 1.0),
"min_child_weight": Integer(3, 20),
})
}
)
elif small:
g.update({
g.update(
{
"n_estimators": Integer(300, 1600),
"max_depth": Integer(2, 6),
"learning_rate": Real(1e-3, 0.3, prior="log-uniform"),
"subsample": Real(0.6, 1.0),
"colsample_bytree": Real(0.6, 1.0),
"min_child_weight": Integer(1, 15),
})
}
)
return g
......@@ -361,9 +354,8 @@ model_grids = {
"min_samples_leaf": Integer(1, 6),
"max_features": Real(0.5, 1.0),
},
"bayes"
"bayes",
),
"GradientBoosting": (
GradientBoostingRegressor(random_state=42),
{
......@@ -373,14 +365,10 @@ model_grids = {
"subsample": Real(0.6, 1.0),
"max_features": Real(0.5, 1.0),
},
"bayes"
"bayes",
),
"FlexibleMLP": (
Pipeline([
("scaler", StandardScaler()),
("mlp", FlexibleMLP())
]),
Pipeline([("scaler", StandardScaler()), ("mlp", FlexibleMLP())]),
{
"mlp__n_layers": Integer(1, 5),
"mlp__n_neurons": Integer(64, 512),
......@@ -388,29 +376,24 @@ model_grids = {
"mlp__alpha": Real(1e-5, 1e-2, prior="log-uniform"),
"mlp__learning_rate_init": Real(1e-4, 1e-2, prior="log-uniform"),
},
"bayes"
"bayes",
),
"SVR": (
Pipeline([
("scaler", StandardScaler()),
("svr", SVR(kernel="rbf", tol=1e-4))
]),
Pipeline([("scaler", StandardScaler()), ("svr", SVR(kernel="rbf", tol=1e-4))]),
{
"svr__C": Real(1e0, 1e4, prior="log-uniform"),
"svr__gamma": Real(1e-4, 1e-1, prior="log-uniform"),
"svr__epsilon": Real(1e-4, 1e-1, prior="log-uniform"),
},
"bayes"
"bayes",
),
"XGBoost": (
XGBRegressor(
objective="reg:squarederror",
random_state=42,
n_jobs=-1,
tree_method="approx",
verbosity=0
verbosity=0,
),
{
"n_estimators": Integer(200, 2000),
......@@ -420,25 +403,19 @@ model_grids = {
"colsample_bytree": Real(0.6, 1.0),
"min_child_weight": Integer(1, 10),
},
"bayes"
"bayes",
),
"GaussianProcess": (
Pipeline([
("scaler", StandardScaler()),
("gpr", FlexibleGPR())
]),
Pipeline([("scaler", StandardScaler()), ("gpr", FlexibleGPR())]),
{
"gpr__kernel_type": Categorical(
["RBF", "Matern32", "Matern52", "RQ"]
),
"gpr__kernel_type": Categorical(["RBF", "Matern32", "Matern52", "RQ"]),
"gpr__amplitude": Real(1e-2, 1e2, prior="log-uniform"),
"gpr__length_scale": Real(1e-2, 1e2, prior="log-uniform"),
"gpr__rq_alpha": Real(1e-2, 1e2, prior="log-uniform"),
"gpr__noise": Real(1e-12, 1e-4, prior="log-uniform"),
"gpr__n_restarts_optimizer": Integer(1, 10),
},
"bayes"
"bayes",
),
}
......@@ -471,7 +448,9 @@ def main():
start_rows = 10
# Path configuration
data_path = f"../../data/width_optimization/{w_val}W/ml_FEMdata_B{b_val}_H{h_val}.csv"
data_path = (
f"../../data/width_optimization/{w_val}W/ml_FEMdata_B{b_val}_H{h_val}.csv"
)
output_models_dir = (
f"../../models/width_optimization/{w_val}W/ml_models/"
f"per_output_models_B{b_val}_H{h_val}"
......@@ -496,9 +475,9 @@ def main():
)
return
print(f"\n\n{'='*60}")
print(f"\n\n{'=' * 60}")
print(f"ITERATION {iteration_idx}: Training with {n_current_rows} rows")
print(f"{'='*60}")
print(f"{'=' * 60}")
# Create iteration specific directory
it_dir = os.path.join(output_models_dir, f"it{iteration_idx}")
......@@ -508,7 +487,7 @@ def main():
df = df_full.iloc[:n_current_rows].copy()
# Define features and targets dynamically
input_features = [f"tw{i+1}" for i in range(w_val)]
input_features = [f"tw{i + 1}" for i in range(w_val)]
target_features = [
c for c in df.columns if c not in input_features and c != "eyymax_tf"
]
......@@ -549,7 +528,7 @@ def main():
start_model = time.time()
# Initialize variables
best_rmse, mean_mae, mean_r2 = float('inf'), None, None
best_rmse, mean_mae, mean_r2 = float("inf"), None, None
scoring_used, refit_metric = get_scoring_for_cv(cv, SCORING)
......@@ -557,9 +536,7 @@ def main():
# Bayesian Search CV
# ---------------------------------------------------------
# Shrink tree search spaces automatically for small N
grid_used = adjust_tree_search_space(
model_name, grid, n_samples
)
grid_used = adjust_tree_search_space(model_name, grid, n_samples)
rs = BayesSearchCV(
estimator=est,
......@@ -572,7 +549,7 @@ def main():
random_state=42,
optimizer_kwargs={"base_estimator": "RF"},
return_train_score=False,
verbose=0
verbose=0,
)
rs.fit(features_df, y)
......@@ -588,8 +565,8 @@ def main():
# RMSE scorer is negative, so invert sign
fold_rmse = extract_best_fold_rmse(rs.cv_results_, i)
fold_mean_rmse, std_rmse, cv_rmse_dispersion = (
compute_rmse_dispersion(fold_rmse)
fold_mean_rmse, std_rmse, cv_rmse_dispersion = compute_rmse_dispersion(
fold_rmse
)
best_rmse = float(-rs.cv_results_["mean_test_rmse"][i])
if np.isfinite(fold_mean_rmse):
......@@ -616,7 +593,7 @@ def main():
"cv_r2": float(mean_r2),
"BEST_PARAMS": json.dumps(best_params),
"fit_time_sec": round(elapsed_model, 2),
"gpr_kernel": gpr_kernel
"gpr_kernel": gpr_kernel,
}
detailed_rows.append(detailed_row)
......@@ -628,7 +605,8 @@ def main():
f"time={elapsed_model:6.1f}s"
)
candidate_models.append({
candidate_models.append(
{
"output": tgt,
"model": model_name,
"cv_rmse": float(best_rmse),
......@@ -639,31 +617,26 @@ def main():
"BEST_PARAMS": best_params,
"BEST_ESTIMATOR": best_estimator,
"CV_RESULTS": rs.cv_results_,
"gpr_kernel": gpr_kernel
})
best_for_tgt, selected_by = select_best_model_by_rmse_band(
candidate_models
"gpr_kernel": gpr_kernel,
}
)
best_for_tgt, selected_by = select_best_model_by_rmse_band(candidate_models)
best_cv_results = best_for_tgt["CV_RESULTS"]
# =============================================================
# SAVE BEST MODEL AND RESULTS FOR THIS OUTPUT
# =============================================================
# Modified filename to save in itX folder without suffix
out_model_path = os.path.join(
it_dir, f"best_model_{tgt}.joblib"
)
out_model_path = os.path.join(it_dir, f"best_model_{tgt}.joblib")
joblib.dump(best_for_tgt["BEST_ESTIMATOR"], out_model_path)
joblib.dump(
best_cv_results,
os.path.join(it_dir, f"cv_results_{tgt}.pkl")
)
joblib.dump(best_cv_results, os.path.join(it_dir, f"cv_results_{tgt}.pkl"))
elapsed_target = time.time() - start_target
# --- Summary for this output ---
summary_rows.append({
summary_rows.append(
{
"output": tgt,
"best_model": best_for_tgt["model"],
"cv_rmse": best_for_tgt["cv_rmse"],
......@@ -675,33 +648,34 @@ def main():
"model_path": out_model_path,
"train_time_sec": round(elapsed_target, 2),
"gpr_kernel": best_for_tgt.get("gpr_kernel", ""),
"selected_by": selected_by
})
"selected_by": selected_by,
}
)
print(f" 🏁 Best model for {tgt}: {best_for_tgt['model']} "
print(
f" 🏁 Best model for {tgt}: {best_for_tgt['model']} "
f"(RMSE={best_for_tgt['cv_rmse']:.5f}, "
f"CV={best_for_tgt['cv_rmse_dispersion']:.5f}, "
f"R²={best_for_tgt['cv_r2']:.5f}) "
f"selected by {selected_by} "
f"→ saved to {out_model_path}")
f"→ saved to {out_model_path}"
)
# =============================================================
# SAVE SUMMARY TABLES
# =============================================================
summary_df = (pd.DataFrame(summary_rows)
.sort_values(["cv_rmse"])
.reset_index(drop=True))
detailed_df = (pd.DataFrame(detailed_rows)
.sort_values(["output", "cv_rmse"])
.reset_index(drop=True))
summary_csv = os.path.join(
it_dir, f"cv_summary_per_output_B{b_val}_H{h_val}.csv"
summary_df = (
pd.DataFrame(summary_rows).sort_values(["cv_rmse"]).reset_index(drop=True)
)
detailed_csv = os.path.join(
it_dir, f"cv_detailed_per_output_B{b_val}_H{h_val}.csv"
detailed_df = (
pd.DataFrame(detailed_rows)
.sort_values(["output", "cv_rmse"])
.reset_index(drop=True)
)
summary_csv = os.path.join(it_dir, f"cv_summary_per_output_B{b_val}_H{h_val}.csv")
detailed_csv = os.path.join(it_dir, f"cv_detailed_per_output_B{b_val}_H{h_val}.csv")
summary_df.to_csv(summary_csv, index=False)
detailed_df.to_csv(detailed_csv, index=False)
......
......@@ -19,7 +19,7 @@ class RBFModel:
"""
def __init__(
self, x_data, y_data, function='multiquadric', smooth=0.0, epsilon=None
self, x_data, y_data, function="multiquadric", smooth=0.0, epsilon=None
):
"""
Initialize and fit the RBF model.
......@@ -49,10 +49,11 @@ class RBFModel:
def fit(self):
"""Fit the RBF model using the stored data."""
self.model = Rbf(
*self.x_data.T, self.y_data,
*self.x_data.T,
self.y_data,
function=self.function,
smooth=self.smooth,
epsilon=self.epsilon
epsilon=self.epsilon,
)
def predict(self, x_new):
......
......@@ -6,8 +6,11 @@ This module performs structural optimization using Radial Basis Function
surrogate models and Differential Evolution. It finds the optimal window
widths to maximize strain while maintaining failure within limits.
"""
import os
import argparse
import contextlib
import os
import joblib
import numpy as np
import pandas as pd
......@@ -27,7 +30,7 @@ def save_optimization_results(
b_val,
h_val,
tfd_w_val,
it_val
it_val,
):
"""
Save optimization results to CSV file.
......@@ -65,40 +68,42 @@ def save_optimization_results(
frame_pred = model_frame.predict(inp_best)[0]
# Create results dictionary
results_dict = {
'Parameter': [],
'Value': []
}
results_dict = {"Parameter": [], "Value": []}
# Add configuration
results_dict['Parameter'].extend([
'Configuration_W', 'Configuration_B', 'Configuration_H',
'Configuration_TFD_W', 'Iteration'
])
results_dict['Value'].extend([w_val, b_val, h_val, tfd_w_val, it_val])
results_dict["Parameter"].extend(
[
"Configuration_W",
"Configuration_B",
"Configuration_H",
"Configuration_TFD_W",
"Iteration",
]
)
results_dict["Value"].extend([w_val, b_val, h_val, tfd_w_val, it_val])
# Add optimal widths
for window_num in range(w_val):
results_dict['Parameter'].append(f'tw{window_num+1}_optimal')
results_dict['Value'].append(result.x[window_num])
results_dict["Parameter"].append(f"tw{window_num + 1}_optimal")
results_dict["Value"].append(result.x[window_num])
# Add objective score
results_dict['Parameter'].append('Objective_score')
results_dict['Value'].append(result.fun)
results_dict["Parameter"].append("Objective_score")
results_dict["Value"].append(result.fun)
# Add Exy predictions
for window_num, exy_val in enumerate(exy_preds):
results_dict['Parameter'].append(f'Exy_tw{window_num+1}')
results_dict['Value'].append(exy_val)
results_dict["Parameter"].append(f"Exy_tw{window_num + 1}")
results_dict["Value"].append(exy_val)
# Add TFM predictions
for window_num, tfm_val in enumerate(tfm_preds):
results_dict['Parameter'].append(f'TFM_tw{window_num+1}')
results_dict['Value'].append(tfm_val)
results_dict["Parameter"].append(f"TFM_tw{window_num + 1}")
results_dict["Value"].append(tfm_val)
# Add frame prediction
results_dict['Parameter'].append('TFM_frame')
results_dict['Value'].append(frame_pred)
results_dict["Parameter"].append("TFM_frame")
results_dict["Value"].append(frame_pred)
# Create DataFrame
df_results = pd.DataFrame(results_dict)
......@@ -110,7 +115,7 @@ def save_optimization_results(
# Save to CSV
csv_path = os.path.join(
it_dir,
f'optimization_results_{w_val}W_B{b_val}_H{h_val}_TFD_W{int(tfd_w_val)}.csv'
f"optimization_results_{w_val}W_B{b_val}_H{h_val}_TFD_W{int(tfd_w_val)}.csv",
)
df_results.to_csv(csv_path, index=False)
......@@ -133,7 +138,7 @@ def make_input_df(x, w_val):
pd.DataFrame: DataFrame with the appropriate number of tw columns
"""
# Create column names based on number of windows
columns = [f"tw{i+1}" for i in range(w_val)]
columns = [f"tw{i + 1}" for i in range(w_val)]
# Verify input length matches number of windows
if len(x) != w_val:
......@@ -216,16 +221,13 @@ def objective(x, models_exy, models_tfm, model_frame, w_val, b_val, tfd_w_val):
raw_vals = []
# --- 1. Equivalent strain maximization (negative sign) ---
for model_i, tw_i, i in zip(models_exy, x, range(w_val)):
for model_i, tw_i, i in zip(models_exy, x, range(w_val), strict=False):
# Calculate predicted equivalent strain squared multiplied by volume
pred_i = model_i.predict(inp)[0]
v_factor = get_window_v_factor(w_val, b_val, i)
if v_factor == 1.0 and w_val not in [2, 3, 5]:
print(
"Warning: Not considering window area for W not equal to "
"2, 3, or 5"
)
print("Warning: Not considering window area for W not equal to 2, 3, or 5")
raw_vals.append(pred_i * pred_i * tw_i)
else:
raw_vals.append(pred_i * pred_i * tw_i * 1e-3 * v_factor)
......@@ -241,10 +243,7 @@ def objective(x, models_exy, models_tfm, model_frame, w_val, b_val, tfd_w_val):
# --- 3. Penalize frame failure exceeding the limit (TFD_MAX) ---
frame_pred = model_frame.predict(inp)[0]
penalty_frame = (
(frame_pred - tfd_max) ** 3 if frame_pred > tfd_max
else abs(0.0)
)
penalty_frame = (frame_pred - tfd_max) ** 3 if frame_pred > tfd_max else abs(0.0)
# --- Combine all objective terms ---
total_score = score_exy + penalty_windows + penalty_frame
......@@ -260,7 +259,7 @@ def plot_3d_tw_pairs_at_optimum(
h_val,
tfd_w_val,
it_val,
output_dir="figures_3D"
output_dir="figures_3D",
):
"""
Generate a 3D surface plot for every pair (tw_i, tw_j), fixing the
......@@ -292,7 +291,6 @@ def plot_3d_tw_pairs_at_optimum(
for i in range(w_val):
for j in range(i + 1, w_val):
# Define variable ranges
tw_i_vals = np.linspace(bounds[i][0], bounds[i][1], 50)
tw_j_vals = np.linspace(bounds[j][0], bounds[j][1], 50)
......@@ -322,7 +320,7 @@ def plot_3d_tw_pairs_at_optimum(
z=z_vals,
colorscale="Viridis",
showscale=True,
name="Objective surface"
name="Objective surface",
)
]
)
......@@ -335,35 +333,36 @@ def plot_3d_tw_pairs_at_optimum(
z=[opt_z],
mode="markers",
marker={"size": 6, "color": "red"},
name="Optimum"
name="Optimum",
)
)
fig.update_layout(
title=(
f"RBF 3D Objective Surface – (tw{i+1}, tw{j+1}) "
f"RBF 3D Objective Surface – (tw{i + 1}, tw{j + 1}) "
f" at optimum for other windows width"
),
scene={
"xaxis_title": f"tw{i + 1} (mm)",
"yaxis_title": f"tw{j + 1} (mm)",
"zaxis_title": "Objective score"
"zaxis_title": "Objective score",
},
width=950,
height=850
height=850,
)
# Save HTML
fname_base = f"surface_tw{i+1}_tw{j+1}_B{b_val}_H{h_val}_TFD_W{int(tfd_w_val)}"
fname_base = (
f"surface_tw{i + 1}_tw{j + 1}_B{b_val}_H{h_val}_TFD_W{int(tfd_w_val)}"
)
html_path = f"{it_dir}/{fname_base}.html"
fig.write_html(html_path)
# Save PNG (optional if Kaleido installed)
try:
fig.write_image(html_path.replace(".html", ".png"),
width=1200, height=1000)
except Exception: # pylint: disable=broad-except
pass
with contextlib.suppress(Exception):
fig.write_image(
html_path.replace(".html", ".png"), width=1200, height=1000
)
print(f"Saved 3D RBF surface: {html_path}")
......@@ -401,7 +400,11 @@ def main():
bounds_vals = [(5, 14), (5, 14), (5, 14)] # tw1, tw2, tw3 between 5 and 14 mm
elif w_val == 5:
bounds_vals = [
(5, 12), (5, 12), (5, 12), (5, 12), (5, 12)
(5, 12),
(5, 12),
(5, 12),
(5, 12),
(5, 12),
] # tw1 to tw5 between 5 and 12 mm
else:
# General case for any W
......@@ -411,17 +414,24 @@ def main():
# Path to trained models
input_models_dir = os.path.join(
BASE_DIR,
"..", "..", "models", "width_optimization",
f"{w_val}W", "rbf_models",
f"per_output_models_B{b_val}_H{h_val}"
"..",
"..",
"models",
"width_optimization",
f"{w_val}W",
"rbf_models",
f"per_output_models_B{b_val}_H{h_val}",
)
# Output directory for optimization cruves
output_graphs_dir = os.path.join(
BASE_DIR,
"..", "..", "reports", "width_optimization",
"..",
"..",
"reports",
"width_optimization",
f"{w_val}W",
"rbf_optimization"
"rbf_optimization",
)
os.makedirs(output_graphs_dir, exist_ok=True)
......@@ -436,10 +446,11 @@ def main():
# Check Exy and TFM models for each window
for window_idx in range(w_val):
exy_name = f"rbf_exymax_tw{window_idx+1}.joblib"
tfm_name = f"rbf_tfmmax_tw{window_idx+1}.joblib"
if not os.path.exists(os.path.join(input_models_dir_it, exy_name)) or \
not os.path.exists(os.path.join(input_models_dir_it, tfm_name)):
exy_name = f"rbf_exymax_tw{window_idx + 1}.joblib"
tfm_name = f"rbf_tfmmax_tw{window_idx + 1}.joblib"
if not os.path.exists(
os.path.join(input_models_dir_it, exy_name)
) or not os.path.exists(os.path.join(input_models_dir_it, tfm_name)):
all_models_exist = False
break
......@@ -458,12 +469,12 @@ def main():
for window_idx in range(w_val):
# Load Exy models
exy_model_name = f"rbf_exymax_tw{window_idx+1}.joblib"
exy_model_name = f"rbf_exymax_tw{window_idx + 1}.joblib"
exy_model_path = os.path.join(input_models_dir_it, exy_model_name)
models_exy.append(joblib.load(exy_model_path))
# Load TFM models
tfm_model_name = f"rbf_tfmmax_tw{window_idx+1}.joblib"
tfm_model_name = f"rbf_tfmmax_tw{window_idx + 1}.joblib"
tfm_model_path = os.path.join(input_models_dir_it, tfm_model_name)
models_tfm.append(joblib.load(tfm_model_path))
......@@ -486,7 +497,7 @@ def main():
maxiter=500, # Maximum number of iterations
popsize=25, # Population size
tol=1e-6, # Tolerance for convergence
seed=42 # Random seed for reproducibility
seed=42, # Random seed for reproducibility
)
# Save results to CSV
......@@ -500,7 +511,7 @@ def main():
b_val,
h_val,
tfd_w_val,
it
it,
)
# Extract optimum for plotting
......@@ -510,7 +521,7 @@ def main():
print(f"\nResults for {w_val} windows optimization (Iteration {it}):")
print("Best solution found:")
for window_num, tw in enumerate(optimum):
print(f"tw{window_num+1} = {tw:.2f}")
print(f"tw{window_num + 1} = {tw:.2f}")
print(f"Objective score = {result.fun:.4f}")
# Calculate predictions for optimal solution
......@@ -530,7 +541,7 @@ def main():
h_val,
tfd_w_val,
it,
output_dir=output_graphs_dir
output_dir=output_graphs_dir,
)
......
......@@ -6,17 +6,25 @@ This module handles the training of RBF surrogate models and performs
Leave-One-Out (LOO) validation to assess their predictive performance.
It automates the process for multiple output variables.
"""
import os
import argparse
import os
import joblib
import numpy as np
import pandas as pd
from rbf_model import RBFModel
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def loo_validation_rbf(df_val, input_features, target_features,
function="multiquadric", smooth=0.0, epsilon=None):
def loo_validation_rbf(
df_val,
input_features,
target_features,
function="multiquadric",
smooth=0.0,
epsilon=None,
):
"""
Leave-One-Out (LOO) validation for RBF models.
......@@ -68,11 +76,7 @@ def loo_validation_rbf(df_val, input_features, target_features,
# Train RBF with N-1 samples
model = RBFModel(
x_train,
y_train,
function=function,
smooth=smooth,
epsilon=epsilon
x_train, y_train, function=function, smooth=smooth, epsilon=epsilon
)
pred_i = model.predict(x_test)[0]
......@@ -86,11 +90,10 @@ def loo_validation_rbf(df_val, input_features, target_features,
rmse = np.sqrt(mean_squared_error(y_true, y_pred))
mae = mean_absolute_error(y_true, y_pred)
r2 = r2_score(y_true, y_pred)
rmse_std, rmse_cv, mae_std, mse_std = compute_error_dispersion(
y_true, y_pred
)
rmse_std, rmse_cv, mae_std, mse_std = compute_error_dispersion(y_true, y_pred)
results.append({
results.append(
{
"output": tgt,
"LOO_RMSE": rmse,
"LOO_MAE": mae,
......@@ -98,8 +101,9 @@ def loo_validation_rbf(df_val, input_features, target_features,
"LOO_RMSE_STD": rmse_std,
"LOO_RMSE_CV": rmse_cv,
"LOO_MAE_STD": mae_std,
"LOO_SQERR_STD": mse_std
})
"LOO_SQERR_STD": mse_std,
}
)
print(
f" LOO_RMSE = {rmse:.5f} | LOO_MAE = {mae:.5f} | "
......@@ -164,7 +168,9 @@ def main():
print("Warning: H not set for W != 2, 3, or 5")
# Path configuration
data_path = f"../../data/width_optimization/{w_val}W/rbf_FEMdata_B{b_val}_H{h_val}.csv"
data_path = (
f"../../data/width_optimization/{w_val}W/rbf_FEMdata_B{b_val}_H{h_val}.csv"
)
output_models_dir = (
f"../../models/width_optimization/{w_val}W/rbf_models/"
f"per_output_models_B{b_val}_H{h_val}"
......@@ -187,9 +193,9 @@ def main():
)
return
print(f"\n\n{'='*60}")
print(f"\n\n{'=' * 60}")
print(f"ITERATION {i_iter}: Training RBF with {n_curr} rows")
print(f"{'='*60}")
print(f"{'=' * 60}")
# Create iteration specific directory
it_dir = os.path.join(output_models_dir, f"it{i_iter}")
......@@ -197,7 +203,7 @@ def main():
df = df_tot.iloc[:n_curr].copy()
input_features = [f"tw{i+1}" for i in range(w_val)]
input_features = [f"tw{i + 1}" for i in range(w_val)]
target_features = [
c for c in df.columns if c not in input_features and c != "eyymax_tf"
]
......@@ -210,12 +216,7 @@ def main():
print(f"→ Training RBF for {tgt} ...")
y = df[tgt].values
rbf = RBFModel(
x_values, y,
function="multiquadric",
smooth=0.0,
epsilon=None
)
rbf = RBFModel(x_values, y, function="multiquadric", smooth=0.0, epsilon=None)
joblib.dump(rbf, os.path.join(it_dir, f"rbf_{tgt}.joblib"))
print(f" saved to it{i_iter}/rbf_{tgt}.joblib")
......@@ -224,8 +225,12 @@ def main():
# Execute LOO validation
# =============================================================
loo_results = loo_validation_rbf(
df, input_features, target_features,
function="multiquadric", smooth=0.0, epsilon=None
df,
input_features,
target_features,
function="multiquadric",
smooth=0.0,
epsilon=None,
)
loo_csv = os.path.join(it_dir, "rbf_LOO_results.csv")
......
[tool.ruff]
target-version = "py310"
line-length = 88
src = ["Code/src"]
[tool.ruff.lint]
select = [
"E",
"F",
"I",
"UP",
"B",
"SIM",
]
ignore = [
"E501",
]
[tool.ruff.lint.per-file-ignores]
"Code/src/hysteretic_curves/predict_hysteretic_curves.py" = ["F821"]
"Code/src/hysteretic_curves/predict_hysteretic_curves_1d_cnn.py" = ["F821"]
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment