diff --git a/README.md b/README.md index b342ad4c..974eab7c 100644 --- a/README.md +++ b/README.md @@ -77,7 +77,7 @@ instruction for the installation on Ubuntu. - Using Docker container with GUI disabled ``` -docker run -it mdmitry1/python311-dev:latest +docker run -it mdmitry1/python313-dev:latest ``` Within docker container prepend SMLP Python script with `xvfb-run`. @@ -137,12 +137,12 @@ bin/test_install 1. Pull Docker container from the Docker repository: ``` - docker pull mdmitry1/python311-dev:latest + docker pull mdmitry1/python313-dev:latest ``` 2. Start Docker container: ``` - docker run -it mdmitry1/python311-dev:latest + docker run -it mdmitry1/python313-dev:latest ``` 3. Run the tool diff --git a/bin/enter b/bin/enter index 64367d48..a388cc8e 120000 --- a/bin/enter +++ b/bin/enter @@ -1 +1 @@ -../docker/python3.11/enter_released_container \ No newline at end of file +../docker/python3.13/enter_released_container \ No newline at end of file diff --git a/bin/enter_wslg b/bin/enter_wslg index ceb23d50..ddb45afc 120000 --- a/bin/enter_wslg +++ b/bin/enter_wslg @@ -1 +1 @@ -../docker/python3.11/enter_released_container_wslg \ No newline at end of file +../docker/python3.13/enter_released_container_wslg \ No newline at end of file diff --git a/bin/enter_x11 b/bin/enter_x11 index 86985aa5..fb34538d 120000 --- a/bin/enter_x11 +++ b/bin/enter_x11 @@ -1 +1 @@ -../docker/python3.11/enter_released_container_x11_forwarding \ No newline at end of file +../docker/python3.13/enter_released_container_x11_forwarding \ No newline at end of file diff --git a/bin/run_installation_test_expected.log b/bin/run_installation_test_expected.log index be751667..35f87597 100644 --- a/bin/run_installation_test_expected.log +++ b/bin/run_installation_test_expected.log @@ -1,12 +1,9 @@ Script is running inside a Docker container. -2026-02-08 10:39:55.640415: I external/local_tsl/tsl/cuda/cudart_stub.cc:31] Could not find cuda drivers on your machine, GPU will not be used. 
-2026-02-08 10:39:56.909453: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered -2026-02-08 10:39:56.909584: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered -2026-02-08 10:39:57.061866: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered -2026-02-08 10:39:57.645863: I external/local_tsl/tsl/cuda/cudart_stub.cc:31] Could not find cuda drivers on your machine, GPU will not be used. -2026-02-08 10:39:57.649740: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. +2026-02-08 17:56:24.208808: I external/local_xla/xla/tsl/cuda/cudart_stub.cc:31] Could not find cuda drivers on your machine, GPU will not be used. +2026-02-08 17:56:25.379711: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. -2026-02-08 10:40:00.557679: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT +2026-02-08 17:56:28.256185: I external/local_xla/xla/tsl/cuda/cudart_stub.cc:31] Could not find cuda drivers on your machine, GPU will not be used. 
+2026-02-08 17:56:29.904234: E external/local_xla/xla/stream_executor/cuda/cuda_platform.cc:51] failed call to cuInit: INTERNAL: CUDA error: Failed call to cuInit: UNKNOWN ERROR (303) usage: smlp/src/run_smlp.py [-h] [-model MODEL] [-save_model SAVE_MODEL] [-use_model USE_MODEL] [-model_name MODEL_NAME] [-save_model_config SAVE_MODEL_RERUN_CONFIGURATION] @@ -74,15 +71,6 @@ usage: smlp/src/run_smlp.py [-h] [-model MODEL] [-save_model SAVE_MODEL] [-et_sklearn_max_leaf_nodes ET_SKLEARN_MAX_LEAF_NODES] [-et_sklearn_min_impurity_decrease ET_SKLEARN_MIN_IMPURITY_DECREASE] [-et_sklearn_ccp_alpha ET_SKLEARN_CCP_ALPHA] - [-setup_caret_session_id SETUP_CARET_SESSION_ID] - [-setup_caret_fold SETUP_CARET_FOLD] - [-setup_caret_data_split_shuffle SETUP_CARET_DATA_SPLIT_SHUFFLE] - [-setup_caret_verbose SETUP_CARET_VERBOSE] - [-model_caret_cross_validation MODEL_CARET_CROSS_VALIDATION] - [-model_caret_verbose MODEL_CARET_VERBOSE] - [-model_caret_return_train_score MODEL_CARET_RETURN_TRAIN_SCORE] - [-tuner_caret_search_algo TUNER_CARET_SEARCH_ALGORITHM] - [-tuner_caret_tuner_verbose TUNER_CARET_TUNER_VERBOSE] [-resp RESPONSE] [-feat FEATURES] [-keep_feat KEEP_FEATURES] [-new_data NEW_DATA] [-data_scaler DATA_SCALER] @@ -160,15 +148,15 @@ usage: smlp/src/run_smlp.py [-h] [-model MODEL] [-save_model SAVE_MODEL] options: -h, --help show this help message and exit - -model MODEL, --model MODEL + -model, --model MODEL Type of model to train (NN, Poly, ... [default: none] - -save_model SAVE_MODEL, --save_model SAVE_MODEL + -save_model, --save_model SAVE_MODEL Should the trained models be saved for future use? [default: True] - -use_model USE_MODEL, --use_model USE_MODEL + -use_model, --use_model USE_MODEL Should the saved models be reused (and training skipped)? [default: False] - -model_name MODEL_NAME, --model_name MODEL_NAME + -model_name, --model_name MODEL_NAME Name of saved model. 
If not specified, the name is defined as follows: filename_prefix + "_" + model_algo + "_model_complete" + model_format where @@ -177,43 +165,43 @@ options: model_algo is the training algo name and model_format is .h5 for nn_keras and .pkl for models trained using sklearn and keras packages. - -save_model_config SAVE_MODEL_RERUN_CONFIGURATION, --save_model_rerun_configuration SAVE_MODEL_RERUN_CONFIGURATION + -save_model_config, --save_model_rerun_configuration SAVE_MODEL_RERUN_CONFIGURATION Should a config file enabling to re-run a saved model be written out? [default: True] - -model_per_response MODEL_PER_RESPONSE, --model_per_response MODEL_PER_RESPONSE + -model_per_response, --model_per_response MODEL_PER_RESPONSE Should a separate model, possible with a different, dedicated feature set, be built per response (as opposite to building one multi-response model)?[default: False] - -pred_plots PREDICTION_PLOTS, --prediction_plots PREDICTION_PLOTS + -pred_plots, --prediction_plots PREDICTION_PLOTS Should response distribution plots and plots comparing response values in data with the predicted values be generated? A related option interactive_plots controls whether the generated plots should be displayed interactively during runtime [default: True] - -nn_keras_layers NN_KERAS_LAYERS, --nn_keras_layers NN_KERAS_LAYERS + -nn_keras_layers, --nn_keras_layers NN_KERAS_LAYERS specify number and sizes of the hidden layers of the NN as non-empty, comma-separated list of positive fractions in the number of input features in, e.g. 
"0.5,0.25" specifies the second layer of half input size, third layer of quarter input size (the input layer has one node per input) [default: 2,1] - -nn_keras_epochs NN_KERAS_EPOCHS, --nn_keras_epochs NN_KERAS_EPOCHS + -nn_keras_epochs, --nn_keras_epochs NN_KERAS_EPOCHS epochs for NN [default: 2000] - -nn_keras_batch NN_KERAS_BATCH_SIZE, --nn_keras_batch_size NN_KERAS_BATCH_SIZE + -nn_keras_batch, --nn_keras_batch_size NN_KERAS_BATCH_SIZE batch_size for NN [default: not exposed] - -nn_keras_optimizer NN_KERAS_OPTIMIZER, --nn_keras_optimizer NN_KERAS_OPTIMIZER + -nn_keras_optimizer, --nn_keras_optimizer NN_KERAS_OPTIMIZER optimizer for NN [default: adam] - -nn_keras_learning_rate NN_KERAS_LEARNING_RATE, --nn_keras_learning_rate NN_KERAS_LEARNING_RATE + -nn_keras_learning_rate, --nn_keras_learning_rate NN_KERAS_LEARNING_RATE optimizer for NN [default: 0.001] - -nn_keras_loss NN_KERAS_LOSS_FUNCTION, --nn_keras_loss_function NN_KERAS_LOSS_FUNCTION + -nn_keras_loss, --nn_keras_loss_function NN_KERAS_LOSS_FUNCTION The loss function for NN training convergence. Possible options are: "mse" (MeanSquaredError), "mae" (MeanAbsoluteError), "mspe" (MeanAbsolutePercentageError) "msle" (MeanSquaredLogarithmicError), "huber" (Huber), "logcosh" (LogCosh) [default: mse] - -nn_keras_metrics NN_KERAS_METRICS, --nn_keras_metrics NN_KERAS_METRICS + -nn_keras_metrics, --nn_keras_metrics NN_KERAS_METRICS The metrics for NN training convergence. 
Possible options are: "rmse (RootMeanSquaredError), "mse" (MeanSquaredError), "mae" (MeanAbsoluteError), "mspe" @@ -221,68 +209,68 @@ options: (MeanSquaredLogarithmicError), "logcosh" (LogCoshError), and "cosine" (CosineSimilarity) [default: ['mse']] - -nn_keras_hid_activation NN_KERAS_HID_ACTIVATION, --nn_keras_hid_activation NN_KERAS_HID_ACTIVATION + -nn_keras_hid_activation, --nn_keras_hid_activation NN_KERAS_HID_ACTIVATION hidden layer activation for NN [default: relu] - -nn_keras_out_activation NN_KERAS_OUT_ACTIVATION, --nn_keras_out_activation NN_KERAS_OUT_ACTIVATION + -nn_keras_out_activation, --nn_keras_out_activation NN_KERAS_OUT_ACTIVATION output layer activation for NN [default: linear] - -nn_keras_seq_api NN_KERAS_SEQUENTIAL_API, --nn_keras_sequential_api NN_KERAS_SEQUENTIAL_API + -nn_keras_seq_api, --nn_keras_sequential_api NN_KERAS_SEQUENTIAL_API Should sequential api be used building NN layers or should functional api be used instead? [default: True] - -nn_keras_weights_precision NN_KERAS_WEIGHTS_PRECISION, --nn_keras_weights_precision NN_KERAS_WEIGHTS_PRECISION + -nn_keras_weights_precision, --nn_keras_weights_precision NN_KERAS_WEIGHTS_PRECISION Decimal precison (theat is, decimal points after the dot) to use for rounding model weights (after a NN model has been trained). The default value {} implies that weight will not be rounded [default: linear] - -nn_keras_tuner NN_KERAS_TUNER_ALGO, --nn_keras_tuner_algo NN_KERAS_TUNER_ALGO + -nn_keras_tuner, --nn_keras_tuner_algo NN_KERAS_TUNER_ALGO NN Keras tuner algorithm to be invoked. Supported options are hyperband (Hyperband), bayesian (BayesianOptimization) and random (RandomSearch). 
The option value None indicates that keras tuner will not be invoked [default: None] - -nn_keras_layers_grid NN_KERAS_LAYERS_GRID, --nn_keras_layers_grid NN_KERAS_LAYERS_GRID + -nn_keras_layers_grid, --nn_keras_layers_grid NN_KERAS_LAYERS_GRID Semicolon separated list of NN Keras layers specifications, to be used by Keras tuner. Each such specification itself is a comma separated list of numbers, see the layers options for a detailed description [default: None] - -nn_keras_batches_grid NN_KERAS_BATCHES_GRID, --nn_keras_batches_grid NN_KERAS_BATCHES_GRID + -nn_keras_batches_grid, --nn_keras_batches_grid NN_KERAS_BATCHES_GRID Comma separated list of NN Keras batch sizes, to be used by Keras tuner. [default: None] - -nn_keras_lrates_grid NN_KERAS_LEARNING_RATES_GRID, --nn_keras_learning_rates_grid NN_KERAS_LEARNING_RATES_GRID + -nn_keras_lrates_grid, --nn_keras_learning_rates_grid NN_KERAS_LEARNING_RATES_GRID Comma separated list of NN Keras learning rates, to be used by Keras tuner. [default: None] - -nn_keras_losses_grid NN_KERAS_LOSS_FUNCTIONS_GRID, --nn_keras_loss_functions_grid NN_KERAS_LOSS_FUNCTIONS_GRID + -nn_keras_losses_grid, --nn_keras_loss_functions_grid NN_KERAS_LOSS_FUNCTIONS_GRID Comma separated list of NN Keras loss functions, to be used by Keras tuner. It can be a subset of loss functions mse, mae, mape, msle, huber, logcosh. [default: None] - -poly_sklearn_degree POLY_SKLEARN_DEGREE, --poly_sklearn_degree POLY_SKLEARN_DEGREE + -poly_sklearn_degree, --poly_sklearn_degree POLY_SKLEARN_DEGREE Degree of the polynomial to train [default: 2] - -poly_sklearn_fit_intercept POLY_SKLEARN_FIT_INTERCEPT, --poly_sklearn_fit_intercept POLY_SKLEARN_FIT_INTERCEPT + -poly_sklearn_fit_intercept, --poly_sklearn_fit_intercept POLY_SKLEARN_FIT_INTERCEPT Whether to calculate the intercept for this model. If set to False, no intercept will be used in calculations (i.e. data is expected to be centered). 
[default: True] - -poly_sklearn_copy_X POLY_SKLEARN_COPY_X, --poly_sklearn_copy_X POLY_SKLEARN_COPY_X + -poly_sklearn_copy_X, --poly_sklearn_copy_X POLY_SKLEARN_COPY_X If True, X will be copied; else, it may be overwritten. [default: True] - -poly_sklearn_n_jobs POLY_SKLEARN_N_JOBS, --poly_sklearn_n_jobs POLY_SKLEARN_N_JOBS + -poly_sklearn_n_jobs, --poly_sklearn_n_jobs POLY_SKLEARN_N_JOBS The number of jobs to use for the computation. This will only provide speedup in case of sufficiently large problems, that is if firstly n_targets > 1 and secondly X is sparse or if positive is set to True. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors [default: None] - -poly_sklearn_positive POLY_SKLEARN_POSITIVE, --poly_sklearn_positive POLY_SKLEARN_POSITIVE + -poly_sklearn_positive, --poly_sklearn_positive POLY_SKLEARN_POSITIVE When set to True, forces the coefficients to be positive. This option is only supported for dense arrays. [default: False] - -dt_sklearn_splitter DT_SKLEARN_SPLITTER, --dt_sklearn_splitter DT_SKLEARN_SPLITTER + -dt_sklearn_splitter, --dt_sklearn_splitter DT_SKLEARN_SPLITTER The strategy used to choose the split at each node. Supported strategies are “best” to choose the best split and “random” to choose the best random split [default: best] - -dt_sklearn_max_features DT_SKLEARN_MAX_FEATURES, --dt_sklearn_max_features DT_SKLEARN_MAX_FEATURES + -dt_sklearn_max_features, --dt_sklearn_max_features DT_SKLEARN_MAX_FEATURES The number of features to consider when looking for the best split: If int, then consider max_features features at each split. If float, max_features is a @@ -291,7 +279,7 @@ options: split. If “sqrt”, then max_features=sqrt(n_features). If “log2”, then max_features=log2(n_features). If None, then max_features=n_features. 
[default: None] - -dt_sklearn_rand_state DT_SKLEARN_RANDOM_STATE, --dt_sklearn_random_state DT_SKLEARN_RANDOM_STATE + -dt_sklearn_rand_state, --dt_sklearn_random_state DT_SKLEARN_RANDOM_STATE Controls the randomness of the estimator. The features are always randomly permuted at each split, even if splitter is set to "best". When max_features < @@ -304,7 +292,7 @@ options: selected at random. To obtain a deterministic behaviour during fitting, random_state has to be fixed to an integer. [default: None] - -dt_sklearn_criterion DT_SKLEARN_CRITERION, --dt_sklearn_criterion DT_SKLEARN_CRITERION + -dt_sklearn_criterion, --dt_sklearn_criterion DT_SKLEARN_CRITERION The function to measure the quality of a split. Supported criteria are “squared_error” for the mean squared error, which is equal to variance reduction as @@ -318,35 +306,35 @@ options: Poisson deviance to find splits. Training using “absolute_error” is slower than when using “squared_error”. [default: squared_error] - -dt_sklearn_max_depth DT_SKLEARN_MAX_DEPTH, --dt_sklearn_max_depth DT_SKLEARN_MAX_DEPTH + -dt_sklearn_max_depth, --dt_sklearn_max_depth DT_SKLEARN_MAX_DEPTH The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. [default: None] - -dt_sklearn_min_samples_split DT_SKLEARN_MIN_SAMPLES_SPLIT, --dt_sklearn_min_samples_split DT_SKLEARN_MIN_SAMPLES_SPLIT + -dt_sklearn_min_samples_split, --dt_sklearn_min_samples_split DT_SKLEARN_MIN_SAMPLES_SPLIT The minimum number of samples required to split an internal node.If int, then consider min_samples_split as the minimum number. If float, min_samples_split is a fraction and ceil(min_samples_split * n_samples) is the minimum number of samples for each split. 
[default: 2] - -dt_sklearn_min_samples_leaf DT_SKLEARN_MIN_SAMPLES_LEAF, --dt_sklearn_min_samples_leaf DT_SKLEARN_MIN_SAMPLES_LEAF + -dt_sklearn_min_samples_leaf, --dt_sklearn_min_samples_leaf DT_SKLEARN_MIN_SAMPLES_LEAF The minimum number of samples required to be at a leaf node. If int, then consider min_samples_leaf as the minimum number. If float, min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) is the minimum number of samples for each node. [default: 1] - -dt_sklearn_min_weight_fraction_leaf DT_SKLEARN_MIN_WEIGHT_FRACTION_LEAF, --dt_sklearn_min_weight_fraction_leaf DT_SKLEARN_MIN_WEIGHT_FRACTION_LEAF + -dt_sklearn_min_weight_fraction_leaf, --dt_sklearn_min_weight_fraction_leaf DT_SKLEARN_MIN_WEIGHT_FRACTION_LEAF The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. [default: 0.0] - -dt_sklearn_max_leaf_nodes DT_SKLEARN_MAX_LEAF_NODES, --dt_sklearn_max_leaf_nodes DT_SKLEARN_MAX_LEAF_NODES + -dt_sklearn_max_leaf_nodes, --dt_sklearn_max_leaf_nodes DT_SKLEARN_MAX_LEAF_NODES Grow a tree with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes [default: None] - -dt_sklearn_min_impurity_decrease DT_SKLEARN_MIN_IMPURITY_DECREASE, --dt_sklearn_min_impurity_decrease DT_SKLEARN_MIN_IMPURITY_DECREASE + -dt_sklearn_min_impurity_decrease, --dt_sklearn_min_impurity_decrease DT_SKLEARN_MIN_IMPURITY_DECREASE A node will be split if this split induces a decrease of the impurity greater than or equal to this value N_t / N * (impurity - N_t_R / N_t * right_impurity - @@ -357,14 +345,14 @@ options: right child. N, N_t, N_t_R and N_t_L all refer to the weighted sum, if sample_weight is passed. 
[default: 0.0] - -dt_sklearn_ccp_alpha DT_SKLEARN_CCP_ALPHA, --dt_sklearn_ccp_alpha DT_SKLEARN_CCP_ALPHA + -dt_sklearn_ccp_alpha, --dt_sklearn_ccp_alpha DT_SKLEARN_CCP_ALPHA Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed. [default: 0.0] - -rf_sklearn_n_estimators RF_SKLEARN_N_ESTIMATORS, --rf_sklearn_n_estimators RF_SKLEARN_N_ESTIMATORS + -rf_sklearn_n_estimators, --rf_sklearn_n_estimators RF_SKLEARN_N_ESTIMATORS The number of trees in the forest. [default: 100] - -rf_sklearn_max_features RF_SKLEARN_MAX_FEATURES, --rf_sklearn_max_features RF_SKLEARN_MAX_FEATURES + -rf_sklearn_max_features, --rf_sklearn_max_features RF_SKLEARN_MAX_FEATURES The number of features to consider when looking for the best split: If int, then consider max_features features at each split. If float, max_features is a @@ -375,19 +363,19 @@ options: max_features=sqrt(n_features). If “log2”, then max_features=log2(n_features). If None or 1.0, then max_features=n_features. [default: 1.0] - -rf_sklearn_bootstrap RF_SKLEARN_BOOTSTRAP, --rf_sklearn_bootstrap RF_SKLEARN_BOOTSTRAP + -rf_sklearn_bootstrap, --rf_sklearn_bootstrap RF_SKLEARN_BOOTSTRAP Whether bootstrap samples are used when building trees. If False, the whole dataset is used to build each tree [default: True] - -rf_sklearn_verbose RF_SKLEARN_VERBOSE, --rf_sklearn_verbose RF_SKLEARN_VERBOSE + -rf_sklearn_verbose, --rf_sklearn_verbose RF_SKLEARN_VERBOSE Controls the verbosity when fitting and predicting. 
[default: 0] - -rf_sklearn_warm_start RF_SKLEARN_WARM_START, --rf_sklearn_warm_start RF_SKLEARN_WARM_START + -rf_sklearn_warm_start, --rf_sklearn_warm_start RF_SKLEARN_WARM_START When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest [default: False] - -rf_sklearn_max_samples RF_SKLEARN_MAX_SAMPLES, --rf_sklearn_max_samples RF_SKLEARN_MAX_SAMPLES + -rf_sklearn_max_samples, --rf_sklearn_max_samples RF_SKLEARN_MAX_SAMPLES If bootstrap is True, the number of samples to draw from X to train each base estimator. If None (default), then draw X.shape[0] samples. If int, then @@ -395,13 +383,13 @@ options: max(round(n_samples * max_samples), 1) samples. Thus, max_samples should be in the interval (0.0, 1.0]. [default: None] - -rf_sklearn_rand_state RF_SKLEARN_RANDOM_STATE, --rf_sklearn_random_state RF_SKLEARN_RANDOM_STATE + -rf_sklearn_rand_state, --rf_sklearn_random_state RF_SKLEARN_RANDOM_STATE Controls both the randomness of the bootstrapping of the samples used when building trees (if bootstrap=True) and the sampling of the features to consider when looking for the best split at each node (if max_features < n_features). [default: None] - -rf_sklearn_criterion RF_SKLEARN_CRITERION, --rf_sklearn_criterion RF_SKLEARN_CRITERION + -rf_sklearn_criterion, --rf_sklearn_criterion RF_SKLEARN_CRITERION The function to measure the quality of a split. Supported criteria are “squared_error” for the mean squared error, which is equal to variance reduction as @@ -415,35 +403,35 @@ options: Poisson deviance to find splits. Training using “absolute_error” is slower than when using “squared_error”. [default: squared_error] - -rf_sklearn_max_depth RF_SKLEARN_MAX_DEPTH, --rf_sklearn_max_depth RF_SKLEARN_MAX_DEPTH + -rf_sklearn_max_depth, --rf_sklearn_max_depth RF_SKLEARN_MAX_DEPTH The maximum depth of the tree. 
If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. [default: None] - -rf_sklearn_min_samples_split RF_SKLEARN_MIN_SAMPLES_SPLIT, --rf_sklearn_min_samples_split RF_SKLEARN_MIN_SAMPLES_SPLIT + -rf_sklearn_min_samples_split, --rf_sklearn_min_samples_split RF_SKLEARN_MIN_SAMPLES_SPLIT The minimum number of samples required to split an internal node.If int, then consider min_samples_split as the minimum number. If float, min_samples_split is a fraction and ceil(min_samples_split * n_samples) is the minimum number of samples for each split. [default: 2] - -rf_sklearn_min_samples_leaf RF_SKLEARN_MIN_SAMPLES_LEAF, --rf_sklearn_min_samples_leaf RF_SKLEARN_MIN_SAMPLES_LEAF + -rf_sklearn_min_samples_leaf, --rf_sklearn_min_samples_leaf RF_SKLEARN_MIN_SAMPLES_LEAF The minimum number of samples required to be at a leaf node. If int, then consider min_samples_leaf as the minimum number. If float, min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) is the minimum number of samples for each node. [default: 1] - -rf_sklearn_min_weight_fraction_leaf RF_SKLEARN_MIN_WEIGHT_FRACTION_LEAF, --rf_sklearn_min_weight_fraction_leaf RF_SKLEARN_MIN_WEIGHT_FRACTION_LEAF + -rf_sklearn_min_weight_fraction_leaf, --rf_sklearn_min_weight_fraction_leaf RF_SKLEARN_MIN_WEIGHT_FRACTION_LEAF The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. [default: 0.0] - -rf_sklearn_max_leaf_nodes RF_SKLEARN_MAX_LEAF_NODES, --rf_sklearn_max_leaf_nodes RF_SKLEARN_MAX_LEAF_NODES + -rf_sklearn_max_leaf_nodes, --rf_sklearn_max_leaf_nodes RF_SKLEARN_MAX_LEAF_NODES Grow a tree with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. 
If None then unlimited number of leaf nodes [default: None] - -rf_sklearn_min_impurity_decrease RF_SKLEARN_MIN_IMPURITY_DECREASE, --rf_sklearn_min_impurity_decrease RF_SKLEARN_MIN_IMPURITY_DECREASE + -rf_sklearn_min_impurity_decrease, --rf_sklearn_min_impurity_decrease RF_SKLEARN_MIN_IMPURITY_DECREASE A node will be split if this split induces a decrease of the impurity greater than or equal to this value N_t / N * (impurity - N_t_R / N_t * right_impurity - @@ -454,14 +442,14 @@ options: right child. N, N_t, N_t_R and N_t_L all refer to the weighted sum, if sample_weight is passed. [default: 0.0] - -rf_sklearn_ccp_alpha RF_SKLEARN_CCP_ALPHA, --rf_sklearn_ccp_alpha RF_SKLEARN_CCP_ALPHA + -rf_sklearn_ccp_alpha, --rf_sklearn_ccp_alpha RF_SKLEARN_CCP_ALPHA Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed. [default: 0.0] - -et_sklearn_n_estimators ET_SKLEARN_N_ESTIMATORS, --et_sklearn_n_estimators ET_SKLEARN_N_ESTIMATORS + -et_sklearn_n_estimators, --et_sklearn_n_estimators ET_SKLEARN_N_ESTIMATORS The number of trees in the forest. [default: 100] - -et_sklearn_max_features ET_SKLEARN_MAX_FEATURES, --et_sklearn_max_features ET_SKLEARN_MAX_FEATURES + -et_sklearn_max_features, --et_sklearn_max_features ET_SKLEARN_MAX_FEATURES The number of features to consider when looking for the best split: If int, then consider max_features features at each split. If float, max_features is a @@ -472,19 +460,19 @@ options: max_features=sqrt(n_features). If “log2”, then max_features=log2(n_features). If None, then max_features=n_features. [default: 1.0] - -et_sklearn_bootstrap ET_SKLEARN_BOOTSTRAP, --et_sklearn_bootstrap ET_SKLEARN_BOOTSTRAP + -et_sklearn_bootstrap, --et_sklearn_bootstrap ET_SKLEARN_BOOTSTRAP Whether bootstrap samples are used when building trees. 
If False, the whole dataset is used to build each tree [default: True] - -et_sklearn_verbose ET_SKLEARN_VERBOSE, --et_sklearn_verbose ET_SKLEARN_VERBOSE + -et_sklearn_verbose, --et_sklearn_verbose ET_SKLEARN_VERBOSE Controls the verbosity when fitting and predicting. [default: 0] - -et_sklearn_warm_start ET_SKLEARN_WARM_START, --et_sklearn_warm_start ET_SKLEARN_WARM_START + -et_sklearn_warm_start, --et_sklearn_warm_start ET_SKLEARN_WARM_START When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest [default: False] - -et_sklearn_max_samples ET_SKLEARN_MAX_SAMPLES, --et_sklearn_max_samples ET_SKLEARN_MAX_SAMPLES + -et_sklearn_max_samples, --et_sklearn_max_samples ET_SKLEARN_MAX_SAMPLES If bootstrap is True, the number of samples to draw from X to train each base estimator. If None (default), then draw X.shape[0] samples. If int, then @@ -492,13 +480,13 @@ options: max(round(n_samples * max_samples), 1) samples. Thus, max_samples should be in the interval (0.0, 1.0]. [default: None] - -et_sklearn_rand_state ET_SKLEARN_RANDOM_STATE, --et_sklearn_random_state ET_SKLEARN_RANDOM_STATE + -et_sklearn_rand_state, --et_sklearn_random_state ET_SKLEARN_RANDOM_STATE Used to pick randomly the max_features used at each split. Note that the mere presence of random_state doesn’t mean that randomization is always used, as it may be dependent on another parameter, e.g. shuffle, being set. [default: None] - -et_sklearn_criterion ET_SKLEARN_CRITERION, --et_sklearn_criterion ET_SKLEARN_CRITERION + -et_sklearn_criterion, --et_sklearn_criterion ET_SKLEARN_CRITERION The function to measure the quality of a split. Supported criteria are “squared_error” for the mean squared error, which is equal to variance reduction as @@ -512,35 +500,35 @@ options: Poisson deviance to find splits. Training using “absolute_error” is slower than when using “squared_error”. 
[default: squared_error] - -et_sklearn_max_depth ET_SKLEARN_MAX_DEPTH, --et_sklearn_max_depth ET_SKLEARN_MAX_DEPTH + -et_sklearn_max_depth, --et_sklearn_max_depth ET_SKLEARN_MAX_DEPTH The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. [default: None] - -et_sklearn_min_samples_split ET_SKLEARN_MIN_SAMPLES_SPLIT, --et_sklearn_min_samples_split ET_SKLEARN_MIN_SAMPLES_SPLIT + -et_sklearn_min_samples_split, --et_sklearn_min_samples_split ET_SKLEARN_MIN_SAMPLES_SPLIT The minimum number of samples required to split an internal node.If int, then consider min_samples_split as the minimum number. If float, min_samples_split is a fraction and ceil(min_samples_split * n_samples) is the minimum number of samples for each split. [default: 2] - -et_sklearn_min_samples_leaf ET_SKLEARN_MIN_SAMPLES_LEAF, --et_sklearn_min_samples_leaf ET_SKLEARN_MIN_SAMPLES_LEAF + -et_sklearn_min_samples_leaf, --et_sklearn_min_samples_leaf ET_SKLEARN_MIN_SAMPLES_LEAF The minimum number of samples required to be at a leaf node. If int, then consider min_samples_leaf as the minimum number. If float, min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) is the minimum number of samples for each node. [default: 1] - -et_sklearn_min_weight_fraction_leaf ET_SKLEARN_MIN_WEIGHT_FRACTION_LEAF, --et_sklearn_min_weight_fraction_leaf ET_SKLEARN_MIN_WEIGHT_FRACTION_LEAF + -et_sklearn_min_weight_fraction_leaf, --et_sklearn_min_weight_fraction_leaf ET_SKLEARN_MIN_WEIGHT_FRACTION_LEAF The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. 
[default: 0.0] - -et_sklearn_max_leaf_nodes ET_SKLEARN_MAX_LEAF_NODES, --et_sklearn_max_leaf_nodes ET_SKLEARN_MAX_LEAF_NODES + -et_sklearn_max_leaf_nodes, --et_sklearn_max_leaf_nodes ET_SKLEARN_MAX_LEAF_NODES Grow a tree with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes [default: None] - -et_sklearn_min_impurity_decrease ET_SKLEARN_MIN_IMPURITY_DECREASE, --et_sklearn_min_impurity_decrease ET_SKLEARN_MIN_IMPURITY_DECREASE + -et_sklearn_min_impurity_decrease, --et_sklearn_min_impurity_decrease ET_SKLEARN_MIN_IMPURITY_DECREASE A node will be split if this split induces a decrease of the impurity greater than or equal to this value N_t / N * (impurity - N_t_R / N_t * right_impurity - @@ -551,100 +539,58 @@ options: right child. N, N_t, N_t_R and N_t_L all refer to the weighted sum, if sample_weight is passed. [default: 0.0] - -et_sklearn_ccp_alpha ET_SKLEARN_CCP_ALPHA, --et_sklearn_ccp_alpha ET_SKLEARN_CCP_ALPHA + -et_sklearn_ccp_alpha, --et_sklearn_ccp_alpha ET_SKLEARN_CCP_ALPHA Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed. [default: 0.0] - -setup_caret_session_id SETUP_CARET_SESSION_ID, --setup_caret_session_id SETUP_CARET_SESSION_ID - Controls the randomness of experiment. It is - equivalent to ‘random_state’ in scikit-learn. When - None, a pseudo random number is generated. This can be - used for later reproducibility of the entire - experiment [default: None] - -setup_caret_fold SETUP_CARET_FOLD, --setup_caret_fold SETUP_CARET_FOLD - Controls cross-validation. If None, the CV generator - in the fold_strategy parameter of the setup function - is used. When an integer is passed, it is interpreted - as the ‘n_splits’ parameter of the CV generator in the - setup function. 
[default: 0] - -setup_caret_data_split_shuffle SETUP_CARET_DATA_SPLIT_SHUFFLE, --setup_caret_data_split_shuffle SETUP_CARET_DATA_SPLIT_SHUFFLE - When set to False, prevents shuffling of rows during - ‘train_test_split’. [default: True] - -setup_caret_verbose SETUP_CARET_VERBOSE, --setup_caret_verbose SETUP_CARET_VERBOSE - When set to False, Information grid is not printed. - [default: True] - -model_caret_cross_validation MODEL_CARET_CROSS_VALIDATION, --model_caret_cross_validation MODEL_CARET_CROSS_VALIDATION - When set to False, metrics are evaluated on holdout - set. fold param is ignored when cross_validation is - set to False. [default: True] - -model_caret_verbose MODEL_CARET_VERBOSE, --model_caret_verbose MODEL_CARET_VERBOSE - Score grid is not printed when verbose is set to - False. [default: True] - -model_caret_return_train_score MODEL_CARET_RETURN_TRAIN_SCORE, --model_caret_return_train_score MODEL_CARET_RETURN_TRAIN_SCORE - If False, returns the CV Validation scores only. If - True, returns the CV training scores along with the CV - validation scores. This is useful when the user wants - to do bias-variance tradeoff. A high CV training score - with a low corresponding CV validation score indicates - overfitting. [default: False] - -tuner_caret_search_algo TUNER_CARET_SEARCH_ALGORITHM, --tuner_caret_search_algorithm TUNER_CARET_SEARCH_ALGORITHM - The search algorithm depends on the search_library - parameter. If None, will use search library-specific - default algorithm. Other possible values are ‘random’ - : random grid search (default) and ‘grid’ : grid - search [default: random] - -tuner_caret_tuner_verbose TUNER_CARET_TUNER_VERBOSE, --tuner_caret_tuner_verbose TUNER_CARET_TUNER_VERBOSE - If True or above 0, will print messages from the - tuner. Ignored when verbose param is False. 
[default: - True] - -resp RESPONSE, --response RESPONSE + -resp, --response RESPONSE Names of response variables, must be provided [default None] - -feat FEATURES, --features FEATURES + -feat, --features FEATURES Names of input features (can be computed from data) [default None] - -keep_feat KEEP_FEATURES, --keep_features KEEP_FEATURES + -keep_feat, --keep_features KEEP_FEATURES Names of input features that should be used in model training: feature selection or other heuristics for selecting features that will be used in model training cannot drop these input features [default []] - -new_data NEW_DATA, --new_data NEW_DATA + -new_data, --new_data NEW_DATA Path excluding the .csv suffix to new data file [default: None] - -data_scaler DATA_SCALER, --data_scaler DATA_SCALER + -data_scaler, --data_scaler DATA_SCALER Should features and responses be scaled and with which scaling optionton? Value "none" implies no scaling; the only other supported option in "min_max" scaler [default: min_max] - -scale_feat SCALE_FEATURES, --scale_features SCALE_FEATURES + -scale_feat, --scale_features SCALE_FEATURES Should features be scaled using scaler specified through option "data_scaler"? [default: True] - -scale_resp SCALE_RESPONSES, --scale_responses SCALE_RESPONSES + -scale_resp, --scale_responses SCALE_RESPONSES Should responses be scaled using scaler specified through option "data_scaler"? [default: True] - -impute_resp IMPUTE_RESPONSES, --impute_responses IMPUTE_RESPONSES + -impute_resp, --impute_responses IMPUTE_RESPONSES Should missing values in responses be imputed? 
Might make sense when there are multiple responses and different responses have missing values in different samples: this might be a better alternative compared to dropping rows where at least one response has a missing value [default: False] - -split SPLIT_TEST, --split_test SPLIT_TEST + -split, --split_test SPLIT_TEST Fraction in (0,1] of data samples to split from training data for testing; when the option value is 1,the dataset will be used both for training and testing [default: 0.2] - -train_rand TRAIN_RANDOM_N, --train_random_n TRAIN_RANDOM_N + -train_rand, --train_random_n TRAIN_RANDOM_N Subset random n rows from training data to use for training [default: 0] - -train_first TRAIN_FIRST_N, --train_first_n TRAIN_FIRST_N + -train_first, --train_first_n TRAIN_FIRST_N Subset first n rows from training data to use for training [default: 0] - -train_unif TRAIN_UNIFORM_N, --train_uniform_n TRAIN_UNIFORM_N + -train_unif, --train_uniform_n TRAIN_UNIFORM_N Subset random n rows from training data with close to uniform distribution to use for training [default: 0] - -sw_coef SAMPLE_WEIGHTS_COEF, --sample_weights_coef SAMPLE_WEIGHTS_COEF + -sw_coef, --sample_weights_coef SAMPLE_WEIGHTS_COEF Coefficient in range ]-1, 1[ to compute sample weights for model training; weights are defined as [sw_coef * (v - mid_range) + 1 for v in resp_vals] where @@ -658,7 +604,7 @@ options: max(resp_vals) and weight=0.8 to samples with min(resp_vals), and sw_coef = 0 implies weight=1 for each sample [default: 0] - -sw_exp SAMPLE_WEIGHTS_EXPONENT, --sample_weights_exponent SAMPLE_WEIGHTS_EXPONENT + -sw_exp, --sample_weights_exponent SAMPLE_WEIGHTS_EXPONENT The Exponent to compute sample weights for model training; weights are defined as [sw_int + sw_coef *((v - mn)/(mx-mn))**sw_exp for v in resp_vals ] where @@ -667,7 +613,7 @@ options: are respectively the min and max of resp_vals. 
The value of sw_coef is chosen non-negative to make sure all weights are non-negative [default: 0] - -sw_int SAMPLE_WEIGHTS_INTERCEPT, --sample_weights_intercept SAMPLE_WEIGHTS_INTERCEPT + -sw_int, --sample_weights_intercept SAMPLE_WEIGHTS_INTERCEPT The intercept to compute sample weights for model training; weights are defined as [sw_int + sw_coef *((v - mn)/(mx-mn))**sw_exp for v in resp_vals ] where @@ -676,7 +622,7 @@ options: are respectively the min and max of resp_vals. The value of sw_coef is chosen non-negative to make sure all weights are non-negative [default: 0] - -respmap RESPONSE_MAP, --response_map RESPONSE_MAP + -respmap, --response_map RESPONSE_MAP Python expression with just one variable x to be applied as lambda function to the values of each of the responses, as part of preprocessing. This @@ -685,7 +631,7 @@ options: responsibility to ensure these two transformations in the described order achieve the right transformation of the response columns [default: None] - -resp2b RESPONSE_TO_BOOL, --response_to_bool RESPONSE_TO_BOOL + -resp2b, --response_to_bool RESPONSE_TO_BOOL Semicolon seperated list of conditions to be applied to the responses in the order the responses are specified, to convert them into binary responses as @@ -695,77 +641,76 @@ options: response that has value 1 for each data sample (row) where resp1 is greater than 5 and value 0 for the remaining samples [default: None] - -pos_val POSITIVE_VALUE, --positive_value POSITIVE_VALUE + -pos_val, --positive_value POSITIVE_VALUE Value that represents positive values in a binary categorical response in the original input data (before any data processing has been applied) [default: 1] - -neg_val NEGATIVE_VALUE, --negative_value NEGATIVE_VALUE + -neg_val, --negative_value NEGATIVE_VALUE Value that represents negative values in a binary categorical response in the original input data (before any data processing has been applied) [default: 0] - -resp_plots RESPONSE_PLOTS, --response_plots 
RESPONSE_PLOTS + -resp_plots, --response_plots RESPONSE_PLOTS Should response value distribution plots be genrated during data processing? A related option interactive_plots controls whether the generated plots should be displayed interactively during runtime [default: True] - -mrmr_pred MRMR_FEAT_COUNT_FOR_PREDICTION, --mrmr_feat_count_for_prediction MRMR_FEAT_COUNT_FOR_PREDICTION + -mrmr_pred, --mrmr_feat_count_for_prediction MRMR_FEAT_COUNT_FOR_PREDICTION Count of features selected by MRMR algorithm for predictive models [default: 15] - -mrmr_corr MRMR_FEAT_COUNT_FOR_CORRELATION, --mrmr_feat_count_for_correlation MRMR_FEAT_COUNT_FOR_CORRELATION + -mrmr_corr, --mrmr_feat_count_for_correlation MRMR_FEAT_COUNT_FOR_CORRELATION Count of features selected by MRMR algorithm for correlation analysis [default: 15] - -data LABELED_DATA, --labeled_data LABELED_DATA + -data, --labeled_data LABELED_DATA Path, possibly excluding the .csv, or including gz or bz2 suffix, to input training data file containing labels [default None] - -mode ANALYTICS_MODE, --analytics_mode ANALYTICS_MODE + -mode, --analytics_mode ANALYTICS_MODE What kind of analysis should be performed; the supported modes are: "train", "predict", "subgroups", "doe", "discretize", "optimize", "verify", "query", "optsyn" [default: None] - -plots INTERACTIVE_PLOTS, --interactive_plots INTERACTIVE_PLOTS + -plots, --interactive_plots INTERACTIVE_PLOTS Should plots be displayed interactively (or only saved)?[default: True] - -seed SEED, --seed SEED - Initial random seed [default None] - -pref LOG_FILES_PREFIX, --log_files_prefix LOG_FILES_PREFIX + -seed, --seed SEED Initial random seed [default None] + -pref, --log_files_prefix LOG_FILES_PREFIX String to be used as prefix for the output files [default: None] - -out_dir OUTPUT_DIRECTORY, --output_directory OUTPUT_DIRECTORY + -out_dir, --output_directory OUTPUT_DIRECTORY Output directory where all reports and output files will be written [default: the same directory 
from which data is loaded] - -save_config SAVE_CONFIGURATION, --save_configuration SAVE_CONFIGURATION + -save_config, --save_configuration SAVE_CONFIGURATION Should tool run parameters be saved into a a configuration file? [default: False] - -config LOAD_CONFIGURATION, --load_configuration LOAD_CONFIGURATION + -config, --load_configuration LOAD_CONFIGURATION Json config file name, to load tool parameter values from, or None. Paramters specified through command line will override the correponding config file values if they are specified there as well [default: None] - -log_level LOG_LEVEL, --log_level LOG_LEVEL + -log_level, --log_level LOG_LEVEL The logger level or severity of the events they are used to track. The standard levels are (in increasing order of severity): notset, debug, info, warning, error, critical; only events of this level and above will be tracked [default warning] - -log_mode LOG_MODE, --log_mode LOG_MODE + -log_mode, --log_mode LOG_MODE The logger filemode for logging into log file [default w] - -log_time LOG_TIME, --log_time LOG_TIME + -log_time, --log_time LOG_TIME Should time stamp be logged along with every message issued by logger [default true] - -doe_algo DOE_ALGO, --doe_algo DOE_ALGO + -doe_algo, --doe_algo DOE_ALGO Design of experiment (DOE) algorithm from doepy package. The supported algorithms are: "full_factorial, fractional_factorial, plackett_burman, sukharev_grid, box_behnken, box_wilson, latin_hypercube, latin_hypercube_sf, halton_sequence, uniform_random_matrix" - -doe_factor_level_ranges DOE_FACTOR_LEVEL_RANGES, --doe_factor_level_ranges DOE_FACTOR_LEVEL_RANGES + -doe_factor_level_ranges, --doe_factor_level_ranges DOE_FACTOR_LEVEL_RANGES A dictionary of levels per feature for building experiments for all supported DOE algorithms. Here experiments are lists feature-value assignments @@ -781,10 +726,10 @@ options: 350],"Flow rate":[0.9,1.0]}. 
DOE algorithms that work with two levels only treat these levels as the min and max of the rage of a numeric variable. [default: None] - -doe_samples DOE_NUM_SAMPLES, --doe_num_samples DOE_NUM_SAMPLES + -doe_samples, --doe_num_samples DOE_NUM_SAMPLES Number of samples (experiments) to be generated [default: None] - -doe_resolution DOE_DESIGN_RESOLUTION, --doe_design_resolution DOE_DESIGN_RESOLUTION + -doe_resolution, --doe_design_resolution DOE_DESIGN_RESOLUTION Desired design resolution. The resolution of a design is defined as the length of the shortest word in the defining relation. The resolution describes the level @@ -806,23 +751,23 @@ options: factor interactions. Three-factor interactions may be confounded with each other. [default: Half of the total feature count in doe_factor_level_ranges] - -doe_spec DOE_SPEC_FILE, --doe_spec_file DOE_SPEC_FILE + -doe_spec, --doe_spec_file DOE_SPEC_FILE File in csv format that specifies factor, level ranges used for building design of experiment (DOE) samples using function sample_doepy(). If not provided, a dictionary of factor / level ranges must be supplied to sample_doepy() directly instead of the file. - -doe_bb_centers DOE_BOX_BEHNKEN_CENTERS, --doe_box_behnken_centers DOE_BOX_BEHNKEN_CENTERS + -doe_bb_centers, --doe_box_behnken_centers DOE_BOX_BEHNKEN_CENTERS Number of center points to include in the final design [default: 1] - -doe_cc_center DOE_CENTRAL_COMPOSITE_CENTER, --doe_central_composite_center DOE_CENTRAL_COMPOSITE_CENTER + -doe_cc_center, --doe_central_composite_center DOE_CENTRAL_COMPOSITE_CENTER A 1-by-2 array of integers, the number of center points in each block of the design. [default] - -doe_cc_alpha DOE_CENTRAL_COMPOSITE_ALPHA, --doe_central_composite_alpha DOE_CENTRAL_COMPOSITE_ALPHA + -doe_cc_alpha, --doe_central_composite_alpha DOE_CENTRAL_COMPOSITE_ALPHA A string describing the effect of alpha has on the variance. 
"alpha" can take on two values: "orthogonal" or "o", and "rotatable" or "r" [default o] - -doe_cc_face DOE_CENTRAL_COMPOSITE_FACE, --doe_central_composite_face DOE_CENTRAL_COMPOSITE_FACE + -doe_cc_face, --doe_central_composite_face DOE_CENTRAL_COMPOSITE_FACE The relation between the start points and the corner (factorial) points. There are three options for this input: 1. "circumscribed" or "ccc": This is the @@ -850,12 +795,12 @@ options: factor. Augmenting an existing factorial or resolution V design with appropriate star points can also produce this design. [default 2,2] - -doe_prob_distr DOE_PROB_DISTRIBUTION, --doe_prob_distribution DOE_PROB_DISTRIBUTION + -doe_prob_distr, --doe_prob_distribution DOE_PROB_DISTRIBUTION Analytical probability distribution to be applied over the randomized sampling. Takes strings: "Normal", "Poisson", "Exponential", "Beta", "Gamma" [default Normal] - -discr_algo DISCRETIZATION_ALGO, --discretization_algo DISCRETIZATION_ALGO + -discr_algo, --discretization_algo DISCRETIZATION_ALGO Discretization algorithm to use. The possible options are: * "uniform": constracts constant-width bins; * "quantile": uses the quantiles values to have equally @@ -869,15 +814,15 @@ options: feature. 
* "ranks": converts the feature values into ranks (ranks used in Spearman's rank correlation) [default uniform] - -discr_bins DISCRETIZATION_BINS, --discretization_bins DISCRETIZATION_BINS + -discr_bins, --discretization_bins DISCRETIZATION_BINS Number of required bins in a discretization algorithm [default 10] - -discr_labels DISCRETIZATION_LABELS, --discretization_labels DISCRETIZATION_LABELS + -discr_labels, --discretization_labels DISCRETIZATION_LABELS If true, string labels (e.g., "Bin2") will be used to denote levels of the categorical feature resulting from discretization; othewise integers (e.g., 2) will be used to represent the levels [default True] - -discr_type DISCRETIZATION_TYPE, --discretization_type DISCRETIZATION_TYPE + -discr_type, --discretization_type DISCRETIZATION_TYPE The type of the categorical feature resulting from discretization. Possible values are: * "object": the feature will be of type "object" -- with strings as @@ -891,7 +836,7 @@ options: of type int, its values will be the resulting bin numbers when enumerating the bins from left to right. [default category] - -mi_method MUTUAL_INFORMATION_METHOD, --mutual_information_method MUTUAL_INFORMATION_METHOD + -mi_method, --mutual_information_method MUTUAL_INFORMATION_METHOD The mutual information method to be used when computing feature correlation scores with responses. 
Supported options are "shannon", "normalized", and @@ -905,16 +850,16 @@ options: computing mutual information for (preferably) normally distributed continuous random variables [default: normalized] - -corr_and_mi CORRELATIONS_AND_MUTUAL_INFORMATION, --correlations_and_mutual_information CORRELATIONS_AND_MUTUAL_INFORMATION + -corr_and_mi, --correlations_and_mutual_information CORRELATIONS_AND_MUTUAL_INFORMATION Should correlation and mutual information between the features and the response(s) be computed when computing scores for feature selection and ranking [default: True] - -discret_num DISCRETIZE_NUMERIC_FEATURES, --discretize_numeric_features DISCRETIZE_NUMERIC_FEATURES + -discret_num, --discretize_numeric_features DISCRETIZE_NUMERIC_FEATURES The mutual information method to be used for discretizing numeric features, when computing feature correlation scores with responses [default: None] - -cont_est CONTINUOUS_CORRELATION_ESTIMATORS, --continuous_correlation_estimators CONTINUOUS_CORRELATION_ESTIMATORS + -cont_est, --continuous_correlation_estimators CONTINUOUS_CORRELATION_ESTIMATORS Correlation estimators for continuous features, to be used in correlation, mutual information and MRMR feature selection algorithms. The options are pearson, @@ -923,58 +868,56 @@ options: addition, the value "all" indicates that all the options should be used and value "none" indicates that no options should be used. [default: pearson,spearman] - -psg_quality PSG_QUALITY_TARGET, --psg_quality_target PSG_QUALITY_TARGET + -psg_quality, --psg_quality_target PSG_QUALITY_TARGET Quality function (quality target/measure) used for defining the importance criterion for range selction. 
The supported options (both for numeric as well as binary responses) are {} and {} [default Lift] - -psg_dim PSG_MAX_DIMENSION, --psg_max_dimension PSG_MAX_DIMENSION + -psg_dim, --psg_max_dimension PSG_MAX_DIMENSION Maximal dimension of selected range tuples (feature- range tuples) [default 3] - -psg_top PSG_TOP_RANKED, --psg_top_ranked PSG_TOP_RANKED + -psg_top, --psg_top_ranked PSG_TOP_RANKED Required count of selected range tuples (feature-range tuples) [default 15] - -spec SPEC, --spec SPEC - Name of spec file including full path, must be + -spec, --spec SPEC Name of spec file including full path, must be provided [default None] - -delta_rel DELTA_RELATIVE, --delta_relative DELTA_RELATIVE + -delta_rel, --delta_relative DELTA_RELATIVE exclude (1+DELTA)*radius region for non-grid components [default: 0.01] - -delta_abs DELTA_ABSOLUTE, --delta_absolute DELTA_ABSOLUTE + -delta_abs, --delta_absolute DELTA_ABSOLUTE exclude (1+DELTA)*radius region for non-grid components [default: 0.0] - -rad_rel RADIUS_RELATIVE, --radius_relative RADIUS_RELATIVE + -rad_rel, --radius_relative RADIUS_RELATIVE Relative radius, in terms of percentage of the value of the knob to which it applies to compute the absolute radius to be used in theta (stability) constraint. Overrides relative radius value specified in the spec file [default: None] - -rad_abs RADIUS_ABSOLUTE, --radius_absolute RADIUS_ABSOLUTE + -rad_abs, --radius_absolute RADIUS_ABSOLUTE Absolute value of radius to be used in theta (stability) constraint. 
Override relative radius value specified in the spec file [default: None] - -alpha ALPHA, --alpha ALPHA + -alpha, --alpha ALPHA constraints on model inputs (free inputs or configuration knobs) [default: None] - -beta BETA, --beta BETA - constraints on model outputs, relevant for "optimize" + -beta, --beta BETA constraints on model outputs, relevant for "optimize" mode only (when selecting model configuration that are safe and near-optimal) [default: None] - -eta ETA, --eta ETA global constraints on/accross knobs that define legal + -eta, --eta ETA global constraints on/accross knobs that define legal configurations of knobs during search for optimal configurations in "optimize" and "optsyn" modes [default: None] - -compress_rules COMPRESS_RULES, --compress_rules COMPRESS_RULES + -compress_rules, --compress_rules COMPRESS_RULES Should rules that represent tree branches be compressed to eliminate redundant repeated splitting of ranges of model features after training tree based models, in order to build smaller model terms? [default True] - -simplify_terms SIMPLIFY_TERMS, --simplify_terms SIMPLIFY_TERMS + -simplify_terms, --simplify_terms SIMPLIFY_TERMS Should terms be simplified using before building solver instance in model exploration modes? [default False] - -tree_encoding TREE_ENCODING, --tree_encoding TREE_ENCODING + -tree_encoding, --tree_encoding TREE_ENCODING Method to encode tree model to solvers. Can be "flat", "nested", or "branched". The flat encoding creates a formula from each branch of a tree, while the nested @@ -983,7 +926,7 @@ options: also uses ite expressions and in addition the branch conditions in ite expressions are shared across all responses [default nested] - -nnet_encoding NNET_ENCODING, --nnet_encoding NNET_ENCODING + -nnet_encoding, --nnet_encoding NNET_ENCODING Method to encode Keras Neural Nets model to solvers. Can be "layered" or "nested". 
The layered encoding creates a formula from each internal node of the NN @@ -991,82 +934,82 @@ options: nested encoding builds a monolithic term for each response representing the function for that response [default nested] - -trace_runtime TRACE_RUNTIME, --trace_runtime TRACE_RUNTIME + -trace_runtime, --trace_runtime TRACE_RUNTIME Should trace include solver runtimes and what precision to use in terms of number of decimal points after 0; the option value 0 means to not include the runtimes in the trace [default 0] - -trace_prec TRACE_PRECISION, --trace_precision TRACE_PRECISION + -trace_prec, --trace_precision TRACE_PRECISION Decimals after 0 to use when rounding fractions; option value 0 means to use fractions (implying no rounding) [default: 0] - -trace_anonym TRACE_ANONYMIZE, --trace_anonymize TRACE_ANONYMIZE + -trace_anonym, --trace_anonymize TRACE_ANONYMIZE Should anonymized names of system inputs, knobs and outputs be uses in trace log file?[default: False] - -quer_names QUERY_NAMES, --query_names QUERY_NAMES + -quer_names, --query_names QUERY_NAMES Names of optimization objectives [default None] - -quer_exprs QUERY_EXPRESSIONS, --query_expressions QUERY_EXPRESSIONS + -quer_exprs, --query_expressions QUERY_EXPRESSIONS Semicolon seperated list of expressions (functions) to be applied to the responses to convert them into optimization objectives [default: None] - -lemma_prec LEMMA_PRECISION, --lemma_precision LEMMA_PRECISION + -lemma_prec, --lemma_precision LEMMA_PRECISION Number of decimals after zero to use when approximating lemmas in model exploration modes. 
The default value 0 means that lemmas should not be approximated (full precision should be used [default: 0] - -asrt_names ASSERTIONS_NAMES, --assertions_names ASSERTIONS_NAMES + -asrt_names, --assertions_names ASSERTIONS_NAMES Names of optimization objectives [default None] - -asrt_exprs ASSERTIONS_EXPRESSIONS, --assertions_expressions ASSERTIONS_EXPRESSIONS + -asrt_exprs, --assertions_expressions ASSERTIONS_EXPRESSIONS Semicolon seperated list of expressions (functions) to be applied to the responses to convert them into optimization objectives [default: None] - -epsilon EPSILON, --epsilon EPSILON + -epsilon, --epsilon EPSILON ratio of the length of an estimated range of an objective, computed per objective based on its estimated min and max bounds [default: 0.05] - -center_offset CENTER_OFFSET, --center_offset CENTER_OFFSET + -center_offset, --center_offset CENTER_OFFSET Center threshold offset of threshold [default: 0] - -objv_names OBJECTIVES_NAMES, --objectives_names OBJECTIVES_NAMES + -objv_names, --objectives_names OBJECTIVES_NAMES Names of optimization objectives [default None] - -objv_exprs OBJECTIVES_EXPRESSIONS, --objectives_expressions OBJECTIVES_EXPRESSIONS + -objv_exprs, --objectives_expressions OBJECTIVES_EXPRESSIONS Semicolon seperated list of expressions (functions) to be applied to the responses to convert them into optimization objectives [default: None] - -scale_objv SCALE_OBJECTIVES, --scale_objectives SCALE_OBJECTIVES + -scale_objv, --scale_objectives SCALE_OBJECTIVES Should optimization objectives be scaled using scaler specified through option "data_scaler"? [default: True] - -pareto OPTIMIZE_PARETO, --optimize_pareto OPTIMIZE_PARETO + -pareto, --optimize_pareto OPTIMIZE_PARETO Should optimization be per objective (even if there are multiple objectives) or pareto optimization must be performed? 
[default: True] - -frac_aprox APPROXIMATE_FRACTIONS, --approximate_fractions APPROXIMATE_FRACTIONS + -frac_aprox, --approximate_fractions APPROXIMATE_FRACTIONS Should fraction values form satisfying assignments be converted to approximate reals? [default: True] - -frac_prec FRACTION_PRECISION, --fraction_precision FRACTION_PRECISION + -frac_prec, --fraction_precision FRACTION_PRECISION Decimal precision when approximating fractions by reals [default 64] - -vacuity VACUITY_CHECK, --vacuity_check VACUITY_CHECK + -vacuity, --vacuity_check VACUITY_CHECK Should solver problem instance vacuity check be performed? Vacuity checks whether the constraints are consistent and therefore at least one satisfiable assignment exist to solver constraints. Relevant in "verify", "query", "optimize" and "optsyn" modes [default: True] - -opt_strategy OPTIMIZATION_STRATEGY, --optimization_strategy OPTIMIZATION_STRATEGY + -opt_strategy, --optimization_strategy OPTIMIZATION_STRATEGY Strategy (algorithm) to use for single objective optimization in the "optimize" and "optsyn" modes. Supported options are "lazy" and "eager" [default eager] - -solver SOLVER, --solver SOLVER + -solver, --solver SOLVER Solver to use in model exploration modes "verify," "query", "optimize" and "optsyn". [default: z3] - -solver_path SOLVER_PATH, --solver_path SOLVER_PATH + -solver_path, --solver_path SOLVER_PATH Path to solver to use in model exploration modes "verify," "query", "optimize" and "optsyn". [default: None] - -solver_logic SOLVER_LOGIC, --solver_logic SOLVER_LOGIC + -solver_logic, --solver_logic SOLVER_LOGIC SMT2-lib theory with respect to which to solve model exploration task at hand, in modes "verify," "query", "optimize" and "optsyn". 
[default: ALL] diff --git a/bin/test_install b/bin/test_install index eb244d24..3ae4a4d2 120000 --- a/bin/test_install +++ b/bin/test_install @@ -1 +1 @@ -../docker/python3.11/run_installation_test \ No newline at end of file +../docker/python3.13/run_installation_test \ No newline at end of file diff --git a/dist/smlp-0.1.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl b/dist/smlp-0.1.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl new file mode 100644 index 00000000..861a1e18 Binary files /dev/null and b/dist/smlp-0.1.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl differ diff --git a/dist/smlp-0.1.0-cp313-cp313-manylinux_2_34_x86_64.whl b/dist/smlp-0.1.0-cp313-cp313-manylinux_2_34_x86_64.whl new file mode 100644 index 00000000..b18c79ff Binary files /dev/null and b/dist/smlp-0.1.0-cp313-cp313-manylinux_2_34_x86_64.whl differ diff --git a/dist/smlp-0.1.0-cp313-cp313-manylinux_2_39_x86_64.whl b/dist/smlp-0.1.0-cp313-cp313-manylinux_2_39_x86_64.whl new file mode 100644 index 00000000..71540066 Binary files /dev/null and b/dist/smlp-0.1.0-cp313-cp313-manylinux_2_39_x86_64.whl differ diff --git a/docker/python3.13/Dockerfile b/docker/python3.13/Dockerfile new file mode 100644 index 00000000..d704742d --- /dev/null +++ b/docker/python3.13/Dockerfile @@ -0,0 +1,142 @@ +# Use ubuntu 24.04 as an image +FROM ubuntu:24.04 + +ENV DEBIAN_FRONTEND=noninteractive + +# Set environment variables +ENV PYTHONUNBUFFERED=1 \ + PYTHONDONTWRITEBYTECODE=1 \ + PIP_NO_CACHE_DIR=1 \ + PIP_DISABLE_PIP_VERSION_CHECK=1 + +# Layer 1: base tools +RUN apt-get update && apt-get install -y \ + wget \ + vim \ + git \ + jq \ + tcsh \ + tzdata \ + locales \ + && apt-get update \ + && rm -rf /var/lib/apt/lists/* + +# Layer 2: Python 3.13 +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + gnupg \ + curl \ + ca-certificates && \ + curl -fsSL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xF23C5A6CF475977595C89F51BA6932366A755776" 
\ + | gpg --dearmor -o /etc/apt/trusted.gpg.d/deadsnakes.gpg && \ + echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu noble main" \ + > /etc/apt/sources.list.d/deadsnakes.list && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + python3.13 \ + python3.13-dev \ + python3.13-tk + +# Layer 3: build tools +RUN apt-get update && apt-get install -y \ + gcc \ + g++ \ + make \ + ninja-build \ + pkg-config \ + libgmp-dev \ + && rm -rf /var/lib/apt/lists/* + +# Layer 4: Z3 and Boost +RUN apt-get update && apt-get install -y \ + python3-z3 \ + z3 \ + libz3-dev \ + libboost-python-dev \ + && rm -rf /var/lib/apt/lists/* + +# Layer 5: X11 and GUI tools +RUN apt-get update && apt-get install -y \ + tk \ + tkcvs \ + libx11-6 \ + libxext6 \ + libxrender1 \ + libxtst6 \ + libxi6 \ + x11-apps \ + x11-xserver-utils \ + x11vnc \ + xvfb \ + vim-gtk3 \ + libcanberra-gtk-module \ + libcanberra-gtk3-module \ + autocutsel \ + && rm -rf /var/lib/apt/lists/* + +# Set working directory +WORKDIR /app + +# Copy requirements file +COPY requirements.txt . + +# Copy the build script +COPY run_python_boost_build . + +# Make script executable +RUN chmod +x run_python_boost_build +RUN ./run_python_boost_build && rm -rf /tmp/boost* /root/boost* + +# Install pip +RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.13 + +# Install Python packages including meson +RUN python3.13 -m pip install --ignore-installed --no-cache-dir -r requirements.txt + +#Copy pycaret +COPY pycaret_313.tar.gz . + +RUN pip install --no-cache-dir pycaret_313.tar.gz && pip install numpy==2.4.1 && rm -f pycaret_313.tar.gz + +ARG CACHE_BUST_SMLP + +#Change python default version +RUN ln -sf /usr/bin/python3.13 /usr/bin/python3 +RUN ln -sf /usr/bin/python3.13 /usr/bin/python + +# Copy the build script +COPY run_meson_build . 
+# Make script executable +RUN chmod +x run_meson_build +RUN ./run_meson_build + +#Copy z3 python package +RUN cp -rp /usr/lib/python3/dist-packages/z3 /usr/local/lib/python3.13/dist-packages + +#Workaround for TkAgg issue +COPY tkagg_patch.sh . +RUN chmod +x tkagg_patch.sh +RUN ./tkagg_patch.sh + +#Mathsat +COPY run_mathsat_build . +RUN chmod +x run_mathsat_build +RUN ./run_mathsat_build && rm -rf /tmp/mathsat* + +#tkdiff patch for https://bugs.launchpad.net/bugs/2139062 +COPY run_tkdiff_patch . +RUN chmod +x run_tkdiff_patch +RUN ./run_tkdiff_patch + +#UTF-8 fonts +RUN locale-gen en_US.UTF-8 +ENV LANG=en_US.UTF-8 +ENV LANGUAGE=en_US:en +ENV LC_ALL=en_US.UTF-8 + +#VNC +COPY start_vnc . +RUN chmod +x start_vnc + +## Default command +CMD ["/bin/tcsh"] diff --git a/docker/python3.13/README.md b/docker/python3.13/README.md new file mode 100644 index 00000000..b977ee5b --- /dev/null +++ b/docker/python3.13/README.md @@ -0,0 +1,266 @@ +# Python 3.13 Development Container with GUI Support + +Docker image with Python 3.13 and GUI application support via VNC. + +## Overview + +This container provides a complete Python 3.13 development environment with support for running GUI applications through VNC. It's portable across different systems including native Linux, WSL2, and Docker Desktop. + +**Image Details:** +- **Docker Hub:** `mdmitry1/python313-dev:latest` +- **Disk Size:** ~8GB (extracted), ~2GB (compressed) +- **Base:** Ubuntu 24.04 with Python 3.13 + +## Quick Start + +### Pull the Image + +```bash +docker pull mdmitry1/python313-dev:latest +``` + +### Run Container with VNC + +```bash +docker run -it -p 5900:5900 mdmitry1/python313-dev:latest +``` + +## Setting Up GUI Support using for X11 using socat + +1. Install `socat`, if it is not installed + +``` +sudo apt install socat +``` + +2. Run command + +```bash +pkill socat +``` + +3. 
Run command + +```bash +enter_released_container_x11_forwarding +``` + +## Setting Up GUI Support using for WSL2 with wslg installed + +``` +enter_released_container_wslg +``` + +## Setting Up GUI Support using VNC + +Once inside the container, set up the virtual display and VNC server: + +```bash +# 1. Start virtual X server +Xvfb :99 -screen 0 1280x1024x24 & + +# 2. Set display environment variable +export DISPLAY=:99 + +# 3. Start VNC server +x11vnc -display :99 -forever -nopw -shared -rfbport 5900 + +# 4. Enable clipboard sync (optional but recommended) +apt-get update && apt-get install -y autocutsel +autocutsel -selection CLIPBOARD -fork +autocutsel -selection PRIMARY -fork +``` + +## Connecting to VNC + +### Using Remmina (Recommended for Linux) + +```bash +# Install Remmina +sudo apt-get install remmina remmina-plugin-vnc + +# Launch and connect +remmina +# Create connection to: localhost:5900 +``` + +## Clipboard Support + +### Host System Setup (Linux) + +For bidirectional clipboard sync between host and VNC, install autocutsel on your host: + +```bash +# Install autocutsel +sudo apt-get install autocutsel + +# Add to ~/.xprofile +autocutsel -selection CLIPBOARD -fork +autocutsel -selection PRIMARY -fork +``` + +### In Container + +Already covered in the setup steps above. Use `autocutsel` to sync clipboard selections. 
+ +### Clipboard Usage in xterm + +- **Paste from host:** Press `Shift+Insert` in xterm +- **Copy to host:** Select text in xterm (auto-copies to PRIMARY) + +### Clipboard with gvim (More Reliable) + +gvim handles clipboard better than xterm: +- Visual select + `"+y` - copy to CLIPBOARD +- `"+p` - paste from CLIPBOARD +- Or use visual mode + right-click menu + +## File Operations + +### Copy Files to Container + +```bash +# Find your container name/ID +docker ps + +# Copy file from host to container +docker cp /path/on/host/file.txt container_name:/path/in/container/ + +# Copy directory (recursive by default) +docker cp /path/on/host/directory container_name:/path/in/container/ + +# Copy from container to host +docker cp container_name:/path/in/container/file.txt /path/on/host/ +``` + +### Mount Shared Directory + +Share a directory between host and container: + +```bash +docker run -it -p 5900:5900 -v ~/shared:/shared mdmitry1/python313-dev:latest +``` + +Now `~/shared` on host equals `/shared` in container - changes are immediately visible in both locations. + +## Common GUI Applications + +```bash +# Terminal emulator +xterm & + +# Text editor with good clipboard support +apt-get install -y vim-gtk3 +gvim & + +# Test X11 display +apt-get install -y x11-apps +xeyes & +xclock & +``` + +## Troubleshooting + +### VNC Connection Refused + +Make sure VNC server is running inside the container: +```bash +ps aux | grep x11vnc +``` + +If not running, start it: +```bash +x11vnc -display :99 -forever -nopw -shared -rfbport 5900 +``` + +### Clipboard Not Working + +1. Ensure autocutsel is running in both host and container: + ```bash + ps aux | grep autocutsel + ``` + +2. Test clipboard manually: + ```bash + # In container + echo "test" | xclip -selection clipboard + xclip -selection clipboard -o + ``` + +3. For xterm, use `Shift+Insert` to paste instead of `Ctrl+V` + +4. 
Consider using gvim instead of xterm for better clipboard support + +### GUI App Won't Start + +Verify DISPLAY is set correctly: +```bash +echo $DISPLAY # Should show :99 +export DISPLAY=:99 +``` + +Check if Xvfb is running: +```bash +ps aux | grep Xvfb +``` + +## Network Usage + +Uploading/downloading the ~2GB compressed image on ADSL: +- Upload time: ~2 hours +- Download time: varies by connection + +Use `screen` or `tmux` for long-running pushes: +```bash +screen -S docker-push +docker push mdmitry1/python313-dev:latest +# Press Ctrl+A then D to detach +# Reconnect: screen -r docker-push +``` + +## Advanced Usage + +### Running as Non-Root User + +```bash +docker run -it \ + --user $(id -u):$(id -g) \ + -p 5900:5900 \ + mdmitry1/python313-dev:latest +``` + +### Persistent Storage Recommendation + +```bash +docker run -it \ + -p 5900:5900 \ + -v ~/persistent-data:/data \ + mdmitry1/python313-dev:latest +``` + +### Custom Screen Resolution + +```bash +# Inside container, stop Xvfb and restart with different resolution +pkill Xvfb +Xvfb :99 -screen 0 1920x1080x24 & +``` + +## System Requirements + +- **Host OS:** Linux, WSL2, or Docker Desktop +- **Docker:** Version 29.2.0 +- **Disk Space:** ~8GB for image +- **Memory:** 3GB+ recommended +- **Network:** For VNC access and Docker Hub pulls + +## Support + +For issues or questions: +- Check Docker Hub: https://hub.docker.com/r/mdmitry1/python313-dev +- Review this README +- Check Docker and VNC logs + +## Version History + +- **latest** - Initial release with Python 3.13, VNC support, and GUI capabilities diff --git a/docker/python3.13/enter_container b/docker/python3.13/enter_container new file mode 100755 index 00000000..af5865f5 --- /dev/null +++ b/docker/python3.13/enter_container @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +set -euo pipefail + +script_path=$(dirname "$(realpath "$0")") +script_name=$(basename "$(realpath "$0")") + +mkdir -p "$HOME/shared" + +if [[ $# -gt 0 ]]; then + if [[ "$1" == "-h" || "$1" == 
"--help" ]]; then + echo -e "\nUsage: $script_name [-geom x] []" + echo -e "Example: $script_name -geom 1280x1024\n" + exit 0 + fi +fi + +if [[ $# -gt 1 && "$1" == "-geom" ]]; then + screen_resolution="$2" + echo "User defined screen resolution: $screen_resolution" + shift 2 +fi + +if [[ -z "${TZ:-}" ]]; then + TZ=$(readlink /etc/localtime | sed 's|/usr/share/zoneinfo/||') + export TZ +fi + +p=5900 +d=99 +while ss -tln | grep -q ":${p}"; do + (( p++ )) + (( d++ )) +done + +if [[ -z "${screen_resolution:-}" ]]; then + if [[ "$(uname -r)" == *WSL2* ]]; then + powershell_exe=$(which powershell.exe 2>/dev/null || true) + if [[ "$powershell_exe" == *powershell.exe* ]]; then + screen_resolution=$($powershell_exe -ExecutionPolicy Bypass -File "$script_path/getres.ps1" | tail -1 | sed 's/.$//') + fi + else + if xset q &>/dev/null; then + screen_resolution=$(xdpyinfo | grep dimensions | cut -d':' -f2 | awk '{print $1}') + fi + fi + if [[ -n "$screen_resolution" ]]; then + echo "Automatically defined screen resolution: $screen_resolution" + fi +fi + +if [[ -z "$screen_resolution" ]]; then + screen_resolution="1280x1024" + echo "Using default screen resolution: $screen_resolution" +fi + +docker run \ + -e TZ="$TZ" \ + -e DISPLAY=":${d}" \ + -e VNC_SCREEN_RESOLUTION="$screen_resolution" \ + -p "${p}:${p}" \ + -v "$HOME/shared:/shared" \ + -it python313-dev:latest "$@" diff --git a/docker/python3.13/enter_container_mac b/docker/python3.13/enter_container_mac new file mode 100755 index 00000000..5e94a2dc --- /dev/null +++ b/docker/python3.13/enter_container_mac @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +set -euo pipefail + +script_path=$(dirname "$(realpath "$0")") +script_name=$(basename "$(realpath "$0")") + +mkdir -p "$HOME/shared" + +if [[ $# -gt 0 ]]; then + if [[ "$1" == "-h" || "$1" == "--help" ]]; then + echo -e "\nUsage: $script_name [-geom x] []" + echo -e "Example: $script_name -geom 1280x1024\n" + exit 0 + fi +fi + +if [[ $# -gt 1 && "$1" == "-geom" ]]; then + 
screen_resolution="$2" + echo "User defined screen resolution: $screen_resolution" + shift 2 +fi + +if [[ -z "${TZ:-}" ]]; then + TZ=$(readlink /etc/localtime | sed 's|/usr/share/zoneinfo/||') + export TZ +fi + +p=5900 +d=99 +while ss -tln | grep -q ":${p}"; do + (( p++ )) + (( d++ )) +done + +if [[ -z "${screen_resolution:-}" ]]; then + if [[ "$(uname -r)" == *WSL2* ]]; then + powershell_exe=$(which powershell.exe 2>/dev/null || true) + if [[ "$powershell_exe" == *powershell.exe* ]]; then + screen_resolution=$($powershell_exe -ExecutionPolicy Bypass -File "$script_path/getres.ps1" | tail -1 | sed 's/.$//') + fi + else + if xset q &>/dev/null; then + screen_resolution=$(xdpyinfo | grep dimensions | cut -d':' -f2 | awk '{print $1}') + fi + fi + if [[ -n "$screen_resolution" ]]; then + echo "Automatically defined screen resolution: $screen_resolution" + fi +fi + +if [[ -z "$screen_resolution" ]]; then + screen_resolution="1280x1024" + echo "Using default screen resolution: $screen_resolution" +fi + +docker run \ + -e TZ="$TZ" \ + -e DISPLAY=":${d}" \ + -e VNC_SCREEN_RESOLUTION="$screen_resolution" \ + -p "${p}:${p}" \ + -v "$HOME/shared:/shared" \ + --platform linux/arm64 \ + -it python313-dev-mac:latest "$@" diff --git a/docker/python3.13/enter_container_x11_forwarding b/docker/python3.13/enter_container_x11_forwarding new file mode 100755 index 00000000..4e931cdd --- /dev/null +++ b/docker/python3.13/enter_container_x11_forwarding @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +set -euo pipefail + +mkdir -p "$HOME/shared" + +ip=$(hostname -I | tr ' ' '\n' | grep -Ev '^172|^127|:|^$') +display=$(echo "$DISPLAY" | cut -d: -f2) + +pkill socat || true +socat "TCP4-LISTEN:600${display},bind=${ip},reuseaddr,fork" "UNIX-CONNECT:/tmp/.X11-unix/X${display}" & + +if [[ -z "${TZ:-}" ]]; then + TZ=$(readlink /etc/localtime | sed 's|/usr/share/zoneinfo/||') + export TZ +fi + +docker run \ + -e DISPLAY="${ip}:${display}" \ + -e TZ="$TZ" \ + -e NO_AT_BRIDGE=1 \ + --network host \ + -v 
"$HOME/shared:/shared" \ + -it python313-dev:latest \ + "$@" diff --git a/docker/python3.13/enter_released_container b/docker/python3.13/enter_released_container new file mode 100755 index 00000000..7e80f1d6 --- /dev/null +++ b/docker/python3.13/enter_released_container @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +set -euo pipefail + +script_path=$(dirname "$(realpath "$0")") +script_name=$(basename "$(realpath "$0")") + +mkdir -p "$HOME/shared" + +if [[ $# -gt 0 ]]; then + if [[ "$1" == "-h" || "$1" == "--help" ]]; then + echo -e "\nUsage: $script_name [-geom x] []" + echo -e "Example: $script_name -geom 1280x1024\n" + exit 0 + fi +fi + +if [[ $# -gt 1 && "$1" == "-geom" ]]; then + screen_resolution="$2" + echo "User defined screen resolution: $screen_resolution" + shift 2 +fi + +if [[ -z "${TZ:-}" ]]; then + TZ=$(readlink /etc/localtime | sed 's|/usr/share/zoneinfo/||') + export TZ +fi + +p=5900 +d=99 +while ss -tln | grep -q ":${p}"; do + (( p++ )) + (( d++ )) +done + +if [[ -z "${screen_resolution:-}" ]]; then + if [[ "$(uname -r)" == *WSL2* ]]; then + powershell_exe=$(which powershell.exe 2>/dev/null || true) + if [[ "$powershell_exe" == *powershell.exe* ]]; then + screen_resolution=$($powershell_exe -ExecutionPolicy Bypass -File "$script_path/getres.ps1" | tail -1 | sed 's/.$//') + fi + else + if xset q &>/dev/null; then + screen_resolution=$(xdpyinfo | grep dimensions | cut -d':' -f2 | awk '{print $1}') + fi + fi + if [[ -n "$screen_resolution" ]]; then + echo "Automatically defined screen resolution: $screen_resolution" + fi +fi + +if [[ -z "$screen_resolution" ]]; then + screen_resolution="1280x1024" + echo "Using default screen resolution: $screen_resolution" +fi + +docker run \ + -e TZ="$TZ" \ + -e DISPLAY=":${d}" \ + -e VNC_SCREEN_RESOLUTION="$screen_resolution" \ + -p "${p}:${p}" \ + -v "$HOME/shared:/shared" \ + -it mdmitry1/python313-dev:latest "$@" diff --git a/docker/python3.13/enter_released_container_mac b/docker/python3.13/enter_released_container_mac 
new file mode 100755 index 00000000..e0f92560 --- /dev/null +++ b/docker/python3.13/enter_released_container_mac @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +set -euo pipefail + +script_path=$(dirname "$(realpath "$0")") +script_name=$(basename "$(realpath "$0")") + +mkdir -p "$HOME/shared" + +if [[ $# -gt 0 ]]; then + if [[ "$1" == "-h" || "$1" == "--help" ]]; then + echo -e "\nUsage: $script_name [-geom x] []" + echo -e "Example: $script_name -geom 1280x1024\n" + exit 0 + fi +fi + +if [[ $# -gt 1 && "$1" == "-geom" ]]; then + screen_resolution="$2" + echo "User defined screen resolution: $screen_resolution" + shift 2 +fi + +if [[ -z "${TZ:-}" ]]; then + TZ=$(readlink /etc/localtime | sed 's|/usr/share/zoneinfo/||') + export TZ +fi + +p=5900 +d=99 +while ss -tln | grep -q ":${p}"; do + (( p++ )) + (( d++ )) +done + +if [[ -z "${screen_resolution:-}" ]]; then + if [[ "$(uname -r)" == *WSL2* ]]; then + powershell_exe=$(which powershell.exe 2>/dev/null || true) + if [[ "$powershell_exe" == *powershell.exe* ]]; then + screen_resolution=$($powershell_exe -ExecutionPolicy Bypass -File "$script_path/getres.ps1" | tail -1 | sed 's/.$//') + fi + else + if xset q &>/dev/null; then + screen_resolution=$(xdpyinfo | grep dimensions | cut -d':' -f2 | awk '{print $1}') + fi + fi + if [[ -n "$screen_resolution" ]]; then + echo "Automatically defined screen resolution: $screen_resolution" + fi +fi + +if [[ -z "$screen_resolution" ]]; then + screen_resolution="1280x1024" + echo "Using default screen resolution: $screen_resolution" +fi + +docker run \ + -e TZ="$TZ" \ + -e DISPLAY=":${d}" \ + -e VNC_SCREEN_RESOLUTION="$screen_resolution" \ + -p "${p}:${p}" \ + -v "$HOME/shared:/shared" \ + --platform linux/arm64 \ + -it mdmitry1/python313-dev-mac:latest "$@" diff --git a/docker/python3.13/enter_released_container_mac_x11_forwarding b/docker/python3.13/enter_released_container_mac_x11_forwarding new file mode 100755 index 00000000..0df04447 --- /dev/null +++ 
b/docker/python3.13/enter_released_container_mac_x11_forwarding @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euo pipefail + +mkdir -p "$HOME/shared" + +ip=$(hostname -I | tr ' ' '\n' | grep -Ev '^172|^127|:|^$') +display=$(echo "$DISPLAY" | cut -d: -f2) + +pkill socat || true +socat "TCP4-LISTEN:600${display},bind=${ip},reuseaddr,fork" "UNIX-CONNECT:/tmp/.X11-unix/X${display}" & + +if [[ -z "${TZ:-}" ]]; then + TZ=$(readlink /etc/localtime | sed 's|/usr/share/zoneinfo/||') + export TZ +fi + +docker run \ + -e DISPLAY="${ip}:${display}" \ + -e TZ="$TZ" \ + -e NO_AT_BRIDGE=1 \ + --network host \ + --platform linux/arm64 \ + -v "$HOME/shared:/shared" \ + -it mdmitry1/python313-dev-mac:latest \ + "$@" diff --git a/docker/python3.13/enter_released_container_wslg b/docker/python3.13/enter_released_container_wslg new file mode 100755 index 00000000..2a90a6e9 --- /dev/null +++ b/docker/python3.13/enter_released_container_wslg @@ -0,0 +1,6 @@ +#!/usr/bin/env bash +if [[ -z "${TZ:-}" ]]; then + TZ=$(readlink /etc/localtime | sed 's|/usr/share/zoneinfo/||') + export TZ +fi +docker run -it -e TZ=$TZ -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix:rw -v /mnt/wslg:/mnt/wslg:rw mdmitry1/python313-dev:latest $* diff --git a/docker/python3.13/enter_released_container_x11_forwarding b/docker/python3.13/enter_released_container_x11_forwarding new file mode 100755 index 00000000..b18bef5a --- /dev/null +++ b/docker/python3.13/enter_released_container_x11_forwarding @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +set -euo pipefail + +mkdir -p "$HOME/shared" + +ip=$(hostname -I | tr ' ' '\n' | grep -Ev '^172|^127|:|^$') +display=$(echo "$DISPLAY" | cut -d: -f2) + +pkill socat || true +socat "TCP4-LISTEN:600${display},bind=${ip},reuseaddr,fork" "UNIX-CONNECT:/tmp/.X11-unix/X${display}" & + +if [[ -z "${TZ:-}" ]]; then + TZ=$(readlink /etc/localtime | sed 's|/usr/share/zoneinfo/||') + export TZ +fi + +docker run \ + -e DISPLAY="${ip}:${display}" \ + -e TZ="$TZ" \ + -e NO_AT_BRIDGE=1 \ + 
--network host \ + -v "$HOME/shared:/shared" \ + -it mdmitry1/python313-dev:latest \ + "$@" diff --git a/docker/python3.13/getres.ps1 b/docker/python3.13/getres.ps1 new file mode 100755 index 00000000..91099576 --- /dev/null +++ b/docker/python3.13/getres.ps1 @@ -0,0 +1,17 @@ +Add-Type -AssemblyName System.Windows.Forms +$screen = [System.Windows.Forms.Screen]::PrimaryScreen +$source = @" +using System; +using System.Runtime.InteropServices; +public class DPI { + [DllImport("gdi32.dll")] + public static extern int GetDeviceCaps(IntPtr hdc, int nIndex); + [DllImport("user32.dll")] + public static extern IntPtr GetDC(IntPtr hwnd); +} +"@ +Add-Type -TypeDefinition $source +$hdc = [DPI]::GetDC([IntPtr]::Zero) +$width = [DPI]::GetDeviceCaps($hdc, 118) # DESKTOPHORZRES +$height = [DPI]::GetDeviceCaps($hdc, 117) # DESKTOPVERTRES +Write-Output "${width}x${height}" \ No newline at end of file diff --git a/docker/python3.13/pycaret_313.tar.gz b/docker/python3.13/pycaret_313.tar.gz new file mode 100644 index 00000000..3d72bc3d Binary files /dev/null and b/docker/python3.13/pycaret_313.tar.gz differ diff --git a/docker/python3.13/requirements.txt b/docker/python3.13/requirements.txt new file mode 100644 index 00000000..d6305934 --- /dev/null +++ b/docker/python3.13/requirements.txt @@ -0,0 +1,14 @@ +doepy +jenkspy +keras_tuner +matplotlib==3.10.8 +meson +mrmr-selection +numpy==2.3.5 +pandas==2.3.3 +pydoe==0.3.8 +pysubgroup +scikit-learn==1.7.2 +scipy +seaborn +tensorflow diff --git a/docker/python3.13/run_docker_build b/docker/python3.13/run_docker_build new file mode 100755 index 00000000..7482e9d7 --- /dev/null +++ b/docker/python3.13/run_docker_build @@ -0,0 +1,2 @@ +#!/usr/bin/tcsh -f +env DOCKER_BUILDKIT=1 docker build --no-cache --progress=plain -t python313-dev . 
diff --git a/docker/python3.13/run_docker_build_incremental b/docker/python3.13/run_docker_build_incremental new file mode 100755 index 00000000..319b3192 --- /dev/null +++ b/docker/python3.13/run_docker_build_incremental @@ -0,0 +1,13 @@ +#!/usr/bin/tcsh -f +set build_args="" +if($#argv > 0) then + if("-rebuild_smlp" == "$argv[1]" ) then + set build_args="--build-arg CACHE_BUST_SMLP=`date +%s`" + else + echo "\nUsage: `realpath $0 | xargs basename` [-rebuild_smlp]\n" + exit 0 + endif +endif +set cmd="env DOCKER_BUILDKIT=1 docker build $build_args --progress=plain -t python313-dev ." +echo $cmd +$cmd diff --git a/docker/python3.13/run_docker_build_incremental_mac b/docker/python3.13/run_docker_build_incremental_mac new file mode 100755 index 00000000..96f38002 --- /dev/null +++ b/docker/python3.13/run_docker_build_incremental_mac @@ -0,0 +1,13 @@ +#!/usr/bin/tcsh -f +set build_args="" +if($#argv > 0) then + if("-rebuild_smlp" == "$argv[1]" ) then + set build_args="--build-arg CACHE_BUST_SMLP=`date +%s`" + else + echo "\nUsage: `realpath $0 | xargs basename` [-rebuild_smlp]\n" + exit 0 + endif +endif +set cmd="env DOCKER_BUILDKIT=1 docker buildx build $build_args --platform linux/arm64 --load --progress=plain -t python313-dev-mac:latest ." +echo $cmd +$cmd diff --git a/docker/python3.13/run_docker_build_mac b/docker/python3.13/run_docker_build_mac new file mode 100755 index 00000000..b122fa28 --- /dev/null +++ b/docker/python3.13/run_docker_build_mac @@ -0,0 +1,2 @@ +#!/usr/bin/tcsh -f +env DOCKER_BUILDKIT=1 docker buildx build --platform linux/arm64 --load --no-cache --progress=plain -t python313-dev-mac:latest . 
diff --git a/docker/python3.13/run_installation_test b/docker/python3.13/run_installation_test new file mode 100755 index 00000000..71304c66 --- /dev/null +++ b/docker/python3.13/run_installation_test @@ -0,0 +1,10 @@ +#!/usr/bin/tcsh -f +set log=`realpath $0 | xargs basename`.log +\rm -f $log >& /dev/null +docker run -it mdmitry1/python313-dev:latest smlp/src/run_smlp.py -h >& $log +if( 1 == `grep -c ^usage $log`) then + echo "\nSMLP installation succeeded\n" +else + echo "\nSMLP installation failed\n" + exit(1) +endif diff --git a/docker/python3.13/run_installation_test_expected.log b/docker/python3.13/run_installation_test_expected.log new file mode 100644 index 00000000..94540fce --- /dev/null +++ b/docker/python3.13/run_installation_test_expected.log @@ -0,0 +1,1015 @@ +Script is running inside a Docker container. +2026-02-05 09:26:58.641677: I external/local_xla/xla/tsl/cuda/cudart_stub.cc:31] Could not find cuda drivers on your machine, GPU will not be used. +2026-02-05 09:27:02.007746: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. +To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. +2026-02-05 09:27:08.575837: I external/local_xla/xla/tsl/cuda/cudart_stub.cc:31] Could not find cuda drivers on your machine, GPU will not be used. 
+2026-02-05 09:27:12.167166: E external/local_xla/xla/stream_executor/cuda/cuda_platform.cc:51] failed call to cuInit: INTERNAL: CUDA error: Failed call to cuInit: UNKNOWN ERROR (303) +usage: smlp/src/run_smlp.py [-h] [-model MODEL] [-save_model SAVE_MODEL] + [-use_model USE_MODEL] [-model_name MODEL_NAME] + [-save_model_config SAVE_MODEL_RERUN_CONFIGURATION] + [-model_per_response MODEL_PER_RESPONSE] + [-pred_plots PREDICTION_PLOTS] + [-nn_keras_layers NN_KERAS_LAYERS] + [-nn_keras_epochs NN_KERAS_EPOCHS] + [-nn_keras_batch NN_KERAS_BATCH_SIZE] + [-nn_keras_optimizer NN_KERAS_OPTIMIZER] + [-nn_keras_learning_rate NN_KERAS_LEARNING_RATE] + [-nn_keras_loss NN_KERAS_LOSS_FUNCTION] + [-nn_keras_metrics NN_KERAS_METRICS] + [-nn_keras_hid_activation NN_KERAS_HID_ACTIVATION] + [-nn_keras_out_activation NN_KERAS_OUT_ACTIVATION] + [-nn_keras_seq_api NN_KERAS_SEQUENTIAL_API] + [-nn_keras_weights_precision NN_KERAS_WEIGHTS_PRECISION] + [-nn_keras_tuner NN_KERAS_TUNER_ALGO] + [-nn_keras_layers_grid NN_KERAS_LAYERS_GRID] + [-nn_keras_batches_grid NN_KERAS_BATCHES_GRID] + [-nn_keras_lrates_grid NN_KERAS_LEARNING_RATES_GRID] + [-nn_keras_losses_grid NN_KERAS_LOSS_FUNCTIONS_GRID] + [-poly_sklearn_degree POLY_SKLEARN_DEGREE] + [-poly_sklearn_fit_intercept POLY_SKLEARN_FIT_INTERCEPT] + [-poly_sklearn_copy_X POLY_SKLEARN_COPY_X] + [-poly_sklearn_n_jobs POLY_SKLEARN_N_JOBS] + [-poly_sklearn_positive POLY_SKLEARN_POSITIVE] + [-dt_sklearn_splitter DT_SKLEARN_SPLITTER] + [-dt_sklearn_max_features DT_SKLEARN_MAX_FEATURES] + [-dt_sklearn_rand_state DT_SKLEARN_RANDOM_STATE] + [-dt_sklearn_criterion DT_SKLEARN_CRITERION] + [-dt_sklearn_max_depth DT_SKLEARN_MAX_DEPTH] + [-dt_sklearn_min_samples_split DT_SKLEARN_MIN_SAMPLES_SPLIT] + [-dt_sklearn_min_samples_leaf DT_SKLEARN_MIN_SAMPLES_LEAF] + [-dt_sklearn_min_weight_fraction_leaf DT_SKLEARN_MIN_WEIGHT_FRACTION_LEAF] + [-dt_sklearn_max_leaf_nodes DT_SKLEARN_MAX_LEAF_NODES] + [-dt_sklearn_min_impurity_decrease DT_SKLEARN_MIN_IMPURITY_DECREASE] 
+ [-dt_sklearn_ccp_alpha DT_SKLEARN_CCP_ALPHA] + [-rf_sklearn_n_estimators RF_SKLEARN_N_ESTIMATORS] + [-rf_sklearn_max_features RF_SKLEARN_MAX_FEATURES] + [-rf_sklearn_bootstrap RF_SKLEARN_BOOTSTRAP] + [-rf_sklearn_verbose RF_SKLEARN_VERBOSE] + [-rf_sklearn_warm_start RF_SKLEARN_WARM_START] + [-rf_sklearn_max_samples RF_SKLEARN_MAX_SAMPLES] + [-rf_sklearn_rand_state RF_SKLEARN_RANDOM_STATE] + [-rf_sklearn_criterion RF_SKLEARN_CRITERION] + [-rf_sklearn_max_depth RF_SKLEARN_MAX_DEPTH] + [-rf_sklearn_min_samples_split RF_SKLEARN_MIN_SAMPLES_SPLIT] + [-rf_sklearn_min_samples_leaf RF_SKLEARN_MIN_SAMPLES_LEAF] + [-rf_sklearn_min_weight_fraction_leaf RF_SKLEARN_MIN_WEIGHT_FRACTION_LEAF] + [-rf_sklearn_max_leaf_nodes RF_SKLEARN_MAX_LEAF_NODES] + [-rf_sklearn_min_impurity_decrease RF_SKLEARN_MIN_IMPURITY_DECREASE] + [-rf_sklearn_ccp_alpha RF_SKLEARN_CCP_ALPHA] + [-et_sklearn_n_estimators ET_SKLEARN_N_ESTIMATORS] + [-et_sklearn_max_features ET_SKLEARN_MAX_FEATURES] + [-et_sklearn_bootstrap ET_SKLEARN_BOOTSTRAP] + [-et_sklearn_verbose ET_SKLEARN_VERBOSE] + [-et_sklearn_warm_start ET_SKLEARN_WARM_START] + [-et_sklearn_max_samples ET_SKLEARN_MAX_SAMPLES] + [-et_sklearn_rand_state ET_SKLEARN_RANDOM_STATE] + [-et_sklearn_criterion ET_SKLEARN_CRITERION] + [-et_sklearn_max_depth ET_SKLEARN_MAX_DEPTH] + [-et_sklearn_min_samples_split ET_SKLEARN_MIN_SAMPLES_SPLIT] + [-et_sklearn_min_samples_leaf ET_SKLEARN_MIN_SAMPLES_LEAF] + [-et_sklearn_min_weight_fraction_leaf ET_SKLEARN_MIN_WEIGHT_FRACTION_LEAF] + [-et_sklearn_max_leaf_nodes ET_SKLEARN_MAX_LEAF_NODES] + [-et_sklearn_min_impurity_decrease ET_SKLEARN_MIN_IMPURITY_DECREASE] + [-et_sklearn_ccp_alpha ET_SKLEARN_CCP_ALPHA] + [-resp RESPONSE] [-feat FEATURES] + [-keep_feat KEEP_FEATURES] [-new_data NEW_DATA] + [-data_scaler DATA_SCALER] + [-scale_feat SCALE_FEATURES] + [-scale_resp SCALE_RESPONSES] + [-impute_resp IMPUTE_RESPONSES] + [-split SPLIT_TEST] [-train_rand TRAIN_RANDOM_N] + [-train_first TRAIN_FIRST_N] + [-train_unif 
TRAIN_UNIFORM_N] + [-sw_coef SAMPLE_WEIGHTS_COEF] + [-sw_exp SAMPLE_WEIGHTS_EXPONENT] + [-sw_int SAMPLE_WEIGHTS_INTERCEPT] + [-respmap RESPONSE_MAP] [-resp2b RESPONSE_TO_BOOL] + [-pos_val POSITIVE_VALUE] + [-neg_val NEGATIVE_VALUE] + [-resp_plots RESPONSE_PLOTS] + [-mrmr_pred MRMR_FEAT_COUNT_FOR_PREDICTION] + [-mrmr_corr MRMR_FEAT_COUNT_FOR_CORRELATION] + [-data LABELED_DATA] [-mode ANALYTICS_MODE] + [-plots INTERACTIVE_PLOTS] [-seed SEED] + [-pref LOG_FILES_PREFIX] + [-out_dir OUTPUT_DIRECTORY] + [-save_config SAVE_CONFIGURATION] + [-config LOAD_CONFIGURATION] + [-log_level LOG_LEVEL] [-log_mode LOG_MODE] + [-log_time LOG_TIME] [-doe_algo DOE_ALGO] + [-doe_factor_level_ranges DOE_FACTOR_LEVEL_RANGES] + [-doe_samples DOE_NUM_SAMPLES] + [-doe_resolution DOE_DESIGN_RESOLUTION] + [-doe_spec DOE_SPEC_FILE] + [-doe_bb_centers DOE_BOX_BEHNKEN_CENTERS] + [-doe_cc_center DOE_CENTRAL_COMPOSITE_CENTER] + [-doe_cc_alpha DOE_CENTRAL_COMPOSITE_ALPHA] + [-doe_cc_face DOE_CENTRAL_COMPOSITE_FACE] + [-doe_prob_distr DOE_PROB_DISTRIBUTION] + [-discr_algo DISCRETIZATION_ALGO] + [-discr_bins DISCRETIZATION_BINS] + [-discr_labels DISCRETIZATION_LABELS] + [-discr_type DISCRETIZATION_TYPE] + [-mi_method MUTUAL_INFORMATION_METHOD] + [-corr_and_mi CORRELATIONS_AND_MUTUAL_INFORMATION] + [-discret_num DISCRETIZE_NUMERIC_FEATURES] + [-cont_est CONTINUOUS_CORRELATION_ESTIMATORS] + [-psg_quality PSG_QUALITY_TARGET] + [-psg_dim PSG_MAX_DIMENSION] + [-psg_top PSG_TOP_RANKED] [-spec SPEC] + [-delta_rel DELTA_RELATIVE] + [-delta_abs DELTA_ABSOLUTE] + [-rad_rel RADIUS_RELATIVE] + [-rad_abs RADIUS_ABSOLUTE] [-alpha ALPHA] + [-beta BETA] [-eta ETA] + [-compress_rules COMPRESS_RULES] + [-simplify_terms SIMPLIFY_TERMS] + [-tree_encoding TREE_ENCODING] + [-nnet_encoding NNET_ENCODING] + [-trace_runtime TRACE_RUNTIME] + [-trace_prec TRACE_PRECISION] + [-trace_anonym TRACE_ANONYMIZE] + [-quer_names QUERY_NAMES] + [-quer_exprs QUERY_EXPRESSIONS] + [-lemma_prec LEMMA_PRECISION] + [-asrt_names 
ASSERTIONS_NAMES] + [-asrt_exprs ASSERTIONS_EXPRESSIONS] + [-epsilon EPSILON] [-center_offset CENTER_OFFSET] + [-objv_names OBJECTIVES_NAMES] + [-objv_exprs OBJECTIVES_EXPRESSIONS] + [-scale_objv SCALE_OBJECTIVES] + [-pareto OPTIMIZE_PARETO] + [-frac_aprox APPROXIMATE_FRACTIONS] + [-frac_prec FRACTION_PRECISION] + [-vacuity VACUITY_CHECK] + [-opt_strategy OPTIMIZATION_STRATEGY] + [-solver SOLVER] [-solver_path SOLVER_PATH] + [-solver_logic SOLVER_LOGIC] + +options: + -h, --help show this help message and exit + -model, --model MODEL + Type of model to train (NN, Poly, ... [default: none] + -save_model, --save_model SAVE_MODEL + Should the trained models be saved for future use? + [default: True] + -use_model, --use_model USE_MODEL + Should the saved models be reused (and training + skipped)? [default: False] + -model_name, --model_name MODEL_NAME + Name of saved model. If not specified, the name is + defined as follows: filename_prefix + "_" + model_algo + + "_model_complete" + model_format where + filename_prefix is concatenation of the output + directory and the prefix identifying the run, + model_algo is the training algo name and model_format + is .h5 for nn_keras and .pkl for models trained using + sklearn and keras packages. + -save_model_config, --save_model_rerun_configuration SAVE_MODEL_RERUN_CONFIGURATION + Should a config file enabling to re-run a saved model + be written out? [default: True] + -model_per_response, --model_per_response MODEL_PER_RESPONSE + Should a separate model, possible with a different, + dedicated feature set, be built per response (as + opposite to building one multi-response + model)?[default: False] + -pred_plots, --prediction_plots PREDICTION_PLOTS + Should response distribution plots and plots comparing + response values in data with the predicted values be + generated? 
A related option interactive_plots controls + whether the generated plots should be displayed + interactively during runtime [default: True] + -nn_keras_layers, --nn_keras_layers NN_KERAS_LAYERS + specify number and sizes of the hidden layers of the + NN as non-empty, comma-separated list of positive + fractions in the number of input features in, e.g. + "0.5,0.25" specifies the second layer of half input + size, third layer of quarter input size (the input + layer has one node per input) [default: 2,1] + -nn_keras_epochs, --nn_keras_epochs NN_KERAS_EPOCHS + epochs for NN [default: 2000] + -nn_keras_batch, --nn_keras_batch_size NN_KERAS_BATCH_SIZE + batch_size for NN [default: not exposed] + -nn_keras_optimizer, --nn_keras_optimizer NN_KERAS_OPTIMIZER + optimizer for NN [default: adam] + -nn_keras_learning_rate, --nn_keras_learning_rate NN_KERAS_LEARNING_RATE + optimizer for NN [default: 0.001] + -nn_keras_loss, --nn_keras_loss_function NN_KERAS_LOSS_FUNCTION + The loss function for NN training convergence. + Possible options are: "mse" (MeanSquaredError), "mae" + (MeanAbsoluteError), "mspe" + (MeanAbsolutePercentageError) "msle" + (MeanSquaredLogarithmicError), "huber" (Huber), + "logcosh" (LogCosh) [default: mse] + -nn_keras_metrics, --nn_keras_metrics NN_KERAS_METRICS + The metrics for NN training convergence. 
Possible + options are: "rmse (RootMeanSquaredError), "mse" + (MeanSquaredError), "mae" (MeanAbsoluteError), "mspe" + (MeanAbsolutePercentageError) "msle" + (MeanSquaredLogarithmicError), "logcosh" + (LogCoshError), and "cosine" (CosineSimilarity) + [default: ['mse']] + -nn_keras_hid_activation, --nn_keras_hid_activation NN_KERAS_HID_ACTIVATION + hidden layer activation for NN [default: relu] + -nn_keras_out_activation, --nn_keras_out_activation NN_KERAS_OUT_ACTIVATION + output layer activation for NN [default: linear] + -nn_keras_seq_api, --nn_keras_sequential_api NN_KERAS_SEQUENTIAL_API + Should sequential api be used building NN layers or + should functional api be used instead? [default: True] + -nn_keras_weights_precision, --nn_keras_weights_precision NN_KERAS_WEIGHTS_PRECISION + Decimal precison (theat is, decimal points after the + dot) to use for rounding model weights (after a NN + model has been trained). The default value {} implies + that weight will not be rounded [default: linear] + -nn_keras_tuner, --nn_keras_tuner_algo NN_KERAS_TUNER_ALGO + NN Keras tuner algorithm to be invoked. Supported + options are hyperband (Hyperband), bayesian + (BayesianOptimization) and random (RandomSearch). The + option value None indicates that keras tuner will not + be invoked [default: None] + -nn_keras_layers_grid, --nn_keras_layers_grid NN_KERAS_LAYERS_GRID + Semicolon separated list of NN Keras layers + specifications, to be used by Keras tuner. Each such + specification itself is a comma separated list of + numbers, see the layers options for a detailed + description [default: None] + -nn_keras_batches_grid, --nn_keras_batches_grid NN_KERAS_BATCHES_GRID + Comma separated list of NN Keras batch sizes, to be + used by Keras tuner. [default: None] + -nn_keras_lrates_grid, --nn_keras_learning_rates_grid NN_KERAS_LEARNING_RATES_GRID + Comma separated list of NN Keras learning rates, to be + used by Keras tuner. 
[default: None] + -nn_keras_losses_grid, --nn_keras_loss_functions_grid NN_KERAS_LOSS_FUNCTIONS_GRID + Comma separated list of NN Keras loss functions, to be + used by Keras tuner. It can be a subset of loss + functions mse, mae, mape, msle, huber, logcosh. + [default: None] + -poly_sklearn_degree, --poly_sklearn_degree POLY_SKLEARN_DEGREE + Degree of the polynomial to train [default: 2] + -poly_sklearn_fit_intercept, --poly_sklearn_fit_intercept POLY_SKLEARN_FIT_INTERCEPT + Whether to calculate the intercept for this model. If + set to False, no intercept will be used in + calculations (i.e. data is expected to be centered). + [default: True] + -poly_sklearn_copy_X, --poly_sklearn_copy_X POLY_SKLEARN_COPY_X + If True, X will be copied; else, it may be + overwritten. [default: True] + -poly_sklearn_n_jobs, --poly_sklearn_n_jobs POLY_SKLEARN_N_JOBS + The number of jobs to use for the computation. This + will only provide speedup in case of sufficiently + large problems, that is if firstly n_targets > 1 and + secondly X is sparse or if positive is set to True. + None means 1 unless in a joblib.parallel_backend + context. -1 means using all processors [default: None] + -poly_sklearn_positive, --poly_sklearn_positive POLY_SKLEARN_POSITIVE + When set to True, forces the coefficients to be + positive. This option is only supported for dense + arrays. [default: False] + -dt_sklearn_splitter, --dt_sklearn_splitter DT_SKLEARN_SPLITTER + The strategy used to choose the split at each node. + Supported strategies are “best” to choose the best + split and “random” to choose the best random split + [default: best] + -dt_sklearn_max_features, --dt_sklearn_max_features DT_SKLEARN_MAX_FEATURES + The number of features to consider when looking for + the best split: If int, then consider max_features + features at each split. If float, max_features is a + fraction and max(1, int(max_features * + n_features_in_)) features are considered at each + split. 
If “sqrt”, then max_features=sqrt(n_features). + If “log2”, then max_features=log2(n_features). If + None, then max_features=n_features. [default: None] + -dt_sklearn_rand_state, --dt_sklearn_random_state DT_SKLEARN_RANDOM_STATE + Controls the randomness of the estimator. The features + are always randomly permuted at each split, even if + splitter is set to "best". When max_features < + n_features, the algorithm will select max_features at + random at each split before finding the best split + among them. But the best found split may vary across + different runs, even if max_features=n_features. That + is the case, if the improvement of the criterion is + identical for several splits and one split has to be + selected at random. To obtain a deterministic + behaviour during fitting, random_state has to be fixed + to an integer. [default: None] + -dt_sklearn_criterion, --dt_sklearn_criterion DT_SKLEARN_CRITERION + The function to measure the quality of a split. + Supported criteria are “squared_error” for the mean + squared error, which is equal to variance reduction as + feature selection criterion and minimizes the L2 loss + using the mean of each terminal node, “friedman_mse”, + which uses mean squared error with Friedman’s + improvement score for potential splits, + “absolute_error” for the mean absolute error, which + minimizes the L1 loss using the median of each + terminal node, and “poisson” which uses reduction in + Poisson deviance to find splits. Training using + “absolute_error” is slower than when using + “squared_error”. [default: squared_error] + -dt_sklearn_max_depth, --dt_sklearn_max_depth DT_SKLEARN_MAX_DEPTH + The maximum depth of the tree. If None, then nodes are + expanded until all leaves are pure or until all leaves + contain less than min_samples_split samples. 
[default: + None] + -dt_sklearn_min_samples_split, --dt_sklearn_min_samples_split DT_SKLEARN_MIN_SAMPLES_SPLIT + The minimum number of samples required to split an + internal node.If int, then consider min_samples_split + as the minimum number. If float, min_samples_split is + a fraction and ceil(min_samples_split * n_samples) is + the minimum number of samples for each split. + [default: 2] + -dt_sklearn_min_samples_leaf, --dt_sklearn_min_samples_leaf DT_SKLEARN_MIN_SAMPLES_LEAF + The minimum number of samples required to be at a leaf + node. If int, then consider min_samples_leaf as the + minimum number. If float, min_samples_leaf is a + fraction and ceil(min_samples_leaf * n_samples) is the + minimum number of samples for each node. [default: 1] + -dt_sklearn_min_weight_fraction_leaf, --dt_sklearn_min_weight_fraction_leaf DT_SKLEARN_MIN_WEIGHT_FRACTION_LEAF + The minimum weighted fraction of the sum total of + weights (of all the input samples) required to be at a + leaf node. Samples have equal weight when + sample_weight is not provided. [default: 0.0] + -dt_sklearn_max_leaf_nodes, --dt_sklearn_max_leaf_nodes DT_SKLEARN_MAX_LEAF_NODES + Grow a tree with max_leaf_nodes in best-first fashion. + Best nodes are defined as relative reduction in + impurity. If None then unlimited number of leaf nodes + [default: None] + -dt_sklearn_min_impurity_decrease, --dt_sklearn_min_impurity_decrease DT_SKLEARN_MIN_IMPURITY_DECREASE + A node will be split if this split induces a decrease + of the impurity greater than or equal to this value + N_t / N * (impurity - N_t_R / N_t * right_impurity - + N_t_L / N_t * left_impurity), where N is the total + number of samples, N_t is the number of samples at the + current node, N_t_L is the number of samples in the + left child, and N_t_R is the number of samples in the + right child. N, N_t, N_t_R and N_t_L all refer to the + weighted sum, if sample_weight is passed. 
[default: + 0.0] + -dt_sklearn_ccp_alpha, --dt_sklearn_ccp_alpha DT_SKLEARN_CCP_ALPHA + Complexity parameter used for Minimal Cost-Complexity + Pruning. The subtree with the largest cost complexity + that is smaller than ccp_alpha will be chosen. By + default, no pruning is performed. [default: 0.0] + -rf_sklearn_n_estimators, --rf_sklearn_n_estimators RF_SKLEARN_N_ESTIMATORS + The number of trees in the forest. [default: 100] + -rf_sklearn_max_features, --rf_sklearn_max_features RF_SKLEARN_MAX_FEATURES + The number of features to consider when looking for + the best split: If int, then consider max_features + features at each split. If float, max_features is a + fraction and max(1, int(max_features * + n_features_in_)) features are considered at each + split, where n_features_in_ is the number of features + seen during fit. If “sqrt”, then + max_features=sqrt(n_features). If “log2”, then + max_features=log2(n_features). If None or 1.0, then + max_features=n_features. [default: 1.0] + -rf_sklearn_bootstrap, --rf_sklearn_bootstrap RF_SKLEARN_BOOTSTRAP + Whether bootstrap samples are used when building + trees. If False, the whole dataset is used to build + each tree [default: True] + -rf_sklearn_verbose, --rf_sklearn_verbose RF_SKLEARN_VERBOSE + Controls the verbosity when fitting and predicting. + [default: 0] + -rf_sklearn_warm_start, --rf_sklearn_warm_start RF_SKLEARN_WARM_START + When set to True, reuse the solution of the previous + call to fit and add more estimators to the ensemble, + otherwise, just fit a whole new forest [default: + False] + -rf_sklearn_max_samples, --rf_sklearn_max_samples RF_SKLEARN_MAX_SAMPLES + If bootstrap is True, the number of samples to draw + from X to train each base estimator. If None + (default), then draw X.shape[0] samples. If int, then + draw max_samples samples.If float, then draw + max(round(n_samples * max_samples), 1) samples. Thus, + max_samples should be in the interval (0.0, 1.0]. 
+ [default: None] + -rf_sklearn_rand_state, --rf_sklearn_random_state RF_SKLEARN_RANDOM_STATE + Controls both the randomness of the bootstrapping of + the samples used when building trees (if + bootstrap=True) and the sampling of the features to + consider when looking for the best split at each node + (if max_features < n_features). [default: None] + -rf_sklearn_criterion, --rf_sklearn_criterion RF_SKLEARN_CRITERION + The function to measure the quality of a split. + Supported criteria are “squared_error” for the mean + squared error, which is equal to variance reduction as + feature selection criterion and minimizes the L2 loss + using the mean of each terminal node, “friedman_mse”, + which uses mean squared error with Friedman’s + improvement score for potential splits, + “absolute_error” for the mean absolute error, which + minimizes the L1 loss using the median of each + terminal node, and “poisson” which uses reduction in + Poisson deviance to find splits. Training using + “absolute_error” is slower than when using + “squared_error”. [default: squared_error] + -rf_sklearn_max_depth, --rf_sklearn_max_depth RF_SKLEARN_MAX_DEPTH + The maximum depth of the tree. If None, then nodes are + expanded until all leaves are pure or until all leaves + contain less than min_samples_split samples. [default: + None] + -rf_sklearn_min_samples_split, --rf_sklearn_min_samples_split RF_SKLEARN_MIN_SAMPLES_SPLIT + The minimum number of samples required to split an + internal node.If int, then consider min_samples_split + as the minimum number. If float, min_samples_split is + a fraction and ceil(min_samples_split * n_samples) is + the minimum number of samples for each split. + [default: 2] + -rf_sklearn_min_samples_leaf, --rf_sklearn_min_samples_leaf RF_SKLEARN_MIN_SAMPLES_LEAF + The minimum number of samples required to be at a leaf + node. If int, then consider min_samples_leaf as the + minimum number. 
If float, min_samples_leaf is a + fraction and ceil(min_samples_leaf * n_samples) is the + minimum number of samples for each node. [default: 1] + -rf_sklearn_min_weight_fraction_leaf, --rf_sklearn_min_weight_fraction_leaf RF_SKLEARN_MIN_WEIGHT_FRACTION_LEAF + The minimum weighted fraction of the sum total of + weights (of all the input samples) required to be at a + leaf node. Samples have equal weight when + sample_weight is not provided. [default: 0.0] + -rf_sklearn_max_leaf_nodes, --rf_sklearn_max_leaf_nodes RF_SKLEARN_MAX_LEAF_NODES + Grow a tree with max_leaf_nodes in best-first fashion. + Best nodes are defined as relative reduction in + impurity. If None then unlimited number of leaf nodes + [default: None] + -rf_sklearn_min_impurity_decrease, --rf_sklearn_min_impurity_decrease RF_SKLEARN_MIN_IMPURITY_DECREASE + A node will be split if this split induces a decrease + of the impurity greater than or equal to this value + N_t / N * (impurity - N_t_R / N_t * right_impurity - + N_t_L / N_t * left_impurity), where N is the total + number of samples, N_t is the number of samples at the + current node, N_t_L is the number of samples in the + left child, and N_t_R is the number of samples in the + right child. N, N_t, N_t_R and N_t_L all refer to the + weighted sum, if sample_weight is passed. [default: + 0.0] + -rf_sklearn_ccp_alpha, --rf_sklearn_ccp_alpha RF_SKLEARN_CCP_ALPHA + Complexity parameter used for Minimal Cost-Complexity + Pruning. The subtree with the largest cost complexity + that is smaller than ccp_alpha will be chosen. By + default, no pruning is performed. [default: 0.0] + -et_sklearn_n_estimators, --et_sklearn_n_estimators ET_SKLEARN_N_ESTIMATORS + The number of trees in the forest. [default: 100] + -et_sklearn_max_features, --et_sklearn_max_features ET_SKLEARN_MAX_FEATURES + The number of features to consider when looking for + the best split: If int, then consider max_features + features at each split. 
If float, max_features is a + fraction and max(1, int(max_features * + n_features_in_)) features are considered at each + split, where n_features_in_ is the number of features + seen during fit. If “sqrt”, then + max_features=sqrt(n_features). If “log2”, then + max_features=log2(n_features). If None, then + max_features=n_features. [default: 1.0] + -et_sklearn_bootstrap, --et_sklearn_bootstrap ET_SKLEARN_BOOTSTRAP + Whether bootstrap samples are used when building + trees. If False, the whole dataset is used to build + each tree [default: True] + -et_sklearn_verbose, --et_sklearn_verbose ET_SKLEARN_VERBOSE + Controls the verbosity when fitting and predicting. + [default: 0] + -et_sklearn_warm_start, --et_sklearn_warm_start ET_SKLEARN_WARM_START + When set to True, reuse the solution of the previous + call to fit and add more estimators to the ensemble, + otherwise, just fit a whole new forest [default: + False] + -et_sklearn_max_samples, --et_sklearn_max_samples ET_SKLEARN_MAX_SAMPLES + If bootstrap is True, the number of samples to draw + from X to train each base estimator. If None + (default), then draw X.shape[0] samples. If int, then + draw max_samples samples.If float, then draw + max(round(n_samples * max_samples), 1) samples. Thus, + max_samples should be in the interval (0.0, 1.0]. + [default: None] + -et_sklearn_rand_state, --et_sklearn_random_state ET_SKLEARN_RANDOM_STATE + Used to pick randomly the max_features used at each + split. Note that the mere presence of random_state + doesn’t mean that randomization is always used, as it + may be dependent on another parameter, e.g. shuffle, + being set. [default: None] + -et_sklearn_criterion, --et_sklearn_criterion ET_SKLEARN_CRITERION + The function to measure the quality of a split. 
+ Supported criteria are “squared_error” for the mean + squared error, which is equal to variance reduction as + feature selection criterion and minimizes the L2 loss + using the mean of each terminal node, “friedman_mse”, + which uses mean squared error with Friedman’s + improvement score for potential splits, + “absolute_error” for the mean absolute error, which + minimizes the L1 loss using the median of each + terminal node, and “poisson” which uses reduction in + Poisson deviance to find splits. Training using + “absolute_error” is slower than when using + “squared_error”. [default: squared_error] + -et_sklearn_max_depth, --et_sklearn_max_depth ET_SKLEARN_MAX_DEPTH + The maximum depth of the tree. If None, then nodes are + expanded until all leaves are pure or until all leaves + contain less than min_samples_split samples. [default: + None] + -et_sklearn_min_samples_split, --et_sklearn_min_samples_split ET_SKLEARN_MIN_SAMPLES_SPLIT + The minimum number of samples required to split an + internal node.If int, then consider min_samples_split + as the minimum number. If float, min_samples_split is + a fraction and ceil(min_samples_split * n_samples) is + the minimum number of samples for each split. + [default: 2] + -et_sklearn_min_samples_leaf, --et_sklearn_min_samples_leaf ET_SKLEARN_MIN_SAMPLES_LEAF + The minimum number of samples required to be at a leaf + node. If int, then consider min_samples_leaf as the + minimum number. If float, min_samples_leaf is a + fraction and ceil(min_samples_leaf * n_samples) is the + minimum number of samples for each node. [default: 1] + -et_sklearn_min_weight_fraction_leaf, --et_sklearn_min_weight_fraction_leaf ET_SKLEARN_MIN_WEIGHT_FRACTION_LEAF + The minimum weighted fraction of the sum total of + weights (of all the input samples) required to be at a + leaf node. Samples have equal weight when + sample_weight is not provided. 
[default: 0.0] + -et_sklearn_max_leaf_nodes, --et_sklearn_max_leaf_nodes ET_SKLEARN_MAX_LEAF_NODES + Grow a tree with max_leaf_nodes in best-first fashion. + Best nodes are defined as relative reduction in + impurity. If None then unlimited number of leaf nodes + [default: None] + -et_sklearn_min_impurity_decrease, --et_sklearn_min_impurity_decrease ET_SKLEARN_MIN_IMPURITY_DECREASE + A node will be split if this split induces a decrease + of the impurity greater than or equal to this value + N_t / N * (impurity - N_t_R / N_t * right_impurity - + N_t_L / N_t * left_impurity), where N is the total + number of samples, N_t is the number of samples at the + current node, N_t_L is the number of samples in the + left child, and N_t_R is the number of samples in the + right child. N, N_t, N_t_R and N_t_L all refer to the + weighted sum, if sample_weight is passed. [default: + 0.0] + -et_sklearn_ccp_alpha, --et_sklearn_ccp_alpha ET_SKLEARN_CCP_ALPHA + Complexity parameter used for Minimal Cost-Complexity + Pruning. The subtree with the largest cost complexity + that is smaller than ccp_alpha will be chosen. By + default, no pruning is performed. [default: 0.0] + -resp, --response RESPONSE + Names of response variables, must be provided [default + None] + -feat, --features FEATURES + Names of input features (can be computed from data) + [default None] + -keep_feat, --keep_features KEEP_FEATURES + Names of input features that should be used in model + training: feature selection or other heuristics for + selecting features that will be used in model training + cannot drop these input features [default []] + -new_data, --new_data NEW_DATA + Path excluding the .csv suffix to new data file + [default: None] + -data_scaler, --data_scaler DATA_SCALER + Should features and responses be scaled and with which + scaling optionton? 
Value "none" implies no scaling; + the only other supported option in "min_max" scaler + [default: min_max] + -scale_feat, --scale_features SCALE_FEATURES + Should features be scaled using scaler specified + through option "data_scaler"? [default: True] + -scale_resp, --scale_responses SCALE_RESPONSES + Should responses be scaled using scaler specified + through option "data_scaler"? [default: True] + -impute_resp, --impute_responses IMPUTE_RESPONSES + Should missing values in responses be imputed? Might + make sense when there are multiple responses and + different responses have missing values in different + samples: this might be a better alternative compared + to dropping rows where at least one response has a + missing value [default: False] + -split, --split_test SPLIT_TEST + Fraction in (0,1] of data samples to split from + training data for testing; when the option value is + 1,the dataset will be used both for training and + testing [default: 0.2] + -train_rand, --train_random_n TRAIN_RANDOM_N + Subset random n rows from training data to use for + training [default: 0] + -train_first, --train_first_n TRAIN_FIRST_N + Subset first n rows from training data to use for + training [default: 0] + -train_unif, --train_uniform_n TRAIN_UNIFORM_N + Subset random n rows from training data with close to + uniform distribution to use for training [default: 0] + -sw_coef, --sample_weights_coef SAMPLE_WEIGHTS_COEF + Coefficient in range ]-1, 1[ to compute sample weights + for model training; weights are defined as [sw_coef * + (v - mid_range) + 1 for v in resp_vals] where + resp_vals is the response value vector or vector of + mean values of all responses per sample, and mid_range + is the mid point of the range of resp_vals. The value + of sw_coef is chosen positive (resp. negative) when + one wants to assign higher weights to samples with + high (resp. low) values in resp_vals. 
As an example, + sw_coef = 0.2 assigns weight=1.2 to samples with + max(resp_vals) and weight=0.8 to samples with + min(resp_vals), and sw_coef = 0 implies weight=1 for + each sample [default: 0] + -sw_exp, --sample_weights_exponent SAMPLE_WEIGHTS_EXPONENT + The Exponent to compute sample weights for model + training; weights are defined as [sw_int + sw_coef + *((v - mn)/(mx-mn))**sw_exp for v in resp_vals ] where + resp_vals is the response value vector or vector of + mean values of all responses per sample, and mn and mx + are respectively the min and max of resp_vals. The + value of sw_coef is chosen non-negative to make sure + all weights are non-negative [default: 0] + -sw_int, --sample_weights_intercept SAMPLE_WEIGHTS_INTERCEPT + The intercept to compute sample weights for model + training; weights are defined as [sw_int + sw_coef + *((v - mn)/(mx-mn))**sw_exp for v in resp_vals ] where + resp_vals is the response value vector or vector of + mean values of all responses per sample, and mn and mx + are respectively the min and max of resp_vals. The + value of sw_coef is chosen non-negative to make sure + all weights are non-negative [default: 0] + -respmap, --response_map RESPONSE_MAP + Python expression with just one variable x to be + applied as lambda function to the values of each of + the responses, as part of preprocessing. This + transformation is applied before the transformation of + responses specified using option resp2b, and the user + responsibility to ensure these two transformations in + the described order achieve the right transformation + of the response columns [default: None] + -resp2b, --response_to_bool RESPONSE_TO_BOOL + Semicolon seperated list of conditions to be applied + to the responses in the order the responses are + specified, to convert them into binary responses as + part of data preprocessing. The conditions define when + each response is positive. 
Say a condition resp1 > 5 + transforms response called resp1 into a binary 1/0 + response that has value 1 for each data sample (row) + where resp1 is greater than 5 and value 0 for the + remaining samples [default: None] + -pos_val, --positive_value POSITIVE_VALUE + Value that represents positive values in a binary + categorical response in the original input data + (before any data processing has been applied) + [default: 1] + -neg_val, --negative_value NEGATIVE_VALUE + Value that represents negative values in a binary + categorical response in the original input data + (before any data processing has been applied) + [default: 0] + -resp_plots, --response_plots RESPONSE_PLOTS + Should response value distribution plots be genrated + during data processing? A related option + interactive_plots controls whether the generated plots + should be displayed interactively during runtime + [default: True] + -mrmr_pred, --mrmr_feat_count_for_prediction MRMR_FEAT_COUNT_FOR_PREDICTION + Count of features selected by MRMR algorithm for + predictive models [default: 15] + -mrmr_corr, --mrmr_feat_count_for_correlation MRMR_FEAT_COUNT_FOR_CORRELATION + Count of features selected by MRMR algorithm for + correlation analysis [default: 15] + -data, --labeled_data LABELED_DATA + Path, possibly excluding the .csv, or including gz or + bz2 suffix, to input training data file containing + labels [default None] + -mode, --analytics_mode ANALYTICS_MODE + What kind of analysis should be performed; the + supported modes are: "train", "predict", "subgroups", + "doe", "discretize", "optimize", "verify", "query", + "optsyn" [default: None] + -plots, --interactive_plots INTERACTIVE_PLOTS + Should plots be displayed interactively (or only + saved)?[default: True] + -seed, --seed SEED Initial random seed [default None] + -pref, --log_files_prefix LOG_FILES_PREFIX + String to be used as prefix for the output files + [default: None] + -out_dir, --output_directory OUTPUT_DIRECTORY + Output directory 
where all reports and output files + will be written [default: the same directory from + which data is loaded] + -save_config, --save_configuration SAVE_CONFIGURATION + Should tool run parameters be saved into a a + configuration file? [default: False] + -config, --load_configuration LOAD_CONFIGURATION + Json config file name, to load tool parameter values + from, or None. Paramters specified through command + line will override the correponding config file values + if they are specified there as well [default: None] + -log_level, --log_level LOG_LEVEL + The logger level or severity of the events they are + used to track. The standard levels are (in increasing + order of severity): notset, debug, info, warning, + error, critical; only events of this level and above + will be tracked [default warning] + -log_mode, --log_mode LOG_MODE + The logger filemode for logging into log file [default + w] + -log_time, --log_time LOG_TIME + Should time stamp be logged along with every message + issued by logger [default true] + -doe_algo, --doe_algo DOE_ALGO + Design of experiment (DOE) algorithm from doepy + package. The supported algorithms are: + "full_factorial, fractional_factorial, + plackett_burman, sukharev_grid, box_behnken, + box_wilson, latin_hypercube, latin_hypercube_sf, + halton_sequence, uniform_random_matrix" + -doe_factor_level_ranges, --doe_factor_level_ranges DOE_FACTOR_LEVEL_RANGES + A dictionary of levels per feature for building + experiments for all supported DOE algorithms. Here + experiments are lists feature-value assignments + [(feature_1, value_1),...,(feature_n, value_n)], and + they are rows of the matrix of experiments returned by + the supported DOE algorithms. The features are integer + features (thus the levels (values) are integers). The + keys in that dictionary are names of features and the + associated values are lists [val_1, .., val_k] from + which value for that feature are selected to build an + experiment. 
Example: + {"Pressure":[50,60,70],"Temperature":[290, 320, + 350],"Flow rate":[0.9,1.0]}. DOE algorithms that work + with two levels only treat these levels as the min and + max of the rage of a numeric variable. [default: None] + -doe_samples, --doe_num_samples DOE_NUM_SAMPLES + Number of samples (experiments) to be generated + [default: None] + -doe_resolution, --doe_design_resolution DOE_DESIGN_RESOLUTION + Desired design resolution. The resolution of a design + is defined as the length of the shortest word in the + defining relation. The resolution describes the level + of confounding between factors and interaction + effects, where higher resolution indicates lower + degree of confounding. For example, consider the + 2^4-1-design defined by gen = "a b c ab" The factor + "d" is defined by "ab" with defining relation I="abd", + where I is the unit vector. In this simple example the + shortest word is "abd" meaning that this is a + resolution III-design. In practice resolution III-, + IV- and V-designs are most commonly applied. * III: + Main effects may be confounded with two-factor + interactions. * IV: Main effects are unconfounded by + two-factor interactions, but two-factor interactions + may be confounded with each other. * V: Main effects + unconfounded with up to four-factor interactions, two- + factor interactions unconfounded with up to three- + factor interactions. Three-factor interactions may be + confounded with each other. [default: Half of the + total feature count in doe_factor_level_ranges] + -doe_spec, --doe_spec_file DOE_SPEC_FILE + File in csv format that specifies factor, level ranges + used for building design of experiment (DOE) samples + using function sample_doepy(). If not provided, a + dictionary of factor / level ranges must be supplied + to sample_doepy() directly instead of the file. 
+ -doe_bb_centers, --doe_box_behnken_centers DOE_BOX_BEHNKEN_CENTERS + Number of center points to include in the final design + [default: 1] + -doe_cc_center, --doe_central_composite_center DOE_CENTRAL_COMPOSITE_CENTER + A 1-by-2 array of integers, the number of center + points in each block of the design. [default] + -doe_cc_alpha, --doe_central_composite_alpha DOE_CENTRAL_COMPOSITE_ALPHA + A string describing the effect of alpha has on the + variance. "alpha" can take on two values: "orthogonal" + or "o", and "rotatable" or "r" [default o] + -doe_cc_face, --doe_central_composite_face DOE_CENTRAL_COMPOSITE_FACE + The relation between the start points and the corner + (factorial) points. There are three options for this + input: 1. "circumscribed" or "ccc": This is the + original form of the central composite design. The + star points are at some distance "alpha" from the + center, based on the properties desired for the + design. The start points establish new extremes for + the low and high settings for all factors. These + designs have circular, spherical, or hyperspherical + symmetry and require 5 levels for each factor. + Augmenting an existing factorial or resolution V + fractional factorial design with star points can + produce this design. 2. "inscribed" or "cci": For + those situations in which the limits specified for + factor settings are truly limits, the CCI design uses + the factors settings as the star points and creates a + factorial or fractional factorial design within those + limits (in other words, a CCI design is a scaled down + CCC design with each factor level of the CCC design + divided by "alpha" to generate the CCI design). This + design also requires 5 levels of each factor. 3. + "faced" or "ccf": In this design, the star points are + at the center of each face of the factorial space, so + alpha" = 1. This variety requires 3 levels of each + factor. 
Augmenting an existing factorial or resolution + V design with appropriate star points can also produce + this design. [default 2,2] + -doe_prob_distr, --doe_prob_distribution DOE_PROB_DISTRIBUTION + Analytical probability distribution to be applied over + the randomized sampling. Takes strings: "Normal", + "Poisson", "Exponential", "Beta", "Gamma" [default + Normal] + -discr_algo, --discretization_algo DISCRETIZATION_ALGO + Discretization algorithm to use. The possible options + are: * "uniform": constracts constant-width bins; * + "quantile": uses the quantiles values to have equally + populated bins in each feature; * "kmeans": defines + bins based on a k-means clustering performed on each + feature independently; * "jenks": implements the + Fisher-Jenks Natural Breaks algorithm; * "ordinals": + converts the feature values into ordinals correponding + to the location of the correponding value in the + ascending sorted list of unique values in that + feature. * "ranks": converts the feature values into + ranks (ranks used in Spearman's rank correlation) + [default uniform] + -discr_bins, --discretization_bins DISCRETIZATION_BINS + Number of required bins in a discretization algorithm + [default 10] + -discr_labels, --discretization_labels DISCRETIZATION_LABELS + If true, string labels (e.g., "Bin2") will be used to + denote levels of the categorical feature resulting + from discretization; othewise integers (e.g., 2) will + be used to represent the levels [default True] + -discr_type, --discretization_type DISCRETIZATION_TYPE + The type of the categorical feature resulting from + discretization. 
Possible values are: * "object": the + feature will be of type "object" -- with strings as + values; * "category": the feature will be of pandas + type "category" -- with levels unordered; these + correspond to factors in statistics (and in R lamguage + terminology) * "ordered": the feature will be of + pandas type "category" -- with levels ordered; these + correspond to ordered factors in statistics (and in R + language terminology) * "integer": The feature will be + of type int, its values will be the resulting bin + numbers when enumerating the bins from left to right. + [default category] + -mi_method, --mutual_information_method MUTUAL_INFORMATION_METHOD + The mutual information method to be used when + computing feature correlation scores with responses. + Supported options are "shannon", "normalized", and + "adjusted", for Shannon's mutual information, for the + normalized mutual information, and the adjusted mutual + information, respectively; in addition, with the + option value "correlation", the mutual information is + computed from a correlation coefficient corr between + the feature and response using equation mi = -0.5 * + log(1 - (corr**2)), which is primarily useful for + computing mutual information for (preferably) normally + distributed continuous random variables [default: + normalized] + -corr_and_mi, --correlations_and_mutual_information CORRELATIONS_AND_MUTUAL_INFORMATION + Should correlation and mutual information between the + features and the response(s) be computed when + computing scores for feature selection and ranking + [default: True] + -discret_num, --discretize_numeric_features DISCRETIZE_NUMERIC_FEATURES + The mutual information method to be used for + discretizing numeric features, when computing feature + correlation scores with responses [default: None] + -cont_est, --continuous_correlation_estimators CONTINUOUS_CORRELATION_ESTIMATORS + Correlation estimators for continuous features, to be + used in correlation, mutual 
information and MRMR + feature selection algorithms. The options are pearson, + spearman, kendall and frequency, and any subset of + these specified thru a comma-separated string. In + addition, the value "all" indicates that all the + options should be used and value "none" indicates that + no options should be used. [default: pearson,spearman] + -psg_quality, --psg_quality_target PSG_QUALITY_TARGET + Quality function (quality target/measure) used for + defining the importance criterion for range selction. + The supported options (both for numeric as well as + binary responses) are {} and {} [default Lift] + -psg_dim, --psg_max_dimension PSG_MAX_DIMENSION + Maximal dimension of selected range tuples (feature- + range tuples) [default 3] + -psg_top, --psg_top_ranked PSG_TOP_RANKED + Required count of selected range tuples (feature-range + tuples) [default 15] + -spec, --spec SPEC Name of spec file including full path, must be + provided [default None] + -delta_rel, --delta_relative DELTA_RELATIVE + exclude (1+DELTA)*radius region for non-grid + components [default: 0.01] + -delta_abs, --delta_absolute DELTA_ABSOLUTE + exclude (1+DELTA)*radius region for non-grid + components [default: 0.0] + -rad_rel, --radius_relative RADIUS_RELATIVE + Relative radius, in terms of percentage of the value + of the knob to which it applies to compute the + absolute radius to be used in theta (stability) + constraint. Overrides relative radius value specified + in the spec file [default: None] + -rad_abs, --radius_absolute RADIUS_ABSOLUTE + Absolute value of radius to be used in theta + (stability) constraint. 
Override relative radius value + specified in the spec file [default: None] + -alpha, --alpha ALPHA + constraints on model inputs (free inputs or + configuration knobs) [default: None] + -beta, --beta BETA constraints on model outputs, relevant for "optimize" + mode only (when selecting model configuration that are + safe and near-optimal) [default: None] + -eta, --eta ETA global constraints on/accross knobs that define legal + configurations of knobs during search for optimal + configurations in "optimize" and "optsyn" modes + [default: None] + -compress_rules, --compress_rules COMPRESS_RULES + Should rules that represent tree branches be + compressed to eliminate redundant repeated splitting + of ranges of model features after training tree based + models, in order to build smaller model terms? + [default True] + -simplify_terms, --simplify_terms SIMPLIFY_TERMS + Should terms be simplified using before building + solver instance in model exploration modes? [default + False] + -tree_encoding, --tree_encoding TREE_ENCODING + Method to encode tree model to solvers. Can be "flat", + "nested", or "branched". The flat encoding creates a + formula from each branch of a tree, while the nested + encoding builds formula from branches using nested if- + then-else (ite) expressions. The branched encoding + also uses ite expressions and in addition the branch + conditions in ite expressions are shared across all + responses [default nested] + -nnet_encoding, --nnet_encoding NNET_ENCODING + Method to encode Keras Neural Nets model to solvers. + Can be "layered" or "nested". 
The layered encoding + creates a formula from each internal node of the NN + with nodes in the previous layer as inputs, while + nested encoding builds a monolithic term for each + response representing the function for that response + [default nested] + -trace_runtime, --trace_runtime TRACE_RUNTIME + Should trace include solver runtimes and what + precision to use in terms of number of decimal points + after 0; the option value 0 means to not include the + runtimes in the trace [default 0] + -trace_prec, --trace_precision TRACE_PRECISION + Decimals after 0 to use when rounding fractions; + option value 0 means to use fractions (implying no + rounding) [default: 0] + -trace_anonym, --trace_anonymize TRACE_ANONYMIZE + Should anonymized names of system inputs, knobs and + outputs be uses in trace log file?[default: False] + -quer_names, --query_names QUERY_NAMES + Names of optimization objectives [default None] + -quer_exprs, --query_expressions QUERY_EXPRESSIONS + Semicolon seperated list of expressions (functions) to + be applied to the responses to convert them into + optimization objectives [default: None] + -lemma_prec, --lemma_precision LEMMA_PRECISION + Number of decimals after zero to use when + approximating lemmas in model exploration modes. 
The + default value 0 means that lemmas should not be + approximated (full precision should be used [default: + 0] + -asrt_names, --assertions_names ASSERTIONS_NAMES + Names of optimization objectives [default None] + -asrt_exprs, --assertions_expressions ASSERTIONS_EXPRESSIONS + Semicolon seperated list of expressions (functions) to + be applied to the responses to convert them into + optimization objectives [default: None] + -epsilon, --epsilon EPSILON + ratio of the length of an estimated range of an + objective, computed per objective based on its + estimated min and max bounds [default: 0.05] + -center_offset, --center_offset CENTER_OFFSET + Center threshold offset of threshold [default: 0] + -objv_names, --objectives_names OBJECTIVES_NAMES + Names of optimization objectives [default None] + -objv_exprs, --objectives_expressions OBJECTIVES_EXPRESSIONS + Semicolon seperated list of expressions (functions) to + be applied to the responses to convert them into + optimization objectives [default: None] + -scale_objv, --scale_objectives SCALE_OBJECTIVES + Should optimization objectives be scaled using scaler + specified through option "data_scaler"? [default: + True] + -pareto, --optimize_pareto OPTIMIZE_PARETO + Should optimization be per objective (even if there + are multiple objectives) or pareto optimization must + be performed? [default: True] + -frac_aprox, --approximate_fractions APPROXIMATE_FRACTIONS + Should fraction values form satisfying assignments be + converted to approximate reals? [default: True] + -frac_prec, --fraction_precision FRACTION_PRECISION + Decimal precision when approximating fractions by + reals [default 64] + -vacuity, --vacuity_check VACUITY_CHECK + Should solver problem instance vacuity check be + performed? Vacuity checks whether the constraints are + consistent and therefore at least one satisfiable + assignment exist to solver constraints. 
Relevant in + "verify", "query", "optimize" and "optsyn" modes + [default: True] + -opt_strategy, --optimization_strategy OPTIMIZATION_STRATEGY + Strategy (algorithm) to use for single objective + optimization in the "optimize" and "optsyn" modes. + Supported options are "lazy" and "eager" [default + eager] + -solver, --solver SOLVER + Solver to use in model exploration modes "verify," + "query", "optimize" and "optsyn". [default: z3] + -solver_path, --solver_path SOLVER_PATH + Path to solver to use in model exploration modes + "verify," "query", "optimize" and "optsyn". [default: + None] + -solver_logic, --solver_logic SOLVER_LOGIC + SMT2-lib theory with respect to which to solve model + exploration task at hand, in modes "verify," "query", + "optimize" and "optsyn". [default: ALL] diff --git a/docker/python3.13/run_mathsat_build b/docker/python3.13/run_mathsat_build new file mode 100755 index 00000000..fb276366 --- /dev/null +++ b/docker/python3.13/run_mathsat_build @@ -0,0 +1,8 @@ +#!/usr/bin/tcsh -f +set mathsat=mathsat-5.6.8-linux-x86_64-reentrant +set mathsat_bin_dir=`realpath $0 | xargs dirname`/external/$mathsat/bin +mkdir -p $mathsat_bin_dir +wget --tries=5 --timeout=30 --waitretry=2 https://mathsat.fbk.eu/release/${mathsat}.tar.gz -O /tmp/${mathsat}.tar.gz +cd /tmp +tar -xvf ${mathsat}.tar.gz >& ${mathsat}.tar.log +cp -p $mathsat/bin/mathsat $mathsat_bin_dir diff --git a/docker/python3.13/run_meson_build b/docker/python3.13/run_meson_build new file mode 100755 index 00000000..9c27d0e7 --- /dev/null +++ b/docker/python3.13/run_meson_build @@ -0,0 +1,32 @@ +#!/bin/bash +set -e # Exit on error + +# Get the directory where this script is located +build_dir="build" +output_dir="$HOME/.local_smlp" +kay_dir="$output_dir/kay" +smlp_dir="$output_dir/smlp" + +# Clean up existing directories +rm -rf "/app/smlp/utils/poly/$build_dir" "$output_dir" 2>/dev/null || true + +# Clone the kay repository +git clone https://github.com/fbrausse/kay "$kay_dir" + +# Clone the 
smlp repository
+git clone https://github.com/SMLP-Systems/smlp
+cd /app/smlp
+
+if [ $(git branch -r --list origin/smlp_python313) ]; then
+    git switch smlp_python313
+fi
+
+# Run meson setup
+cd /app/smlp/utils/poly
+python3.13 -m mesonbuild.mesonmain setup \
+    --wipe \
+    -Dkay-prefix="$kay_dir" \
+    --prefix "$output_dir" "$build_dir"
+
+/usr/bin/ninja -C build install
+cp -rp $HOME/.local_smlp/lib/python3/dist-packages/smlp /usr/local/lib/python3.13/dist-packages
diff --git a/docker/python3.13/run_python_boost_build b/docker/python3.13/run_python_boost_build
new file mode 100644
index 00000000..5c699318
--- /dev/null
+++ b/docker/python3.13/run_python_boost_build
@@ -0,0 +1,11 @@
+#!/usr/bin/tcsh -f
+wget --tries=5 --timeout=30 --waitretry=2 https://archives.boost.io/release/1.83.0/source/boost_1_83_0.tar.gz -O /tmp/boost_1_83_0.tar.gz
+cd /tmp
+tar -xvf boost_1_83_0.tar.gz >& boost_1_83_0.tar.log
+cd boost_1_83_0
+./bootstrap.sh --with-python=/usr/bin/python3.13 --with-libraries=python
+./b2 install --prefix=$HOME/boost_py313 --with-python python=3.13
+set arch=`uname -m`
+cp -p $HOME/boost_py313/lib/libboost_python313.a /usr/lib/${arch}-linux-gnu
+cp -p $HOME/boost_py313/lib/libboost_python313.so.1.83.0 /usr/lib/${arch}-linux-gnu
+ln -s /usr/lib/${arch}-linux-gnu/{libboost_python313.so.1.83.0,libboost_python313.so}
diff --git a/docker/python3.13/run_tkdiff_patch b/docker/python3.13/run_tkdiff_patch
new file mode 100755
index 00000000..92f9ce21
--- /dev/null
+++ b/docker/python3.13/run_tkdiff_patch
@@ -0,0 +1,2 @@
+#!/usr/bin/tcsh -f
+sed -i.bak 's@set factor \[expr {$mapheight / $lines}\]@set factor [expr {$lines > 0 ? 
$mapheight / double($lines) : 1.0}]@' /usr/bin/tkdiff diff --git a/docker/python3.13/start_vnc b/docker/python3.13/start_vnc new file mode 100755 index 00000000..9e344393 --- /dev/null +++ b/docker/python3.13/start_vnc @@ -0,0 +1,18 @@ +#!/usr/bin/tcsh -f +if($?VNC_SCREEN_RESOLUTION) then + set screen_resolution=$VNC_SCREEN_RESOLUTION +else + set screen_resolution=1280x1024 +endif +Xvfb $DISPLAY -screen 0 ${screen_resolution}x16 >& Xvfb.log & +sleep 2 +set d=`echo $DISPLAY | tr -d :` +@ p = $d + 5801 +x11vnc -display $DISPLAY -forever -nopw -shared -rfbport $p >& x11nc.log & +autocutsel -selection CLIPBOARD -fork +autocutsel -selection PRIMARY -fork +if(`xset q >& /dev/null ; echo $status` == 0) then + echo "VNC server has started successfully with screen resolution $screen_resolution" +else + echo "VNC server failed - GUI won't work" +endif diff --git a/docker/python3.13/tkagg_patch.sh b/docker/python3.13/tkagg_patch.sh new file mode 100755 index 00000000..e376a1d9 --- /dev/null +++ b/docker/python3.13/tkagg_patch.sh @@ -0,0 +1,6 @@ +#!/usr/bin/bash +sed -i.bak '/^from/iif os.path.exists("\/.dockerenv"): \ + print("Script is running inside a Docker container.") \ + import matplotlib \ + matplotlib.use("TkAgg") \ + import matplotlib.pyplot as plt\n' /app/smlp/src/run_smlp.py diff --git a/manylinux_2_28/repair_wheel.py b/manylinux_2_28/repair_wheel.py new file mode 100644 index 00000000..eee53262 --- /dev/null +++ b/manylinux_2_28/repair_wheel.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +""" +Run auditwheel repair on the smlp wheel in dist/ to produce a +self-contained manylinux wheel with all .so dependencies bundled. + +Usage: + python3 repair_wheel.py [dist_dir] [--plat PLATFORM] + +dist_dir defaults to 'dist/'. +--plat defaults to 'manylinux_2_28_x86_64'. 
+""" +import sys +import subprocess +import argparse +from pathlib import Path + + +def _find_auditwheel_location() -> str | None: + result = subprocess.run( + [sys.executable, "-m", "pip", "show", "auditwheel"], + capture_output=True, text=True + ) + if result.returncode == 0: + for line in result.stdout.splitlines(): + if line.startswith("Location:"): + return line.split(":", 1)[1].strip() + + user_site = ( + Path.home() / ".local" / "lib" + / f"python{sys.version_info.major}.{sys.version_info.minor}" + / "site-packages" + ) + if (user_site / "auditwheel").exists(): + return str(user_site) + + return None + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("dist_dir", nargs="?", default="dist") + parser.add_argument("--plat", default="manylinux_2_28_x86_64", + help="Target platform tag (default: manylinux_2_28_x86_64)") + args = parser.parse_args() + + dist_dir = Path(args.dist_dir) + + location = _find_auditwheel_location() + if not location: + print("ERROR: auditwheel not found.") + print("Install with: python3 -m pip install auditwheel patchelf") + sys.exit(1) + + wheels = sorted(dist_dir.glob("smlp-*linux_x86_64.whl"), key=lambda p: p.stat().st_mtime) + if not wheels: + print(f"ERROR: No linux_x86_64 wheel found in {dist_dir}/") + sys.exit(1) + + wheel = wheels[-1] + print(f"Repairing {wheel} with platform {args.plat} ...") + subprocess.check_call([ + sys.executable, "-c", + f"import sys; sys.path.insert(0, {location!r}); " + f"from auditwheel.main import main; sys.exit(main())", + "repair", str(wheel), "--plat", args.plat, "-w", str(dist_dir) + ]) + print(f"Done. manylinux wheel saved to {dist_dir}/") + + +if __name__ == "__main__": + main() diff --git a/manylinux_2_28/setup.py b/manylinux_2_28/setup.py new file mode 100644 index 00000000..a67175a4 --- /dev/null +++ b/manylinux_2_28/setup.py @@ -0,0 +1,919 @@ +""" +setup.py for the smlp package. 
+ +System prerequisites (require sudo, install once) +-------------------------------------------------- + sudo apt install gcc g++ git make m4 pkg-config + +User prerequisites (no sudo, install once) +------------------------------------------ + python3.13 -m pip install --user meson ninja z3-solver + +Build flow +---------- +1. Boost.Python 1.83 is compiled from source for Python 3.13 and cached in + ~/.local/boost_py313 (or the path in $BOOST_CACHE_DIR). + The build is skipped on subsequent runs if the cache directory already + contains the marker file .built_for_python. + Set $BOOST_ROOT to point at an existing Boost prefix to skip this step + entirely. + +2. The 'kay' C++ dependency is cloned from GitHub into the pip build-temp + directory (or reused from $KAY_DIR). + +3. `meson setup` + `ninja install` is run inside utils/poly/ of the + repository this setup.py lives in. No repo cloning is performed; + setup.py is expected to be in the root of the smlp checkout. + +4. The installed smlp extension package is copied into the wheel. + +Environment variables +--------------------- +BOOST_ROOT Reuse an existing Boost prefix – skips download + compile. + e.g. export BOOST_ROOT=~/.local/boost_py313 +BOOST_CACHE_DIR Where to cache the compiled Boost (default: ~/.local/boost_py313). +BOOST_VERSION Boost version to download (default: 1.83.0). +KAY_DIR Reuse an existing kay checkout. +GMP_ROOT Reuse an existing GMP prefix – skips download + compile. + e.g. export GMP_ROOT=~/.local/gmp +GMP_CACHE_DIR Where to cache compiled GMP (default: ~/.local/gmp). +GMP_VERSION GMP version to download (default: 6.3.0). +Z3_PREFIX Reuse an existing Z3 install prefix – skips pip z3-solver. + e.g. export Z3_PREFIX=~/.local/z3 +Z3_VERSION Z3 version to download binary for (default: 4.8.12). +Z3_BIN_DIR Path to directory containing z3 binary (default: ~/.local/z3/bin). +SMLP_BRANCH Git branch to switch to in the smlp repo (auto-detected if unset). 
+""" + +import os +import platform +import shutil +import subprocess +import sys +import tarfile +import urllib.request +from pathlib import Path + +from setuptools import setup +from setuptools.command.build_ext import build_ext as _build_ext + + +# --------------------------------------------------------------------------- +# Constants / defaults +# --------------------------------------------------------------------------- + +BOOST_VERSION = os.environ.get("BOOST_VERSION", "1.83.0") +BOOST_CACHE_DIR = Path( + os.environ.get("BOOST_CACHE_DIR", Path.home() / ".local" / "boost_py313") +).expanduser() + +# Default Z3_PREFIX: where z3-solver installs its lib/libz3.so +# This is the standard location when installed via: +# python3.13 -m pip install --user z3-solver +Z3_DEFAULT_PREFIX = ( + Path.home() / ".local" / "lib" / f"python{sys.version_info.major}.{sys.version_info.minor}" + / "site-packages" / "z3" +) + +GMP_VERSION = os.environ.get("GMP_VERSION", "6.3.0") +GMP_CACHE_DIR = Path( + os.environ.get("GMP_CACHE_DIR", Path.home() / ".local" / "gmp") +).expanduser() + +Z3_VERSION = os.environ.get("Z3_VERSION", "4.8.12") +Z3_BIN_DIR = Path( + os.environ.get("Z3_BIN_DIR", Path.home() / ".local" / "z3" / "bin") +).expanduser() + +# Root of this repository (where setup.py lives) +REPO_ROOT = Path(__file__).parent.resolve() + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _run(cmd, **kwargs): + print(f"[smlp build] $ {' '.join(str(c) for c in cmd)}") + result = subprocess.run([str(c) for c in cmd], **kwargs) + if result.returncode != 0: + # Print captured output if any + if hasattr(result, "stdout") and result.stdout: + print(result.stdout) + if hasattr(result, "stderr") and result.stderr: + print(result.stderr, file=sys.stderr) + raise subprocess.CalledProcessError(result.returncode, cmd) + + +def _verify_tarball(path: Path) -> bool: + 
"""Return True if the tarball can be opened and is not truncated.""" + try: + with tarfile.open(path) as tf: + # Read all members to detect truncation + tf.getmembers() + return True + except Exception: + return False + + +def _download(url: str, dest: Path, retries: int = 5) -> None: + """Download a file with retries, verifying integrity after each attempt.""" + import time + + # Remove any existing file — it may be a corrupt partial download + if dest.exists(): + if _verify_tarball(dest): + print(f"[smlp build] Using verified cached tarball {dest}") + return + else: + print(f"[smlp build] Removing corrupt cached tarball {dest}") + dest.unlink() + + for attempt in range(1, retries + 1): + try: + print(f"[smlp build] Downloading {url} (attempt {attempt}/{retries}) ...") + urllib.request.urlretrieve(url, dest) + if _verify_tarball(dest): + print(f"[smlp build] Download verified OK.") + return + else: + print(f"[smlp build] Download corrupt, retrying ...") + dest.unlink() + except Exception as e: + print(f"[smlp build] Download failed: {e}") + if dest.exists(): + dest.unlink() + if attempt < retries: + wait = 2 ** attempt + print(f"[smlp build] Retrying in {wait}s ...") + time.sleep(wait) + sys.exit(f"[smlp build] ERROR: failed to download {url} after {retries} attempts.") + + +def _meson_bin(build_tmp: Path) -> list[str]: + """ + Write a meson wrapper script and return the command to invoke it. + + The wrapper explicitly adds the meson install location to sys.path, + so it works in pip's isolated build environment where user site-packages + is not on sys.path. Meson stores the wrapper path in the build dir and + reuses it for internal calls like `meson install`, so it must be a real + executable file — not a -c string. 
+ """ + # Find where mesonbuild is installed via pip show + mesonbuild_location = None + result = subprocess.run( + [sys.executable, "-m", "pip", "show", "meson"], + capture_output=True, text=True + ) + if result.returncode == 0: + for line in result.stdout.splitlines(): + if line.startswith("Location:"): + mesonbuild_location = line.split(":", 1)[1].strip() + break + + # Fallback: check user site-packages directly + if not mesonbuild_location: + user_site = ( + Path.home() / ".local" / "lib" + / f"python{sys.version_info.major}.{sys.version_info.minor}" + / "site-packages" + ) + if (user_site / "mesonbuild").exists(): + mesonbuild_location = str(user_site) + + # Fallback: find mesonbuild via importlib (works when on sys.path) + if not mesonbuild_location: + import importlib.util + spec = importlib.util.find_spec("mesonbuild") + if spec and spec.submodule_search_locations: + mesonbuild_location = str(Path(list(spec.submodule_search_locations)[0]).parent) + + # Fallback: use meson binary directly from PATH + if not mesonbuild_location: + from shutil import which + meson_bin = which("meson") + if meson_bin: + print(f"[smlp build] Using meson from PATH: {meson_bin}") + return [meson_bin] + + if not mesonbuild_location: + raise RuntimeError( + f"[smlp build] meson not found. Run: python{sys.version_info.major}.{sys.version_info.minor} -m pip install meson" + ) + + print(f"[smlp build] meson location: {mesonbuild_location}") + + # Write a wrapper script with a proper shebang so Meson can store and + # reuse its path for internal calls (meson install, meson test, etc.) 
+ wrapper = build_tmp / "meson" + wrapper.write_text( + f"#!/usr/bin/env {sys.executable}\n" + "import sys\n" + f"sys.path.insert(0, {mesonbuild_location!r})\n" + "from mesonbuild.mesonmain import main\n" + "sys.exit(main())\n" + ) + wrapper.chmod(0o755) + print(f"[smlp build] Using meson wrapper: {wrapper}") + return [str(wrapper)] + + +def _ninja_bin() -> str: + """ + Resolve the ninja binary, preferring user-space installs over system ones. + + Search order: + 1. The 'ninja' PyPI package (pip install ninja → /bin/ninja) + 2. ~/.local/bin/ninja (pip install --user ninja) + 3. PATH (last resort — may find /usr/bin/ninja) + """ + import importlib.util + from shutil import which + + # ── 1. pip ninja package ───────────────────────────────────────────── + spec = importlib.util.find_spec("ninja") + if spec is not None: + try: + import ninja as _ninja_pkg # type: ignore + candidate = Path(_ninja_pkg.BIN_DIR) / "ninja" + if candidate.exists(): + print(f"[smlp build] Using pip ninja: {candidate}") + return str(candidate) + except Exception: + pass + + # ── 2. ~/.local/bin (pip install --user) ───────────────────────────── + user_ninja = Path.home() / ".local" / "bin" / "ninja" + if user_ninja.exists(): + print(f"[smlp build] Using user ninja: {user_ninja}") + return str(user_ninja) + + # ── 3. PATH fallback ───────────────────────────────────────────────── + found = which("ninja") + if found: + print(f"[smlp build] Using ninja from PATH: {found}") + return found + + raise RuntimeError( + "[smlp build] ninja not found. Run: pip install ninja" + ) + + +# --------------------------------------------------------------------------- +# Step 1 – Boost.Python (compiled from source, cached in user-space) +# --------------------------------------------------------------------------- + +def _boost_prefix() -> Path: + """ + Return the Boost install prefix, building from source if necessary. + + Search order: + 1. $BOOST_ROOT env var → use as-is, no build + 2. 
BOOST_CACHE_DIR marker → cache hit, skip build + 3. Download + compile into BOOST_CACHE_DIR + """ + # ── Option A: caller supplied an existing prefix ────────────────────── + env_root = os.environ.get("BOOST_ROOT") + if env_root: + prefix = Path(env_root).expanduser() + print(f"[smlp build] Using BOOST_ROOT={prefix}") + return prefix + + # ── Option B: cached build already present ──────────────────────────── + py_tag = f"python{sys.version_info.major}{sys.version_info.minor}" + tag_file = BOOST_CACHE_DIR / f".built_for_{py_tag}" + if tag_file.exists(): + print(f"[smlp build] Boost cache found at {BOOST_CACHE_DIR}, skipping build.") + return BOOST_CACHE_DIR + + # ── Option C: download + compile into user-space cache ──────────────── + ver_flat = BOOST_VERSION.replace(".", "_") + tarball_name = f"boost_{ver_flat}.tar.gz" + url = f"https://archives.boost.io/release/{BOOST_VERSION}/source/{tarball_name}" + + # Temporary directory for download + extraction (sibling of cache dir) + tmp_dir = BOOST_CACHE_DIR.parent / "_boost_build_tmp" + tmp_dir.mkdir(parents=True, exist_ok=True) + + tarball_path = tmp_dir / tarball_name + if not tarball_path.exists(): + print(f"[smlp build] Downloading Boost {BOOST_VERSION} ...") + _download(url, tarball_path) + else: + print(f"[smlp build] Using cached tarball {tarball_path}") + + print(f"[smlp build] Extracting {tarball_name} ...") + with tarfile.open(tarball_path) as tf: + tf.extractall(tmp_dir) + + src = tmp_dir / f"boost_{ver_flat}" + + print(f"[smlp build] Bootstrapping Boost (python={sys.executable}) ...") + _run( + ["./bootstrap.sh", + f"--with-python={sys.executable}", + "--with-libraries=python"], + cwd=str(src), + ) + + BOOST_CACHE_DIR.mkdir(parents=True, exist_ok=True) + print(f"[smlp build] Compiling Boost → {BOOST_CACHE_DIR} (this takes a few minutes) ...") + py_ver = f"{sys.version_info.major}.{sys.version_info.minor}" + py_inc = subprocess.check_output( + [sys.executable, "-c", + "import sysconfig; 
print(sysconfig.get_path('include'))"], + text=True + ).strip() + + # Write a user-config.jam that tells Boost.Python exactly which Python + # to use and disables linking against libpython (required for manylinux + # where libpython.so does not exist). + user_config = src / "user-config.jam" + user_config.write_text( + f"using python : {py_ver} : {sys.executable} : {py_inc} : ;\n" + ) + + _run( + ["./b2", "install", + f"--prefix={BOOST_CACHE_DIR}", + "--with-python", + f"python={py_ver}"], + cwd=str(src), + ) + + # Leave a marker so we skip the build on the next pip install + tag_file.touch() + + # Remove the source + tarball; keep only the install + shutil.rmtree(tmp_dir, ignore_errors=True) + + print(f"[smlp build] Boost built and cached at {BOOST_CACHE_DIR}") + return BOOST_CACHE_DIR + + + +def _boost_env(prefix: Path) -> dict: + """ + Environment variables for meson/ninja so the user-space Boost is found + without touching /usr/lib. + """ + lib_dir = prefix / "lib" + inc_dir = prefix / "include" + + env = os.environ.copy() + env["BOOST_ROOT"] = str(prefix) + env["BOOST_INCLUDEDIR"] = str(inc_dir) + env["BOOST_LIBRARYDIR"] = str(lib_dir) + + # Force Meson to use the same Python that is running this build script, + # preventing it from falling back to the system Python 3.12. + env["PYTHON"] = sys.executable + env["PYTHON3"] = sys.executable + + # Tell Meson the exact versioned Boost.Python library name, + # e.g. 
Python 3.13 → boost_python313, Python 3.11 → boost_python311 + py_ver = f"{sys.version_info.major}{sys.version_info.minor}" + env["BOOST_PYTHON_LIBNAME"] = f"boost_python{py_ver}" + + existing_ld = env.get("LD_LIBRARY_PATH", "") + env["LD_LIBRARY_PATH"] = f"{lib_dir}:{existing_ld}" if existing_ld else str(lib_dir) + + pkgconfig = lib_dir / "pkgconfig" + existing_pkg = env.get("PKG_CONFIG_PATH", "") + env["PKG_CONFIG_PATH"] = f"{pkgconfig}:{existing_pkg}" if existing_pkg else str(pkgconfig) + + return env + + +def _add_z3_to_env(env: dict, z3_lib: Path) -> dict: + """Prepend the z3-solver lib/bin directories to the relevant env vars.""" + existing_ld = env.get("LD_LIBRARY_PATH", "") + env["LD_LIBRARY_PATH"] = f"{z3_lib}:{existing_ld}" if existing_ld else str(z3_lib) + + existing_pkg = env.get("PKG_CONFIG_PATH", "") + env["PKG_CONFIG_PATH"] = f"{z3_lib}:{existing_pkg}" if existing_pkg else str(z3_lib) + + # Add z3 binary to PATH so meson can find the solver executable + z3_bin = z3_lib.parent / "bin" + existing_path = env.get("PATH", os.environ.get("PATH", "")) + env["PATH"] = f"{z3_bin}:{existing_path}" if existing_path else str(z3_bin) + + return env + + +def _add_gmp_to_env(env: dict, gmp_prefix: Path) -> dict: + """Prepend the GMP lib/include directories to the relevant env vars.""" + gmp_lib = gmp_prefix / "lib" + gmp_inc = gmp_prefix / "include" + + existing_ld = env.get("LD_LIBRARY_PATH", "") + env["LD_LIBRARY_PATH"] = f"{gmp_lib}:{existing_ld}" if existing_ld else str(gmp_lib) + + pkgconfig = gmp_lib / "pkgconfig" + existing_pkg = env.get("PKG_CONFIG_PATH", "") + env["PKG_CONFIG_PATH"] = f"{pkgconfig}:{existing_pkg}" if existing_pkg else str(pkgconfig) + + existing_cpp = env.get("CPPFLAGS", "") + env["CPPFLAGS"] = f"-I{gmp_inc} {existing_cpp}".strip() + + existing_ld_flags = env.get("LDFLAGS", "") + env["LDFLAGS"] = f"-L{gmp_lib} {existing_ld_flags}".strip() + + return env + + +# --------------------------------------------------------------------------- +# 
Step 2 – kay dependency +# --------------------------------------------------------------------------- + +def _ensure_kay(build_tmp: Path) -> Path: + kay_env = os.environ.get("KAY_DIR") + if kay_env: + kay_dir = Path(kay_env).expanduser() + print(f"[smlp build] Using existing kay at {kay_dir}") + return kay_dir + + kay_dir = build_tmp.resolve() / "kay" + if kay_dir.exists(): + print(f"[smlp build] Reusing kay clone at {kay_dir}") + else: + _run(["git", "clone", "https://github.com/fbrausse/kay", str(kay_dir)]) + return kay_dir + + +# --------------------------------------------------------------------------- +# Step 1c – GMP (compiled from source, cached in user-space) +# --------------------------------------------------------------------------- + +def _write_gmp_pc(prefix: Path) -> None: + """ + Write a gmp.pc pkg-config file into /lib/pkgconfig/. + GMP does not generate one by default, so Meson cannot find it + via pkg-config without this file. + """ + pkgconfig_dir = prefix / "lib" / "pkgconfig" + pkgconfig_dir.mkdir(parents=True, exist_ok=True) + pc_file = pkgconfig_dir / "gmp.pc" + pc_file.write_text( + f"prefix={prefix}\n" + "exec_prefix=${prefix}\n" + "libdir=${exec_prefix}/lib\n" + "includedir=${prefix}/include\n" + "\n" + "Name: gmp\n" + "Description: GNU Multiple Precision Arithmetic Library\n" + f"Version: {GMP_VERSION}\n" + "Libs: -L${libdir} -lgmp\n" + "Cflags: -I${includedir}\n" + ) + print(f"[smlp build] Wrote pkg-config file: {pc_file}") + + # Also write gmpxx.pc for the C++ wrapper library + pcxx_file = pkgconfig_dir / "gmpxx.pc" + pcxx_file.write_text( + f"prefix={prefix}\n" + "exec_prefix=${prefix}\n" + "libdir=${exec_prefix}/lib\n" + "includedir=${prefix}/include\n" + "\n" + "Name: gmpxx\n" + "Description: GNU Multiple Precision Arithmetic Library (C++ bindings)\n" + f"Version: {GMP_VERSION}\n" + "Requires: gmp\n" + "Libs: -L${libdir} -lgmpxx -lgmp\n" + "Cflags: -I${includedir}\n" + ) + print(f"[smlp build] Wrote pkg-config file: {pcxx_file}") 
+ + +def _gmp_prefix() -> Path: + """ + Return the GMP install prefix, building from source if necessary. + + Search order: + 1. $GMP_ROOT env var → use as-is, no build + 2. GMP_CACHE_DIR marker → cache hit, skip build + 3. Download + compile into GMP_CACHE_DIR + """ + # ── Option A: caller supplied an existing prefix ────────────────────── + env_root = os.environ.get("GMP_ROOT") + if env_root: + prefix = Path(env_root).expanduser() + print(f"[smlp build] Using GMP_ROOT={prefix}") + return prefix + + # ── Option B: cached build already present ──────────────────────────── + tag_file = GMP_CACHE_DIR / ".built" + if tag_file.exists(): + print(f"[smlp build] GMP cache found at {GMP_CACHE_DIR}, skipping build.") + _write_gmp_pc(GMP_CACHE_DIR) + return GMP_CACHE_DIR + + # ── Option C: download + compile into user-space cache ──────────────── + tarball_name = f"gmp-{GMP_VERSION}.tar.xz" + url = f"https://gmplib.org/download/gmp/{tarball_name}" + + tmp_dir = GMP_CACHE_DIR.parent / "_gmp_build_tmp" + tmp_dir.mkdir(parents=True, exist_ok=True) + + tarball_path = tmp_dir / tarball_name + if not tarball_path.exists(): + print(f"[smlp build] Downloading GMP {GMP_VERSION} ...") + _download(url, tarball_path) + else: + print(f"[smlp build] Using cached tarball {tarball_path}") + + print(f"[smlp build] Extracting {tarball_name} ...") + with tarfile.open(tarball_path) as tf: + tf.extractall(tmp_dir) + + src = tmp_dir / f"gmp-{GMP_VERSION}" + + GMP_CACHE_DIR.mkdir(parents=True, exist_ok=True) + print(f"[smlp build] Compiling GMP → {GMP_CACHE_DIR} (this takes a minute) ...") + import platform as _platform + machine = _platform.machine() # e.g. 
x86_64, aarch64 + system = _platform.system().lower() # linux + host = f"{machine}-pc-{system}-gnu" + + _run( + ["./configure", + f"--prefix={GMP_CACHE_DIR}", + f"--host={host}", + "--enable-shared", + "--enable-static", + "--disable-assembly", + "--enable-cxx"], # avoids platform-specific asm issues + cwd=str(src), + ) + _run(["make", f"-j{os.cpu_count() or 1}"], cwd=str(src)) + _run(["make", "install"], cwd=str(src)) + + # Generate a gmp.pc pkg-config file — GMP doesn't ship one by default + _write_gmp_pc(GMP_CACHE_DIR) + + # Leave a marker so we skip the build on the next pip install + tag_file.touch() + + # Remove the source + tarball; keep only the install + shutil.rmtree(tmp_dir, ignore_errors=True) + + print(f"[smlp build] GMP built and cached at {GMP_CACHE_DIR}") + return GMP_CACHE_DIR + + + +# --------------------------------------------------------------------------- +# Step 1b – Z3 (via pip z3-solver, no sudo) +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# Step 1d – Z3 binary (downloaded from GitHub releases) +# --------------------------------------------------------------------------- + +def _z3_binary() -> Path: + """ + Return the path to the z3 executable. + + Search order: + 1. Z3_BIN_DIR env var / constant (~/.local/z3/bin/z3) + 2. ~/.local/bin/z3 (pip install --user z3-solver installs it here) + 3. System z3 on PATH (sudo apt install z3) + 4. Download pre-built from GitHub (no sudo fallback) + """ + from shutil import which + + # ── 1. Explicit Z3_BIN_DIR ──────────────────────────────────────────── + if Z3_BIN_DIR.exists() and (Z3_BIN_DIR / "z3").exists(): + print(f"[smlp build] Using z3 binary from Z3_BIN_DIR: {Z3_BIN_DIR / 'z3'}") + return Z3_BIN_DIR / "z3" + + # ── 2. 
~/.local/bin/z3 ─────────────────────────────────────────────── + user_z3 = Path.home() / ".local" / "bin" / "z3" + if user_z3.exists(): + print(f"[smlp build] Using user z3 binary: {user_z3}") + return user_z3 + + # ── 3. PATH ─────────────────────────────────────────────────────────── + system_z3 = which("z3") + if system_z3: + print(f"[smlp build] Using system z3: {system_z3}") + return Path(system_z3) + + # ── 4. Download pre-built binary from GitHub releases ───────────────── + import platform as _platform + machine = _platform.machine() + arch_map = {"x86_64": "x64", "aarch64": "arm64"} + arch = arch_map.get(machine, machine) + z3_release = f"z3-{Z3_VERSION}-{arch}-glibc-2.31" + url = ( + f"https://github.com/Z3Prover/z3/releases/download/z3-{Z3_VERSION}/" + f"{z3_release}.zip" + ) + + tmp_dir = Z3_BIN_DIR.parent.parent / "_z3_build_tmp" + tmp_dir.mkdir(parents=True, exist_ok=True) + zip_path = tmp_dir / f"{z3_release}.zip" + + print(f"[smlp build] Downloading z3 binary {Z3_VERSION} ...") + _download(url, zip_path) + + import zipfile, shutil as _shutil + print(f"[smlp build] Extracting z3 binary ...") + with zipfile.ZipFile(zip_path) as zf: + zf.extractall(tmp_dir) + + Z3_BIN_DIR.mkdir(parents=True, exist_ok=True) + src_bin = tmp_dir / z3_release / "bin" / "z3" + _shutil.copy2(src_bin, Z3_BIN_DIR / "z3") + (Z3_BIN_DIR / "z3").chmod(0o755) + _shutil.rmtree(tmp_dir, ignore_errors=True) + + print(f"[smlp build] z3 binary installed at {Z3_BIN_DIR / 'z3'}") + return Z3_BIN_DIR / "z3" + + +def _write_z3_pc(z3_lib: Path) -> None: + """ + Write a z3.pc pkg-config file into /pkgconfig/. + z3-solver does not ship one, so Meson cannot find it via pkg-config + without this file. + """ + import re + # Detect z3 version from libz3.so filename e.g. 
libz3.so.4.8.12 + version = "4.8.12" # fallback + for f in z3_lib.glob("libz3.so.*"): + m = re.search(r"libz3\.so\.([\d.]+)", f.name) + if m: + version = m.group(1) + break + + prefix = z3_lib.parent # /z3 + inc_dir = prefix / "include" + + pkgconfig_dir = z3_lib / "pkgconfig" + pkgconfig_dir.mkdir(parents=True, exist_ok=True) + pc_file = pkgconfig_dir / "z3.pc" + pc_file.write_text( + f"prefix={prefix}\n" + f"libdir={z3_lib}\n" + f"includedir={inc_dir}\n" + "\n" + "Name: z3\n" + "Description: Z3 Theorem Prover\n" + f"Version: {version}\n" + "Libs: -L${libdir} -lz3\n" + "Cflags: -I${includedir}\n" + ) + print(f"[smlp build] Wrote pkg-config file: {pc_file}") + + +def _z3_prefix() -> Path: + """ + Return the z3-solver lib directory containing libz3.so. + + Search order: + 1. $Z3_PREFIX env var → use /lib + 2. Z3_DEFAULT_PREFIX constant → ~/.local/lib/python3.13/site-packages/z3/lib + (standard location for: pip install --user z3-solver) + """ + env_prefix = os.environ.get("Z3_PREFIX") + prefix = Path(env_prefix).expanduser() if env_prefix else Z3_DEFAULT_PREFIX + lib_dir = prefix / "lib" + + print(f"[smlp build] Looking for libz3.so in: {lib_dir}") + + found = list(lib_dir.rglob("libz3.so")) if lib_dir.exists() else [] + if found: + print(f"[smlp build] Using z3 lib dir: {lib_dir}") + _write_z3_pc(lib_dir) + return lib_dir + + sys.exit( + f"[smlp build] ERROR: libz3.so not found at {lib_dir}.\n" + "Install z3-solver with: python3.13 -m pip install --user z3-solver\n" + "Or set Z3_PREFIX to your z3 package directory, e.g.:\n" + " export Z3_PREFIX=~/.local/lib/python3.13/site-packages/z3" + ) + + +def _write_native_file(boost_prefix: Path, gmp_prefix: Path, z3_lib: Path, z3_bin: Path, build_tmp: Path, stub_dir: Path = None) -> Path: + """ + Write a Meson native file that points to the user-space Boost install. 
+ This is the most reliable way to pass non-standard library paths to Meson — + more reliable than environment variables, which Meson may ignore depending + on version and platform. + """ + boost_lib = boost_prefix / "lib" + boost_inc = boost_prefix / "include" + gmp_lib = gmp_prefix / "lib" + gmp_inc = gmp_prefix / "include" + z3_pc_dir = z3_lib / "pkgconfig" + + native_file = build_tmp / "native.ini" + native_file.write_text( + "[properties]\n" + f"boost_root = '{boost_prefix}'\n" + f"boost_includedir = '{boost_inc}'\n" + f"boost_librarydir = '{boost_lib}'\n" + f"gmp_includedir = '{gmp_inc}'\n" + f"gmp_librarydir = '{gmp_lib}'\n" + f"gmpxx_includedir = '{gmp_inc}'\n" + f"gmpxx_librarydir = '{gmp_lib}'\n" + "\n" + "[binaries]\n" + f"python = '{sys.executable}'\n" + f"python3 = '{sys.executable}'\n" + f"pkg-config = 'pkg-config'\n" + f"z3 = '{z3_bin}'\n" + "\n" + "[built-in options]\n" + f"pkg_config_path = ['{gmp_lib / 'pkgconfig'}', '{boost_lib / 'pkgconfig'}', '{z3_pc_dir}']\n" + f"c_args = ['-I{gmp_inc}', '-I{boost_inc}']\n" + f"cpp_args = ['-I{gmp_inc}', '-I{boost_inc}']\n" + f"c_link_args = ['-L{gmp_lib}', '-L{boost_lib}', '-L{build_tmp}', '-Wl,-rpath,{gmp_lib}', '-Wl,-rpath,{boost_lib}']\n" + f"cpp_link_args = ['-L{gmp_lib}', '-L{boost_lib}', '-L{build_tmp}', '-Wl,-rpath,{gmp_lib}', '-Wl,-rpath,{boost_lib}', '-Wl,-rpath,{z3_lib}']\n" + ) + print(f"[smlp build] Wrote Meson native file: {native_file}") + return native_file + + +def _create_python_stub_lib(build_tmp: Path) -> None: + """ + Create a stub libpythonX.Y.so in build_tmp so the linker can satisfy + the -lpythonX.Y flag from Meson's embed:true Python dependency. + On manylinux, Python is statically linked so no real libpython exists, + but the extension works at runtime because the interpreter provides + all symbols via dlopen. 
+ """ + import sysconfig + py_ver = f"{sys.version_info.major}.{sys.version_info.minor}" + stub_lib = build_tmp / f"libpython{py_ver}.so" + if stub_lib.exists(): + return + + # Create an empty shared library as a stub + stub_src = build_tmp / f"python_stub.c" + stub_src.write_text("// empty stub\n") + result = subprocess.run( + ["gcc", "-shared", "-fPIC", "-o", str(stub_lib), str(stub_src)], + capture_output=True, text=True + ) + if result.returncode != 0: + print(f"[smlp build] WARNING: failed to create Python stub lib: {result.stderr}") + return + + print(f"[smlp build] Created Python stub lib: {stub_lib}") + + # Add stub dir to LDFLAGS and library path so linker finds it + os.environ["LDFLAGS"] = f"-L{build_tmp} " + os.environ.get("LDFLAGS", "") + os.environ["LIBRARY_PATH"] = f"{build_tmp}:" + os.environ.get("LIBRARY_PATH", "") + + +def _meson_build(poly_dir: Path, kay_dir: Path, + boost_prefix: Path, build_tmp: Path) -> Path: + """ + Run meson setup + ninja install. + Returns the path to the installed smlp package directory. + """ + meson_build_dir = poly_dir / "build" + install_prefix = build_tmp.resolve() / "smlp_install" + + if meson_build_dir.exists(): + shutil.rmtree(meson_build_dir) + + z3_lib = _z3_prefix() + z3_bin = _z3_binary() + gmp_prefix = _gmp_prefix() + _create_python_stub_lib(build_tmp) + env = _boost_env(boost_prefix) + env = _add_z3_to_env(env, z3_lib) + env = _add_gmp_to_env(env, gmp_prefix) + + # Embed RPATH into the built .so so it finds user-space libs at runtime + # without needing LD_LIBRARY_PATH to be set. 
+ rpath_dirs = [ + str(boost_prefix / "lib"), + str(gmp_prefix / "lib"), + str(z3_lib), + ] + rpath_flags = ":".join(f"-Wl,-rpath,{d}" for d in rpath_dirs) + existing_ldflags = env.get("LDFLAGS", "") + env["LDFLAGS"] = f"{rpath_flags} {existing_ldflags}".strip() + native_file = _write_native_file(boost_prefix, gmp_prefix, z3_lib, z3_bin, build_tmp) + + meson_flags = [ + "--wipe", + f"--native-file={native_file}", + f"-Dkay-prefix={kay_dir}", + "-Dz3=enabled", + "--prefix", str(install_prefix), + # Explicitly pass both source dir and build dir as absolute paths + # so Meson works correctly regardless of cwd + str(poly_dir), + str(poly_dir / "build"), + ] + + print(f"[smlp build] PKG_CONFIG_PATH = {env.get('PKG_CONFIG_PATH', '(not set)')}") + print(f"[smlp build] LD_LIBRARY_PATH = {env.get('LD_LIBRARY_PATH', '(not set)')}") + _run( + _meson_bin(build_tmp) + ["setup"] + meson_flags, + env=env, + ) + + _run([_ninja_bin(), "-C", str(poly_dir / "build"), "install"], + cwd=str(poly_dir), env=env) + + # Locate the installed smlp package (Meson may use a versioned python path) + candidates = (list(install_prefix.glob("lib/python*/dist-packages/smlp")) + + list(install_prefix.glob("lib/python3/dist-packages/smlp")) + + list(install_prefix.glob("lib/python*/site-packages/smlp")) + + list(install_prefix.glob("lib/python3/site-packages/smlp"))) + if not candidates: + sys.exit( + f"[smlp build] ERROR: could not find installed smlp package under " + f"{install_prefix}. Check the Meson/Ninja output above." + ) + return candidates[0] + + +# --------------------------------------------------------------------------- +# Custom build_ext +# --------------------------------------------------------------------------- + +class MesonBuildExt(_build_ext): + + def run(self): + build_tmp = Path(self.build_temp).resolve() + build_tmp.mkdir(parents=True, exist_ok=True) + + # 1. Boost (compiled from source, cached in ~/.local/boost_py313) + boost_prefix = _boost_prefix() + + # 2. 
kay + kay_dir = _ensure_kay(build_tmp) + + # 3. Meson build – run from within the repo + poly_dir = REPO_ROOT / "utils" / "poly" + if not poly_dir.is_dir(): + sys.exit( + f"[smlp build] ERROR: expected utils/poly/ at {poly_dir}.\n" + "Make sure setup.py is run from the root of the smlp repository." + ) + + # Optionally switch branch (useful in CI) + branch = os.environ.get("SMLP_BRANCH") + if branch: + _run(["git", "switch", branch], cwd=str(REPO_ROOT)) + else: + py_ver = f"{sys.version_info.major}{sys.version_info.minor}" + auto_branch = f"smlp_python{py_ver}" + result = subprocess.run( + ["git", "branch", "-r", "--list", f"origin/{auto_branch}"], + capture_output=True, text=True, cwd=str(REPO_ROOT) + ) + if result.stdout.strip(): + _run(["git", "switch", auto_branch], cwd=str(REPO_ROOT)) + + installed_pkg = _meson_build(poly_dir, kay_dir, boost_prefix, build_tmp) + + # 4. Copy into the wheel's lib tree + dest = Path(self.build_lib) / "smlp" + if dest.exists(): + shutil.rmtree(dest) + shutil.copytree(str(installed_pkg), str(dest)) + print(f"[smlp build] smlp extension copied to wheel at {dest}") + + # 5. Copy Python source from src/smlp_py into smlp/smlp_py inside the wheel + smlp_py_src = REPO_ROOT / "src" / "smlp_py" + if smlp_py_src.is_dir(): + smlp_py_dest = dest / "smlp_py" # dest is already smlp/ + if smlp_py_dest.exists(): + shutil.rmtree(smlp_py_dest) + shutil.copytree(str(smlp_py_src), str(smlp_py_dest)) + print(f"[smlp build] smlp_py source copied to wheel at {smlp_py_dest}") + else: + print(f"[smlp build] WARNING: src/smlp_py not found at {smlp_py_src}, skipping.") + + # 6. 
Copy src/run_smlp.py into smlp/ inside the wheel + run_smlp_src = REPO_ROOT / "src" / "run_smlp.py" + if run_smlp_src.is_file(): + shutil.copy2(str(run_smlp_src), str(dest / "run_smlp.py")) + print(f"[smlp build] run_smlp.py copied to wheel at {dest / 'run_smlp.py'}") + else: + print(f"[smlp build] WARNING: src/run_smlp.py not found at {run_smlp_src}, skipping.") + + +# --------------------------------------------------------------------------- +# setup() +# --------------------------------------------------------------------------- + +setup( + cmdclass={"build_ext": MesonBuildExt}, + # Dummy extension so setuptools produces a platform-specific wheel + # and actually invokes build_ext. + ext_modules=[ + __import__("setuptools").Extension(name="smlp._dummy", sources=[]), + ], +) diff --git a/package_build/python3.13/22.04/Dockerfile b/package_build/python3.13/22.04/Dockerfile new file mode 100644 index 00000000..fb15fa4f --- /dev/null +++ b/package_build/python3.13/22.04/Dockerfile @@ -0,0 +1,52 @@ +FROM ubuntu:22.04 + +# Avoid interactive prompts during apt install +ENV DEBIAN_FRONTEND=noninteractive + +# --------------------------------------------------------------------------- +# 1. System packages +# --------------------------------------------------------------------------- +RUN apt-get update && apt-get install -y \ + ca-certificates curl gnupg wget \ + gcc g++ git make m4 pkg-config && \ + curl -fsSL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xF23C5A6CF475977595C89F51BA6932366A755776" \ + | gpg --dearmor -o /etc/apt/trusted.gpg.d/deadsnakes.gpg && \ + echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu jammy main" \ + > /etc/apt/sources.list.d/deadsnakes.list && \ + apt-get update && apt-get install -y \ + python3.13 python3.13-dev python3.13-venv python3.13-tk && \ + apt-get clean && rm -rf /var/lib/apt/lists/* + +# --------------------------------------------------------------------------- +# 2. 
Install pip for Python 3.13
+# ---------------------------------------------------------------------------
+RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.13
+
+# ---------------------------------------------------------------------------
+# 3. Python build tools
+# ---------------------------------------------------------------------------
+RUN python3.13 -m pip install --user --force-reinstall --break-system-packages setuptools==80.10.1 && \
+    python3.13 -m pip install --user meson ninja z3-solver==4.8.12 auditwheel patchelf
+
+# ---------------------------------------------------------------------------
+# 4. Add ~/.local/bin to PATH
+# ---------------------------------------------------------------------------
+ENV PATH="/root/.local/bin:${PATH}"
+
+# ---------------------------------------------------------------------------
+# 5. Clone smlp and optionally switch to branch
+# ---------------------------------------------------------------------------
+
+WORKDIR /app
+ARG GIT_BRANCH=smlp_python313
+COPY run_git_clone .
+RUN ./run_git_clone $GIT_BRANCH
+
+# ---------------------------------------------------------------------------
+# 6. Build wheel
+# ---------------------------------------------------------------------------
+WORKDIR smlp
+RUN python3.13 -m pip wheel . -w dist/ && \
+    python3.13 repair_wheel.py dist/
+
+# The manylinux wheel is in /app/smlp/dist/
diff --git a/package_build/python3.13/22.04/README.md b/package_build/python3.13/22.04/README.md
new file mode 100644
index 00000000..d5039fca
--- /dev/null
+++ b/package_build/python3.13/22.04/README.md
@@ -0,0 +1,212 @@
+# SMLP Installation Guide
+
+Complete installation steps for a clean **Ubuntu 22.04** image.
+
+---
+
+## 1. System packages
+
+These require `sudo` and only need to be installed once.
+
+```bash
+apt update
+apt install -y ca-certificates curl gnupg wget
+
+curl -fsSL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xF23C5A6CF475977595C89F51BA6932366A755776" \
+  | gpg --dearmor -o /etc/apt/trusted.gpg.d/deadsnakes.gpg
+
+echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu jammy main" \
+  > /etc/apt/sources.list.d/deadsnakes.list
+
+apt update
+apt install -y \
+  gcc g++ git make m4 pkg-config xvfb \
+  python3.13 python3.13-dev python3.13-venv python3.13-tk
+```
+
+---
+
+## 2. Install pip for Python 3.13
+
+```bash
+curl -sS https://bootstrap.pypa.io/get-pip.py | python3.13
+```
+
+---
+
+## 3. Python user packages
+
+These are installed into `~/.local` and do not require `sudo`.
+
+```bash
+python3.13 -m pip install --user --force-reinstall --break-system-packages setuptools
+python3.13 -m pip install --user meson ninja z3-solver==4.8.12
+```
+
+---
+
+## 4. Add `~/.local/bin` to PATH
+
+Required so that `meson`, `ninja`, and `z3` are found during the build.
+
+```bash
+export PATH=$HOME/.local/bin:$PATH
+```
+
+Add it permanently to your shell profile:
+
+```bash
+echo 'export PATH=$HOME/.local/bin:$PATH' >> ~/.bashrc
+source ~/.bashrc
+```
+
+---
+
+## 5. Clone the smlp repository
+
+```bash
+git clone https://github.com/SMLP-Systems/smlp.git
+cd smlp
+git switch smlp_python313
+```
+
+---
+
+## 6. Install smlp
+
+The `setup.py` automatically:
+- Downloads and compiles **Boost 1.83** (cached in `~/.local/boost_py313`, ~5 min first time)
+- Downloads and compiles **GMP 6.3.0** (cached in `~/.local/gmp`, ~1 min first time)
+- Clones the **kay** C++ dependency
+- Runs `meson` + `ninja` to build the native extension
+
+```bash
+python3.13 -m pip install .
+```
+
+Subsequent installs reuse the Boost and GMP caches and are much faster.
+
+---
+
+## 7. Build a wheel (optional)
+
+To save a redistributable `.whl` file instead of installing directly:
+
+```bash
+python3.13 -m pip wheel . -w dist/
+```
+
+The wheel is saved in `dist/smlp-*.whl` and can be installed without recompiling:
+
+```bash
+python3.13 -m pip install dist/smlp-*.whl
+```
+
+> **Portability note:** The plain wheel (`linux_x86_64`) embeds RPATH entries pointing to
+> `~/.local/boost_py313/lib`, `~/.local/gmp/lib`, and `~/.local/lib/python3.13/site-packages/z3/lib`.
+> It will only work on machines with the same library paths.
+
+### Manylinux wheel (fully portable)
+
+A manylinux wheel bundles all `.so` dependencies inside it and works on any Linux x86_64
+with a compatible glibc. First install the required tools:
+
+```bash
+python3.13 -m pip install --user auditwheel patchelf
+```
+
+Then build the plain wheel and repair it:
+
+```bash
+python3.13 -m pip wheel . -w dist/
+python3.13 repair_wheel.py
+```
+
+The repaired `manylinux_*_x86_64` wheel will be saved alongside the original in `dist/`.
+Distribute the manylinux one.
+
+---
+
+## 8. Headless display (xvfb)
+
+Required when running SMLP without a `$DISPLAY` (e.g. in Docker or CI).
+
+```bash
+Xvfb :99 -screen 0 1024x768x24 &
+export DISPLAY=:99
+```
+
+Add to your shell profile to make it permanent:
+
+```bash
+echo 'export DISPLAY=:99' >> ~/.bashrc
+```
+
+---
+
+## 9. Verify
+
+```bash
+python3.13 -c "import smlp; print('smlp imported OK')"
+```
+
+---
+
+## Environment variables
+
+All are optional. Set before running `pip install .` to override defaults.
+
+| Variable | Default | Description |
+|---|---|---|
+| `BOOST_ROOT` | *(build from source)* | Reuse an existing Boost prefix, skips download + compile |
+| `BOOST_CACHE_DIR` | `~/.local/boost_py313` | Where to cache the compiled Boost |
+| `BOOST_VERSION` | `1.83.0` | Boost version to download |
+| `GMP_ROOT` | *(build from source)* | Reuse an existing GMP prefix, skips download + compile |
+| `GMP_CACHE_DIR` | `~/.local/gmp` | Where to cache the compiled GMP |
+| `GMP_VERSION` | `6.3.0` | GMP version to download |
+| `Z3_PREFIX` | `~/.local/lib/python3.13/site-packages/z3` | Reuse an existing Z3 install |
+| `Z3_BIN_DIR` | `~/.local/z3/bin` | Directory containing the `z3` binary |
+| `Z3_VERSION` | `4.8.12` | Z3 version to download if binary not found |
+| `KAY_DIR` | *(cloned into build temp)* | Reuse an existing kay checkout |
+| `SMLP_BRANCH` | *(auto-detected)* | Git branch to use in the smlp repo |
+
+---
+
+## Reinstalling
+
+To reinstall after code changes:
+
+```bash
+python3.13 -m pip uninstall smlp -y
+python3.13 -m pip install .
+```
+
+To force a full rebuild including Boost and GMP:
+
+```bash
+rm -rf ~/.local/boost_py313 ~/.local/gmp
+python3.13 -m pip install .
+```
+
+---
+
+## Troubleshooting
+
+**`No module named 'z3'` during build**
+
+Install z3-solver before running pip install:
+```bash
+python3.13 -m pip install --user z3-solver==4.8.12
+```
+
+**`meson: command not found` during build**
+
+Make sure `~/.local/bin` is on `PATH` as described in step 4.
+
+**`sys/pstat.h: No such file or directory` during GMP build**
+
+Delete the GMP cache and rebuild — the `--disable-assembly` flag handles this:
+```bash
+rm -rf ~/.local/gmp ~/.local/_gmp_build_tmp
+python3.13 -m pip install .
+```
diff --git a/package_build/python3.13/22.04/run_docker_build b/package_build/python3.13/22.04/run_docker_build
new file mode 100755
index 00000000..86ed5f3d
--- /dev/null
+++ b/package_build/python3.13/22.04/run_docker_build
@@ -0,0 +1,20 @@
+#!/usr/bin/tcsh -f
+set image=smlp-build-22.04-python313
+set wheel=smlp-0.1.0-cp313-cp313-manylinux_2_34_x86_64.whl
+env DOCKER_BUILDKIT=1 docker build --no-cache --progress=plain -t smlp-build-22.04-python313 .
+if($status) then
+  echo "\nERROR: Docker build failed\n"
+  exit 1
+else
+  set container=dummy
+  docker create --name $container $image
+  docker cp ${container}:/app/smlp/dist/$wheel .
+  docker rm $container
+endif
+if(-s $wheel ) then
+  echo "\nWheel: `realpath $wheel`\n"
+  exit 0
+else
+  echo "\nERROR: failed to copy $wheel to host\n"
+  exit 1
+endif
diff --git a/package_build/python3.13/22.04/run_docker_build_incremental b/package_build/python3.13/22.04/run_docker_build_incremental
new file mode 100755
index 00000000..1f3d8d96
--- /dev/null
+++ b/package_build/python3.13/22.04/run_docker_build_incremental
@@ -0,0 +1,32 @@
+#!/usr/bin/tcsh -f
+set build_args=""
+set image=smlp-build-22.04-python313
+set wheel=smlp-0.1.0-cp313-cp313-manylinux_2_34_x86_64.whl
+\rm -f $wheel >& /dev/null
+if($#argv > 0) then
+  if("-rebuild_smlp" == "$argv[1]" ) then
+    set build_args="--build-arg CACHE_BUST_SMLP=`date +%s`"
+  else
+    echo "\nUsage: `realpath $0 | xargs basename` [-rebuild_smlp]\n"
+    exit 0
+  endif
+endif
+set cmd="env DOCKER_BUILDKIT=1 docker build $build_args --progress=plain -t $image ."
+echo $cmd
+$cmd
+if($status) then
+  echo "\nERROR: Docker build failed\n"
+  exit 1
+else
+  set container=dummy
+  docker create --name $container $image
+  docker cp ${container}:/app/smlp/dist/$wheel .
+ docker rm $container +endif +if(-s $wheel ) then + echo "\nWheel: `realpath $wheel`\n" + exit 0 +else + echo "\nERROR: failed to copy $wheel to host\n" + exit 1 +endif diff --git a/package_build/python3.13/22.04/run_git_clone b/package_build/python3.13/22.04/run_git_clone new file mode 100755 index 00000000..ec1a6b78 --- /dev/null +++ b/package_build/python3.13/22.04/run_git_clone @@ -0,0 +1,7 @@ +#!/usr/bin/bash +git clone https://github.com/SMLP-Systems/smlp +GIT_BRANCH=$1 +cd smlp +if [ $(git branch -r --list origin/$GIT_BRANCH) ]; then + git switch $GIT_BRANCH +fi diff --git a/package_build/python3.13/24.04/Dockerfile b/package_build/python3.13/24.04/Dockerfile new file mode 100644 index 00000000..f455b2f4 --- /dev/null +++ b/package_build/python3.13/24.04/Dockerfile @@ -0,0 +1,46 @@ +FROM ubuntu:24.04 + +# Avoid interactive prompts during apt install +ENV DEBIAN_FRONTEND=noninteractive + +# --------------------------------------------------------------------------- +# 1. System packages +# --------------------------------------------------------------------------- +RUN apt-get update && apt-get install -y \ + ca-certificates curl gnupg wget \ + gcc g++ git make m4 pkg-config && \ + curl -fsSL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xF23C5A6CF475977595C89F51BA6932366A755776" \ + | gpg --dearmor -o /etc/apt/trusted.gpg.d/deadsnakes.gpg && \ + echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu noble main" \ + > /etc/apt/sources.list.d/deadsnakes.list && \ + apt-get update && apt-get install -y \ + python3.13 python3.13-dev python3.13-venv python3.13-tk && \ + apt-get clean && rm -rf /var/lib/apt/lists/* + +# --------------------------------------------------------------------------- +# 2. 
Install pip for Python 3.13 +# --------------------------------------------------------------------------- +RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.13 + +# --------------------------------------------------------------------------- +# 3. Python build tools +# --------------------------------------------------------------------------- +RUN python3.13 -m pip install --user --force-reinstall --break-system-packages setuptools && \ + python3.13 -m pip install --user meson ninja z3-solver==4.8.12 auditwheel patchelf + +# --------------------------------------------------------------------------- +# 4. Add ~/.local/bin to PATH +# --------------------------------------------------------------------------- +ENV PATH="/root/.local/bin:${PATH}" + +# --------------------------------------------------------------------------- +# 5. Clone smlp and build wheel +# --------------------------------------------------------------------------- +WORKDIR /app +RUN git clone https://github.com/SMLP-Systems/smlp.git && \ + cd smlp && \ + git switch smlp_python313 && \ + python3.13 -m pip wheel . -w dist/ && \ + python3.13 repair_wheel.py dist/ + +# The manylinux wheel is in /app/smlp/dist/ diff --git a/package_build/python3.13/24.04/README.md b/package_build/python3.13/24.04/README.md new file mode 100644 index 00000000..9ea592b5 --- /dev/null +++ b/package_build/python3.13/24.04/README.md @@ -0,0 +1,212 @@ +# SMLP Installation Guide + +Complete installation steps for a clean **Ubuntu 24.04** image. + +--- + +## 1. System packages + +These require `sudo` and only need to be installed once. 
+ +```bash +apt update +apt install -y ca-certificates curl gnupg wget + +curl -fsSL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xF23C5A6CF475977595C89F51BA6932366A755776" \ + | gpg --dearmor -o /etc/apt/trusted.gpg.d/deadsnakes.gpg + +echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu noble main" \ + > /etc/apt/sources.list.d/deadsnakes.list + +apt update +apt install -y \ + gcc g++ git make m4 pkg-config xvfb \ + python3.13 python3.13-dev python3.13-venv python3.13-tk +``` + +--- + +## 2. Install pip for Python 3.13 + +```bash +curl -sS https://bootstrap.pypa.io/get-pip.py | python3.13 +``` + +--- + +## 3. Python user packages + +These are installed into `~/.local` and do not require `sudo`. + +```bash +python3.13 -m pip install --user --force-reinstall --break-system-packages setuptools +python3.13 -m pip install --user meson ninja z3-solver==4.8.12 +``` + +--- + +## 4. Add `~/.local/bin` to PATH + +Required so that `meson`, `ninja`, and `z3` are found during the build. + +```bash +export PATH=$HOME/.local/bin:$PATH +``` + +Add it permanently to your shell profile: + +```bash +echo 'export PATH=$HOME/.local/bin:$PATH' >> ~/.bashrc +source ~/.bashrc +``` + +--- + +## 5. Clone the smlp repository + +```bash +git clone https://github.com/SMLP-Systems/smlp.git +cd smlp +git switch smlp_python313 +``` + +--- + +## 6. Install smlp + +The `setup.py` automatically: +- Downloads and compiles **Boost 1.83** (cached in `~/.local/boost_py313`, ~5 min first time) +- Downloads and compiles **GMP 6.3.0** (cached in `~/.local/gmp`, ~1 min first time) +- Clones the **kay** C++ dependency +- Runs `meson` + `ninja` to build the native extension + +```bash +python3.13 -m pip install . +``` + +Subsequent installs reuse the Boost and GMP caches and are much faster. + +--- + +## 7. Build a wheel (optional) + +To save a redistributable `.whl` file instead of installing directly: + +```bash +python3.13 -m pip wheel . 
-w dist/ +``` + +The wheel is saved in `dist/smlp-*.whl` and can be installed without recompiling: + +```bash +python3.13 -m pip install dist/smlp-*.whl +``` + +> **Portability note:** The plain wheel (`linux_x86_64`) embeds RPATH entries pointing to +> `~/.local/boost_py313/lib`, `~/.local/gmp/lib`, and `~/.local/lib/python3.13/site-packages/z3/lib`. +> It will only work on machines with the same library paths. + +### Manylinux wheel (fully portable) + +A manylinux wheel bundles all `.so` dependencies inside it and works on any Linux x86_64 +with a compatible glibc. First install the required tools: + +```bash +python3.13 -m pip install --user auditwheel patchelf +``` + +Then build the plain wheel and repair it: + +```bash +python3.13 -m pip wheel . -w dist/ +python3.13 repair_wheel.py +``` + +The repaired `manylinux_*_x86_64` wheel will be saved alongside the original in `dist/`. +Distribute the manylinux one. + +--- + +## 8. Headless display (xvfb) + +Required when running SMLP without a `$DISPLAY` (e.g. in Docker or CI). + +```bash +Xvfb :99 -screen 0 1024x768x24 & +export DISPLAY=:99 +``` + +Add to your shell profile to make it permanent: + +```bash +echo 'export DISPLAY=:99' >> ~/.bashrc +``` + +--- + +## 9. Verify + +```bash +python3.13 -c "import smlp; print('smlp imported OK')" +``` + +--- + +## Environment variables + +All are optional. Set before running `pip install .` to override defaults. 
+ +| Variable | Default | Description | +|---|---|---| +| `BOOST_ROOT` | *(build from source)* | Reuse an existing Boost prefix, skips download + compile | +| `BOOST_CACHE_DIR` | `~/.local/boost_py313` | Where to cache the compiled Boost | +| `BOOST_VERSION` | `1.83.0` | Boost version to download | +| `GMP_ROOT` | *(build from source)* | Reuse an existing GMP prefix, skips download + compile | +| `GMP_CACHE_DIR` | `~/.local/gmp` | Where to cache the compiled GMP | +| `GMP_VERSION` | `6.3.0` | GMP version to download | +| `Z3_PREFIX` | `~/.local/lib/python3.13/site-packages/z3` | Reuse an existing Z3 install | +| `Z3_BIN_DIR` | `~/.local/z3/bin` | Directory containing the `z3` binary | +| `Z3_VERSION` | `4.8.12` | Z3 version to download if binary not found | +| `KAY_DIR` | *(cloned into build temp)* | Reuse an existing kay checkout | +| `SMLP_BRANCH` | *(auto-detected)* | Git branch to use in the smlp repo | + +--- + +## Reinstalling + +To reinstall after code changes: + +```bash +python3.13 -m pip uninstall smlp -y +python3.13 -m pip install . +``` + +To force a full rebuild including Boost and GMP: + +```bash +rm -rf ~/.local/boost_py313 ~/.local/gmp +python3.13 -m pip install . +``` + +--- + +## Troubleshooting + +**`No module named 'z3'` during build** + +Install z3-solver before running pip install: +```bash +python3.13 -m pip install --user z3-solver==4.8.12 +``` + +**`meson: command not found` during build** + +Make sure `~/.local/bin` is on `PATH` as described in step 3. + +**`sys/pstat.h: No such file or directory` during GMP build** + +Delete the GMP cache and rebuild — the `--disable-assembly` flag handles this: +```bash +rm -rf ~/.local/gmp ~/.local/_gmp_build_tmp +python3.13 -m pip install . 
+```
diff --git a/package_build/python3.13/24.04/run_docker_build b/package_build/python3.13/24.04/run_docker_build
new file mode 100755
index 00000000..c6a5f83e
--- /dev/null
+++ b/package_build/python3.13/24.04/run_docker_build
@@ -0,0 +1,20 @@
+#!/usr/bin/tcsh -f
+set image=smlp-build-24.04-python313
+set wheel=smlp-0.1.0-cp313-cp313-manylinux_2_39_x86_64.whl
+env DOCKER_BUILDKIT=1 docker build --no-cache --progress=plain -t smlp-build-24.04-python313 .
+if($status) then
+  echo "\nERROR: Docker build failed\n"
+  exit 1
+else
+  set container=dummy
+  docker create --name $container $image
+  docker cp ${container}:/app/smlp/dist/$wheel .
+  docker rm $container
+endif
+if(-s $wheel ) then
+  echo "\nWheel: `realpath $wheel`\n"
+  exit 0
+else
+  echo "\nERROR: failed to copy $wheel to host\n"
+  exit 1
+endif
diff --git a/package_build/python3.13/24.04/run_docker_build_incremental b/package_build/python3.13/24.04/run_docker_build_incremental
new file mode 100755
index 00000000..e61935eb
--- /dev/null
+++ b/package_build/python3.13/24.04/run_docker_build_incremental
@@ -0,0 +1,32 @@
+#!/usr/bin/tcsh -f
+set build_args=""
+set image=smlp-build-24.04-python313
+set wheel=smlp-0.1.0-cp313-cp313-manylinux_2_39_x86_64.whl
+\rm -f $wheel >& /dev/null
+if($#argv > 0) then
+  if("-rebuild_smlp" == "$argv[1]" ) then
+    set build_args="--build-arg CACHE_BUST_SMLP=`date +%s`"
+  else
+    echo "\nUsage: `realpath $0 | xargs basename` [-rebuild_smlp]\n"
+    exit 0
+  endif
+endif
+set cmd="env DOCKER_BUILDKIT=1 docker build $build_args --progress=plain -t $image ."
+echo $cmd
+$cmd
+if($status) then
+  echo "\nERROR: Docker build failed\n"
+  exit 1
+else
+  set container=dummy
+  docker create --name $container $image
+  docker cp ${container}:/app/smlp/dist/$wheel .
+ docker rm $container +endif +if(-s $wheel ) then + echo "\nWheel: `realpath $wheel`\n" + exit 0 +else + echo "\nERROR: failed to copy $wheel to host\n" + exit 1 +endif diff --git a/package_build/python3.13/manylinux_2_28/Dockerfile b/package_build/python3.13/manylinux_2_28/Dockerfile new file mode 100644 index 00000000..19a726d2 --- /dev/null +++ b/package_build/python3.13/manylinux_2_28/Dockerfile @@ -0,0 +1,53 @@ +FROM quay.io/pypa/manylinux_2_28_x86_64 + +# --------------------------------------------------------------------------- +# 1. System packages +# --------------------------------------------------------------------------- +RUN dnf install -y \ + wget git make m4 pkg-config && \ + dnf clean all + +# --------------------------------------------------------------------------- +# 2. Set Python 3.13 from pre-installed pypa versions +# --------------------------------------------------------------------------- +ENV PATH="/opt/python/cp313-cp313/bin:${PATH}" +ENV PYTHON=/opt/python/cp313-cp313/bin/python3.13 +ENV PIP=/opt/python/cp313-cp313/bin/pip + +# --------------------------------------------------------------------------- +# 3. Python build tools +# --------------------------------------------------------------------------- +RUN /opt/python/cp313-cp313/bin/pip install --upgrade pip && \ + /opt/python/cp313-cp313/bin/pip install setuptools==80.10.1 && \ + /opt/python/cp313-cp313/bin/pip install meson ninja z3-solver==4.8.12 auditwheel patchelf && \ + ln -sf /opt/python/cp313-cp313/bin/python3.13 /usr/local/bin/python3.13 && \ + ln -sf /opt/python/cp313-cp313/bin/pip /usr/local/bin/pip3.13 && \ + ln -sf /opt/python/cp313-cp313/bin/meson /usr/local/bin/meson && \ + ln -sf /opt/python/cp313-cp313/bin/ninja /usr/local/bin/ninja + +# --------------------------------------------------------------------------- +# 4. 
Add ~/.local/bin to PATH
+# ---------------------------------------------------------------------------
+ENV PATH="/root/.local/bin:${PATH}"
+
+# ---------------------------------------------------------------------------
+# 5. Clone smlp and optionally switch to branch
+# ---------------------------------------------------------------------------
+# Point setup.py to the correct z3 location for this Python installation
+ENV Z3_PREFIX=/opt/python/cp313-cp313/lib/python3.13/site-packages/z3
+
+WORKDIR /app
+ARG GIT_BRANCH=smlp_python313
+COPY run_git_clone .
+RUN ./run_git_clone $GIT_BRANCH
+
+# ---------------------------------------------------------------------------
+# 6. Build wheel
+# ---------------------------------------------------------------------------
+WORKDIR smlp
+RUN \cp -f manylinux_2_28/setup.py . && \
+    /opt/python/cp313-cp313/bin/python3.13 -m pip wheel . -w dist/ && \
+    /opt/python/cp313-cp313/bin/python3.13 manylinux_2_28/repair_wheel.py dist/ --plat manylinux_2_28_x86_64
+
+CMD ["/usr/bin/bash"]
+# The manylinux wheel is in /app/smlp/dist/
diff --git a/package_build/python3.13/manylinux_2_28/README.md b/package_build/python3.13/manylinux_2_28/README.md
new file mode 100644
index 00000000..576a946b
--- /dev/null
+++ b/package_build/python3.13/manylinux_2_28/README.md
@@ -0,0 +1,212 @@
+# SMLP Installation Guide
+
+Complete installation steps for a clean **Ubuntu 20.04** image.
+
+---
+
+## 1. System packages
+
+These require `sudo` and only need to be installed once.
+ +```bash +apt update +apt install -y ca-certificates curl gnupg wget + +curl -fsSL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xF23C5A6CF475977595C89F51BA6932366A755776" \ + | gpg --dearmor -o /etc/apt/trusted.gpg.d/deadsnakes.gpg + +echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu focal main" \ + > /etc/apt/sources.list.d/deadsnakes.list + +apt update +apt install -y \ + gcc g++ git make m4 pkg-config xvfb \ + python3.13 python3.13-dev python3.13-venv python3.13-tk +``` + +--- + +## 2. Install pip for Python 3.13 + +```bash +curl -sS https://bootstrap.pypa.io/get-pip.py | python3.13 +``` + +--- + +## 3. Python user packages + +These are installed into `~/.local` and do not require `sudo`. + +```bash +python3.13 -m pip install --user --force-reinstall --break-system-packages setuptools +python3.13 -m pip install --user meson ninja z3-solver==4.8.12 +``` + +--- + +## 4. Add `~/.local/bin` to PATH + +Required so that `meson`, `ninja`, and `z3` are found during the build. + +```bash +export PATH=$HOME/.local/bin:$PATH +``` + +Add it permanently to your shell profile: + +```bash +echo 'export PATH=$HOME/.local/bin:$PATH' >> ~/.bashrc +source ~/.bashrc +``` + +--- + +## 5. Clone the smlp repository + +```bash +git clone https://github.com/SMLP-Systems/smlp.git +cd smlp +git switch smlp_python313 +``` + +--- + +## 6. Install smlp + +The `setup.py` automatically: +- Downloads and compiles **Boost 1.83** (cached in `~/.local/boost_py313`, ~5 min first time) +- Downloads and compiles **GMP 6.3.0** (cached in `~/.local/gmp`, ~1 min first time) +- Clones the **kay** C++ dependency +- Runs `meson` + `ninja` to build the native extension + +```bash +python3.13 -m pip install . +``` + +Subsequent installs reuse the Boost and GMP caches and are much faster. + +--- + +## 7. Build a wheel (optional) + +To save a redistributable `.whl` file instead of installing directly: + +```bash +python3.13 -m pip wheel . 
-w dist/ +``` + +The wheel is saved in `dist/smlp-*.whl` and can be installed without recompiling: + +```bash +python3.13 -m pip install dist/smlp-*.whl +``` + +> **Portability note:** The plain wheel (`linux_x86_64`) embeds RPATH entries pointing to +> `~/.local/boost_py313/lib`, `~/.local/gmp/lib`, and `~/.local/lib/python3.13/site-packages/z3/lib`. +> It will only work on machines with the same library paths. + +### Manylinux wheel (fully portable) + +A manylinux wheel bundles all `.so` dependencies inside it and works on any Linux x86_64 +with a compatible glibc. First install the required tools: + +```bash +python3.13 -m pip install --user auditwheel patchelf +``` + +Then build the plain wheel and repair it: + +```bash +python3.13 -m pip wheel . -w dist/ +python3.13 repair_wheel.py +``` + +The repaired `manylinux_*_x86_64` wheel will be saved alongside the original in `dist/`. +Distribute the manylinux one. + +--- + +## 8. Headless display (xvfb) + +Required when running SMLP without a `$DISPLAY` (e.g. in Docker or CI). + +```bash +Xvfb :99 -screen 0 1024x768x24 & +export DISPLAY=:99 +``` + +Add to your shell profile to make it permanent: + +```bash +echo 'export DISPLAY=:99' >> ~/.bashrc +``` + +--- + +## 9. Verify + +```bash +python3.13 -c "import smlp; print('smlp imported OK')" +``` + +--- + +## Environment variables + +All are optional. Set before running `pip install .` to override defaults. 
+ +| Variable | Default | Description | +|---|---|---| +| `BOOST_ROOT` | *(build from source)* | Reuse an existing Boost prefix, skips download + compile | +| `BOOST_CACHE_DIR` | `~/.local/boost_py313` | Where to cache the compiled Boost | +| `BOOST_VERSION` | `1.83.0` | Boost version to download | +| `GMP_ROOT` | *(build from source)* | Reuse an existing GMP prefix, skips download + compile | +| `GMP_CACHE_DIR` | `~/.local/gmp` | Where to cache the compiled GMP | +| `GMP_VERSION` | `6.3.0` | GMP version to download | +| `Z3_PREFIX` | `~/.local/lib/python3.13/site-packages/z3` | Reuse an existing Z3 install | +| `Z3_BIN_DIR` | `~/.local/z3/bin` | Directory containing the `z3` binary | +| `Z3_VERSION` | `4.8.12` | Z3 version to download if binary not found | +| `KAY_DIR` | *(cloned into build temp)* | Reuse an existing kay checkout | +| `SMLP_BRANCH` | *(auto-detected)* | Git branch to use in the smlp repo | + +--- + +## Reinstalling + +To reinstall after code changes: + +```bash +python3.13 -m pip uninstall smlp -y +python3.13 -m pip install . +``` + +To force a full rebuild including Boost and GMP: + +```bash +rm -rf ~/.local/boost_py313 ~/.local/gmp +python3.13 -m pip install . +``` + +--- + +## Troubleshooting + +**`No module named 'z3'` during build** + +Install z3-solver before running pip install: +```bash +python3.13 -m pip install --user z3-solver==4.8.12 +``` + +**`meson: command not found` during build** + +Make sure `~/.local/bin` is on `PATH` as described in step 4. + +**`sys/pstat.h: No such file or directory` during GMP build** + +Delete the GMP cache and rebuild — the `--disable-assembly` flag handles this: +```bash +rm -rf ~/.local/gmp ~/.local/_gmp_build_tmp +python3.13 -m pip install . 
+``` diff --git a/package_build/python3.13/manylinux_2_28/run_docker_build b/package_build/python3.13/manylinux_2_28/run_docker_build new file mode 100755 index 00000000..5021ea6d --- /dev/null +++ b/package_build/python3.13/manylinux_2_28/run_docker_build @@ -0,0 +1,25 @@ +#!/usr/bin/tcsh -f +set build_args="" +set image=smlp-manylinux_2_28-python313 +set wheel=smlp-0.1.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl +set clone_script=run_git_clone +\rm -f $wheel >& /dev/null +\rm -f $clone_script >& /dev/null +\cp -p `realpath $0 | xargs dirname`/../22.04/$clone_script . +env DOCKER_BUILDKIT=1 docker build --no-cache --progress=plain -t $image . +if($status) then + echo "\nERROR: Docker build failed\n" + exit 1 +else + set container=dummy + docker create --name $container $image + docker cp ${container}:/app/smlp/dist/$wheel . + docker rm $container +endif +if(-s $wheel ) then + echo "\nWheel: `realpath $wheel`\n" + exit 0 +else + echo "\nERROR: failed to copy $wheel to host\n" + exit 1 +endif diff --git a/package_build/python3.13/manylinux_2_28/run_docker_build_incremental b/package_build/python3.13/manylinux_2_28/run_docker_build_incremental new file mode 100755 index 00000000..0e8e0f9f --- /dev/null +++ b/package_build/python3.13/manylinux_2_28/run_docker_build_incremental @@ -0,0 +1,35 @@ +#!/usr/bin/tcsh -f +set build_args="" +set image=smlp-manylinux_2_28-python313 +set wheel=smlp-0.1.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl +set clone_script=run_git_clone +\rm -f $wheel >& /dev/null +\rm -f $clone_script >& /dev/null +\cp -p `realpath $0 | xargs dirname`/../22.04/$clone_script . +if($#argv > 0) then + if("-rebuild_smlp" == "$argv[1]" ) then + set build_args="--build-arg CACHE_BUST_SMLP=`date +%s`" + else + echo "\nUsage: `realpath $0 | xargs basename` [-rebuild_smlp]\n" + exit 0 + endif +endif +set cmd="env DOCKER_BUILDKIT=1 docker build $build_args --progress=plain -t $image ." 
+echo $cmd +$cmd +if($status) then + echo "\nERROR: Docker build failed\n" + exit 1 +else + set container=dummy + docker create --name $container $image + docker cp ${container}:/app/smlp/dist/$wheel . + docker rm $container +endif +if(-s $wheel ) then + echo "\nWheel: `realpath $wheel`\n" + exit 0 +else + echo "\nERROR: failed to copy $wheel to host\n" + exit 1 +endif diff --git a/package_test/python3.13/22.04/Dockerfile b/package_test/python3.13/22.04/Dockerfile new file mode 100644 index 00000000..7fe3afee --- /dev/null +++ b/package_test/python3.13/22.04/Dockerfile @@ -0,0 +1,91 @@ +FROM ubuntu:22.04 + +# Avoid interactive prompts during apt install +ENV DEBIAN_FRONTEND=noninteractive + +# --------------------------------------------------------------------------- +# 1. System packages +# --------------------------------------------------------------------------- +RUN apt-get update && apt-get install -y \ + ca-certificates \ + curl \ + git \ + gnupg \ + jq \ + libgomp1 \ + locales \ + tzdata wget \ + xvfb && \ + curl -fsSL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xF23C5A6CF475977595C89F51BA6932366A755776" \ + | gpg --dearmor -o /etc/apt/trusted.gpg.d/deadsnakes.gpg && \ + echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu jammy main" \ + > /etc/apt/sources.list.d/deadsnakes.list && \ + apt-get update && apt-get install -y \ + python3.13 python3.13-dev python3.13-venv python3.13-tk && \ + apt-get clean && rm -rf /var/lib/apt/lists/* + + +# --------------------------------------------------------------------------- +# 2. mathsat +# --------------------------------------------------------------------------- +WORKDIR /app +COPY run_mathsat_build.sh . +RUN ./run_mathsat_build.sh && rm -rf /tmp/mathsat* + +# --------------------------------------------------------------------------- +# 3. 
Install pip and setuptools for Python 3.13 +# --------------------------------------------------------------------------- +RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.13 && \ + python3.13 -m pip install --user --force-reinstall --break-system-packages setuptools + +# --------------------------------------------------------------------------- +# 4. Clone smlp (ARG busts the cache to always get latest) +# --------------------------------------------------------------------------- +ARG CACHE_BUST_SMLP +COPY run_git_clone /app +RUN cd /app && ./run_git_clone smlp_python313 + +# --------------------------------------------------------------------------- +# 5. Install smlp from wheel +# --------------------------------------------------------------------------- +RUN python3.13 -m pip install /app/smlp/dist/smlp-0.1.0-cp313-cp313-manylinux_2_34_x86_64.whl + +# --------------------------------------------------------------------------- +# 6. Install pycaret from archive +# --------------------------------------------------------------------------- +RUN pip install /app/smlp/docker/python3.13/pycaret_313.tar.gz + +# --------------------------------------------------------------------------- +# 7. Install UTF-8 fonts +# --------------------------------------------------------------------------- +RUN locale-gen en_US.UTF-8 +ENV LANG=en_US.UTF-8 +ENV LANGUAGE=en_US:en +ENV LC_ALL=en_US.UTF-8 + +# --------------------------------------------------------------------------- +# 8. Docker GUI patch +# --------------------------------------------------------------------------- +COPY tkagg_patch.sh . +RUN ./tkagg_patch.sh + +# --------------------------------------------------------------------------- +# 9. SMLP regression patch +# --------------------------------------------------------------------------- +COPY smlp_regr_patch.sh . +RUN ./smlp_regr_patch.sh + +# --------------------------------------------------------------------------- +# 10. 
Define python version +# --------------------------------------------------------------------------- +RUN ln -sf /usr/bin/python3.13 /usr/bin/python +RUN ln -sf /usr/bin/python3.13 /usr/bin/python3 + +# --------------------------------------------------------------------------- +# 11. Copy and run DORA +# --------------------------------------------------------------------------- +COPY run_dora /app +RUN ./run_dora + +# Default command +CMD ["/usr/bin/bash"] diff --git a/package_test/python3.13/22.04/run_docker_build b/package_test/python3.13/22.04/run_docker_build new file mode 100755 index 00000000..aac25ec3 --- /dev/null +++ b/package_test/python3.13/22.04/run_docker_build @@ -0,0 +1,5 @@ +#!/usr/bin/tcsh -f +set clone_script=run_git_clone +\rm -f $clone_script >& /dev/null +\cp -p `realpath $0 | xargs dirname`/../../../package_build/python3.13/22.04/$clone_script . +env DOCKER_BUILDKIT=1 docker build --no-cache --progress=plain -t smlp-test-build-22.04-python313 . diff --git a/package_test/python3.13/22.04/run_docker_build_incremental b/package_test/python3.13/22.04/run_docker_build_incremental new file mode 100755 index 00000000..90864cbe --- /dev/null +++ b/package_test/python3.13/22.04/run_docker_build_incremental @@ -0,0 +1,16 @@ +#!/usr/bin/tcsh -f +set build_args="" +set clone_script=run_git_clone +\rm -f $clone_script >& /dev/null +\cp -p `realpath $0 | xargs dirname`/../../../package_build/python3.13/22.04/$clone_script . +if($#argv > 0) then + if("-rebuild_smlp" == "$argv[1]" ) then + set build_args="--build-arg CACHE_BUST_SMLP=`git ls-remote https://github.com/SMLP-Systems/smlp.git refs/heads/smlp_python313 | cut -f1`" + else + echo "\nUsage: `realpath $0 | xargs basename` [-rebuild_smlp]\n" + exit 0 + endif +endif +set cmd="env DOCKER_BUILDKIT=1 docker build $build_args --progress=plain -t smlp-test-build-22.04-python313 ." 
+echo $cmd +$cmd diff --git a/package_test/python3.13/22.04/run_dora b/package_test/python3.13/22.04/run_dora new file mode 100755 index 00000000..75174dc0 --- /dev/null +++ b/package_test/python3.13/22.04/run_dora @@ -0,0 +1,37 @@ +#!/usr/bin/bash +script_name=$(realpath $0 | xargs basename) +log=$PWD/${script_name}.log +pref=Test83 +\rm -f ${pref}* > /dev/null 2>&1 +\rm -f $log logs.log > /dev/null 2>&1 +if [[ $# -gt 0 ]]; then + if [[ "$1" == "-clean" ]]; then + exit 0 + fi +fi +xvfb-run /usr/local/lib/python3.13/dist-packages/smlp/run_smlp.py \ + -data smlp/regr_smlp/data/smlp_toy_num_resp_mult.csv \ + -out_dir ./ \ + -pref Test83 \ + -mode optimize \ + -pareto t \ + -opt_strategy lazy \ + -resp y1,y2 \ + -feat x,p1,p2 \ + -model dt_sklearn \ + -dt_sklearn_max_depth 15 \ + -tree_encoding nested \ + -compress_rules f \ + -spec smlp/regr_smlp/specs/smlp_toy_num_resp_mult_free_inps.spec \ + -data_scaler min_max -beta "y1>7 and y2>6" -objv_names obj1,objv2,objv3 -objv_exprs "(y1+y2)/2;y1/2-y2;y2" \ + -epsilon 0.05 \ + -delta_rel 0.01 \ + -save_model_config f \ + -mrmr_pred 0 \ + -plots f \ + -pred_plots f \ + -resp_plots f \ + -seed 10 \ + -log_time f |& tee $log +cmd="diff ${pref}_smlp_toy_num_resp_mult.txt smlp/regr_smlp/master" +echo $cmd && $cmd diff --git a/package_test/python3.13/22.04/run_mathsat_build.sh b/package_test/python3.13/22.04/run_mathsat_build.sh new file mode 100755 index 00000000..1c20bcc3 --- /dev/null +++ b/package_test/python3.13/22.04/run_mathsat_build.sh @@ -0,0 +1,16 @@ +#!/bin/bash +set -e + +MATHSAT="mathsat-5.6.8-linux-x86_64-reentrant" +MATHSAT_BIN_DIR="$(dirname "$(realpath "$0")")/external/${MATHSAT}/bin" + +mkdir -p "${MATHSAT_BIN_DIR}" + +wget --tries=5 --timeout=30 --waitretry=2 \ + "https://mathsat.fbk.eu/release/${MATHSAT}.tar.gz" \ + -O "/tmp/${MATHSAT}.tar.gz" + +cd /tmp +tar -xvf "${MATHSAT}.tar.gz" > "${MATHSAT}.tar.log" 2>&1 + +cp -p "${MATHSAT}/bin/mathsat" "${MATHSAT_BIN_DIR}" diff --git 
a/package_test/python3.13/22.04/smlp_regr_patch.sh b/package_test/python3.13/22.04/smlp_regr_patch.sh new file mode 100755 index 00000000..588f0657 --- /dev/null +++ b/package_test/python3.13/22.04/smlp_regr_patch.sh @@ -0,0 +1,2 @@ +#!/usr/bin/bash +sed -i.bak 's@../../src@/usr/local/lib/python3.13/dist-packages/smlp@' /app/smlp/regr_smlp/code/smlp_regr.py diff --git a/package_test/python3.13/22.04/tkagg_patch.sh b/package_test/python3.13/22.04/tkagg_patch.sh new file mode 100755 index 00000000..5404df51 --- /dev/null +++ b/package_test/python3.13/22.04/tkagg_patch.sh @@ -0,0 +1,6 @@ +#!/usr/bin/bash +sed -i.bak '/^from/iif os.path.exists("\/.dockerenv"): \ + print("Script is running inside a Docker container.") \ + import matplotlib \ + matplotlib.use("TkAgg") \ + import matplotlib.pyplot as plt\n' /usr/local/lib/python3.13/dist-packages/smlp/run_smlp.py diff --git a/package_test/python3.13/24.04/Dockerfile b/package_test/python3.13/24.04/Dockerfile new file mode 100644 index 00000000..be25aacf --- /dev/null +++ b/package_test/python3.13/24.04/Dockerfile @@ -0,0 +1,91 @@ +FROM ubuntu:24.04 + +# Avoid interactive prompts during apt install +ENV DEBIAN_FRONTEND=noninteractive + +# --------------------------------------------------------------------------- +# 1. 
System packages +# --------------------------------------------------------------------------- +RUN apt-get update && apt-get install -y \ + ca-certificates \ + curl \ + git \ + gnupg \ + jq \ + libgomp1 \ + locales \ + tzdata wget \ + xvfb && \ + curl -fsSL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xF23C5A6CF475977595C89F51BA6932366A755776" \ + | gpg --dearmor -o /etc/apt/trusted.gpg.d/deadsnakes.gpg && \ + echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu noble main" \ + > /etc/apt/sources.list.d/deadsnakes.list && \ + apt-get update && apt-get install -y \ + python3.13 python3.13-dev python3.13-venv python3.13-tk && \ + apt-get clean && rm -rf /var/lib/apt/lists/* + + +# --------------------------------------------------------------------------- +# 2. mathsat +# --------------------------------------------------------------------------- +WORKDIR /app +COPY run_mathsat_build.sh . +RUN ./run_mathsat_build.sh && rm -rf /tmp/mathsat* + +# --------------------------------------------------------------------------- +# 3. Install pip and setuptools for Python 3.13 +# --------------------------------------------------------------------------- +RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.13 && \ + python3.13 -m pip install --user --force-reinstall --break-system-packages setuptools + +# --------------------------------------------------------------------------- +# 4. Clone smlp (ARG busts the cache to always get latest) +# --------------------------------------------------------------------------- +ARG CACHE_BUST_SMLP +COPY run_git_clone /app +RUN cd /app && ./run_git_clone smlp_python313 + +# --------------------------------------------------------------------------- +# 5. 
Install smlp from wheel +# --------------------------------------------------------------------------- +RUN python3.13 -m pip install /app/smlp/dist/smlp-0.1.0-cp313-cp313-manylinux_2_39_x86_64.whl + +# --------------------------------------------------------------------------- +# 6. Install pycaret from archive +# --------------------------------------------------------------------------- +RUN pip install /app/smlp/docker/python3.13/pycaret_313.tar.gz + +# --------------------------------------------------------------------------- +# 7. Install UTF-8 fonts +# --------------------------------------------------------------------------- +RUN locale-gen en_US.UTF-8 +ENV LANG=en_US.UTF-8 +ENV LANGUAGE=en_US:en +ENV LC_ALL=en_US.UTF-8 + +# --------------------------------------------------------------------------- +# 8. Docker GUI patch +# --------------------------------------------------------------------------- +COPY ./tkagg_patch.sh . +RUN ./tkagg_patch.sh + +# --------------------------------------------------------------------------- +# 9. SMLP regression patch +# --------------------------------------------------------------------------- +COPY ./smlp_regr_patch.sh . +RUN ./smlp_regr_patch.sh + +# --------------------------------------------------------------------------- +# 10. Define python version +# --------------------------------------------------------------------------- +RUN ln -sf /usr/bin/python3.13 /usr/bin/python +RUN ln -sf /usr/bin/python3.13 /usr/bin/python3 + +# --------------------------------------------------------------------------- +# 11. 
Copy and run DORA +# --------------------------------------------------------------------------- +COPY ./run_dora /app +RUN ./run_dora + +# Default command +CMD ["/usr/bin/bash"] diff --git a/package_test/python3.13/24.04/run_docker_build b/package_test/python3.13/24.04/run_docker_build new file mode 100755 index 00000000..570e1e5a --- /dev/null +++ b/package_test/python3.13/24.04/run_docker_build @@ -0,0 +1,9 @@ +#!/usr/bin/tcsh -f +set clone_script=run_git_clone +\rm -f $clone_script >& /dev/null +\cp -p `realpath $0 | xargs dirname`/../../../package_build/python3.13/22.04/$clone_script . +foreach f (run_dora smlp_regr_patch.sh tkagg_patch.sh run_mathsat_build.sh) + \cp -fp `realpath $0 | xargs dirname`/../22.04/$f . +end +\cp -p `realpath $0 | xargs dirname`/../../../package_build/python3.13/22.04/$clone_script . +env DOCKER_BUILDKIT=1 docker build --no-cache --progress=plain -t smlp-test-build-24.04-python313 . diff --git a/package_test/python3.13/24.04/run_docker_build_incremental b/package_test/python3.13/24.04/run_docker_build_incremental new file mode 100755 index 00000000..122fb736 --- /dev/null +++ b/package_test/python3.13/24.04/run_docker_build_incremental @@ -0,0 +1,19 @@ +#!/usr/bin/tcsh -f +set build_args="" +set clone_script=run_git_clone +\rm -f $clone_script >& /dev/null +\cp -p `realpath $0 | xargs dirname`/../../../package_build/python3.13/22.04/$clone_script . +foreach f (run_dora smlp_regr_patch.sh tkagg_patch.sh run_mathsat_build.sh) + \cp -fp `realpath $0 | xargs dirname`/../22.04/$f . +end +if($#argv > 0) then + if("-rebuild_smlp" == "$argv[1]" ) then + set build_args="--build-arg CACHE_BUST_SMLP=`git ls-remote https://github.com/SMLP-Systems/smlp.git refs/heads/smlp_python313 | cut -f1`" + else + echo "\nUsage: `realpath $0 | xargs basename` [-rebuild_smlp]\n" + exit 0 + endif +endif +set cmd="env DOCKER_BUILDKIT=1 docker build $build_args --progress=plain -t smlp-test-build-24.04-python313 ." 
+echo $cmd +$cmd diff --git a/package_test/python3.13/almalinux_9/Dockerfile b/package_test/python3.13/almalinux_9/Dockerfile new file mode 100644 index 00000000..23958ee9 --- /dev/null +++ b/package_test/python3.13/almalinux_9/Dockerfile @@ -0,0 +1,117 @@ +FROM almalinux:9 + +# --------------------------------------------------------------------------- +# 1. System packages +# --------------------------------------------------------------------------- +RUN dnf install -y epel-release && \ + dnf install -y \ + ca-certificates \ + diffutils \ + git \ + gnupg2 \ + jq \ + libgomp \ + glibc-locale-source \ + glibc-langpack-en \ + gzip \ + tar \ + tcsh \ + tzdata \ + vim \ + wget \ + xorg-x11-fonts-Type1 \ + xorg-x11-fonts-misc \ + xorg-x11-server-Xvfb \ + xorg-x11-utils \ + procps-ng + +# --------------------------------------------------------------------------- +# 2. Python +# --------------------------------------------------------------------------- + +RUN dnf install -y \ + gcc \ + gcc-c++ \ + make \ + wget \ + openssl-devel \ + bzip2-devel \ + libffi-devel \ + zlib-devel \ + readline-devel \ + sqlite-devel \ + tk-devel && \ + dnf clean all + +RUN wget https://www.python.org/ftp/python/3.13.12/Python-3.13.12.tgz && \ + tar -xf Python-3.13.12.tgz && \ + cd Python-3.13.12 && \ + ./configure --enable-optimizations && \ + make -j$(nproc) && \ + make altinstall && \ + cd .. && rm -rf Python-3.13.12 Python-3.13.12.tgz + +# --------------------------------------------------------------------------- +# 3. mathsat +# --------------------------------------------------------------------------- +WORKDIR /app +COPY run_mathsat_build.sh . +RUN ./run_mathsat_build.sh && rm -rf /tmp/mathsat* + +# --------------------------------------------------------------------------- +# 4. 
Install pip and setuptools for Python 3.13 +# --------------------------------------------------------------------------- +RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.13 && \ + python3.13 -m pip install --upgrade setuptools + +# --------------------------------------------------------------------------- +# 5. Clone smlp (ARG busts the cache to always get latest) +# --------------------------------------------------------------------------- +ARG CACHE_BUST_SMLP +COPY run_git_clone /app +RUN cd /app && ./run_git_clone smlp_python313 + +# --------------------------------------------------------------------------- +# 6. Install smlp from wheel +# --------------------------------------------------------------------------- +RUN python3.13 -m pip install /app/smlp/dist/smlp-0.1.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + +# --------------------------------------------------------------------------- +# 7. Install pycaret from archive +# --------------------------------------------------------------------------- +RUN pip install /app/smlp/docker/python3.13/pycaret_313.tar.gz + +# --------------------------------------------------------------------------- +# 8. Configure UTF-8 locale +# --------------------------------------------------------------------------- +RUN localedef -i en_US -f UTF-8 en_US.UTF-8 +ENV LANG=en_US.UTF-8 +ENV LANGUAGE=en_US:en +ENV LC_ALL=en_US.UTF-8 + +# --------------------------------------------------------------------------- +# 9. Docker GUI patch +# --------------------------------------------------------------------------- +COPY tkagg_patch.sh . +RUN ./tkagg_patch.sh + +# --------------------------------------------------------------------------- +# 10. SMLP regression patch +# --------------------------------------------------------------------------- +COPY smlp_regr_patch.sh . +RUN ./smlp_regr_patch.sh + +# --------------------------------------------------------------------------- +# 11. 
Define python version +# --------------------------------------------------------------------------- +RUN ln -sf /usr/local/bin/python3.13 /usr/bin/python && \ + ln -sf /usr/local/bin/python3.13 /usr/bin/python3 + +# --------------------------------------------------------------------------- +# 12. Copy and run DORA +# --------------------------------------------------------------------------- +COPY run_dora /app +RUN ./run_dora + +# Default command +CMD ["/usr/bin/bash"] diff --git a/package_test/python3.13/almalinux_9/run_docker_build b/package_test/python3.13/almalinux_9/run_docker_build new file mode 100755 index 00000000..12cca864 --- /dev/null +++ b/package_test/python3.13/almalinux_9/run_docker_build @@ -0,0 +1,5 @@ +#!/usr/bin/tcsh -f +set clone_script=run_git_clone +\rm -f $clone_script >& /dev/null +\cp -p `realpath $0 | xargs dirname`/../../../package_build/python3.13/22.04/$clone_script . +env DOCKER_BUILDKIT=1 docker build --no-cache --progress=plain -t smlp-test-build-almalinux_9-python313 . diff --git a/package_test/python3.13/almalinux_9/run_docker_build_incremental b/package_test/python3.13/almalinux_9/run_docker_build_incremental new file mode 100755 index 00000000..eb3d132a --- /dev/null +++ b/package_test/python3.13/almalinux_9/run_docker_build_incremental @@ -0,0 +1,16 @@ +#!/usr/bin/tcsh -f +set build_args="" +set clone_script=run_git_clone +\rm -f $clone_script >& /dev/null +\cp -p `realpath $0 | xargs dirname`/../../../package_build/python3.13/22.04/$clone_script . +if($#argv > 0) then + if("-rebuild_smlp" == "$argv[1]" ) then + set build_args="--build-arg CACHE_BUST_SMLP=`git ls-remote https://github.com/SMLP-Systems/smlp.git refs/heads/smlp_python313 | cut -f1`" + else + echo "\nUsage: `realpath $0 | xargs basename` [-rebuild_smlp]\n" + exit 0 + endif +endif +set cmd="env DOCKER_BUILDKIT=1 docker build $build_args --progress=plain -t smlp-test-build-almalinux_9-python313 ." 
+echo $cmd +$cmd diff --git a/package_test/python3.13/almalinux_9/run_dora b/package_test/python3.13/almalinux_9/run_dora new file mode 100755 index 00000000..469eb77f --- /dev/null +++ b/package_test/python3.13/almalinux_9/run_dora @@ -0,0 +1,37 @@ +#!/usr/bin/bash +script_name=$(realpath $0 | xargs basename) +log=$PWD/${script_name}.log +pref=Test83 +\rm -f ${pref}* > /dev/null 2>&1 +\rm -f $log logs.log > /dev/null 2>&1 +if [[ $# -gt 0 ]]; then + if [[ "$1" == "-clean" ]]; then + exit 0 + fi +fi +xvfb-run /usr/local/lib/python3.13/site-packages/smlp/run_smlp.py \ + -data smlp/regr_smlp/data/smlp_toy_num_resp_mult.csv \ + -out_dir ./ \ + -pref Test83 \ + -mode optimize \ + -pareto t \ + -opt_strategy lazy \ + -resp y1,y2 \ + -feat x,p1,p2 \ + -model dt_sklearn \ + -dt_sklearn_max_depth 15 \ + -tree_encoding nested \ + -compress_rules f \ + -spec smlp/regr_smlp/specs/smlp_toy_num_resp_mult_free_inps.spec \ + -data_scaler min_max -beta "y1>7 and y2>6" -objv_names obj1,objv2,objv3 -objv_exprs "(y1+y2)/2;y1/2-y2;y2" \ + -epsilon 0.05 \ + -delta_rel 0.01 \ + -save_model_config f \ + -mrmr_pred 0 \ + -plots f \ + -pred_plots f \ + -resp_plots f \ + -seed 10 \ + -log_time f |& tee $log +cmd="diff ${pref}_smlp_toy_num_resp_mult.txt smlp/regr_smlp/master" +echo $cmd && $cmd diff --git a/package_test/python3.13/almalinux_9/run_mathsat_build.sh b/package_test/python3.13/almalinux_9/run_mathsat_build.sh new file mode 100755 index 00000000..1c20bcc3 --- /dev/null +++ b/package_test/python3.13/almalinux_9/run_mathsat_build.sh @@ -0,0 +1,16 @@ +#!/bin/bash +set -e + +MATHSAT="mathsat-5.6.8-linux-x86_64-reentrant" +MATHSAT_BIN_DIR="$(dirname "$(realpath "$0")")/external/${MATHSAT}/bin" + +mkdir -p "${MATHSAT_BIN_DIR}" + +wget --tries=5 --timeout=30 --waitretry=2 \ + "https://mathsat.fbk.eu/release/${MATHSAT}.tar.gz" \ + -O "/tmp/${MATHSAT}.tar.gz" + +cd /tmp +tar -xvf "${MATHSAT}.tar.gz" > "${MATHSAT}.tar.log" 2>&1 + +cp -p "${MATHSAT}/bin/mathsat" "${MATHSAT_BIN_DIR}" diff 
--git a/package_test/python3.13/almalinux_9/smlp_regr_patch.sh b/package_test/python3.13/almalinux_9/smlp_regr_patch.sh new file mode 100755 index 00000000..cd1262fb --- /dev/null +++ b/package_test/python3.13/almalinux_9/smlp_regr_patch.sh @@ -0,0 +1,2 @@ +#!/usr/bin/bash +sed -i.bak 's@../../src@/usr/local/lib/python3.13/site-packages/smlp@' /app/smlp/regr_smlp/code/smlp_regr.py diff --git a/package_test/python3.13/almalinux_9/tkagg_patch.sh b/package_test/python3.13/almalinux_9/tkagg_patch.sh new file mode 100755 index 00000000..ee54a7ed --- /dev/null +++ b/package_test/python3.13/almalinux_9/tkagg_patch.sh @@ -0,0 +1,6 @@ +#!/usr/bin/bash +sed -i.bak '/^from/iif os.path.exists("\/.dockerenv"): \ + print("Script is running inside a Docker container.") \ + import matplotlib \ + matplotlib.use("TkAgg") \ + import matplotlib.pyplot as plt\n' /usr/local/lib/python3.13/site-packages/smlp/run_smlp.py diff --git a/package_test/python3.13/manylinux_2_28/Dockerfile b/package_test/python3.13/manylinux_2_28/Dockerfile new file mode 100644 index 00000000..bcef1c93 --- /dev/null +++ b/package_test/python3.13/manylinux_2_28/Dockerfile @@ -0,0 +1,90 @@ +FROM ubuntu:22.04 + +# Avoid interactive prompts during apt install +ENV DEBIAN_FRONTEND=noninteractive + +# --------------------------------------------------------------------------- +# 1. 
System packages +# --------------------------------------------------------------------------- +RUN apt-get update && apt-get install -y \ + ca-certificates \ + curl \ + git \ + gnupg \ + jq \ + libgomp1 \ + locales \ + tzdata wget \ + xvfb && \ + curl -fsSL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xF23C5A6CF475977595C89F51BA6932366A755776" \ + | gpg --dearmor -o /etc/apt/trusted.gpg.d/deadsnakes.gpg && \ + echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu jammy main" \ + > /etc/apt/sources.list.d/deadsnakes.list && \ + apt-get update && apt-get install -y \ + python3.13 python3.13-dev python3.13-venv python3.13-tk && \ + apt-get clean && rm -rf /var/lib/apt/lists/* + + +# --------------------------------------------------------------------------- +# 2. mathsat +# --------------------------------------------------------------------------- +WORKDIR /app +COPY run_mathsat_build.sh . +RUN ./run_mathsat_build.sh && rm -rf /tmp/mathsat* + +# --------------------------------------------------------------------------- +# 3. Install pip and setuptools for Python 3.13 +# --------------------------------------------------------------------------- +RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.13 && \ + python3.13 -m pip install --user --force-reinstall --break-system-packages setuptools + +# --------------------------------------------------------------------------- +# 4. Clone smlp (ARG busts the cache to always get latest) +# --------------------------------------------------------------------------- +ARG CACHE_BUST_SMLP +COPY run_git_clone /app +RUN cd /app && ./run_git_clone smlp_python313 + +# --------------------------------------------------------------------------- +# 5. Install smlp from wheel +# --------------------------------------------------------------------------- +RUN python3.13 -m pip install /app/smlp/dist/smlp-0.1.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + +# 6. 
Install pycaret from archive +# --------------------------------------------------------------------------- +RUN pip install /app/smlp/docker/python3.13/pycaret_313.tar.gz + +# --------------------------------------------------------------------------- +# 7. Install UTF-8 fonts +# --------------------------------------------------------------------------- +RUN locale-gen en_US.UTF-8 +ENV LANG=en_US.UTF-8 +ENV LANGUAGE=en_US:en +ENV LC_ALL=en_US.UTF-8 + +# --------------------------------------------------------------------------- +# 8. Docker GUI patch +# --------------------------------------------------------------------------- +COPY tkagg_patch.sh . +RUN ./tkagg_patch.sh + +# --------------------------------------------------------------------------- +# 9. SMLP regression patch +# --------------------------------------------------------------------------- +COPY smlp_regr_patch.sh . +RUN ./smlp_regr_patch.sh + +# --------------------------------------------------------------------------- +# 10. Define python version +# --------------------------------------------------------------------------- +RUN ln -sf /usr/bin/python3.13 /usr/bin/python +RUN ln -sf /usr/bin/python3.13 /usr/bin/python3 + +# --------------------------------------------------------------------------- +# 11. Copy and run DORA +# --------------------------------------------------------------------------- +COPY run_dora /app +RUN ./run_dora + +# Default command +CMD ["/usr/bin/bash"] diff --git a/package_test/python3.13/manylinux_2_28/run_docker_build b/package_test/python3.13/manylinux_2_28/run_docker_build new file mode 100755 index 00000000..67e6b8c1 --- /dev/null +++ b/package_test/python3.13/manylinux_2_28/run_docker_build @@ -0,0 +1,5 @@ +#!/usr/bin/tcsh -f +set clone_script=run_git_clone +\rm -f $clone_script >& /dev/null +\cp -p `realpath $0 | xargs dirname`/../../../package_build/python3.13/22.04/$clone_script . 
+env DOCKER_BUILDKIT=1 docker build --no-cache --progress=plain -t smlp-test-build-22.04-manylinux-python313 . diff --git a/package_test/python3.13/manylinux_2_28/run_docker_build_incremental b/package_test/python3.13/manylinux_2_28/run_docker_build_incremental new file mode 100755 index 00000000..27076a86 --- /dev/null +++ b/package_test/python3.13/manylinux_2_28/run_docker_build_incremental @@ -0,0 +1,16 @@ +#!/usr/bin/tcsh -f +set build_args="" +set clone_script=run_git_clone +\rm -f $clone_script >& /dev/null +\cp -p `realpath $0 | xargs dirname`/../../../package_build/python3.13/22.04/$clone_script . +if($#argv > 0) then + if("-rebuild_smlp" == "$argv[1]" ) then + set build_args="--build-arg CACHE_BUST_SMLP=`git ls-remote https://github.com/SMLP-Systems/smlp.git refs/heads/smlp_python313 | cut -f1`" + else + echo "\nUsage: `realpath $0 | xargs basename` [-rebuild_smlp]\n" + exit 0 + endif +endif +set cmd="env DOCKER_BUILDKIT=1 docker build $build_args --progress=plain -t smlp-test-build-22.04-manylinux-python313 ." 
+echo $cmd +$cmd diff --git a/package_test/python3.13/manylinux_2_28/run_dora b/package_test/python3.13/manylinux_2_28/run_dora new file mode 100755 index 00000000..75174dc0 --- /dev/null +++ b/package_test/python3.13/manylinux_2_28/run_dora @@ -0,0 +1,37 @@ +#!/usr/bin/bash +script_name=$(realpath $0 | xargs basename) +log=$PWD/${script_name}.log +pref=Test83 +\rm -f ${pref}* > /dev/null 2>&1 +\rm -f $log logs.log > /dev/null 2>&1 +if [[ $# -gt 0 ]]; then + if [[ "$1" == "-clean" ]]; then + exit 0 + fi +fi +xvfb-run /usr/local/lib/python3.13/dist-packages/smlp/run_smlp.py \ + -data smlp/regr_smlp/data/smlp_toy_num_resp_mult.csv \ + -out_dir ./ \ + -pref Test83 \ + -mode optimize \ + -pareto t \ + -opt_strategy lazy \ + -resp y1,y2 \ + -feat x,p1,p2 \ + -model dt_sklearn \ + -dt_sklearn_max_depth 15 \ + -tree_encoding nested \ + -compress_rules f \ + -spec smlp/regr_smlp/specs/smlp_toy_num_resp_mult_free_inps.spec \ + -data_scaler min_max -beta "y1>7 and y2>6" -objv_names obj1,objv2,objv3 -objv_exprs "(y1+y2)/2;y1/2-y2;y2" \ + -epsilon 0.05 \ + -delta_rel 0.01 \ + -save_model_config f \ + -mrmr_pred 0 \ + -plots f \ + -pred_plots f \ + -resp_plots f \ + -seed 10 \ + -log_time f |& tee $log +cmd="diff ${pref}_smlp_toy_num_resp_mult.txt smlp/regr_smlp/master" +echo $cmd && $cmd diff --git a/package_test/python3.13/manylinux_2_28/run_mathsat_build.sh b/package_test/python3.13/manylinux_2_28/run_mathsat_build.sh new file mode 100755 index 00000000..1c20bcc3 --- /dev/null +++ b/package_test/python3.13/manylinux_2_28/run_mathsat_build.sh @@ -0,0 +1,16 @@ +#!/bin/bash +set -e + +MATHSAT="mathsat-5.6.8-linux-x86_64-reentrant" +MATHSAT_BIN_DIR="$(dirname "$(realpath "$0")")/external/${MATHSAT}/bin" + +mkdir -p "${MATHSAT_BIN_DIR}" + +wget --tries=5 --timeout=30 --waitretry=2 \ + "https://mathsat.fbk.eu/release/${MATHSAT}.tar.gz" \ + -O "/tmp/${MATHSAT}.tar.gz" + +cd /tmp +tar -xvf "${MATHSAT}.tar.gz" > "${MATHSAT}.tar.log" 2>&1 + +cp -p "${MATHSAT}/bin/mathsat" 
"${MATHSAT_BIN_DIR}" diff --git a/package_test/python3.13/manylinux_2_28/smlp_regr_patch.sh b/package_test/python3.13/manylinux_2_28/smlp_regr_patch.sh new file mode 100755 index 00000000..588f0657 --- /dev/null +++ b/package_test/python3.13/manylinux_2_28/smlp_regr_patch.sh @@ -0,0 +1,2 @@ +#!/usr/bin/bash +sed -i.bak 's@../../src@/usr/local/lib/python3.13/dist-packages/smlp@' /app/smlp/regr_smlp/code/smlp_regr.py diff --git a/package_test/python3.13/manylinux_2_28/tkagg_patch.sh b/package_test/python3.13/manylinux_2_28/tkagg_patch.sh new file mode 100755 index 00000000..5404df51 --- /dev/null +++ b/package_test/python3.13/manylinux_2_28/tkagg_patch.sh @@ -0,0 +1,6 @@ +#!/usr/bin/bash +sed -i.bak '/^from/iif os.path.exists("\/.dockerenv"): \ + print("Script is running inside a Docker container.") \ + import matplotlib \ + matplotlib.use("TkAgg") \ + import matplotlib.pyplot as plt\n' /usr/local/lib/python3.13/dist-packages/smlp/run_smlp.py diff --git a/package_test/python3.13/opensuse_15.5/Dockerfile b/package_test/python3.13/opensuse_15.5/Dockerfile new file mode 100644 index 00000000..3d2bae99 --- /dev/null +++ b/package_test/python3.13/opensuse_15.5/Dockerfile @@ -0,0 +1,125 @@ +FROM opensuse/leap:15.5 + +# --------------------------------------------------------------------------- +# 1. System packages +# --------------------------------------------------------------------------- +RUN zypper refresh && zypper install -y \ + ca-certificates \ + curl \ + git \ + gpg2 \ + jq \ + libgomp1 \ + glibc-locale \ + glibc-locale-base \ + glibc-i18ndata \ + gzip \ + tar \ + tcsh \ + timezone \ + vim \ + wget \ + xorg-x11-fonts \ + xorg-x11-fonts-core \ + xorg-x11-server-Xvfb \ + xset \ + procps + +# --------------------------------------------------------------------------- +# 2. 
gcc and g++ +# --------------------------------------------------------------------------- +RUN zypper --non-interactive install -y \ + gcc13 \ + gcc13-c++ && \ + ln -sf /usr/bin/gcc-13 /usr/bin/gcc && \ + ln -sf /usr/bin/g++-13 /usr/bin/g++ + +# --------------------------------------------------------------------------- +# 3. Python 3.13 +# --------------------------------------------------------------------------- +RUN zypper --non-interactive install -y \ + make \ + zlib-devel \ + libopenssl-devel \ + libffi-devel \ + readline-devel \ + sqlite3-devel \ + bzip2 \ + libbz2-devel \ + xz \ + liblzma5 \ + tk-devel \ + wget && \ + zypper clean --all + +RUN wget https://www.python.org/ftp/python/3.13.12/Python-3.13.12.tgz && \ + tar -xf Python-3.13.12.tgz && \ + cd Python-3.13.12 && \ + CC=gcc-13 CXX=g++-13 ./configure --enable-optimizations --with-tcltk && \ + make -j$(nproc) && \ + make altinstall && \ + cd .. && rm -rf Python-3.13.12 Python-3.13.12.tgz + +# --------------------------------------------------------------------------- +# 3. mathsat +# --------------------------------------------------------------------------- +WORKDIR /app +COPY run_mathsat_build.sh . +RUN ./run_mathsat_build.sh && rm -rf /tmp/mathsat* + +# --------------------------------------------------------------------------- +# 4. Install pip and setuptools for Python 3.13 +# --------------------------------------------------------------------------- +RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.13 && \ + python3.13 -m pip install --upgrade setuptools + +# --------------------------------------------------------------------------- +# 5. Clone smlp (ARG busts the cache to always get latest) +# --------------------------------------------------------------------------- +ARG CACHE_BUST_SMLP +COPY run_git_clone /app +RUN cd /app && ./run_git_clone smlp_python313 + +# --------------------------------------------------------------------------- +# 6. 
Install smlp from wheel +# --------------------------------------------------------------------------- +RUN python3.13 -m pip install /app/smlp/dist/smlp-0.1.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + +# 7. Install pycaret from archive +# --------------------------------------------------------------------------- +RUN pip install /app/smlp/docker/python3.13/pycaret_313.tar.gz + +# --------------------------------------------------------------------------- +# 8. Configure UTF-8 locale +# --------------------------------------------------------------------------- +RUN localedef -i en_US -f UTF-8 en_US.UTF-8 +ENV LANG=en_US.UTF-8 +ENV LANGUAGE=en_US:en +ENV LC_ALL=en_US.UTF-8 + +# --------------------------------------------------------------------------- +# 9. Docker GUI patch +# --------------------------------------------------------------------------- +COPY tkagg_patch.sh . +RUN ./tkagg_patch.sh + +# --------------------------------------------------------------------------- +# 10. SMLP regression patch +# --------------------------------------------------------------------------- +COPY smlp_regr_patch.sh . +RUN ./smlp_regr_patch.sh + +# --------------------------------------------------------------------------- +# 11. Define python version +# --------------------------------------------------------------------------- +RUN ln -sf /usr/local/bin/python3.13 /usr/bin/python && \ + ln -sf /usr/local/bin/python3.13 /usr/bin/python3 + +# --------------------------------------------------------------------------- +# 12. 
Copy and run DORA +# --------------------------------------------------------------------------- +COPY run_dora /app +RUN ./run_dora + +# Default command +CMD ["/usr/bin/bash"] diff --git a/package_test/python3.13/opensuse_15.5/run_docker_build b/package_test/python3.13/opensuse_15.5/run_docker_build new file mode 100755 index 00000000..bfe6b9ae --- /dev/null +++ b/package_test/python3.13/opensuse_15.5/run_docker_build @@ -0,0 +1,5 @@ +#!/usr/bin/tcsh -f +set clone_script=run_git_clone +\rm -f $clone_script >& /dev/null +\cp -p `realpath $0 | xargs dirname`/../../../package_build/python3.13/22.04/$clone_script . +env DOCKER_BUILDKIT=1 docker build --no-cache --progress=plain -t smlp-test-build-opensuse_15.5-python313 . diff --git a/package_test/python3.13/opensuse_15.5/run_docker_build_incremental b/package_test/python3.13/opensuse_15.5/run_docker_build_incremental new file mode 100755 index 00000000..b74cca81 --- /dev/null +++ b/package_test/python3.13/opensuse_15.5/run_docker_build_incremental @@ -0,0 +1,16 @@ +#!/usr/bin/tcsh -f +set build_args="" +set clone_script=run_git_clone +\rm -f $clone_script >& /dev/null +\cp -p `realpath $0 | xargs dirname`/../../../package_build/python3.13/22.04/$clone_script . +if($#argv > 0) then + if("-rebuild_smlp" == "$argv[1]" ) then + set build_args="--build-arg CACHE_BUST_SMLP=`git ls-remote https://github.com/SMLP-Systems/smlp.git refs/heads/smlp_python313 | cut -f1`" + else + echo "\nUsage: `realpath $0 | xargs basename` [-rebuild_smlp]\n" + exit 0 + endif +endif +set cmd="env DOCKER_BUILDKIT=1 docker build $build_args --progress=plain -t smlp-test-build-opensuse_15.5-python313 ." 
+echo $cmd +$cmd diff --git a/package_test/python3.13/opensuse_15.5/run_dora b/package_test/python3.13/opensuse_15.5/run_dora new file mode 100755 index 00000000..9b13335c --- /dev/null +++ b/package_test/python3.13/opensuse_15.5/run_dora @@ -0,0 +1,47 @@ +#!/usr/bin/bash +script_name=$(realpath $0 | xargs basename) +log=$PWD/${script_name}.log +pref=Test83 +\rm -f ${pref}* > /dev/null 2>&1 +\rm -f $log logs.log > /dev/null 2>&1 +if [[ $# -gt 0 ]]; then + if [[ "$1" == "-clean" ]]; then + exit 0 + fi +fi +pkill Xvfb > /dev/null +export DISPLAY=:99 +Xvfb $DISPLAY -screen 0 1024x768x24 > /dev/null 2>&1 & +sleep 1 +if xset q > /dev/null 2>&1; then + echo "" && echo "X display has been initialized successfully" && echo "" +else + echo "" && echo "X display initialization failed" && echo "" + exit 1 +fi +/usr/local/lib/python3.13/site-packages/smlp/run_smlp.py \ + -data smlp/regr_smlp/data/smlp_toy_num_resp_mult.csv \ + -out_dir ./ \ + -pref Test83 \ + -mode optimize \ + -pareto t \ + -opt_strategy lazy \ + -resp y1,y2 \ + -feat x,p1,p2 \ + -model dt_sklearn \ + -dt_sklearn_max_depth 15 \ + -tree_encoding nested \ + -compress_rules f \ + -spec smlp/regr_smlp/specs/smlp_toy_num_resp_mult_free_inps.spec \ + -data_scaler min_max -beta "y1>7 and y2>6" -objv_names obj1,objv2,objv3 -objv_exprs "(y1+y2)/2;y1/2-y2;y2" \ + -epsilon 0.05 \ + -delta_rel 0.01 \ + -save_model_config f \ + -mrmr_pred 0 \ + -plots f \ + -pred_plots f \ + -resp_plots f \ + -seed 10 \ + -log_time f |& tee $log +cmd="diff ${pref}_smlp_toy_num_resp_mult.txt smlp/regr_smlp/master" +echo $cmd && $cmd diff --git a/package_test/python3.13/opensuse_15.5/run_mathsat_build.sh b/package_test/python3.13/opensuse_15.5/run_mathsat_build.sh new file mode 100755 index 00000000..1c20bcc3 --- /dev/null +++ b/package_test/python3.13/opensuse_15.5/run_mathsat_build.sh @@ -0,0 +1,16 @@ +#!/bin/bash +set -e + +MATHSAT="mathsat-5.6.8-linux-x86_64-reentrant" +MATHSAT_BIN_DIR="$(dirname "$(realpath 
"$0")")/external/${MATHSAT}/bin" + +mkdir -p "${MATHSAT_BIN_DIR}" + +wget --tries=5 --timeout=30 --waitretry=2 \ + "https://mathsat.fbk.eu/release/${MATHSAT}.tar.gz" \ + -O "/tmp/${MATHSAT}.tar.gz" + +cd /tmp +tar -xvf "${MATHSAT}.tar.gz" > "${MATHSAT}.tar.log" 2>&1 + +cp -p "${MATHSAT}/bin/mathsat" "${MATHSAT_BIN_DIR}" diff --git a/package_test/python3.13/opensuse_15.5/smlp_regr_patch.sh b/package_test/python3.13/opensuse_15.5/smlp_regr_patch.sh new file mode 100755 index 00000000..cd1262fb --- /dev/null +++ b/package_test/python3.13/opensuse_15.5/smlp_regr_patch.sh @@ -0,0 +1,2 @@ +#!/usr/bin/bash +sed -i.bak 's@../../src@/usr/local/lib/python3.13/site-packages/smlp@' /app/smlp/regr_smlp/code/smlp_regr.py diff --git a/package_test/python3.13/opensuse_15.5/tkagg_patch.sh b/package_test/python3.13/opensuse_15.5/tkagg_patch.sh new file mode 100755 index 00000000..ee54a7ed --- /dev/null +++ b/package_test/python3.13/opensuse_15.5/tkagg_patch.sh @@ -0,0 +1,6 @@ +#!/usr/bin/bash +sed -i.bak '/^from/iif os.path.exists("\/.dockerenv"): \ + print("Script is running inside a Docker container.") \ + import matplotlib \ + matplotlib.use("TkAgg") \ + import matplotlib.pyplot as plt\n' /usr/local/lib/python3.13/site-packages/smlp/run_smlp.py diff --git a/package_test/python3.13/venv/README.md b/package_test/python3.13/venv/README.md new file mode 100644 index 00000000..dad61cc7 --- /dev/null +++ b/package_test/python3.13/venv/README.md @@ -0,0 +1,23 @@ +# SMLP Installation Guide for Ubuntu 24.04 + +## 1. Install Python 3.13 + +```bash +sudo add-apt-repository ppa:deadsnakes/ppa +sudo apt-get update +sudo apt-get install -y python3.13 python3.13-dev python3.13-venv python3.13-tk +``` + +## 2. Install SMLP package, MathSat and run DORA test + +```bash +./run_dora +``` + +## 3. 
Run regression + +```bash +source smlp_package_venv/bin/activate +cd smlp_package_venv/smlp/regr_smlp/code +./smlp_regr.py +``` diff --git a/package_test/python3.13/venv/run_dora b/package_test/python3.13/venv/run_dora new file mode 100755 index 00000000..ca6c32ab --- /dev/null +++ b/package_test/python3.13/venv/run_dora @@ -0,0 +1,30 @@ +#!/usr/bin/bash +smlp_venv_dir=smlp_package_venv +\rm -rf $smlp_venv_dir > /dev/null +if [[ $# -gt 0 ]]; then + if [ "$1" == "-clean" ]; then + exit 0 + fi +fi +python3.13 -m venv $smlp_venv_dir +cd $smlp_venv_dir +source bin/activate +git clone https://github.com/SMLP-Systems/smlp +GIT_BRANCH=smlp_python313 +cd smlp +if [ $(git branch -r --list origin/$GIT_BRANCH) ]; then + git switch $GIT_BRANCH +fi +mathsat_dest_dir=../external +mathsat_src_dir=package_test/python3.13/manylinux_2_28/external +$(dirname $mathsat_src_dir)/run_mathsat_build.sh +\rm -rf $mathsat_dest_dir > /dev/null +\mv $mathsat_src_dir $mathsat_dest_dir +pip install dist/smlp-0.1.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl +pip install docker/python3.13/pycaret_313.tar.gz +sed -i.bak "s@../../src@$(realpath ../lib/python3.13/site-packages/smlp)@" regr_smlp/code/smlp_regr.py +cd regr_smlp/code +../../../lib/python3.13/site-packages/smlp/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test1 -mode train -resp y1 -feat x,p1,p2 -model dt_caret -save_model_config f -mrmr_pred 0 -plots t -seed 10 -log_time f +cmd="diff Test1_smlp_toy_num_resp_mult.txt ../master" +echo $cmd +$cmd diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..c64fe4de --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,43 @@ +[build-system] +requires = [ + "setuptools>=68", + "wheel", +] +build-backend = "setuptools.build_meta" + +[project] +name = "smlp" +version = "0.1.0" +description = "SMLP - System for Machine Learning-based Parameter analysis" +requires-python = "==3.13.*" +dependencies = [ + "doepy", + "jenkspy", + 
"keras_tuner", + "matplotlib==3.10.8", + "meson", + "mrmr-selection", + "numpy==2.3.5", + "pandas==2.3.3", + "pydoe==0.3.8", + "pysubgroup", + "scikit-learn==1.7.2", + "scipy", + "seaborn", + "z3-solver==4.8.12", + "tensorflow", +] + +[tool.setuptools.packages.find] +where = ["."] +exclude = ["src*", "smlp_py*", "regr_smlp*", "utils*"] + +# --------------------------------------------------------------------------- +# Build-time environment variables (all optional) +# --------------------------------------------------------------------------- +# BOOST_ROOT Reuse an existing Boost prefix, skips download + compile. +# e.g. export BOOST_ROOT=~/.local/boost_py313 +# BOOST_CACHE_DIR Where to cache compiled Boost (default: ~/.local/boost_py313). +# BOOST_VERSION Boost version to download (default: 1.83.0). +# KAY_DIR Path to an existing kay checkout. +# SMLP_BRANCH Git branch to use in the smlp repo (auto-detected if unset). diff --git a/regr_smlp/code/smlp_regr.csv b/regr_smlp/code/smlp_regr.csv old mode 100755 new mode 100644 index 29592b4c..f05a3684 --- a/regr_smlp/code/smlp_regr.csv +++ b/regr_smlp/code/smlp_regr.csv @@ -170,8 +170,8 @@ d,data,new_data,switches,description 169,smlp_toy_num_resp_mult,,"-mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_max_depth 2 -rf_sklearn_n_estimators 3 -et_sklearn_bootstrap f -tree_encoding flat -model_per_response t -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -solver_path mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic test for et_sklearn with flat tree_encoding and model_per_response t in model exploration mode optimize 170,smlp_toy_num_resp_mult,,"-mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_max_depth 2 -rf_sklearn_n_estimators 3 -et_sklearn_bootstrap f -tree_encoding flat -model_per_response f 
-compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -solver_path mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic test for et_sklearn with flat tree_encoding and model_per_response f in model exploration mode optimize 171,smlp_toy_num_resp_mult,,"-mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_caret -tree_encoding flat -model_per_response t -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic test for et_caret with flat tree_encoding in model exploration mode optimize -172,smlp_toy_num_resp_mult,,"-mode verify -resp y2 -feat x,p1,p2 -model nn_keras -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -nnet_encoding layered -nn_keras_tuner hyperband -nn_keras_layers_grid ""2,2;3,3,3"" -save_model_config f -spec smlp_toy_num_resp_mult_y2_verify.spec -asrt_names asrt1 -asrt_exprs ""2*y2>1"" -sw_coef 4 -sw_exp 5 -sw_int 0.5 -solver_path "/nfs/iil/proj/dt/eva/smlp/external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat"",basic test for nn_keras flat encoding for functional api, i, one response variable, adapts test 154 -173,smlp_toy_num_resp_mult,,"-mode verify -resp y2 -feat x,p1,p2 -model nn_keras -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api t -nnet_encoding layered -nn_keras_tuner hyperband -nn_keras_layers_grid ""2,2;3,3,3"" -save_model_config f -spec smlp_toy_num_resp_mult_y2_verify.spec -asrt_names asrt1 -asrt_exprs ""2*y2>1"" -sw_coef 4 -sw_exp 5 -sw_int 0.5 -nn_keras_metrics mae -solver_path "/nfs/iil/proj/dt/eva/smlp/external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat"",basic test for nn_keras flat encoding for sequential api, one 
response variable, adapts test 155 +172,smlp_toy_num_resp_mult,,"-mode verify -resp y2 -feat x,p1,p2 -model nn_keras -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -nnet_encoding layered -nn_keras_tuner hyperband -nn_keras_layers_grid ""2,2;3,3,3"" -save_model_config f -spec smlp_toy_num_resp_mult_y2_verify.spec -asrt_names asrt1 -asrt_exprs ""2*y2>1"" -sw_coef 4 -sw_exp 5 -sw_int 0.5 -solver_path "mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat"",basic test for nn_keras flat encoding for functional api, i, one response variable, adapts test 154 +173,smlp_toy_num_resp_mult,,"-mode verify -resp y2 -feat x,p1,p2 -model nn_keras -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api t -nnet_encoding layered -nn_keras_tuner hyperband -nn_keras_layers_grid ""2,2;3,3,3"" -save_model_config f -spec smlp_toy_num_resp_mult_y2_verify.spec -asrt_names asrt1 -asrt_exprs ""2*y2>1"" -sw_coef 4 -sw_exp 5 -sw_int 0.5 -nn_keras_metrics mae -solver_path "mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat"",basic test for nn_keras flat encoding for sequential api, one response variable, adapts test 155 174,smlp_toy_num_resp_mult,,"-mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model nn_keras -nn_keras_epochs 20 -nn_keras_seq_api f -nnet_encoding layered -save_model f -use_model f -mrmr_pred 2 -model_per_response f -spec smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic layered nn_keras encoding test with model_per_response f nn_keras_seq_api f for nn_keras in model exploration mode optsyn 175,smlp_toy_num_resp_mult,,"-mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model nn_keras -nn_keras_epochs 20 -nn_keras_seq_api t -nnet_encoding layered -save_model f -use_model f -mrmr_pred 2 -model_per_response f -spec smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 
0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic layered nn_keras encoding test with model_per_response f nn_keras_seq_api t for nn_keras in model exploration mode optsyn 176,smlp_toy_num_resp_mult,,"-mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model nn_keras -nn_keras_epochs 20 -nn_keras_seq_api f -nnet_encoding layered -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic layered nn_keras encoding test with model_per_response t nn_keras_seq_api f for nn_keras in model exploration mode optsyn @@ -180,7 +180,7 @@ d,data,new_data,switches,description 179,smlp_toy_num_resp_mult,,"-mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model nn_keras -nn_keras_epochs 20 -nn_keras_seq_api f -nnet_encoding layered -save_model f -use_model f -mrmr_pred 2 -model_per_response f -scale_resp f -spec smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic layered nn_keras encoding test with model_per_response f nn_keras_seq_api f for nn_keras in model exploration mode optsyn when resposes are not scaled adapts test 174 180,smlp_toy_num_resp_mult,,"-mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model nn_keras -nn_keras_epochs 20 -nn_keras_seq_api t -nnet_encoding layered -save_model f -use_model f -mrmr_pred 2 -model_per_response f -scale_feat f -scale_resp f -spec smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic layered nn_keras encoding test with model_per_response f nn_keras_seq_api t for nn_keras in model exploration mode optsyn when features and responses are not scaled adapts test 175 181,smlp_toy_num_resp_mult,,"-mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 
-compress_rules f -tree_encoding flat -scale_feat f -spec smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic flat tree encoding test for dt_sklearn multi objective pareto optimization when features are not scaled modifies test 164 -182,smlp_toy_num_resp_mult,,"-mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding flat -scale_resp f -spec smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -solver_path "/nfs/iil/proj/dt/eva/smlp/external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat"",basic flat tree encoding test for dt_sklearn multi objective pareto optimization when responses are not scaled modifies test 164 +182,smlp_toy_num_resp_mult,,"-mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding flat -scale_resp f -spec smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -solver_path "mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat"",basic flat tree encoding test for dt_sklearn multi objective pareto optimization when responses are not scaled modifies test 164 183,smlp_toy_num_resp_mult,,"-mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding flat -scale_resp f -scale_feat f -spec smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic 
flat tree encoding test for dt_sklearn multi objective pareto optimization when features and responses are not scaled modifies test 164 184,smlp_toy_num_resp_noknobs,smlp_toy_num_resp_noknobs_pred_labeled,"-mode verify -resp y1,y2 -feat x0,x1,x2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding branched -mrmr_pred 2 -model_per_response t -spec smlp_toy_num_resp_noknobs_verify.spec -asrt_names asrt1,asrt2,asrt3 -asrt_exprs ""(y2**3+x2)/2<6;y1>=9;y2<0"" -trace_anonym t -trace_prec 3 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",tests model term formation when mrmr_pred is activated and not all features are selected for training the model adapts test 139 185,smlp_toy_num_resp_noknobs,smlp_toy_num_resp_noknobs_pred_labeled,"-mode verify -resp y1,y2 -feat x0,x1,x2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding branched -mrmr_pred 2 -model_per_response t -spec smlp_toy_num_resp_noknobs_verify.spec -asrt_names asrt1,asrt2,asrt3 -asrt_exprs ""(y2**3+x2)/2<6;y1>=9;y2<0"" -trace_anonym t -trace_prec 3 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",tests model term construction with branched_encoding of tress and model per reponse when mrmr_pred is activated and not all features are selected for training the model, adapts test 162 @@ -195,7 +195,7 @@ d,data,new_data,switches,description 194,smlp_toy_num_resp_mult,,"-mode optsyn -resp y1,y2 -feat x,p1,p2 -model rf_sklearn -rf_sklearn_max_depth 4 -rf_sklearn_n_estimators 3 -tree_encoding branched -compress_rules t -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic branched tree encoding test with model_per_response t for rf_sklearn in model exploration mode optsyn, adapts test 94 and test 167 195,smlp_toy_num_resp_mult,,"-mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_sklearn 
-et_sklearn_max_depth 2 -et_sklearn_n_estimators 3 -et_sklearn_bootstrap f -tree_encoding branched -model_per_response f -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -solver_path mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic test for et_sklearn with branched tree_encoding and model_per_response f in model exploration mode optimize adapts test 192 by setting n_estimators 3 and then discrepancy between z3, mathsat and yices results disappear 196,smlp_toy_num_resp_mult,,"-mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding branched -scale_feat f -spec smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic branched tree encoding test for dt_sklearn multi objective pareto optimization when features are not scaled modifies test 164 and test 181 -197,smlp_toy_num_resp_mult,,"-mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding branched -scale_resp f -spec smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -solver_path "/nfs/iil/proj/dt/eva/smlp/external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat"",basic branched tree encoding test for dt_sklearn multi objective pareto optimization when responses are not scaled modifies test 164 and test 182 +197,smlp_toy_num_resp_mult,,"-mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding branched -scale_resp f -spec 
smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -solver_path "mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat"",basic branched tree encoding test for dt_sklearn multi objective pareto optimization when responses are not scaled modifies test 164 and test 182 198,smlp_toy_num_resp_mult,,"-mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding branched -scale_resp f -scale_feat f -spec smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -solver_path mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat",basic branched tree encoding test for dt_sklearn multi objective pareto optimization when features and responses are not scaled modifies test 164 and test 183 199,smlp_toy_num_resp_mult,,"-mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_max_depth 2 -et_sklearn_n_estimators 100 -et_sklearn_bootstrap f -tree_encoding branched -model_per_response f -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",test to demonstrate that in pareto optimization and optsyn modes with multiple objectives when beta constraints are not present SMLP results are not consistent when different solvers are used; this is due to fact that when a subset of objectoves are exemined in pareto algo, outputs not covered by the active objectives become don't cares (there are no contraints on then except model constraints) and this situation is likely not modeled in SMLP accurately; modifies test 192 to use z3 instead of mathsat 200,smlp_toy_num_resp_mult,,"-mode optimize 
-opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_max_depth 2 -et_sklearn_n_estimators 100 -et_sklearn_bootstrap f -tree_encoding branched -model_per_response f -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0 -solver_path mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -plots f -pred_plots f -resp_plots f -seed 10 -log_time f",basic test for et_sklearn with branched tree_encoding and model_per_response f in model exploration mode optimize adapts test 170 !!!!!!!!! in this test z3 result differs from mathsat and yices results (the latter two give sma results, cvc5 faild with incomparable ite tipes for if and else branches) diff --git a/regr_smlp/code/smlp_regr.py b/regr_smlp/code/smlp_regr.py index 12e7d73a..d773c101 100755 --- a/regr_smlp/code/smlp_regr.py +++ b/regr_smlp/code/smlp_regr.py @@ -697,6 +697,7 @@ def worker(q, id_q, print_l): print('command (2)', command); with print_l: + print("") print("Running test {0} test type: {1}, description: {2}".format(test_id, test_type, test_description)) print(command + '\n') diff --git a/regr_smlp/master/Test100_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test100_smlp_toy_num_resp_mult.txt index 77d19bac..7bc0ffc8 100644 --- a/regr_smlp/master/Test100_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test100_smlp_toy_num_resp_mult.txt @@ -121,7 +121,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test100_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': 
np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test101_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test101_smlp_toy_num_resp_mult.txt index bd75e30c..08c112f6 100644 --- a/regr_smlp/master/Test101_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test101_smlp_toy_num_resp_mult.txt @@ -148,7 +148,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./test101_model_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test103_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test103_smlp_toy_num_resp_mult.txt index 990f164f..e816da6a 100644 --- a/regr_smlp/master/Test103_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test103_smlp_toy_num_resp_mult.txt @@ -150,7 +150,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./test103_model_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - 
TRAIN MODEL diff --git a/regr_smlp/master/Test104_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test104_smlp_toy_num_resp_mult.txt index 86e3ca8b..52571d75 100644 --- a/regr_smlp/master/Test104_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test104_smlp_toy_num_resp_mult.txt @@ -119,7 +119,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test104_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test105_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test105_smlp_toy_num_resp_mult.txt index 8a662fa2..e9b5e65d 100644 --- a/regr_smlp/master/Test105_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test105_smlp_toy_num_resp_mult.txt @@ -119,7 +119,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test105_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git 
a/regr_smlp/master/Test106_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test106_smlp_toy_num_resp_mult.txt index e7323e52..1dc0b832 100644 --- a/regr_smlp/master/Test106_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test106_smlp_toy_num_resp_mult.txt @@ -119,7 +119,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test106_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test107_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test107_smlp_toy_num_resp_mult.txt index 3a836be1..02784475 100644 --- a/regr_smlp/master/Test107_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test107_smlp_toy_num_resp_mult.txt @@ -119,7 +119,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test107_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test108_smlp_toy_num_resp_mult.txt 
b/regr_smlp/master/Test108_smlp_toy_num_resp_mult.txt index 530bc277..b354c244 100644 --- a/regr_smlp/master/Test108_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test108_smlp_toy_num_resp_mult.txt @@ -139,7 +139,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test108_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test109_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test109_smlp_toy_num_resp_mult.txt index 64a139de..873f5af3 100644 --- a/regr_smlp/master/Test109_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test109_smlp_toy_num_resp_mult.txt @@ -139,7 +139,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test109_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt 
b/regr_smlp/master/Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index ee4504e9..1fe5b55b 100644 --- a/regr_smlp/master/Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ b/regr_smlp/master/Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -102,7 +102,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test10_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git a/regr_smlp/master/Test110_smlp_toy_basic_smlp_toy_basic_pred_unlabeled.txt b/regr_smlp/master/Test110_smlp_toy_basic_smlp_toy_basic_pred_unlabeled.txt index 09883356..0b14cde7 100644 --- a/regr_smlp/master/Test110_smlp_toy_basic_smlp_toy_basic_pred_unlabeled.txt +++ b/regr_smlp/master/Test110_smlp_toy_basic_smlp_toy_basic_pred_unlabeled.txt @@ -97,7 +97,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./test110_model_data_bounds.json -smlp_logger - INFO - {'x1': {'min': -0.8218, 'max': 9.546}, 'x2': {'min': -1.0, 'max': 1.0}, 'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'x1': {'min': np.float64(-0.8218), 'max': np.float64(9.546)}, 'x2': {'min': np.float64(-1.0), 'max': np.float64(1.0)}, 'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': 
np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git a/regr_smlp/master/Test113_smlp_toy_basic.txt b/regr_smlp/master/Test113_smlp_toy_basic.txt index 03a9bb33..29fa3a26 100644 --- a/regr_smlp/master/Test113_smlp_toy_basic.txt +++ b/regr_smlp/master/Test113_smlp_toy_basic.txt @@ -120,7 +120,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./test113_model_data_bounds.json -smlp_logger - INFO - {'x1': {'min': -0.8218, 'max': 9.546}, 'x2': {'min': -1.0, 'max': 1.0}, 'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'x1': {'min': np.float64(-0.8218), 'max': np.float64(9.546)}, 'x2': {'min': np.float64(-1.0), 'max': np.float64(1.0)}, 'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test114_smlp_toy_basic.txt b/regr_smlp/master/Test114_smlp_toy_basic.txt index 3f9bbb02..baee2e4f 100644 --- a/regr_smlp/master/Test114_smlp_toy_basic.txt +++ b/regr_smlp/master/Test114_smlp_toy_basic.txt @@ -120,7 +120,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test114_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'x1': {'min': -0.8218, 'max': 9.546}, 'x2': {'min': -1.0, 'max': 1.0}, 'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'x1': {'min': np.float64(-0.8218), 'max': np.float64(9.546)}, 'x2': 
{'min': np.float64(-1.0), 'max': np.float64(1.0)}, 'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test115_smlp_toy_basic.txt b/regr_smlp/master/Test115_smlp_toy_basic.txt index a4ccb119..3db8c1c8 100644 --- a/regr_smlp/master/Test115_smlp_toy_basic.txt +++ b/regr_smlp/master/Test115_smlp_toy_basic.txt @@ -150,7 +150,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test115_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'x1': {'min': -0.8218, 'max': 9.546}, 'x2': {'min': -1.0, 'max': 1.0}, 'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'x1': {'min': np.float64(-0.8218), 'max': np.float64(9.546)}, 'x2': {'min': np.float64(-1.0), 'max': np.float64(1.0)}, 'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test116_smlp_toy_basic.txt b/regr_smlp/master/Test116_smlp_toy_basic.txt index ff9caa58..5896a69c 100644 --- a/regr_smlp/master/Test116_smlp_toy_basic.txt +++ b/regr_smlp/master/Test116_smlp_toy_basic.txt @@ -150,7 +150,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test116_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'x1': {'min': -0.8218, 'max': 9.546}, 'x2': {'min': -1.0, 'max': 1.0}, 'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 
12.02}} +smlp_logger - INFO - {'x1': {'min': np.float64(-0.8218), 'max': np.float64(9.546)}, 'x2': {'min': np.float64(-1.0), 'max': np.float64(1.0)}, 'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - Creating model exploration base components: Start diff --git a/regr_smlp/master/Test117_smlp_toy_basic.txt b/regr_smlp/master/Test117_smlp_toy_basic.txt index 6d1ef161..da4f7d79 100644 --- a/regr_smlp/master/Test117_smlp_toy_basic.txt +++ b/regr_smlp/master/Test117_smlp_toy_basic.txt @@ -120,7 +120,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test117_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - Creating model exploration base components: Start diff --git a/regr_smlp/master/Test118_smlp_toy_basic.txt b/regr_smlp/master/Test118_smlp_toy_basic.txt index c83f5020..e6ff2448 100644 --- a/regr_smlp/master/Test118_smlp_toy_basic.txt +++ b/regr_smlp/master/Test118_smlp_toy_basic.txt @@ -120,7 +120,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test118_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'p1': {'min': np.float64(0.1), 'max': 
np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - Creating model exploration base components: Start diff --git a/regr_smlp/master/Test119_smlp_toy_basic.txt b/regr_smlp/master/Test119_smlp_toy_basic.txt index 326b4413..2c884910 100644 --- a/regr_smlp/master/Test119_smlp_toy_basic.txt +++ b/regr_smlp/master/Test119_smlp_toy_basic.txt @@ -118,7 +118,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test119_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - Creating model exploration base components: Start diff --git a/regr_smlp/master/Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt b/regr_smlp/master/Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index 687ce3b3..df7d7c3e 100644 --- a/regr_smlp/master/Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ b/regr_smlp/master/Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -102,7 +102,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test11_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': 
np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git a/regr_smlp/master/Test120_smlp_toy_basic.txt b/regr_smlp/master/Test120_smlp_toy_basic.txt index 07d83947..d1d78f9f 100644 --- a/regr_smlp/master/Test120_smlp_toy_basic.txt +++ b/regr_smlp/master/Test120_smlp_toy_basic.txt @@ -118,7 +118,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test120_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - Creating model exploration base components: Start diff --git a/regr_smlp/master/Test121_smlp_toy_basic.txt b/regr_smlp/master/Test121_smlp_toy_basic.txt index 30c6c878..49014352 100644 --- a/regr_smlp/master/Test121_smlp_toy_basic.txt +++ b/regr_smlp/master/Test121_smlp_toy_basic.txt @@ -118,7 +118,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test121_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 
'max': np.float64(12.02)}} smlp_logger - INFO - Creating model exploration base components: Start diff --git a/regr_smlp/master/Test122_smlp_toy_basic.txt b/regr_smlp/master/Test122_smlp_toy_basic.txt index f220f62a..a7f9d6b3 100644 --- a/regr_smlp/master/Test122_smlp_toy_basic.txt +++ b/regr_smlp/master/Test122_smlp_toy_basic.txt @@ -118,7 +118,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test122_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - Creating model exploration base components: Start diff --git a/regr_smlp/master/Test123_smlp_toy_basic.txt b/regr_smlp/master/Test123_smlp_toy_basic.txt index e2d5711a..71effdad 100644 --- a/regr_smlp/master/Test123_smlp_toy_basic.txt +++ b/regr_smlp/master/Test123_smlp_toy_basic.txt @@ -118,7 +118,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test123_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - Creating model exploration base components: Start diff --git a/regr_smlp/master/Test124_smlp_toy_basic.txt b/regr_smlp/master/Test124_smlp_toy_basic.txt 
index 0dc4cf3c..907ead55 100644 --- a/regr_smlp/master/Test124_smlp_toy_basic.txt +++ b/regr_smlp/master/Test124_smlp_toy_basic.txt @@ -118,7 +118,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test124_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - Creating model exploration base components: Start diff --git a/regr_smlp/master/Test125_smlp_toy_basic.txt b/regr_smlp/master/Test125_smlp_toy_basic.txt index fe2f37f7..ced48afd 100644 --- a/regr_smlp/master/Test125_smlp_toy_basic.txt +++ b/regr_smlp/master/Test125_smlp_toy_basic.txt @@ -118,7 +118,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test125_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - Creating model exploration base components: Start diff --git a/regr_smlp/master/Test126_smlp_toy_basic.txt b/regr_smlp/master/Test126_smlp_toy_basic.txt index e831e623..0cc657b9 100644 --- a/regr_smlp/master/Test126_smlp_toy_basic.txt +++ b/regr_smlp/master/Test126_smlp_toy_basic.txt @@ -116,7 +116,7 @@ smlp_logger - INFO - Preparing training data for 
modeling: end smlp_logger - INFO - Saving data bounds into file:./Test126_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'x1': {'min': -0.8218, 'max': 9.546}, 'x2': {'min': -1.0, 'max': 1.0}, 'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'x1': {'min': np.float64(-0.8218), 'max': np.float64(9.546)}, 'x2': {'min': np.float64(-1.0), 'max': np.float64(1.0)}, 'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - Creating model exploration base components: Start diff --git a/regr_smlp/master/Test127_smlp_toy_basic.txt b/regr_smlp/master/Test127_smlp_toy_basic.txt index 9cf9b46f..378f4288 100644 --- a/regr_smlp/master/Test127_smlp_toy_basic.txt +++ b/regr_smlp/master/Test127_smlp_toy_basic.txt @@ -120,7 +120,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test127_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'x1': {'min': -0.8218, 'max': 9.546}, 'x2': {'min': -1.0, 'max': 1.0}, 'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'x1': {'min': np.float64(-0.8218), 'max': np.float64(9.546)}, 'x2': {'min': np.float64(-1.0), 'max': np.float64(1.0)}, 'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - Creating model exploration base components: Start diff --git a/regr_smlp/master/Test128_smlp_toy_ctg_num_resp.txt b/regr_smlp/master/Test128_smlp_toy_ctg_num_resp.txt index 
df601b8f..2ef9d8ce 100644 --- a/regr_smlp/master/Test128_smlp_toy_ctg_num_resp.txt +++ b/regr_smlp/master/Test128_smlp_toy_ctg_num_resp.txt @@ -160,7 +160,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test128_smlp_toy_ctg_num_resp_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test129_smlp_toy_ctg_num_resp.txt b/regr_smlp/master/Test129_smlp_toy_ctg_num_resp.txt index 5f24ef2b..888d680b 100644 --- a/regr_smlp/master/Test129_smlp_toy_ctg_num_resp.txt +++ b/regr_smlp/master/Test129_smlp_toy_ctg_num_resp.txt @@ -155,7 +155,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test129_smlp_toy_ctg_num_resp_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test12_smlp_toy_basic.txt b/regr_smlp/master/Test12_smlp_toy_basic.txt index f0a364f8..e4e35d47 100644 --- a/regr_smlp/master/Test12_smlp_toy_basic.txt +++ 
b/regr_smlp/master/Test12_smlp_toy_basic.txt @@ -97,7 +97,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test12_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'x1': {'min': -0.8218, 'max': 9.546}, 'x2': {'min': -1.0, 'max': 1.0}, 'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'x1': {'min': np.float64(-0.8218), 'max': np.float64(9.546)}, 'x2': {'min': np.float64(-1.0), 'max': np.float64(1.0)}, 'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test13_smlp_toy_basic.txt b/regr_smlp/master/Test13_smlp_toy_basic.txt index 7eccffb1..a06e09d5 100644 --- a/regr_smlp/master/Test13_smlp_toy_basic.txt +++ b/regr_smlp/master/Test13_smlp_toy_basic.txt @@ -97,7 +97,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test13_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'x1': {'min': -0.8218, 'max': 9.546}, 'x2': {'min': -1.0, 'max': 1.0}, 'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'x1': {'min': np.float64(-0.8218), 'max': np.float64(9.546)}, 'x2': {'min': np.float64(-1.0), 'max': np.float64(1.0)}, 'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - TRAIN MODEL @@ -123,42 +123,41 @@ smlp_logger - INFO - output layer of size 1 smlp_logger - INFO - model summary: 
start -smlp_logger - INFO - Model: "model" -__________________________________________________________________________________________________ - Layer (type) Output Shape Param # Connected to -================================================================================================== - input_1 (InputLayer) [(None, 4)] 0 [] - - dense (Dense) (None, 8) 40 ['input_1[0][0]'] - - dense_1 (Dense) (None, 4) 36 ['dense[0][0]'] - - y1 (Dense) (None, 1) 5 ['dense_1[0][0]'] - - y2 (Dense) (None, 1) 5 ['dense_1[0][0]'] - -================================================================================================== -Total params: 86 (344.00 Byte) -Trainable params: 86 (344.00 Byte) -Non-trainable params: 0 (0.00 Byte) -__________________________________________________________________________________________________ - - -smlp_logger - INFO - Optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': 0.001, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} +smlp_logger - INFO - Model: "functional" +┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓ +┃ Layer (type) ┃ Output Shape ┃ Param # ┃ Connected to ┃ +┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩ +│ input_layer │ (None, 4) │ 0 │ - │ +│ (InputLayer) │ │ │ │ +├─────────────────────┼───────────────────┼────────────┼───────────────────┤ +│ dense (Dense) │ (None, 8) │ 40 │ input_layer[0][0] │ +├─────────────────────┼───────────────────┼────────────┼───────────────────┤ +│ dense_1 (Dense) │ (None, 4) │ 36 │ dense[0][0] │ +├─────────────────────┼───────────────────┼────────────┼───────────────────┤ +│ y1 (Dense) │ (None, 1) │ 5 │ dense_1[0][0] │ +├─────────────────────┼───────────────────┼────────────┼───────────────────┤ +│ y2 (Dense) │ (None, 1) │ 5 │ 
dense_1[0][0] │ +└─────────────────────┴───────────────────┴────────────┴───────────────────┘ + Total params: 86 (344.00 B) + Trainable params: 86 (344.00 B) + Non-trainable params: 0 (0.00 B) + + +smlp_logger - INFO - Optimizer: {'name': 'adam', 'learning_rate': 0.0010000000474974513, 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'loss_scale_factor': None, 'gradient_accumulation_steps': None, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} smlp_logger - INFO - Learning rate: 0.001 smlp_logger - INFO - Loss function: mse -smlp_logger - INFO - Metrics: ['mse'] +smlp_logger - INFO - Metrics: ['loss', 'compile_metrics'] -smlp_logger - INFO - Model configuration: {'name': 'model', 'trainable': True, 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_input_shape': (None, 4), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_1'}, 'registered_name': None, 'name': 'input_1', 'inbound_nodes': []}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': 'float32', 'units': 8, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 4)}, 'name': 'dense', 'inbound_nodes': [[['input_1', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': 'float32', 'units': 4, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 
'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 8)}, 'name': 'dense_1', 'inbound_nodes': [[['dense', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y1', 'trainable': True, 'dtype': 'float32', 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 4)}, 'name': 'y1', 'inbound_nodes': [[['dense_1', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': 'float32', 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 4)}, 'name': 'y2', 'inbound_nodes': [[['dense_1', 0, 0, {}]]]}], 'input_layers': [['input_1', 0, 0]], 'output_layers': [['y1', 0, 0], ['y2', 0, 0]]} +smlp_logger - INFO - Model configuration: {'name': 
'functional', 'trainable': True, 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_shape': (None, 4), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_layer', 'optional': False}, 'registered_name': None, 'name': 'input_layer', 'inbound_nodes': []}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 8, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 4)}, 'name': 'dense', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 4), 'dtype': 'float32', 'keras_history': ['input_layer', 0, 0]}},), 'kwargs': {}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 4, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 8)}, 'name': 'dense_1', 'inbound_nodes': [{'args': 
({'class_name': '__keras_tensor__', 'config': {'shape': (None, 8), 'dtype': 'float32', 'keras_history': ['dense', 0, 0]}},), 'kwargs': {}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y1', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 4)}, 'name': 'y1', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 4), 'dtype': 'float32', 'keras_history': ['dense_1', 0, 0]}},), 'kwargs': {}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 4)}, 'name': 'y2', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 4), 'dtype': 'float32', 'keras_history': ['dense_1', 0, 0]}},), 'kwargs': {}}]}], 'input_layers': ['input_layer', 0, 0], 
'output_layers': [['y1', 0, 0], ['y2', 0, 0]]} smlp_logger - INFO - Epochs: 20 smlp_logger - INFO - Batch size: 200 -smlp_logger - INFO - Callbacks: [""] +smlp_logger - INFO - Callbacks: [""] smlp_logger - INFO - model summary: end @@ -182,9 +181,9 @@ smlp_logger - INFO - Saving predictions summary into file: smlp_logger - INFO - Saving prediction precisions into file: ./Test13_smlp_toy_basic_training_prediction_precisions.csv -smlp_logger - INFO - Prediction on training data -- msqe: 41.749 +smlp_logger - INFO - Prediction on training data -- msqe: 38.795 -smlp_logger - INFO - Prediction on training data -- r2_score: -10.416 +smlp_logger - INFO - Prediction on training data -- r2_score: -9.102 smlp_logger - INFO - Reporting prediction results: end @@ -202,9 +201,9 @@ smlp_logger - INFO - Saving predictions summary into file: smlp_logger - INFO - Saving prediction precisions into file: ./Test13_smlp_toy_basic_test_prediction_precisions.csv -smlp_logger - INFO - Prediction on test data -- msqe: 11.659 +smlp_logger - INFO - Prediction on test data -- msqe: 11.661 -smlp_logger - INFO - Prediction on test data -- r2_score: -4.262 +smlp_logger - INFO - Prediction on test data -- r2_score: -4.243 smlp_logger - INFO - Reporting prediction results: end @@ -222,9 +221,9 @@ smlp_logger - INFO - Saving predictions summary into file: smlp_logger - INFO - Saving prediction precisions into file: ./Test13_smlp_toy_basic_labeled_prediction_precisions.csv -smlp_logger - INFO - Prediction on labeled data -- msqe: 35.731 +smlp_logger - INFO - Prediction on labeled data -- msqe: 33.368 -smlp_logger - INFO - Prediction on labeled data -- r2_score: -3.158 +smlp_logger - INFO - Prediction on labeled data -- r2_score: -2.803 smlp_logger - INFO - Reporting prediction results: end diff --git a/regr_smlp/master/Test14_smlp_toy_basic.txt b/regr_smlp/master/Test14_smlp_toy_basic.txt index 9aa2317c..5e17e5b4 100644 --- a/regr_smlp/master/Test14_smlp_toy_basic.txt +++ 
b/regr_smlp/master/Test14_smlp_toy_basic.txt @@ -97,7 +97,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test14_smlp_toy_basic_data_bounds.json -smlp_logger - INFO - {'x1': {'min': -0.8218, 'max': 9.546}, 'x2': {'min': -1.0, 'max': 1.0}, 'p1': {'min': 0.1, 'max': 10.0}, 'p2': {'min': 3.0, 'max': 7.0}, 'y1': {'min': 0.24, 'max': 10.7007}, 'y2': {'min': 1.12, 'max': 12.02}} +smlp_logger - INFO - {'x1': {'min': np.float64(-0.8218), 'max': np.float64(9.546)}, 'x2': {'min': np.float64(-1.0), 'max': np.float64(1.0)}, 'p1': {'min': np.float64(0.1), 'max': np.float64(10.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(7.0)}, 'y1': {'min': np.float64(0.24), 'max': np.float64(10.7007)}, 'y2': {'min': np.float64(1.12), 'max': np.float64(12.02)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt b/regr_smlp/master/Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index 57e23541..47e18a5a 100644 --- a/regr_smlp/master/Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ b/regr_smlp/master/Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -102,7 +102,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./test19_model_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git 
a/regr_smlp/master/Test1_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test1_smlp_toy_num_resp_mult.txt index b4646f33..b7893349 100644 --- a/regr_smlp/master/Test1_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test1_smlp_toy_num_resp_mult.txt @@ -102,7 +102,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test1_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled.txt b/regr_smlp/master/Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled.txt index 557b87ae..7371930e 100644 --- a/regr_smlp/master/Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled.txt +++ b/regr_smlp/master/Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled.txt @@ -88,7 +88,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./test22_model_data_bounds.json -smlp_logger - INFO - {'categ': {'min': 0, 'max': 9}, 'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3, 'max': 8}, 'PF ': {'min': -2.0, 'max': 8.0}, '|PF |': {'min': -3.0, 'max': 12.0}} +smlp_logger - INFO - {'categ': {'min': np.int64(0), 'max': np.int64(9)}, 'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.int64(3), 'max': np.int64(8)}, 'PF ': {'min': np.float64(-2.0), 
'max': np.float64(8.0)}, '|PF |': {'min': np.float64(-3.0), 'max': np.float64(12.0)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git a/regr_smlp/master/Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt b/regr_smlp/master/Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index c62b3c6f..21274c80 100644 --- a/regr_smlp/master/Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ b/regr_smlp/master/Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -102,7 +102,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./test24_model_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git a/regr_smlp/master/Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt b/regr_smlp/master/Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index bd812f19..462bc2ce 100644 --- a/regr_smlp/master/Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ b/regr_smlp/master/Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -120,7 +120,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./test26_model_data_bounds.json -smlp_logger - INFO - {'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger 
- INFO - {'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git a/regr_smlp/master/Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt b/regr_smlp/master/Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index d69213ce..5fb487e5 100644 --- a/regr_smlp/master/Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ b/regr_smlp/master/Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -102,7 +102,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test27_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start @@ -204,37 +204,35 @@ smlp_logger - INFO - output layer of size 1 smlp_logger - INFO - model summary: start smlp_logger - INFO - Model: "sequential" -_________________________________________________________________ - Layer (type) Output Shape Param # -================================================================= - dense (Dense) (None, 6) 24 - - dense_1 (Dense) (None, 3) 21 - - y2 (Dense) (None, 1) 4 - -================================================================= -Total params: 49 (196.00 Byte) -Trainable params: 49 (196.00 Byte) -Non-trainable params: 0 (0.00 Byte) -_________________________________________________________________ - - 
-smlp_logger - INFO - Optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': 0.001, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ +┃ Layer (type) ┃ Output Shape ┃ Param # ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ +│ dense (Dense) │ (None, 6) │ 24 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ dense_1 (Dense) │ (None, 3) │ 21 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ y2 (Dense) │ (None, 1) │ 4 │ +└─────────────────────────────────┴────────────────────────┴───────────────┘ + Total params: 49 (196.00 B) + Trainable params: 49 (196.00 B) + Non-trainable params: 0 (0.00 B) + + +smlp_logger - INFO - Optimizer: {'name': 'adam', 'learning_rate': 0.0010000000474974513, 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'loss_scale_factor': None, 'gradient_accumulation_steps': None, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} smlp_logger - INFO - Learning rate: 0.001 smlp_logger - INFO - Loss function: mse -smlp_logger - INFO - Metrics: ['mse'] +smlp_logger - INFO - Metrics: ['loss', 'compile_metrics'] -smlp_logger - INFO - Model configuration: {'name': 'sequential', 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_input_shape': (None, 3), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'dense_input'}, 'registered_name': None}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': 'float32', 'batch_input_shape': (None, 3), 'units': 6, 
'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': 'float32', 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': 'float32', 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}}]} +smlp_logger - INFO - Model configuration: {'name': 'sequential', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'layers': [{'module': 
'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_shape': (None, 3), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_layer', 'optional': False}, 'registered_name': None}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 6, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 
'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}}], 'build_input_shape': (None, 3)} smlp_logger - INFO - Epochs: 20 smlp_logger - INFO - Batch size: 200 -smlp_logger - INFO - Callbacks: [""] +smlp_logger - INFO - Callbacks: [""] smlp_logger - INFO - model summary: end diff --git a/regr_smlp/master/Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt b/regr_smlp/master/Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index 145dd21f..29d01f60 100644 --- a/regr_smlp/master/Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ b/regr_smlp/master/Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -102,7 +102,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test28_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start @@ -203,40 +203,38 @@ smlp_logger - INFO - output layer of size 1 smlp_logger - INFO - model summary: start -smlp_logger - INFO - Model: "model" -_________________________________________________________________ - Layer (type) Output Shape Param # -================================================================= - input_1 (InputLayer) 
[(None, 3)] 0 - - dense (Dense) (None, 6) 24 - - dense_1 (Dense) (None, 3) 21 - - y2 (Dense) (None, 1) 4 - -================================================================= -Total params: 49 (196.00 Byte) -Trainable params: 49 (196.00 Byte) -Non-trainable params: 0 (0.00 Byte) -_________________________________________________________________ - - -smlp_logger - INFO - Optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': 0.001, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} +smlp_logger - INFO - Model: "functional" +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ +┃ Layer (type) ┃ Output Shape ┃ Param # ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ +│ input_layer (InputLayer) │ (None, 3) │ 0 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ dense (Dense) │ (None, 6) │ 24 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ dense_1 (Dense) │ (None, 3) │ 21 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ y2 (Dense) │ (None, 1) │ 4 │ +└─────────────────────────────────┴────────────────────────┴───────────────┘ + Total params: 49 (196.00 B) + Trainable params: 49 (196.00 B) + Non-trainable params: 0 (0.00 B) + + +smlp_logger - INFO - Optimizer: {'name': 'adam', 'learning_rate': 0.0010000000474974513, 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'loss_scale_factor': None, 'gradient_accumulation_steps': None, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} smlp_logger - INFO - Learning rate: 0.001 smlp_logger - INFO - Loss function: mse -smlp_logger - INFO - 
Metrics: ['mse'] +smlp_logger - INFO - Metrics: ['loss', 'compile_metrics'] -smlp_logger - INFO - Model configuration: {'name': 'model', 'trainable': True, 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_input_shape': (None, 3), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_1'}, 'registered_name': None, 'name': 'input_1', 'inbound_nodes': []}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': 'float32', 'units': 6, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'dense', 'inbound_nodes': [[['input_1', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': 'float32', 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}, 'name': 'dense_1', 'inbound_nodes': [[['dense', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': 'float32', 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 
'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'y2', 'inbound_nodes': [[['dense_1', 0, 0, {}]]]}], 'input_layers': [['input_1', 0, 0]], 'output_layers': [['y2', 0, 0]]} +smlp_logger - INFO - Model configuration: {'name': 'functional', 'trainable': True, 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_shape': (None, 3), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_layer', 'optional': False}, 'registered_name': None, 'name': 'input_layer', 'inbound_nodes': []}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 6, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'dense', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 3), 'dtype': 'float32', 'keras_history': ['input_layer', 0, 0]}},), 'kwargs': {}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': 
{'name': 'float32'}, 'registered_name': None}, 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}, 'name': 'dense_1', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 6), 'dtype': 'float32', 'keras_history': ['dense', 0, 0]}},), 'kwargs': {}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'y2', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 3), 'dtype': 'float32', 'keras_history': ['dense_1', 0, 0]}},), 'kwargs': {}}]}], 'input_layers': ['input_layer', 0, 0], 'output_layers': [['y2', 0, 0]]} smlp_logger - INFO - Epochs: 20 smlp_logger - INFO - Batch size: 200 -smlp_logger - INFO - Callbacks: [""] +smlp_logger - INFO - Callbacks: [""] smlp_logger - INFO - model summary: end diff --git 
a/regr_smlp/master/Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt b/regr_smlp/master/Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index 8c400ce0..9d96a6a7 100644 --- a/regr_smlp/master/Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ b/regr_smlp/master/Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -102,7 +102,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test2_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git a/regr_smlp/master/Test3_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_unlabeled.txt b/regr_smlp/master/Test3_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_unlabeled.txt index 610b53d1..07122e85 100644 --- a/regr_smlp/master/Test3_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_unlabeled.txt +++ b/regr_smlp/master/Test3_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_unlabeled.txt @@ -102,7 +102,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test3_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 
'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git a/regr_smlp/master/Test46_smlp_toy_pf_mult_smlp_toy_pf_mult.txt b/regr_smlp/master/Test46_smlp_toy_pf_mult_smlp_toy_pf_mult.txt index c44c460d..f5d50ef2 100644 --- a/regr_smlp/master/Test46_smlp_toy_pf_mult_smlp_toy_pf_mult.txt +++ b/regr_smlp/master/Test46_smlp_toy_pf_mult_smlp_toy_pf_mult.txt @@ -88,7 +88,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./test47_model_data_bounds.json -smlp_logger - INFO - {'categ': {'min': 0, 'max': 9}, 'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3, 'max': 8}, 'PF': {'min': 0, 'max': 1}, 'PF1': {'min': 0, 'max': 1}} +smlp_logger - INFO - {'categ': {'min': np.int64(0), 'max': np.int64(9)}, 'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.int64(3), 'max': np.int64(8)}, 'PF': {'min': np.int64(0), 'max': np.int64(1)}, 'PF1': {'min': np.int64(0), 'max': np.int64(1)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git a/regr_smlp/master/Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt b/regr_smlp/master/Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index 269b951d..371d4b73 100644 --- a/regr_smlp/master/Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ b/regr_smlp/master/Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -102,7 +102,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test4_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 
'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start @@ -203,40 +203,38 @@ smlp_logger - INFO - output layer of size 1 smlp_logger - INFO - model summary: start -smlp_logger - INFO - Model: "model" -_________________________________________________________________ - Layer (type) Output Shape Param # -================================================================= - input_1 (InputLayer) [(None, 3)] 0 - - dense (Dense) (None, 6) 24 - - dense_1 (Dense) (None, 3) 21 - - y2 (Dense) (None, 1) 4 - -================================================================= -Total params: 49 (196.00 Byte) -Trainable params: 49 (196.00 Byte) -Non-trainable params: 0 (0.00 Byte) -_________________________________________________________________ - - -smlp_logger - INFO - Optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': 0.001, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} +smlp_logger - INFO - Model: "functional" +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ +┃ Layer (type) ┃ Output Shape ┃ Param # ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ +│ input_layer (InputLayer) │ (None, 3) │ 0 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ dense (Dense) │ (None, 6) │ 24 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ dense_1 (Dense) │ (None, 3) │ 21 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ y2 (Dense) │ (None, 1) │ 4 │ +└─────────────────────────────────┴────────────────────────┴───────────────┘ + Total params: 49 (196.00 B) + Trainable params: 49 
(196.00 B) + Non-trainable params: 0 (0.00 B) + + +smlp_logger - INFO - Optimizer: {'name': 'adam', 'learning_rate': 0.0010000000474974513, 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'loss_scale_factor': None, 'gradient_accumulation_steps': None, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} smlp_logger - INFO - Learning rate: 0.001 smlp_logger - INFO - Loss function: mse -smlp_logger - INFO - Metrics: ['mse'] +smlp_logger - INFO - Metrics: ['loss', 'compile_metrics'] -smlp_logger - INFO - Model configuration: {'name': 'model', 'trainable': True, 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_input_shape': (None, 3), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_1'}, 'registered_name': None, 'name': 'input_1', 'inbound_nodes': []}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': 'float32', 'units': 6, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'dense', 'inbound_nodes': [[['input_1', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': 'float32', 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 
'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}, 'name': 'dense_1', 'inbound_nodes': [[['dense', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': 'float32', 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'y2', 'inbound_nodes': [[['dense_1', 0, 0, {}]]]}], 'input_layers': [['input_1', 0, 0]], 'output_layers': [['y2', 0, 0]]} +smlp_logger - INFO - Model configuration: {'name': 'functional', 'trainable': True, 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_shape': (None, 3), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_layer', 'optional': False}, 'registered_name': None, 'name': 'input_layer', 'inbound_nodes': []}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 6, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 
'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'dense', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 3), 'dtype': 'float32', 'keras_history': ['input_layer', 0, 0]}},), 'kwargs': {}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}, 'name': 'dense_1', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 6), 'dtype': 'float32', 'keras_history': ['dense', 0, 0]}},), 'kwargs': {}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 
3)}, 'name': 'y2', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 3), 'dtype': 'float32', 'keras_history': ['dense_1', 0, 0]}},), 'kwargs': {}}]}], 'input_layers': ['input_layer', 0, 0], 'output_layers': [['y2', 0, 0]]} smlp_logger - INFO - Epochs: 20 smlp_logger - INFO - Batch size: 200 -smlp_logger - INFO - Callbacks: [""] +smlp_logger - INFO - Callbacks: [""] smlp_logger - INFO - model summary: end diff --git a/regr_smlp/master/Test58_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test58_smlp_toy_num_resp_mult.txt index 77c79ebb..cd899c2b 100644 --- a/regr_smlp/master/Test58_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test58_smlp_toy_num_resp_mult.txt @@ -105,7 +105,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test58_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3, 'max': 8}, 'y1': {'min': 5, 'max': 9}, 'y2': {'min': 5, 'max': 9}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.int64(3), 'max': np.int64(8)}, 'y1': {'min': np.int64(5), 'max': np.int64(9)}, 'y2': {'min': np.int64(5), 'max': np.int64(9)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test59_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test59_smlp_toy_num_resp_mult.txt index 7c267c63..0f5d9adc 100644 --- a/regr_smlp/master/Test59_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test59_smlp_toy_num_resp_mult.txt @@ -117,7 +117,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test59_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': 
{'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL @@ -141,40 +141,38 @@ smlp_logger - INFO - output layer of size 1 smlp_logger - INFO - model summary: start -smlp_logger - INFO - Model: "model" -_________________________________________________________________ - Layer (type) Output Shape Param # -================================================================= - input_1 (InputLayer) [(None, 3)] 0 - - dense (Dense) (None, 6) 24 - - dense_1 (Dense) (None, 3) 21 - - y2 (Dense) (None, 1) 4 - -================================================================= -Total params: 49 (196.00 Byte) -Trainable params: 49 (196.00 Byte) -Non-trainable params: 0 (0.00 Byte) -_________________________________________________________________ - - -smlp_logger - INFO - Optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': 0.001, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} +smlp_logger - INFO - Model: "functional" +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ +┃ Layer (type) ┃ Output Shape ┃ Param # ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ +│ input_layer (InputLayer) │ (None, 3) │ 0 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ dense (Dense) │ (None, 6) │ 24 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ dense_1 (Dense) │ (None, 3) │ 21 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ y2 (Dense) │ (None, 1) │ 4 │ +└─────────────────────────────────┴────────────────────────┴───────────────┘ + 
Total params: 49 (196.00 B) + Trainable params: 49 (196.00 B) + Non-trainable params: 0 (0.00 B) + + +smlp_logger - INFO - Optimizer: {'name': 'adam', 'learning_rate': 0.0010000000474974513, 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'loss_scale_factor': None, 'gradient_accumulation_steps': None, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} smlp_logger - INFO - Learning rate: 0.001 smlp_logger - INFO - Loss function: mse -smlp_logger - INFO - Metrics: ['mse'] +smlp_logger - INFO - Metrics: ['loss', 'compile_metrics'] -smlp_logger - INFO - Model configuration: {'name': 'model', 'trainable': True, 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_input_shape': (None, 3), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_1'}, 'registered_name': None, 'name': 'input_1', 'inbound_nodes': []}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': 'float32', 'units': 6, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'dense', 'inbound_nodes': [[['input_1', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': 'float32', 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 
'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}, 'name': 'dense_1', 'inbound_nodes': [[['dense', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': 'float32', 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'y2', 'inbound_nodes': [[['dense_1', 0, 0, {}]]]}], 'input_layers': [['input_1', 0, 0]], 'output_layers': [['y2', 0, 0]]} +smlp_logger - INFO - Model configuration: {'name': 'functional', 'trainable': True, 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_shape': (None, 3), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_layer', 'optional': False}, 'registered_name': None, 'name': 'input_layer', 'inbound_nodes': []}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 6, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 
'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'dense', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 3), 'dtype': 'float32', 'keras_history': ['input_layer', 0, 0]}},), 'kwargs': {}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}, 'name': 'dense_1', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 6), 'dtype': 'float32', 'keras_history': ['dense', 0, 0]}},), 'kwargs': {}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 
'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'y2', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 3), 'dtype': 'float32', 'keras_history': ['dense_1', 0, 0]}},), 'kwargs': {}}]}], 'input_layers': ['input_layer', 0, 0], 'output_layers': [['y2', 0, 0]]} smlp_logger - INFO - Epochs: 20 smlp_logger - INFO - Batch size: 200 -smlp_logger - INFO - Callbacks: [""] +smlp_logger - INFO - Callbacks: [""] smlp_logger - INFO - model summary: end diff --git a/regr_smlp/master/Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt b/regr_smlp/master/Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index 0855a56e..dcbb99ed 100644 --- a/regr_smlp/master/Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ b/regr_smlp/master/Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -102,7 +102,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test5_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git a/regr_smlp/master/Test60_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test60_smlp_toy_num_resp_mult.txt index c917517d..3e51fea9 100644 --- a/regr_smlp/master/Test60_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test60_smlp_toy_num_resp_mult.txt @@ -117,7 +117,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test60_smlp_toy_num_resp_mult_data_bounds.json 
-smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL @@ -142,37 +142,35 @@ smlp_logger - INFO - output layer of size 1 smlp_logger - INFO - model summary: start smlp_logger - INFO - Model: "sequential" -_________________________________________________________________ - Layer (type) Output Shape Param # -================================================================= - dense (Dense) (None, 6) 24 - - dense_1 (Dense) (None, 3) 21 - - y2 (Dense) (None, 1) 4 - -================================================================= -Total params: 49 (196.00 Byte) -Trainable params: 49 (196.00 Byte) -Non-trainable params: 0 (0.00 Byte) -_________________________________________________________________ - - -smlp_logger - INFO - Optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': 0.001, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ +┃ Layer (type) ┃ Output Shape ┃ Param # ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ +│ dense (Dense) │ (None, 6) │ 24 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ dense_1 (Dense) │ (None, 3) │ 21 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ y2 (Dense) │ (None, 1) │ 4 │ +└─────────────────────────────────┴────────────────────────┴───────────────┘ + Total params: 49 
(196.00 B) + Trainable params: 49 (196.00 B) + Non-trainable params: 0 (0.00 B) + + +smlp_logger - INFO - Optimizer: {'name': 'adam', 'learning_rate': 0.0010000000474974513, 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'loss_scale_factor': None, 'gradient_accumulation_steps': None, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} smlp_logger - INFO - Learning rate: 0.001 smlp_logger - INFO - Loss function: mse -smlp_logger - INFO - Metrics: ['mse'] +smlp_logger - INFO - Metrics: ['loss', 'compile_metrics'] -smlp_logger - INFO - Model configuration: {'name': 'sequential', 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_input_shape': (None, 3), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'dense_input'}, 'registered_name': None}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': 'float32', 'batch_input_shape': (None, 3), 'units': 6, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': 'float32', 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 
'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': 'float32', 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}}]} +smlp_logger - INFO - Model configuration: {'name': 'sequential', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_shape': (None, 3), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_layer', 'optional': False}, 'registered_name': None}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 6, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': 
(None, 3)}}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}}], 'build_input_shape': (None, 3)} smlp_logger - INFO - Epochs: 20 smlp_logger - INFO - Batch size: 200 -smlp_logger - INFO - Callbacks: [""] +smlp_logger - INFO - Callbacks: [""] smlp_logger - INFO - model summary: end diff --git a/regr_smlp/master/Test63_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test63_smlp_toy_num_resp_mult.txt index 19722ef7..3ca9dd3d 100644 --- a/regr_smlp/master/Test63_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test63_smlp_toy_num_resp_mult.txt @@ -119,7 +119,7 @@ smlp_logger - INFO - Preparing 
training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./test63_model_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test69_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test69_smlp_toy_num_resp_mult.txt index bcae9ce8..fb9bf191 100644 --- a/regr_smlp/master/Test69_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test69_smlp_toy_num_resp_mult.txt @@ -117,7 +117,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./test69_model_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL @@ -141,40 +141,38 @@ smlp_logger - INFO - output layer of size 1 smlp_logger - INFO - model summary: start -smlp_logger - INFO - Model: "model" -_________________________________________________________________ - Layer (type) Output Shape Param # -================================================================= - input_1 (InputLayer) [(None, 3)] 0 - - dense (Dense) (None, 6) 24 - - dense_1 (Dense) (None, 3) 21 - - y2 (Dense) (None, 1) 4 - -================================================================= -Total params: 49 (196.00 Byte) -Trainable params: 49 (196.00 Byte) -Non-trainable 
params: 0 (0.00 Byte) -_________________________________________________________________ - - -smlp_logger - INFO - Optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': 0.001, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} +smlp_logger - INFO - Model: "functional" +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ +┃ Layer (type) ┃ Output Shape ┃ Param # ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ +│ input_layer (InputLayer) │ (None, 3) │ 0 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ dense (Dense) │ (None, 6) │ 24 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ dense_1 (Dense) │ (None, 3) │ 21 │ +├─────────────────────────────────┼────────────────────────┼───────────────┤ +│ y2 (Dense) │ (None, 1) │ 4 │ +└─────────────────────────────────┴────────────────────────┴───────────────┘ + Total params: 49 (196.00 B) + Trainable params: 49 (196.00 B) + Non-trainable params: 0 (0.00 B) + + +smlp_logger - INFO - Optimizer: {'name': 'adam', 'learning_rate': 0.0010000000474974513, 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'loss_scale_factor': None, 'gradient_accumulation_steps': None, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} smlp_logger - INFO - Learning rate: 0.001 smlp_logger - INFO - Loss function: mse -smlp_logger - INFO - Metrics: ['mse'] +smlp_logger - INFO - Metrics: ['loss', 'compile_metrics'] -smlp_logger - INFO - Model configuration: {'name': 'model', 'trainable': True, 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_input_shape': (None, 3), 
'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_1'}, 'registered_name': None, 'name': 'input_1', 'inbound_nodes': []}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': 'float32', 'units': 6, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'dense', 'inbound_nodes': [[['input_1', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': 'float32', 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}, 'name': 'dense_1', 'inbound_nodes': [[['dense', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': 'float32', 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 
'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'y2', 'inbound_nodes': [[['dense_1', 0, 0, {}]]]}], 'input_layers': [['input_1', 0, 0]], 'output_layers': [['y2', 0, 0]]} +smlp_logger - INFO - Model configuration: {'name': 'functional', 'trainable': True, 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_shape': (None, 3), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_layer', 'optional': False}, 'registered_name': None, 'name': 'input_layer', 'inbound_nodes': []}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 6, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'dense', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 3), 'dtype': 'float32', 'keras_history': ['input_layer', 0, 0]}},), 'kwargs': {}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 
'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}, 'name': 'dense_1', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 6), 'dtype': 'float32', 'keras_history': ['dense', 0, 0]}},), 'kwargs': {}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'y2', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 3), 'dtype': 'float32', 'keras_history': ['dense_1', 0, 0]}},), 'kwargs': {}}]}], 'input_layers': ['input_layer', 0, 0], 'output_layers': [['y2', 0, 0]]} smlp_logger - INFO - Epochs: 20 smlp_logger - INFO - Batch size: 200 -smlp_logger - INFO - Callbacks: [""] +smlp_logger - INFO - Callbacks: [""] smlp_logger - INFO - model summary: end diff --git a/regr_smlp/master/Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt b/regr_smlp/master/Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index babbf0bc..476115bf 100644 --- a/regr_smlp/master/Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ 
b/regr_smlp/master/Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -102,7 +102,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test6_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git a/regr_smlp/master/Test79_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test79_smlp_toy_num_resp_mult.txt index e4d068c9..83ca0e9b 100644 --- a/regr_smlp/master/Test79_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test79_smlp_toy_num_resp_mult.txt @@ -139,7 +139,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test79_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt b/regr_smlp/master/Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index b792f8c7..5345cc7a 100644 --- 
a/regr_smlp/master/Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ b/regr_smlp/master/Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -102,7 +102,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test7_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git a/regr_smlp/master/Test80_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test80_smlp_toy_num_resp_mult.txt index 414c94d6..e60feab8 100644 --- a/regr_smlp/master/Test80_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test80_smlp_toy_num_resp_mult.txt @@ -117,7 +117,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test80_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test81_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test81_smlp_toy_num_resp_mult.txt index 
79f3cd98..239eea23 100644 --- a/regr_smlp/master/Test81_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test81_smlp_toy_num_resp_mult.txt @@ -117,7 +117,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test81_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test82_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test82_smlp_toy_num_resp_mult.txt index 31c23f6e..fe9678d5 100644 --- a/regr_smlp/master/Test82_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test82_smlp_toy_num_resp_mult.txt @@ -121,7 +121,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test82_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test83_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test83_smlp_toy_num_resp_mult.txt index 3bb53844..756cfdb4 100644 --- 
a/regr_smlp/master/Test83_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test83_smlp_toy_num_resp_mult.txt @@ -121,7 +121,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test83_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test85_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test85_smlp_toy_num_resp_mult.txt index 0b810371..61df7032 100644 --- a/regr_smlp/master/Test85_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test85_smlp_toy_num_resp_mult.txt @@ -119,7 +119,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test85_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test86_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test86_smlp_toy_num_resp_mult.txt index ee8c376b..c5bbba0b 100644 --- a/regr_smlp/master/Test86_smlp_toy_num_resp_mult.txt +++ 
b/regr_smlp/master/Test86_smlp_toy_num_resp_mult.txt @@ -125,7 +125,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test86_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test87_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test87_smlp_toy_num_resp_mult.txt index 9050d2b4..ccde5494 100644 --- a/regr_smlp/master/Test87_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test87_smlp_toy_num_resp_mult.txt @@ -139,7 +139,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test87_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test88_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test88_smlp_toy_num_resp_mult.txt index 293efd46..d2df1e24 100644 --- a/regr_smlp/master/Test88_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test88_smlp_toy_num_resp_mult.txt @@ -121,7 +121,7 
@@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test88_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test89_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test89_smlp_toy_num_resp_mult.txt index b3a2de28..181bf524 100644 --- a/regr_smlp/master/Test89_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test89_smlp_toy_num_resp_mult.txt @@ -139,7 +139,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test89_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt b/regr_smlp/master/Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index 4db78d56..57f57115 100644 --- a/regr_smlp/master/Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ b/regr_smlp/master/Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -102,7 +102,7 @@ smlp_logger - 
INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test8_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - Preparing new data for modeling: start @@ -205,42 +205,41 @@ smlp_logger - INFO - output layer of size 1 smlp_logger - INFO - model summary: start -smlp_logger - INFO - Model: "model" -__________________________________________________________________________________________________ - Layer (type) Output Shape Param # Connected to -================================================================================================== - input_1 (InputLayer) [(None, 3)] 0 [] - - dense (Dense) (None, 6) 24 ['input_1[0][0]'] - - dense_1 (Dense) (None, 3) 21 ['dense[0][0]'] - - y1 (Dense) (None, 1) 4 ['dense_1[0][0]'] - - y2 (Dense) (None, 1) 4 ['dense_1[0][0]'] - -================================================================================================== -Total params: 53 (212.00 Byte) -Trainable params: 53 (212.00 Byte) -Non-trainable params: 0 (0.00 Byte) -__________________________________________________________________________________________________ - - -smlp_logger - INFO - Optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': 0.001, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} +smlp_logger - INFO - 
Model: "functional" +┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓ +┃ Layer (type) ┃ Output Shape ┃ Param # ┃ Connected to ┃ +┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩ +│ input_layer │ (None, 3) │ 0 │ - │ +│ (InputLayer) │ │ │ │ +├─────────────────────┼───────────────────┼────────────┼───────────────────┤ +│ dense (Dense) │ (None, 6) │ 24 │ input_layer[0][0] │ +├─────────────────────┼───────────────────┼────────────┼───────────────────┤ +│ dense_1 (Dense) │ (None, 3) │ 21 │ dense[0][0] │ +├─────────────────────┼───────────────────┼────────────┼───────────────────┤ +│ y1 (Dense) │ (None, 1) │ 4 │ dense_1[0][0] │ +├─────────────────────┼───────────────────┼────────────┼───────────────────┤ +│ y2 (Dense) │ (None, 1) │ 4 │ dense_1[0][0] │ +└─────────────────────┴───────────────────┴────────────┴───────────────────┘ + Total params: 53 (212.00 B) + Trainable params: 53 (212.00 B) + Non-trainable params: 0 (0.00 B) + + +smlp_logger - INFO - Optimizer: {'name': 'adam', 'learning_rate': 0.0010000000474974513, 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'loss_scale_factor': None, 'gradient_accumulation_steps': None, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} smlp_logger - INFO - Learning rate: 0.001 smlp_logger - INFO - Loss function: mse -smlp_logger - INFO - Metrics: ['mse'] +smlp_logger - INFO - Metrics: ['loss', 'compile_metrics'] -smlp_logger - INFO - Model configuration: {'name': 'model', 'trainable': True, 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_input_shape': (None, 3), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_1'}, 'registered_name': None, 'name': 'input_1', 'inbound_nodes': []}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': 'float32', 
'units': 6, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'dense', 'inbound_nodes': [[['input_1', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': 'float32', 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}, 'name': 'dense_1', 'inbound_nodes': [[['dense', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y1', 'trainable': True, 'dtype': 'float32', 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'y1', 'inbound_nodes': [[['dense_1', 0, 0, {}]]]}, {'module': 'keras.layers', 'class_name': 
'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': 'float32', 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'y2', 'inbound_nodes': [[['dense_1', 0, 0, {}]]]}], 'input_layers': [['input_1', 0, 0]], 'output_layers': [['y1', 0, 0], ['y2', 0, 0]]} +smlp_logger - INFO - Model configuration: {'name': 'functional', 'trainable': True, 'layers': [{'module': 'keras.layers', 'class_name': 'InputLayer', 'config': {'batch_shape': (None, 3), 'dtype': 'float32', 'sparse': False, 'ragged': False, 'name': 'input_layer', 'optional': False}, 'registered_name': None, 'name': 'input_layer', 'inbound_nodes': []}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 6, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'dense', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 3), 'dtype': 'float32', 'keras_history': ['input_layer', 0, 0]}},), 'kwargs': 
{}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 3, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 6)}, 'name': 'dense_1', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 6), 'dtype': 'float32', 'keras_history': ['dense', 0, 0]}},), 'kwargs': {}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y1', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 'float32'}, 'registered_name': None}, 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'y1', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 3), 'dtype': 'float32', 'keras_history': ['dense_1', 0, 0]}},), 'kwargs': {}}]}, {'module': 'keras.layers', 'class_name': 'Dense', 'config': {'name': 'y2', 'trainable': True, 'dtype': {'module': 'keras', 'class_name': 'DTypePolicy', 'config': {'name': 
'float32'}, 'registered_name': None}, 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel_initializer': {'module': 'keras.initializers', 'class_name': 'GlorotUniform', 'config': {'seed': None}, 'registered_name': None}, 'bias_initializer': {'module': 'keras.initializers', 'class_name': 'Zeros', 'config': {}, 'registered_name': None}, 'kernel_regularizer': None, 'bias_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None, 'quantization_config': None}, 'registered_name': None, 'build_config': {'input_shape': (None, 3)}, 'name': 'y2', 'inbound_nodes': [{'args': ({'class_name': '__keras_tensor__', 'config': {'shape': (None, 3), 'dtype': 'float32', 'keras_history': ['dense_1', 0, 0]}},), 'kwargs': {}}]}], 'input_layers': ['input_layer', 0, 0], 'output_layers': [['y1', 0, 0], ['y2', 0, 0]]} smlp_logger - INFO - Epochs: 20 smlp_logger - INFO - Batch size: 200 -smlp_logger - INFO - Callbacks: [""] +smlp_logger - INFO - Callbacks: [""] smlp_logger - INFO - model summary: end @@ -264,9 +263,9 @@ smlp_logger - INFO - Saving predictions summary into file: smlp_logger - INFO - Saving prediction precisions into file: ./Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv -smlp_logger - INFO - Prediction on training data -- msqe: 7.938 +smlp_logger - INFO - Prediction on training data -- msqe: 7.935 -smlp_logger - INFO - Prediction on training data -- r2_score: -1.022 +smlp_logger - INFO - Prediction on training data -- r2_score: -1.021 smlp_logger - INFO - Reporting prediction results: end @@ -284,7 +283,7 @@ smlp_logger - INFO - Saving predictions summary into file: smlp_logger - INFO - Saving prediction precisions into file: ./Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv -smlp_logger - INFO - Prediction on test data -- msqe: 6.834 +smlp_logger - INFO - Prediction on test data -- msqe: 6.833 smlp_logger - INFO - Prediction on test data -- r2_score: -0.922 
@@ -304,9 +303,9 @@ smlp_logger - INFO - Saving predictions summary into file: smlp_logger - INFO - Saving prediction precisions into file: ./Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv -smlp_logger - INFO - Prediction on labeled data -- msqe: 7.637 +smlp_logger - INFO - Prediction on labeled data -- msqe: 7.634 -smlp_logger - INFO - Prediction on labeled data -- r2_score: -0.925 +smlp_logger - INFO - Prediction on labeled data -- r2_score: -0.924 smlp_logger - INFO - Reporting prediction results: end @@ -324,9 +323,9 @@ smlp_logger - INFO - Saving predictions summary into file: smlp_logger - INFO - Saving prediction precisions into file: ./Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv -smlp_logger - INFO - Prediction on new data -- msqe: 7.977 +smlp_logger - INFO - Prediction on new data -- msqe: 7.974 -smlp_logger - INFO - Prediction on new data -- r2_score: -1.019 +smlp_logger - INFO - Prediction on new data -- r2_score: -1.018 smlp_logger - INFO - Reporting prediction results: end diff --git a/regr_smlp/master/Test90_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test90_smlp_toy_num_resp_mult.txt index e82f1d10..b1123468 100644 --- a/regr_smlp/master/Test90_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test90_smlp_toy_num_resp_mult.txt @@ -143,7 +143,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test90_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': 
np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test91_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test91_smlp_toy_num_resp_mult.txt index cebf153b..b8fc3956 100644 --- a/regr_smlp/master/Test91_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test91_smlp_toy_num_resp_mult.txt @@ -139,7 +139,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test91_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test92_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test92_smlp_toy_num_resp_mult.txt index 2d92fd5f..649d0af7 100644 --- a/regr_smlp/master/Test92_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test92_smlp_toy_num_resp_mult.txt @@ -139,7 +139,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test92_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN 
MODEL diff --git a/regr_smlp/master/Test93_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test93_smlp_toy_num_resp_mult.txt index 76fb99b0..6b28508d 100644 --- a/regr_smlp/master/Test93_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test93_smlp_toy_num_resp_mult.txt @@ -143,7 +143,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test93_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test94_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test94_smlp_toy_num_resp_mult.txt index 8b684b5e..02bd3343 100644 --- a/regr_smlp/master/Test94_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test94_smlp_toy_num_resp_mult.txt @@ -143,7 +143,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test94_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test95_smlp_toy_num_resp_mult.txt 
b/regr_smlp/master/Test95_smlp_toy_num_resp_mult.txt index bf7f8b93..1e355edb 100644 --- a/regr_smlp/master/Test95_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test95_smlp_toy_num_resp_mult.txt @@ -143,7 +143,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test95_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test96_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test96_smlp_toy_num_resp_mult.txt index 46b24ab0..d43bb619 100644 --- a/regr_smlp/master/Test96_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test96_smlp_toy_num_resp_mult.txt @@ -143,7 +143,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test96_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test97_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test97_smlp_toy_num_resp_mult.txt index 
5995a089..6dbae4a8 100644 --- a/regr_smlp/master/Test97_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test97_smlp_toy_num_resp_mult.txt @@ -139,7 +139,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test97_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test98_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test98_smlp_toy_num_resp_mult.txt index 9d4f8dea..7dea7286 100644 --- a/regr_smlp/master/Test98_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test98_smlp_toy_num_resp_mult.txt @@ -143,7 +143,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test98_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test99_smlp_toy_num_resp_mult.txt b/regr_smlp/master/Test99_smlp_toy_num_resp_mult.txt index b24d9657..8835f654 100644 --- a/regr_smlp/master/Test99_smlp_toy_num_resp_mult.txt +++ b/regr_smlp/master/Test99_smlp_toy_num_resp_mult.txt @@ -121,7 +121,7 @@ 
smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./Test99_smlp_toy_num_resp_mult_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3.0, 'max': 8.0}, 'y1': {'min': 5.0, 'max': 9.0}, 'y2': {'min': 5.0, 'max': 9.0}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.float64(3.0), 'max': np.float64(8.0)}, 'y1': {'min': np.float64(5.0), 'max': np.float64(9.0)}, 'y2': {'min': np.float64(5.0), 'max': np.float64(9.0)}} smlp_logger - INFO - TRAIN MODEL diff --git a/regr_smlp/master/Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt b/regr_smlp/master/Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt index 5858f4ce..4d868cce 100644 --- a/regr_smlp/master/Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt +++ b/regr_smlp/master/Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt @@ -88,7 +88,7 @@ smlp_logger - INFO - Preparing training data for modeling: end smlp_logger - INFO - Saving data bounds into file:./test20_model_data_bounds.json -smlp_logger - INFO - {'x': {'min': 9.0, 'max': 12.0}, 'p1': {'min': 2.0, 'max': 4.0}, 'p2': {'min': 3, 'max': 8}, 'y1': {'min': 5, 'max': 9}, 'y2': {'min': 5, 'max': 9}} +smlp_logger - INFO - {'x': {'min': np.float64(9.0), 'max': np.float64(12.0)}, 'p1': {'min': np.float64(2.0), 'max': np.float64(4.0)}, 'p2': {'min': np.int64(3), 'max': np.int64(8)}, 'y1': {'min': np.int64(5), 'max': np.int64(9)}, 'y2': {'min': np.int64(5), 'max': np.int64(9)}} smlp_logger - INFO - Preparing new data for modeling: start diff --git a/repair_wheel.py b/repair_wheel.py new file mode 100644 index 00000000..8d19ef0c --- /dev/null +++ b/repair_wheel.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3.13 +""" +Run auditwheel repair on the smlp wheel in dist/ to produce a 
+self-contained manylinux wheel with all .so dependencies bundled. + +Usage: + python3.13 repair_wheel.py [dist_dir] + +dist_dir defaults to 'dist/'. +""" +import sys +import subprocess +from pathlib import Path + + +def _find_auditwheel_location() -> str | None: + result = subprocess.run( + [sys.executable, "-m", "pip", "show", "auditwheel"], + capture_output=True, text=True + ) + if result.returncode == 0: + for line in result.stdout.splitlines(): + if line.startswith("Location:"): + return line.split(":", 1)[1].strip() + + user_site = ( + Path.home() / ".local" / "lib" + / f"python{sys.version_info.major}.{sys.version_info.minor}" + / "site-packages" + ) + if (user_site / "auditwheel").exists(): + return str(user_site) + + return None + + +def main(): + dist_dir = Path(sys.argv[1] if len(sys.argv) > 1 else "dist") + + location = _find_auditwheel_location() + if not location: + print("ERROR: auditwheel not found.") + print("Install with: python3.13 -m pip install --user auditwheel patchelf") + sys.exit(1) + + wheels = sorted(dist_dir.glob("smlp-*linux_x86_64.whl"), key=lambda p: p.stat().st_mtime) + if not wheels: + print(f"ERROR: No linux_x86_64 wheel found in {dist_dir}/") + sys.exit(1) + + wheel = wheels[-1] + print(f"Repairing {wheel} ...") + subprocess.check_call([ + sys.executable, "-c", + f"import sys; sys.path.insert(0, {location!r}); " + f"from auditwheel.main import main; sys.exit(main())", + "repair", str(wheel), "-w", str(dist_dir) + ]) + print(f"Done. manylinux wheel saved to {dist_dir}/") + + +if __name__ == "__main__": + main() diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..a85ab71e --- /dev/null +++ b/setup.py @@ -0,0 +1,851 @@ +""" +setup.py for the smlp package. 
+ +System prerequisites (require sudo, install once) +-------------------------------------------------- + sudo apt install gcc g++ git make m4 pkg-config + +User prerequisites (no sudo, install once) +------------------------------------------ + python3.13 -m pip install --user meson ninja z3-solver + +Build flow +---------- +1. Boost.Python 1.83 is compiled from source for Python 3.13 and cached in + ~/.local/boost_py313 (or the path in $BOOST_CACHE_DIR). + The build is skipped on subsequent runs if the cache directory already + contains the marker file .built_for_python313. + Set $BOOST_ROOT to point at an existing Boost prefix to skip this step + entirely. + +2. The 'kay' C++ dependency is cloned from GitHub into the pip build-temp + directory (or reused from $KAY_DIR). + +3. `meson setup` + `ninja install` is run inside utils/poly/ of the + repository this setup.py lives in. No repo cloning is performed; + setup.py is expected to be in the root of the smlp checkout. + +4. The installed smlp extension package is copied into the wheel. + +Environment variables +--------------------- +BOOST_ROOT Reuse an existing Boost prefix – skips download + compile. + e.g. export BOOST_ROOT=~/.local/boost_py313 +BOOST_CACHE_DIR Where to cache the compiled Boost (default: ~/.local/boost_py313). +BOOST_VERSION Boost version to download (default: 1.83.0). +KAY_DIR Reuse an existing kay checkout. +GMP_ROOT Reuse an existing GMP prefix – skips download + compile. + e.g. export GMP_ROOT=~/.local/gmp +GMP_CACHE_DIR Where to cache compiled GMP (default: ~/.local/gmp). +GMP_VERSION GMP version to download (default: 6.3.0). +Z3_PREFIX Reuse an existing Z3 install prefix – skips pip z3-solver. + e.g. export Z3_PREFIX=~/.local/z3 +Z3_VERSION Z3 version to download binary for (default: 4.8.12). +Z3_BIN_DIR Path to directory containing z3 binary (default: ~/.local/z3/bin). +SMLP_BRANCH Git branch to switch to in the smlp repo (auto-detected if unset). 
+""" + +import os +import platform +import shutil +import subprocess +import sys +import tarfile +import urllib.request +from pathlib import Path + +from setuptools import setup +from setuptools.command.build_ext import build_ext as _build_ext + + +# --------------------------------------------------------------------------- +# Constants / defaults +# --------------------------------------------------------------------------- + +BOOST_VERSION = os.environ.get("BOOST_VERSION", "1.83.0") +BOOST_CACHE_DIR = Path( + os.environ.get("BOOST_CACHE_DIR", Path.home() / ".local" / "boost_py313") +).expanduser() + +# Default Z3_PREFIX: where z3-solver installs its lib/libz3.so +# This is the standard location when installed via: +# python3.13 -m pip install --user z3-solver +Z3_DEFAULT_PREFIX = ( + Path.home() / ".local" / "lib" / f"python{sys.version_info.major}.{sys.version_info.minor}" + / "site-packages" / "z3" +) + +GMP_VERSION = os.environ.get("GMP_VERSION", "6.3.0") +GMP_CACHE_DIR = Path( + os.environ.get("GMP_CACHE_DIR", Path.home() / ".local" / "gmp") +).expanduser() + +Z3_VERSION = os.environ.get("Z3_VERSION", "4.8.12") +Z3_BIN_DIR = Path( + os.environ.get("Z3_BIN_DIR", Path.home() / ".local" / "z3" / "bin") +).expanduser() + +# Root of this repository (where setup.py lives) +REPO_ROOT = Path(__file__).parent.resolve() + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _run(cmd, **kwargs): + print(f"[smlp build] $ {' '.join(str(c) for c in cmd)}") + result = subprocess.run([str(c) for c in cmd], **kwargs) + if result.returncode != 0: + # Print captured output if any + if hasattr(result, "stdout") and result.stdout: + print(result.stdout) + if hasattr(result, "stderr") and result.stderr: + print(result.stderr, file=sys.stderr) + raise subprocess.CalledProcessError(result.returncode, cmd) + + +def _verify_tarball(path: Path) -> bool: + 
"""Return True if the tarball can be opened and is not truncated.""" + try: + with tarfile.open(path) as tf: + # Read all members to detect truncation + tf.getmembers() + return True + except Exception: + return False + + +def _download(url: str, dest: Path, retries: int = 5) -> None: + """Download a file with retries, verifying integrity after each attempt.""" + import time + + # Remove any existing file — it may be a corrupt partial download + if dest.exists(): + if _verify_tarball(dest): + print(f"[smlp build] Using verified cached tarball {dest}") + return + else: + print(f"[smlp build] Removing corrupt cached tarball {dest}") + dest.unlink() + + for attempt in range(1, retries + 1): + try: + print(f"[smlp build] Downloading {url} (attempt {attempt}/{retries}) ...") + urllib.request.urlretrieve(url, dest) + if _verify_tarball(dest): + print(f"[smlp build] Download verified OK.") + return + else: + print(f"[smlp build] Download corrupt, retrying ...") + dest.unlink() + except Exception as e: + print(f"[smlp build] Download failed: {e}") + if dest.exists(): + dest.unlink() + if attempt < retries: + wait = 2 ** attempt + print(f"[smlp build] Retrying in {wait}s ...") + time.sleep(wait) + sys.exit(f"[smlp build] ERROR: failed to download {url} after {retries} attempts.") + + +def _meson_bin(build_tmp: Path) -> list[str]: + """ + Write a meson wrapper script and return the command to invoke it. + + The wrapper explicitly adds the meson install location to sys.path, + so it works in pip's isolated build environment where user site-packages + is not on sys.path. Meson stores the wrapper path in the build dir and + reuses it for internal calls like `meson install`, so it must be a real + executable file — not a -c string. 
+ """ + # Find where mesonbuild is installed via pip show + mesonbuild_location = None + result = subprocess.run( + [sys.executable, "-m", "pip", "show", "meson"], + capture_output=True, text=True + ) + if result.returncode == 0: + for line in result.stdout.splitlines(): + if line.startswith("Location:"): + mesonbuild_location = line.split(":", 1)[1].strip() + break + + # Fallback: check user site-packages directly + if not mesonbuild_location: + user_site = ( + Path.home() / ".local" / "lib" + / f"python{sys.version_info.major}.{sys.version_info.minor}" + / "site-packages" + ) + if (user_site / "mesonbuild").exists(): + mesonbuild_location = str(user_site) + + if not mesonbuild_location: + raise RuntimeError( + "[smlp build] meson not found. Run: python3.13 -m pip install --user meson" + ) + + print(f"[smlp build] meson location: {mesonbuild_location}") + + # Write a wrapper script with a proper shebang so Meson can store and + # reuse its path for internal calls (meson install, meson test, etc.) + wrapper = build_tmp / "meson" + wrapper.write_text( + f"#!/usr/bin/env {sys.executable}\n" + "import sys\n" + f"sys.path.insert(0, {mesonbuild_location!r})\n" + "from mesonbuild.mesonmain import main\n" + "sys.exit(main())\n" + ) + wrapper.chmod(0o755) + print(f"[smlp build] Using meson wrapper: {wrapper}") + return [str(wrapper)] + + +def _ninja_bin() -> str: + """ + Resolve the ninja binary, preferring user-space installs over system ones. + + Search order: + 1. The 'ninja' PyPI package (pip install ninja → /bin/ninja) + 2. ~/.local/bin/ninja (pip install --user ninja) + 3. PATH (last resort — may find /usr/bin/ninja) + """ + import importlib.util + from shutil import which + + # ── 1. 
pip ninja package ───────────────────────────────────────────── + spec = importlib.util.find_spec("ninja") + if spec is not None: + try: + import ninja as _ninja_pkg # type: ignore + candidate = Path(_ninja_pkg.BIN_DIR) / "ninja" + if candidate.exists(): + print(f"[smlp build] Using pip ninja: {candidate}") + return str(candidate) + except Exception: + pass + + # ── 2. ~/.local/bin (pip install --user) ───────────────────────────── + user_ninja = Path.home() / ".local" / "bin" / "ninja" + if user_ninja.exists(): + print(f"[smlp build] Using user ninja: {user_ninja}") + return str(user_ninja) + + # ── 3. PATH fallback ───────────────────────────────────────────────── + found = which("ninja") + if found: + print(f"[smlp build] Using ninja from PATH: {found}") + return found + + raise RuntimeError( + "[smlp build] ninja not found. Run: pip install ninja" + ) + + +# --------------------------------------------------------------------------- +# Step 1 – Boost.Python (compiled from source, cached in user-space) +# --------------------------------------------------------------------------- + +def _boost_prefix() -> Path: + """ + Return the Boost install prefix, building from source if necessary. + + Search order: + 1. $BOOST_ROOT env var → use as-is, no build + 2. BOOST_CACHE_DIR marker → cache hit, skip build + 3. 
Download + compile into BOOST_CACHE_DIR + """ + # ── Option A: caller supplied an existing prefix ────────────────────── + env_root = os.environ.get("BOOST_ROOT") + if env_root: + prefix = Path(env_root).expanduser() + print(f"[smlp build] Using BOOST_ROOT={prefix}") + return prefix + + # ── Option B: cached build already present ──────────────────────────── + tag_file = BOOST_CACHE_DIR / ".built_for_python313" + if tag_file.exists(): + print(f"[smlp build] Boost cache found at {BOOST_CACHE_DIR}, skipping build.") + return BOOST_CACHE_DIR + + # ── Option C: download + compile into user-space cache ──────────────── + ver_flat = BOOST_VERSION.replace(".", "_") + tarball_name = f"boost_{ver_flat}.tar.gz" + url = f"https://archives.boost.io/release/{BOOST_VERSION}/source/{tarball_name}" + + # Temporary directory for download + extraction (sibling of cache dir) + tmp_dir = BOOST_CACHE_DIR.parent / "_boost_build_tmp" + tmp_dir.mkdir(parents=True, exist_ok=True) + + tarball_path = tmp_dir / tarball_name + if not tarball_path.exists(): + print(f"[smlp build] Downloading Boost {BOOST_VERSION} ...") + _download(url, tarball_path) + else: + print(f"[smlp build] Using cached tarball {tarball_path}") + + print(f"[smlp build] Extracting {tarball_name} ...") + with tarfile.open(tarball_path) as tf: + tf.extractall(tmp_dir) + + src = tmp_dir / f"boost_{ver_flat}" + + print(f"[smlp build] Bootstrapping Boost (python={sys.executable}) ...") + _run( + ["./bootstrap.sh", + f"--with-python={sys.executable}", + "--with-libraries=python"], + cwd=str(src), + ) + + BOOST_CACHE_DIR.mkdir(parents=True, exist_ok=True) + print(f"[smlp build] Compiling Boost → {BOOST_CACHE_DIR} (this takes a few minutes) ...") + _run( + ["./b2", "install", + f"--prefix={BOOST_CACHE_DIR}", + "--with-python", + "python=3.13"], + cwd=str(src), + ) + + # Leave a marker so we skip the build on the next pip install + tag_file.touch() + + # Remove the source + tarball; keep only the install + shutil.rmtree(tmp_dir, 
ignore_errors=True) + + print(f"[smlp build] Boost built and cached at {BOOST_CACHE_DIR}") + return BOOST_CACHE_DIR + + + +def _boost_env(prefix: Path) -> dict: + """ + Environment variables for meson/ninja so the user-space Boost is found + without touching /usr/lib. + """ + lib_dir = prefix / "lib" + inc_dir = prefix / "include" + + env = os.environ.copy() + env["BOOST_ROOT"] = str(prefix) + env["BOOST_INCLUDEDIR"] = str(inc_dir) + env["BOOST_LIBRARYDIR"] = str(lib_dir) + + # Force Meson to use the same Python that is running this build script, + # preventing it from falling back to the system Python 3.12. + env["PYTHON"] = sys.executable + env["PYTHON3"] = sys.executable + + # Tell Meson the exact versioned Boost.Python library name, + # e.g. Python 3.13 → boost_python313 + py_ver = f"{sys.version_info.major}{sys.version_info.minor}" + env["BOOST_PYTHON_LIBNAME"] = f"boost_python{py_ver}" + + existing_ld = env.get("LD_LIBRARY_PATH", "") + env["LD_LIBRARY_PATH"] = f"{lib_dir}:{existing_ld}" if existing_ld else str(lib_dir) + + pkgconfig = lib_dir / "pkgconfig" + existing_pkg = env.get("PKG_CONFIG_PATH", "") + env["PKG_CONFIG_PATH"] = f"{pkgconfig}:{existing_pkg}" if existing_pkg else str(pkgconfig) + + return env + + +def _add_z3_to_env(env: dict, z3_lib: Path) -> dict: + """Prepend the z3-solver lib/bin directories to the relevant env vars.""" + existing_ld = env.get("LD_LIBRARY_PATH", "") + env["LD_LIBRARY_PATH"] = f"{z3_lib}:{existing_ld}" if existing_ld else str(z3_lib) + + existing_pkg = env.get("PKG_CONFIG_PATH", "") + env["PKG_CONFIG_PATH"] = f"{z3_lib}:{existing_pkg}" if existing_pkg else str(z3_lib) + + # Add z3 binary to PATH so meson can find the solver executable + z3_bin = z3_lib.parent / "bin" + existing_path = env.get("PATH", os.environ.get("PATH", "")) + env["PATH"] = f"{z3_bin}:{existing_path}" if existing_path else str(z3_bin) + + return env + + +def _add_gmp_to_env(env: dict, gmp_prefix: Path) -> dict: + """Prepend the GMP lib/include 
directories to the relevant env vars.""" + gmp_lib = gmp_prefix / "lib" + gmp_inc = gmp_prefix / "include" + + existing_ld = env.get("LD_LIBRARY_PATH", "") + env["LD_LIBRARY_PATH"] = f"{gmp_lib}:{existing_ld}" if existing_ld else str(gmp_lib) + + pkgconfig = gmp_lib / "pkgconfig" + existing_pkg = env.get("PKG_CONFIG_PATH", "") + env["PKG_CONFIG_PATH"] = f"{pkgconfig}:{existing_pkg}" if existing_pkg else str(pkgconfig) + + existing_cpp = env.get("CPPFLAGS", "") + env["CPPFLAGS"] = f"-I{gmp_inc} {existing_cpp}".strip() + + existing_ld_flags = env.get("LDFLAGS", "") + env["LDFLAGS"] = f"-L{gmp_lib} {existing_ld_flags}".strip() + + return env + + +# --------------------------------------------------------------------------- +# Step 2 – kay dependency +# --------------------------------------------------------------------------- + +def _ensure_kay(build_tmp: Path) -> Path: + kay_env = os.environ.get("KAY_DIR") + if kay_env: + kay_dir = Path(kay_env).expanduser() + print(f"[smlp build] Using existing kay at {kay_dir}") + return kay_dir + + kay_dir = build_tmp.resolve() / "kay" + if kay_dir.exists(): + print(f"[smlp build] Reusing kay clone at {kay_dir}") + else: + _run(["git", "clone", "https://github.com/fbrausse/kay", str(kay_dir)]) + return kay_dir + + +# --------------------------------------------------------------------------- +# Step 1c – GMP (compiled from source, cached in user-space) +# --------------------------------------------------------------------------- + +def _write_gmp_pc(prefix: Path) -> None: + """ + Write a gmp.pc pkg-config file into /lib/pkgconfig/. + GMP does not generate one by default, so Meson cannot find it + via pkg-config without this file. 
+ """ + pkgconfig_dir = prefix / "lib" / "pkgconfig" + pkgconfig_dir.mkdir(parents=True, exist_ok=True) + pc_file = pkgconfig_dir / "gmp.pc" + pc_file.write_text( + f"prefix={prefix}\n" + "exec_prefix=${prefix}\n" + "libdir=${exec_prefix}/lib\n" + "includedir=${prefix}/include\n" + "\n" + "Name: gmp\n" + "Description: GNU Multiple Precision Arithmetic Library\n" + f"Version: {GMP_VERSION}\n" + "Libs: -L${libdir} -lgmp\n" + "Cflags: -I${includedir}\n" + ) + print(f"[smlp build] Wrote pkg-config file: {pc_file}") + + # Also write gmpxx.pc for the C++ wrapper library + pcxx_file = pkgconfig_dir / "gmpxx.pc" + pcxx_file.write_text( + f"prefix={prefix}\n" + "exec_prefix=${prefix}\n" + "libdir=${exec_prefix}/lib\n" + "includedir=${prefix}/include\n" + "\n" + "Name: gmpxx\n" + "Description: GNU Multiple Precision Arithmetic Library (C++ bindings)\n" + f"Version: {GMP_VERSION}\n" + "Requires: gmp\n" + "Libs: -L${libdir} -lgmpxx -lgmp\n" + "Cflags: -I${includedir}\n" + ) + print(f"[smlp build] Wrote pkg-config file: {pcxx_file}") + + +def _gmp_prefix() -> Path: + """ + Return the GMP install prefix, building from source if necessary. + + Search order: + 1. $GMP_ROOT env var → use as-is, no build + 2. GMP_CACHE_DIR marker → cache hit, skip build + 3. 
Download + compile into GMP_CACHE_DIR + """ + # ── Option A: caller supplied an existing prefix ────────────────────── + env_root = os.environ.get("GMP_ROOT") + if env_root: + prefix = Path(env_root).expanduser() + print(f"[smlp build] Using GMP_ROOT={prefix}") + return prefix + + # ── Option B: cached build already present ──────────────────────────── + tag_file = GMP_CACHE_DIR / ".built" + if tag_file.exists(): + print(f"[smlp build] GMP cache found at {GMP_CACHE_DIR}, skipping build.") + _write_gmp_pc(GMP_CACHE_DIR) + return GMP_CACHE_DIR + + # ── Option C: download + compile into user-space cache ──────────────── + tarball_name = f"gmp-{GMP_VERSION}.tar.xz" + url = f"https://gmplib.org/download/gmp/{tarball_name}" + + tmp_dir = GMP_CACHE_DIR.parent / "_gmp_build_tmp" + tmp_dir.mkdir(parents=True, exist_ok=True) + + tarball_path = tmp_dir / tarball_name + if not tarball_path.exists(): + print(f"[smlp build] Downloading GMP {GMP_VERSION} ...") + _download(url, tarball_path) + else: + print(f"[smlp build] Using cached tarball {tarball_path}") + + print(f"[smlp build] Extracting {tarball_name} ...") + with tarfile.open(tarball_path) as tf: + tf.extractall(tmp_dir) + + src = tmp_dir / f"gmp-{GMP_VERSION}" + + GMP_CACHE_DIR.mkdir(parents=True, exist_ok=True) + print(f"[smlp build] Compiling GMP → {GMP_CACHE_DIR} (this takes a minute) ...") + import platform as _platform + machine = _platform.machine() # e.g. 
x86_64, aarch64 + system = _platform.system().lower() # linux + host = f"{machine}-pc-{system}-gnu" + + _run( + ["./configure", + f"--prefix={GMP_CACHE_DIR}", + f"--host={host}", + "--enable-shared", + "--enable-static", + "--disable-assembly", + "--enable-cxx"], # avoids platform-specific asm issues + cwd=str(src), + ) + _run(["make", f"-j{os.cpu_count() or 1}"], cwd=str(src)) + _run(["make", "install"], cwd=str(src)) + + # Generate a gmp.pc pkg-config file — GMP doesn't ship one by default + _write_gmp_pc(GMP_CACHE_DIR) + + # Leave a marker so we skip the build on the next pip install + tag_file.touch() + + # Remove the source + tarball; keep only the install + shutil.rmtree(tmp_dir, ignore_errors=True) + + print(f"[smlp build] GMP built and cached at {GMP_CACHE_DIR}") + return GMP_CACHE_DIR + + + +# --------------------------------------------------------------------------- +# Step 1b – Z3 (via pip z3-solver, no sudo) +# --------------------------------------------------------------------------- + +# --------------------------------------------------------------------------- +# Step 1d – Z3 binary (downloaded from GitHub releases) +# --------------------------------------------------------------------------- + +def _z3_binary() -> Path: + """ + Return the path to the z3 executable. + + Search order: + 1. Z3_BIN_DIR env var / constant (~/.local/z3/bin/z3) + 2. ~/.local/bin/z3 (pip install --user z3-solver installs it here) + 3. System z3 on PATH (sudo apt install z3) + 4. Download pre-built from GitHub (no sudo fallback) + """ + from shutil import which + + # ── 1. Explicit Z3_BIN_DIR ──────────────────────────────────────────── + if Z3_BIN_DIR.exists() and (Z3_BIN_DIR / "z3").exists(): + print(f"[smlp build] Using z3 binary from Z3_BIN_DIR: {Z3_BIN_DIR / 'z3'}") + return Z3_BIN_DIR / "z3" + + # ── 2. 
~/.local/bin/z3 ─────────────────────────────────────────────── + user_z3 = Path.home() / ".local" / "bin" / "z3" + if user_z3.exists(): + print(f"[smlp build] Using user z3 binary: {user_z3}") + return user_z3 + + # ── 3. PATH ─────────────────────────────────────────────────────────── + system_z3 = which("z3") + if system_z3: + print(f"[smlp build] Using system z3: {system_z3}") + return Path(system_z3) + + # ── 4. Download pre-built binary from GitHub releases ───────────────── + import platform as _platform + machine = _platform.machine() + arch_map = {"x86_64": "x64", "aarch64": "arm64"} + arch = arch_map.get(machine, machine) + z3_release = f"z3-{Z3_VERSION}-{arch}-glibc-2.31" + url = ( + f"https://github.com/Z3Prover/z3/releases/download/z3-{Z3_VERSION}/" + f"{z3_release}.zip" + ) + + tmp_dir = Z3_BIN_DIR.parent.parent / "_z3_build_tmp" + tmp_dir.mkdir(parents=True, exist_ok=True) + zip_path = tmp_dir / f"{z3_release}.zip" + + print(f"[smlp build] Downloading z3 binary {Z3_VERSION} ...") + _download(url, zip_path) + + import zipfile, shutil as _shutil + print(f"[smlp build] Extracting z3 binary ...") + with zipfile.ZipFile(zip_path) as zf: + zf.extractall(tmp_dir) + + Z3_BIN_DIR.mkdir(parents=True, exist_ok=True) + src_bin = tmp_dir / z3_release / "bin" / "z3" + _shutil.copy2(src_bin, Z3_BIN_DIR / "z3") + (Z3_BIN_DIR / "z3").chmod(0o755) + _shutil.rmtree(tmp_dir, ignore_errors=True) + + print(f"[smlp build] z3 binary installed at {Z3_BIN_DIR / 'z3'}") + return Z3_BIN_DIR / "z3" + + +def _write_z3_pc(z3_lib: Path) -> None: + """ + Write a z3.pc pkg-config file into /pkgconfig/. + z3-solver does not ship one, so Meson cannot find it via pkg-config + without this file. + """ + import re + # Detect z3 version from libz3.so filename e.g. 
libz3.so.4.8.12 + version = "4.8.12" # fallback + for f in z3_lib.glob("libz3.so.*"): + m = re.search(r"libz3\.so\.([\d.]+)", f.name) + if m: + version = m.group(1) + break + + prefix = z3_lib.parent # /z3 + inc_dir = prefix / "include" + + pkgconfig_dir = z3_lib / "pkgconfig" + pkgconfig_dir.mkdir(parents=True, exist_ok=True) + pc_file = pkgconfig_dir / "z3.pc" + pc_file.write_text( + f"prefix={prefix}\n" + f"libdir={z3_lib}\n" + f"includedir={inc_dir}\n" + "\n" + "Name: z3\n" + "Description: Z3 Theorem Prover\n" + f"Version: {version}\n" + "Libs: -L${libdir} -lz3\n" + "Cflags: -I${includedir}\n" + ) + print(f"[smlp build] Wrote pkg-config file: {pc_file}") + + +def _z3_prefix() -> Path: + """ + Return the z3-solver lib directory containing libz3.so. + + Search order: + 1. $Z3_PREFIX env var → use /lib + 2. Z3_DEFAULT_PREFIX constant → ~/.local/lib/python3.13/site-packages/z3/lib + (standard location for: pip install --user z3-solver) + """ + env_prefix = os.environ.get("Z3_PREFIX") + prefix = Path(env_prefix).expanduser() if env_prefix else Z3_DEFAULT_PREFIX + lib_dir = prefix / "lib" + + print(f"[smlp build] Looking for libz3.so in: {lib_dir}") + + found = list(lib_dir.rglob("libz3.so")) if lib_dir.exists() else [] + if found: + print(f"[smlp build] Using z3 lib dir: {lib_dir}") + _write_z3_pc(lib_dir) + return lib_dir + + sys.exit( + f"[smlp build] ERROR: libz3.so not found at {lib_dir}.\n" + "Install z3-solver with: python3.13 -m pip install --user z3-solver\n" + "Or set Z3_PREFIX to your z3 package directory, e.g.:\n" + " export Z3_PREFIX=~/.local/lib/python3.13/site-packages/z3" + ) + + +def _write_native_file(boost_prefix: Path, gmp_prefix: Path, z3_lib: Path, z3_bin: Path, build_tmp: Path) -> Path: + """ + Write a Meson native file that points to the user-space Boost install. + This is the most reliable way to pass non-standard library paths to Meson — + more reliable than environment variables, which Meson may ignore depending + on version and platform. 
+ """ + boost_lib = boost_prefix / "lib" + boost_inc = boost_prefix / "include" + gmp_lib = gmp_prefix / "lib" + gmp_inc = gmp_prefix / "include" + z3_pc_dir = z3_lib / "pkgconfig" + + native_file = build_tmp / "native.ini" + native_file.write_text( + "[properties]\n" + f"boost_root = '{boost_prefix}'\n" + f"boost_includedir = '{boost_inc}'\n" + f"boost_librarydir = '{boost_lib}'\n" + f"gmp_includedir = '{gmp_inc}'\n" + f"gmp_librarydir = '{gmp_lib}'\n" + f"gmpxx_includedir = '{gmp_inc}'\n" + f"gmpxx_librarydir = '{gmp_lib}'\n" + "\n" + "[binaries]\n" + f"python = '{sys.executable}'\n" + f"python3 = '{sys.executable}'\n" + f"pkg-config = 'pkg-config'\n" + f"z3 = '{z3_bin}'\n" + "\n" + "[built-in options]\n" + f"pkg_config_path = ['{gmp_lib / 'pkgconfig'}', '{boost_lib / 'pkgconfig'}', '{z3_pc_dir}']\n" + f"c_args = ['-I{gmp_inc}', '-I{boost_inc}']\n" + f"cpp_args = ['-I{gmp_inc}', '-I{boost_inc}']\n" + f"c_link_args = ['-L{gmp_lib}', '-L{boost_lib}', '-Wl,-rpath,{gmp_lib}', '-Wl,-rpath,{boost_lib}']\n" + f"cpp_link_args = ['-L{gmp_lib}', '-L{boost_lib}', '-Wl,-rpath,{gmp_lib}', '-Wl,-rpath,{boost_lib}', '-Wl,-rpath,{z3_lib}']\n" + ) + print(f"[smlp build] Wrote Meson native file: {native_file}") + return native_file + + +def _meson_build(poly_dir: Path, kay_dir: Path, + boost_prefix: Path, build_tmp: Path) -> Path: + """ + Run meson setup + ninja install. + Returns the path to the installed smlp package directory. + """ + meson_build_dir = poly_dir / "build" + install_prefix = build_tmp.resolve() / "smlp_install" + + if meson_build_dir.exists(): + shutil.rmtree(meson_build_dir) + + z3_lib = _z3_prefix() + z3_bin = _z3_binary() + gmp_prefix = _gmp_prefix() + env = _boost_env(boost_prefix) + env = _add_z3_to_env(env, z3_lib) + env = _add_gmp_to_env(env, gmp_prefix) + + # Embed RPATH into the built .so so it finds user-space libs at runtime + # without needing LD_LIBRARY_PATH to be set. 
+ rpath_dirs = [ + str(boost_prefix / "lib"), + str(gmp_prefix / "lib"), + str(z3_lib), + ] + rpath_flags = ":".join(f"-Wl,-rpath,{d}" for d in rpath_dirs) + existing_ldflags = env.get("LDFLAGS", "") + env["LDFLAGS"] = f"{rpath_flags} {existing_ldflags}".strip() + native_file = _write_native_file(boost_prefix, gmp_prefix, z3_lib, z3_bin, build_tmp) + + meson_flags = [ + "--wipe", + f"--native-file={native_file}", + f"-Dkay-prefix={kay_dir}", + "-Dz3=enabled", + "--prefix", str(install_prefix), + # Explicitly pass both source dir and build dir as absolute paths + # so Meson works correctly regardless of cwd + str(poly_dir), + str(poly_dir / "build"), + ] + + print(f"[smlp build] PKG_CONFIG_PATH = {env.get('PKG_CONFIG_PATH', '(not set)')}") + print(f"[smlp build] LD_LIBRARY_PATH = {env.get('LD_LIBRARY_PATH', '(not set)')}") + _run( + _meson_bin(build_tmp) + ["setup"] + meson_flags, + env=env, + ) + + _run([_ninja_bin(), "-C", str(poly_dir / "build"), "install"], + cwd=str(poly_dir), env=env) + + # Locate the installed smlp package (Meson may use a versioned python path) + candidates = (list(install_prefix.glob("lib/python*/dist-packages/smlp")) + + list(install_prefix.glob("lib/python3/dist-packages/smlp"))) + if not candidates: + sys.exit( + f"[smlp build] ERROR: could not find installed smlp package under " + f"{install_prefix}. Check the Meson/Ninja output above." + ) + return candidates[0] + + +# --------------------------------------------------------------------------- +# Custom build_ext +# --------------------------------------------------------------------------- + +class MesonBuildExt(_build_ext): + + def run(self): + build_tmp = Path(self.build_temp).resolve() + build_tmp.mkdir(parents=True, exist_ok=True) + + # 1. Boost (compiled from source, cached in ~/.local/boost_py313) + boost_prefix = _boost_prefix() + + # 2. kay + kay_dir = _ensure_kay(build_tmp) + + # 3. 
Meson build – run from within the repo + poly_dir = REPO_ROOT / "utils" / "poly" + if not poly_dir.is_dir(): + sys.exit( + f"[smlp build] ERROR: expected utils/poly/ at {poly_dir}.\n" + "Make sure setup.py is run from the root of the smlp repository." + ) + + # Optionally switch branch (useful in CI) + branch = os.environ.get("SMLP_BRANCH") + if branch: + _run(["git", "switch", branch], cwd=str(REPO_ROOT)) + else: + result = subprocess.run( + ["git", "branch", "-r", "--list", "origin/smlp_python313"], + capture_output=True, text=True, cwd=str(REPO_ROOT) + ) + if result.stdout.strip(): + _run(["git", "switch", "smlp_python313"], cwd=str(REPO_ROOT)) + + installed_pkg = _meson_build(poly_dir, kay_dir, boost_prefix, build_tmp) + + # 4. Copy into the wheel's lib tree + dest = Path(self.build_lib) / "smlp" + if dest.exists(): + shutil.rmtree(dest) + shutil.copytree(str(installed_pkg), str(dest)) + print(f"[smlp build] smlp extension copied to wheel at {dest}") + + # 5. Copy Python source from src/smlp_py into smlp/smlp_py inside the wheel + smlp_py_src = REPO_ROOT / "src" / "smlp_py" + if smlp_py_src.is_dir(): + smlp_py_dest = dest / "smlp_py" # dest is already smlp/ + if smlp_py_dest.exists(): + shutil.rmtree(smlp_py_dest) + shutil.copytree(str(smlp_py_src), str(smlp_py_dest)) + print(f"[smlp build] smlp_py source copied to wheel at {smlp_py_dest}") + else: + print(f"[smlp build] WARNING: src/smlp_py not found at {smlp_py_src}, skipping.") + + # 6. 
Copy src/run_smlp.py into smlp/ inside the wheel + run_smlp_src = REPO_ROOT / "src" / "run_smlp.py" + if run_smlp_src.is_file(): + shutil.copy2(str(run_smlp_src), str(dest / "run_smlp.py")) + print(f"[smlp build] run_smlp.py copied to wheel at {dest / 'run_smlp.py'}") + else: + print(f"[smlp build] WARNING: src/run_smlp.py not found at {run_smlp_src}, skipping.") + + +# --------------------------------------------------------------------------- +# setup() +# --------------------------------------------------------------------------- + +setup( + cmdclass={"build_ext": MesonBuildExt}, + # Dummy extension so setuptools produces a platform-specific wheel + # and actually invokes build_ext. + ext_modules=[ + __import__("setuptools").Extension(name="smlp._dummy", sources=[]), + ], +) diff --git a/smlp_regression/create_diff_report b/smlp_regression/create_diff_report new file mode 100755 index 00000000..a4dd068f --- /dev/null +++ b/smlp_regression/create_diff_report @@ -0,0 +1,9 @@ +#!/usr/bin/tcsh -f +set script_path=`realpath $0 | xargs dirname` +set root_path=`git rev-parse --show-toplevel`/regr_smlp +foreach f (`grep Failed $root_path/code/all_log.txt | grep -v : | grep -v "master file does not exist" | awk '{print $1}'`) + set new_results="$root_path/code/${f}" + echo =================== Diff report for: $new_results:t ================================== + diff -w $new_results $new_results:h:h/master + echo =================== End of $new_results:t diff report ================================ +end diff --git a/smlp_regression/run_smlp_regression b/smlp_regression/run_smlp_regression new file mode 100755 index 00000000..ab41804d --- /dev/null +++ b/smlp_regression/run_smlp_regression @@ -0,0 +1,27 @@ +#!/usr/bin/tcsh -f +set script_path=`realpath $0 | xargs dirname` +set script_name=`realpath $0 | xargs basename` +set log=$PWD/${script_name}.log +set diff_report=$log:h/${script_name}_diff_report.log +git rev-parse --show-toplevel >& /dev/null +if($status) then + echo 
"\nERROR: Current directory is outside git directory tree\n" + exit(1) +endif +cd `git rev-parse --show-toplevel` +\rm -f regr_smlp/models/test{63,101}_model_y*_smlp_* >& /dev/null +\rm -rf regr_smlp/code/*{test,Test}* >& /dev/null +\rm -rf regr_smlp/code/{__pycache__,all_log.txt,logs.log,_doe.csv} >& /dev/null +if($#argv > 0) then + if("-clean" == "$argv[1]") then + exit(0) + endif +endif +cd `git rev-parse --show-toplevel`/regr_smlp/code +set path=($PWD $path) +set python_path="$PWD" +\ln -sf /usr/bin/python3.13 python3 +echo "Log file: $log" +echo n | env CUDA_VISIBLE_DEVICES=-1 ./smlp_regr.py -w 8 -def n -t all -tol 7 -g |& tee $log +\rm -f $python_path/python3 +${script_path}/create_diff_report >& $diff_report diff --git a/smlp_regression/run_smlp_regression_expected.log b/smlp_regression/run_smlp_regression_expected.log new file mode 100644 index 00000000..72d8f684 --- /dev/null +++ b/smlp_regression/run_smlp_regression_expected.log @@ -0,0 +1,4521 @@ +Calling 8 workers for multiprocessing... +Initiating 0 worker... +Initiating 1 worker... +Initiating 2 worker... +Initiating 3 worker... +Initiating 4 worker... +Initiating 5 worker... +Initiating 6 worker... 
+ +Running test 3 test type: prediction, description: basic poly_sklearn prediction test on labeled and new data with numeric response in training/test data only +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test3 -mode predict -resp y1 -feat x,p1,p2 -model poly_sklearn -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_unlabeled.csv" + + +Running test 10 test type: prediction, description: basic et_sklearn prediction test on labeled and new data with numeric labels +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test10 -mode predict -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_max_depth 15 -et_sklearn_bootstrap f -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 17 test type: prediction, description: basic poly_sklearn prediction test from saved model on new data with numeric labels and two responses +../../src/run_smlp.py -model_name "../models/Test11_smlp_toy_num_resp_mult" -out_dir ./ -pref Test17 -mode predict -resp y1,y2 -feat x,p1,p2 -model poly_sklearn -save_model f -use_model t -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 24 test type: prediction, description: basic dt_sklearn prediction test using a model saved under a name specified through model_name option on new data with numeric labels +../../src/run_smlp.py -model_name "../models/test24_model" -out_dir ./ -pref Test24 -mode predict -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -save_model f -use_model t -model_per_response t -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 30 test type: 
subgroups, description: basic test for subgroup discovery for numric responses +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test30 -mode subgroups -psg_dim 3 -psg_top 10 -resp y1,y2 -feat x,p1,p2 -plots t -seed 10 -log_time f + + +Running test 37 test type: doe, description: doe test with four levels with box_behnken +../../src/run_smlp.py -doe_spec "../grids/doe_three_levels_real_nan.csv" -out_dir ./ -pref Test37 -mode doe -doe_algo box_behnken -log_time f + + +Running test 47 test type: prediction, description: tests options -pos_val and -neg_val when re-using saved model +../../src/run_smlp.py -model_name "../models/test47_model" -out_dir ./ -pref Test47 -mode predict -resp "PF,PF1" -model poly_sklearn -save_model f -use_model t -data_scaler none -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -pos_val fail -neg_val pass -new_dat "../data/smlp_toy_pf_mult.csv" + + +Running test 54 test type: discretization, description: tests discretization options +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test54 -mode discretize -resp "PF,PF1" -discr_algo ordinals -discr_bins 6 -discr_labels t -discr_type object -data_scaler none -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + +spec_fn smlp_toy_num_resp_noknobs_verify.spec +specs_path ../specs + +Running test 68 test type: verify, description: basic dt_sklearn assertion verification test on data with one numeric response +../../src/run_smlp.py -model_name "../models/test67_model" -out_dir ./ -pref Test68 -mode verify -resp y1,y2 -feat x0,x1,x2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -model_per_response t -save_model f -use_model t -spec ../specs/smlp_toy_num_resp_noknobs_verify.spec -asrt_names asrt1,asrt2 -asrt_exprs "x0**2+y1>4.3;(y1+x2)/2<6" -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult.spec 
+specs_path ../specs + +Running test 80 test type: optimize, description: basic dt_sklearn single objective optimization test with numeric labels and integer grid as domain and with scaling objectives +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test80 -mode optimize -pareto f -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult.spec -data_scaler min_max -objv_names obj1 -objv_exprs "(y1+y2)/2" -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_free_inps_beta_objv.spec +specs_path ../specs + +Running test 88 test type: optimize, description: basic dt_sklearn multi objective pareto optimization test with beta and objectives specified in spec file +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test88 -mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 94 test type: optsyn, description: basic test for rf_sklearn in model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test94 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model rf_sklearn -rf_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn 
smlp_toy_num_resp_mult_witness.spec +specs_path ../specs + +Running test 102 test type: certify, description: basic test in certify mode to test stability (theta) and guard (eta) constraint generation +../../src/run_smlp.py -model_name "../models/test101_model" -out_dir ./ -pref Test102 -mode certify -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model f -use_model t -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_witness.spec -quer_names query1,query2,query3 -quer_exprs "(y2**3+p2)/2<6;y1>=9;y2<20" -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult.spec +specs_path ../specs + +Running test 104 test type: verify, description: assertion verfication test with wrong spec that does not assign a single value using a singleton grid or range with equal max and min +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test104 -mode verify -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult.spec -asrt_names asrt_y1,asrt_y2 -asrt_expr "y1*2+x<=5 and y1<=10;-2*y2-1<10-p2" -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_beta_verify.spec +specs_path ../specs + +Running test 107 test type: verify, description: test for verification mode to check that eta contraints are not contradictory and as otherwise verification problem is not well defined +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test107 -mode verify -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult_beta_verify.spec -asrt_names asrt_y1,asrt_y2 -asrt_expr "y1*2+x<=5 and y1<=10;-2*y2-1<10-p2" -save_model_config f -mrmr_pred 0 -plots f -pred_plots f 
-resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_basic.spec +specs_path ../specs + +Running test 114 test type: optimize, description: smlp toy basic test for mode optimize from SMLP manual without specifying resp and feat in command line +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test114 -mode optimize -pareto t -opt_strategy lazy -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -mrmr_pred 0 -epsilon 0.05 -delta_rel 0.01 -save_model f -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -spec ../specs/smlp_toy_basic.spec + +Running test 7 test type: prediction, description: basic rf_sklearn prediction test on labeled and new data with numeric labels +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test7 -mode predict -resp y1,y2 -feat x,p1,p2 -model rf_sklearn -rf_sklearn_max_depth 15 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 9 test type: prediction, description: basic dt_sklearn prediction test on labeled and new data with numeric labels +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test9 -mode predict -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -save_model t -model_name test20_model -data_scaler none -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -save_config t -save_model_config t -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 16 test type: prediction, description: basic nn_keras prediction test from saved model on new data with numeric labels and two responses +../../src/run_smlp.py -model_name "../models/Test8_smlp_toy_num_resp_mult" -out_dir ./ -pref Test16 -mode predict -resp y1,y2 -feat x,p1,p2 -model nn_keras -save_model f -use_model t -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f 
-new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 22 test type: prediction, description: test for illegal symbols in column names +../../src/run_smlp.py -model_name "../models/test22_model" -out_dir ./ -pref Test22 -mode predict -resp "PF ,|PF |" -model poly_sklearn -save_model f -use_model t -pred_plots t -resp_plots t -data_scaler none -mrmr_pred 0 -plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_metasymbol_mult_reg_pred_labeled.csv" + + +Running test 29 test type: subgroups, description: basic test for subgroup discovery for pass-fail responses +../../src/run_smlp.py -data "../data/smlp_toy_cls_metasymbol_colnames_mult.csv" -out_dir ./ -pref Test29 -mode subgroups -psg_dim 3 -psg_top 10 -resp "PF 1,PF#" -plots t -seed 10 -log_time f + + +Running test 35 test type: doe, description: doe test with four levels with plackett_burman +../../src/run_smlp.py -doe_spec "../grids/doe_four_levels_real.csv" -out_dir ./ -pref Test35 -mode doe -doe_algo plackett_burman -log_time f + + +Running test 43 test type: doe, description: doe test with four levels with halton_sequence +../../src/run_smlp.py -doe_spec "../grids/doe_two_levels.csv" -out_dir ./ -pref Test43 -mode doe -doe_algo halton_sequence -doe_samples 20 -log_time f + + +Running test 52 test type: discretization, description: tests discretization options +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test52 -mode discretize -resp "PF,PF1" -discr_algo jenks -discr_bins 6 -discr_labels t -discr_type ordered -data_scaler none -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + +spec_fn smlp_toy_num_resp_mult_y1_verify.spec +specs_path ../specs + +Running test 63 test type: verify, description: basic dt_sklearn assertion verification test on data with numeric labels +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test63 -mode verify -resp y1 -feat x,p1,p2 -model dt_sklearn 
-dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model t -use_model f -model_name test63_model -spec ../specs/smlp_toy_num_resp_mult_y1_verify.spec -asrt_names asrt1,asrt2 -asrt_exprs "x/2+y1>4.3;(y1+p2)/2<6" -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult.spec +specs_path ../specs + +Running test 79 test type: query, description: basic test in query mode to test stability (theta) and guard (eta) constraint generation +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test79 -mode query -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult.spec -quer_names query1,query2,query3 -quer_exprs "(y2**3+p2)/2<6;y1>=9;y2<0" -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_query_vacuous.spec +specs_path ../specs + +Running test 91 test type: query, description: test to detect contradictory constraints in optimization mode due to contradictory alpha global and alpha bounds constraints on FMAX_xyx +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test91 -mode query -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_query_vacuous.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_free_inps_beta_objv.spec +specs_path ../specs + +Running test 99 test type: optimize, description: testing that the response and feature names can be taken from spec file in model exploration modes when the responses and/or features are not specified in the command line +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" 
-out_dir ./ -pref Test99 -mode optimize -pareto t -opt_strategy lazy -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_certify_witness.spec +specs_path ../specs + +Running test 103 test type: certify, description: +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test103 -mode certify -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model t -use_model f -model_name test103_model -model_per_response f -spec ../specs/smlp_toy_num_resp_mult_certify_witness.spec -quer_names valid_candidate,grid_conflict,range_conflict -quer_exprs "True;True;True" -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 111 test type: unknown, description: smlp toy basic test to rerun saved model using the model rerun config file saved during model training +../../src/run_smlp.py -model_name "../models/test110_model" -out_dir ./ -pref Test111 -config ../models/test110_model_rerun_model_config.json -new_dat "../data/smlp_toy_basic_pred_unlabeled.csv" + +spec_fn smlp_toy_system.spec +specs_path ../specs + +Running test 115 test type: certify, description: basic test in certify mode +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test115 -mode certify -resp y1,y2 -feat x1,x2,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_system.spec -quer_names query1,query2 -quer_exprs "y1>0;y2<=0" -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_certify.spec +specs_path ../specs + +Running test 127 test type: certify, 
description: certification example with knobs only and fictitious inputs with values fixed through their ranges +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test127 -mode certify -model system -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_certify.spec -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 142 test type: optsyn, description: basic test for compress_rules option for rf_sklearn in optsin mode +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test142 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model rf_sklearn -rf_sklearn_max_depth 15 -tree_encoding nested -compress_rules t -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +Running test 2 test type: prediction, description: basic rf_sklearn prediction test on labeled and new data with numeric labels +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test2 -mode predict -resp y1 -feat x,p1,p2 -model rf_sklearn -rf_sklearn_max_depth 15 -save_model_config f -mrmr_pred 0 -plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 12 test type: train, description: EV-SI real life dt_sklearn predict test on labeled and new data with numeric labels +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test12 -mode train -resp y1,y2 -feat x1,x2,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 18 test type: prediction, description: basic dt_sklearn prediction test on labeled and new data with numeric labels and saving model using name 
specified through model_name option - adapts Test6 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test18 -mode predict -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -save_model t -use_model f -model_name test19_model -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 25 test type: prediction, description: basic dt_sklearn prediction test on labeled and new data with numeric labels and saving model using name specified through model_name option - adapts Test6 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test25 -mode predict -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -save_model t -use_model f -model_name test26_model -mrmr_pred 2 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 36 test type: doe, description: doe test with four levels with sukharev_grid +../../src/run_smlp.py -doe_spec "../grids/doe_four_levels_real.csv" -out_dir ./ -pref Test36 -mode doe -doe_algo sukharev_grid -doe_samples 125 -log_time f + + +Running test 42 test type: doe, description: doe test with four levels with maximin_reconstruction +../../src/run_smlp.py -doe_spec "../grids/doe_two_levels.csv" -out_dir ./ -pref Test42 -mode doe -doe_algo maximin_reconstruction -doe_samples 20 -log_time f + + +Running test 51 test type: discretization, description: tests discretization options +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test51 -mode discretize -resp "PF,PF1" -discr_algo jenks -discr_bins 6 -discr_labels f -discr_type integer -data_scaler none -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + +spec_fn smlp_toy_num_resp_mult_y2_verify.spec +specs_path ../specs + +Running test 60 test type: verify, description: basic nn_keras 
assertion verification test for functional nn_keras model +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test60 -mode verify -resp y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api t -save_model_config f -spec ../specs/smlp_toy_num_resp_mult_y2_verify.spec -asrt_names asrt1 -asrt_exprs "2*y2>1" -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat + +spec_fn smlp_toy_num_resp_mult_free_inps.spec +specs_path ../specs + +Running test 83 test type: optimize, description: basic dt_sklearn multi objective pareto optimization test with numeric labels and integer grid as domain and with scaling objectives +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test83 -mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult_free_inps.spec -data_scaler min_max -beta "y1>7 and y2>6" -objv_names obj1,objv2,objv3 -objv_exprs "(y1+y2)/2;y1/2-y2;y2" -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_verify_vacuous.spec +specs_path ../specs + +Running test 92 test type: verify, description: test to detect contradictory constraints in verification mode +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test92 -mode verify -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model t -mrmr_pred 2 -model_per_response f -spec ../specs/smlp_toy_num_resp_mult_verify_vacuous.spec -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_free_inps_beta_objv.spec +specs_path ../specs + +Running test 100 test type: optimize, 
description: basic test for sat_threshold option enabing usage of objectve values in SAT assignments that prove optimization thresholds +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test100 -mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_stable_verify.spec +specs_path ../specs + +Running test 105 test type: verify, description: basic dt_sklearn assertion verfication test with numeric labels and integer grid as domain +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test105 -mode verify -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult_stable_verify.spec -asrt_names asrt_y1,asrt_y2 -asrt_expr "y1*2+x<=5 and y1<=10;-2*y2-1<10-p2" -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_cannot_synthesize.spec +specs_path ../specs + +Running test 109 test type: synthesize, description: basic test for mode synthesize where synthesis fails +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test109 -mode synthesize -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -tree_encoding nested -compress_rules f -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_cannot_synthesize.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_constant_synth_fail.spec +specs_path ../specs + 
+Running test 122 test type: optimize, description: optimization test with constant knob and no inputs where synthesis is not feasible because the assertion is not feasible but beta constraint is feasible therefore optimization is performed +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test122 -mode optimize -pareto f -opt_strategy lazy -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_synth_fail.spec -epsilon 0.00000001 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_verify.spec +specs_path ../specs + +Running test 140 test type: verify, description: verification example with knobs only and fictitious inputs that have no effect where proparty is valid without stability and fails with stability +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test140 -mode verify -model system -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_verify.spec -trace_prec 1 -trace_anonym t -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 147 test type: prediction, description: checks nn_keras prediction with sw_coef 0.8 and sequential API +Running test 1 test type: train, description: basic dt_caret training and test on labeled data with single numeric response +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test1 -mode train -resp y1 -feat x,p1,p2 -model dt_caret -save_model_config f -mrmr_pred 0 -plots f -seed 10 -log_time f + + +Running test 20 test type: prediction, description: basic dt_sklearn prediction test on labeled and new data with numeric labels +../../src/run_smlp.py -model_name "../models/test20_model" -out_dir ./ -pref Test20 -mode predict -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -save_model f -use_model t -data_scaler none -mrmr_pred 0 
-plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 27 test type: prediction, description: checks nn_keras prediction with nn_keras_seq_api t +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test27 -mode predict -resp y2 -feat x,p1,p2 -model nn_keras -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api t -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 34 test type: doe, description: doe test with four levels with full_factorial method +../../src/run_smlp.py -doe_spec "../grids/doe_four_levels_real.csv" -out_dir ./ -pref Test34 -mode doe -doe_algo full_factorial -log_time f + + +Running test 44 test type: doe, description: doe test with four levels with uniform_random_matrix +../../src/run_smlp.py -doe_spec "../grids/doe_two_levels.csv" -out_dir ./ -pref Test44 -mode doe -doe_algo uniform_random_matrix -doe_samples 20 -log_time f + + +Running test 50 test type: discretization, description: tests discretization options +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test50 -mode discretize -resp "PF,PF1" -discr_algo kmeans -discr_bins 6 -discr_labels t -discr_type ordered -data_scaler none -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + +spec_fn smlp_toy_num_resp_mult_y2_verify.spec +specs_path ../specs + +Running test 59 test type: verify, description: basic nn_keras assertion verification test for functional nn_keras model +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test59 -mode verify -resp y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -save_model_config f -spec ../specs/smlp_toy_num_resp_mult_y2_verify.spec -asrt_names asrt1 
-asrt_exprs "2*y2>1" -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat + +spec_fn smlp_toy_num_resp_mult_free_inps.spec +specs_path ../specs + +Running test 82 test type: optimize, description: basic dt_sklearn single objective optimization test with numeric labels and integer grid as domain and with scaling objectives +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test82 -mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult_free_inps.spec -data_scaler min_max -objv_names obj1,objv2,objv3 -objv_exprs "(y1+y2)/2;y1;y2" -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 93 test type: optsyn, description: basic test for mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test93 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_witness.spec +specs_path ../specs + +Running test 101 test type: certify, description: basic test in certify mode to test stability (theta) and guard (eta) constraint generation +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test101 -mode certify -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model t -use_model f -model_name test101_model -mrmr_pred 2 
-model_per_response t -spec ../specs/smlp_toy_num_resp_mult_witness.spec -quer_names query1,query2,query3 -quer_exprs "(y2**3+p2)/2<6;y1>=9;y2<20" -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_unsat_eta_verify.spec +specs_path ../specs + +Running test 106 test type: verify, description: test for verification mode to check that eta contraints are not contradictory and as otherwise verification problem is not well defined +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test106 -mode verify -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult_unsat_eta_verify.spec -asrt_names asrt_y1,asrt_y2 -asrt_expr "y1*2+x<=5 and y1<=10;-2*y2-1<10-p2" -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 110 test type: prediction, description: smlp toy basic example for predict mode from SMLP user manual +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test110 -mode predict -resp y1,y2 -feat x1,x2,p1,p2 -model poly_sklearn -save_model t -model_name test110_model -save_model_config t -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_basic_pred_unlabeled.csv" + +spec_fn smlp_toy_system_stable_constant_certify.spec +specs_path ../specs + +Running test 117 test type: certify, description: certification test with knobs only where assertion is valid without stability and fails with stability +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test117 -mode certify -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_certify.spec -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_constant_synth_fail.spec +specs_path ../specs + 
+Running test 124 test type: optsyn, description: optimized synthesis test with constant knob and no inputs where synthesis is not feasible because while beta constraint is feasible the assertion is not feasible therefore optimization is not performed +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test124 -mode optsyn -pareto f -opt_strategy lazy -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_synth_fail.spec -epsilon 0.00000001 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult.spec +specs_path ../specs + +Running test 141 test type: optimize, description: basic test for compress_rules option for dt_sklearn in optimization mode +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test141 -mode optimize -opt_strategy lazy -pareto f -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules t -spec ../specs/smlp_toy_num_resp_mult.spec -objv_names objv_y1,objv_y2 -objv_exprs "y1;y2" -epsilon 0.01 -delta_rel 0.01 -data_scaler none -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 148 test type: prediction, description: checks nn_keras prediction with sw_coef 0.8 and sequential API +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test148 -mode predict -resp y1,y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api t -sw_coef 0.8 -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + +Running test 6 test type: prediction, description: basic dt_sklearn prediction test on labeled and new data with numeric labels +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test6 
-mode predict -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 11 test type: prediction, description: basic poly_sklearn prediction test on labeled and new data with numeric labels +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test11 -mode predict -resp y1,y2 -feat x,p1,p2 -model poly_sklearn -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 15 test type: prediction, description: basic dt_caret prediction test from saved model on new data with numeric labels +../../src/run_smlp.py -model_name "../models/Test5_smlp_toy_num_resp_mult" -out_dir ./ -pref Test15 -mode predict -resp y1 -feat x,p1,p2 -model dt_caret -save_model f -use_model t -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 23 test type: prediction, description: basic dt_sklearn prediction test on labeled and new data with numeric labels and saving model using name specified through model_name option - adapts Test6 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test23 -mode predict -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -save_model t -use_model f -model_name test24_model -model_per_response t -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 31 test type: subgroups, description: testing resp2b in subgroup discovery mode +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test31 -mode subgroups -psg_dim 3 -psg_top 10 -resp y1,y2 -resp2b "y1<6;y2>6" -feat x,p1,p2 -plots t -seed 
10 -log_time f -save_config t + + +Running test 38 test type: doe, description: doe test with four levels with box_wilson +../../src/run_smlp.py -doe_spec "../grids/doe_two_levels.csv" -out_dir ./ -pref Test38 -mode doe -doe_algo box_wilson -doe_cc_face ccc -doe_cc_alpha r -doe_cc_center 2,3 -log_time f + + +Running test 46 test type: prediction, description: tests options -pos_val and -neg_val +../../src/run_smlp.py -data "../data/smlp_toy_pf_mult.csv" -out_dir ./ -pref Test46 -mode predict -resp "PF,PF1" -model poly_sklearn -save_model t -save_model_config f -use_model f -model_name test47_model -data_scaler none -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -pos_val fail -neg_val pass -new_dat "../data/smlp_toy_pf_mult.csv" + + +Running test 55 test type: discretization, description: tests discretization options +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test55 -mode discretize -resp "PF,PF1" -discr_algo ranks -discr_bins 6 -discr_labels t -discr_type category -data_scaler none -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + +spec_fn smlp_toy_num_resp_noknobs_verify.spec +specs_path ../specs + +Running test 66 test type: verify, description: basic dt_sklearn assertion verification test on data with one numeric response +../../src/run_smlp.py -model_name "../models/test65_model" -out_dir ./ -pref Test66 -mode verify -resp y1,y2 -feat x0,x1,x2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model f -use_model t -spec ../specs/smlp_toy_num_resp_noknobs_verify.spec -asrt_names asrt1,asrt2 -asrt_exprs "x0**2+y1>4.3;(y1+x2)/2<6" -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 77 test type: unknown, description: verification test run using model_rerun config covering the case when mrmr selcts only a subset of features specified through the command line or config file +../../src/run_smlp.py -model_name 
"../models/test76_model" -out_dir ./ -pref Test77 -config ../models/test76_model_rerun_model_config.json + +spec_fn smlp_toy_num_resp_mult.spec +specs_path ../specs + +Running test 86 test type: optimize, description: tests alpha +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test86 -mode optimize -pareto f -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult.spec -data_scaler min_max -objv_names obj1,objv2 -objv_exprs "(y1+y2)/2;y1" -asrt_names asrt1,asrt2,asrt3 -asrt_exprs "(y2**3+p2)/2<6;y1>=9;y2<0" -alpha "p2<5 and x==10 and x<12" -eta "p1==4" -epsilon 0.05 -delta_rel 0.01 -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 95 test type: optsyn, description: basic test for dt_caret in model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test95 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_caret -save_model f -use_model f -tree_encoding nested -compress_rules f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_synthesize.spec +specs_path ../specs + +Running test 108 test type: synthesize, description: basic test for dt_sklearn in model exploration mode synthesize where synthesis succeeds +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test108 -mode synthesize -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -tree_encoding nested -compress_rules f -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec 
../specs/smlp_toy_num_resp_mult_synthesize.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_constant_synth_fail.spec +specs_path ../specs + +Running test 120 test type: synthesize, description: synthesis test with constant knob and no inputs where synthesis is not feasible because the assertion is not feasible +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test120 -mode synthesize -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_synth_fail.spec -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_witness_certify.spec +specs_path ../specs + +Running test 128 test type: certify, description: Basic regression test in certify mode covering all four possible outcomes when certifying a witness for a query: the witness is stable +../../src/run_smlp.py -data "../data/smlp_toy_ctg_num_resp.csv" -out_dir ./ -pref Test128 -mode certify -resp y1,y2 -feat x,p1,p2 -model poly_sklearn -dt_sklearn_max_depth 15 -save_model f -use_model f -model_per_response f -spec ../specs/smlp_toy_witness_certify.spec -quer_names query_stable_witness,query_grid_conflict,query_unstable_witness,query_infeasible_witness,query_poly_intercept_sensitive -quer_exprs "y2<=90;y1>=9;y1>=(-13);y1>9;y1>=(-10)" -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 149 test type: prediction, description: tests the mae loss function MeanAbsoluteError and sample weoghts +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test149 -mode predict -resp y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -nn_keras_loss mae -sw_coef 0.8 -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + +spec_fn 
smlp_toy_num_resp_mult_verify.spec +specs_path +Running test 4 test type: prediction, description: basic nn_keras prediction test on labeled and new data with numeric labels and one response +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test4 -mode predict -resp y2 -feat x,p1,p2 -model nn_keras -nn_keras_weights_precision 2 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 13 test type: train, description: EV-SI real life nn_keras prediction test on labeled and new data with numeric labels +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test13 -mode train -resp y1,y2 -feat x1,x2,p1,p2 -model nn_keras -nn_keras_epochs 20 -nn_keras_seq_api f -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 28 test type: prediction, description: checks nn_keras prediction with sw_coef 0.8 and functional API +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test28 -mode predict -resp y2 -feat x,p1,p2 -model nn_keras -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -sw_coef 0.8 -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 41 test type: doe, description: doe test with four levels with random_k_means +../../src/run_smlp.py -doe_spec "../grids/doe_two_levels.csv" -out_dir ./ -pref Test41 -mode doe -doe_algo random_k_means -doe_samples 20 -log_time f + + +Running test 49 test type: discretization, description: tests discretization options +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test49 -mode discretize -resp "PF,PF1" -discr_algo quantile -discr_bins 6 -discr_labels t -discr_type category -data_scaler none -mrmr_pred 0 -plots 
f -seed 10 -log_time f -pos_val fail -neg_val pass + +spec_fn smlp_toy_num_resp_mult.spec +specs_path ../specs + +Running test 58 test type: optimize, description: basic dt_sklearn optimization test with numeric labels and integer grid as domain and without scaling objectives +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test58 -mode optimize -pareto f -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult.spec -objv_names objv_y1,objv_y2 -objv_exprs "y1;y2" -epsilon 0.01 -delta_rel 0.01 -data_scaler none -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_y2_verify.spec +specs_path ../specs + +Running test 70 test type: verify, description: nn_keras verification test with re-using saved model_per_response trained model +../../src/run_smlp.py -model_name "../models/test69_model" -out_dir ./ -pref Test70 -mode verify -resp y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -save_model f -use_model t -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_y2_verify.spec -asrt_names asrt1 -asrt_exprs "(y2**3+p2)/2<6" -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat + +spec_fn smlp_toy_num_resp_mult_free_inps.spec +specs_path ../specs + +Running test 81 test type: optimize, description: basic dt_sklearn single objective optimization test with numeric labels and integer grid as domain and with scaling objectives +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test81 -mode optimize -pareto f -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult_free_inps.spec 
-data_scaler min_max -objv_names obj1 -objv_exprs "(y1+y2)/2" -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat + +spec_fn smlp_toy_num_resp_mult_query.spec +specs_path ../specs + +Running test 89 test type: query, description: basic test in query mode to test stability (theta) and guard (eta) constraint generation +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test89 -mode query -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_query.spec -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_query.spec +specs_path ../specs + +Running test 97 test type: query, description: basic test for rf_sklearn in model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test97 -mode query -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_bootstrap f -tree_encoding nested -compress_rules f -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_query.spec -epsilon 0.1 -delta_rel 0.05 -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 112 test type: prediction, description: smlp toy basic test from SMLP manual +../../src/run_smlp.py -model_name "../models/test110_model" -out_dir ./ -pref Test112 -mode predict -resp y1,y2 -feat x1,x2,p1,p2 -model poly_sklearn -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -use_model t -save_model f -new_dat "../data/smlp_toy_basic_pred_unlabeled.csv" + +spec_fn smlp_toy_system_stable_constant_verify.spec +specs_path ../specs + +Running 
test 118 test type: verify, description: verification test with knobs only where assertion is valid without stability and fails with stability +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test118 -mode verify -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_verify.spec -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_constant_synth_feasible.spec +specs_path ../specs + +Running test 123 test type: optimize, description: optimization test with constant knob and no inputs where synthesis is feasible and optimization is performed +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test123 -mode optimize -pareto t -opt_strategy lazy -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_synth_feasible.spec -epsilon 0.00000001 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_constant_synth_feasible.spec +specs_path ../specs + +Running test 145 test type: optimize, description: optimization test with constant knob and no inputs where synthesis is feasible and optimization is performed +../../src/run_smlp.py -out_dir ./ -pref Test145 -mode optimize -pareto t -opt_strategy lazy -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_synth_feasible.spec -doe_spec ../grids/doe_two_levels_opt.csv -doe_algo latin_hypercube -epsilon 0.00000001 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 151 test type: prediction, description: tests msle loss function MeanSquaredLogarithmicError and and sample weoghts +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test151 -mode predict -resp y2 -feat x,p1,p2 -model 
nn_keras -nnet_encoding nested -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -nn_keras_loss msle -sw_coef 3 -sw_exp 10 -sw_int 0 -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + +spec_fn +Running test 5 test type: prediction, description: basic dt_caret prediction test on labeled and new data with numeric labels +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test5 -mode predict -resp y1 -feat x,p1,p2 -model dt_caret -save_model t -use_model f -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 19 test type: prediction, description: basic dt_sklearn prediction test using a model saved under a name specified through model_name option on new data with numeric labels +../../src/run_smlp.py -model_name "../models/test19_model" -out_dir ./ -pref Test19 -mode predict -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -save_model f -use_model t -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 26 test type: prediction, description: basic dt_sklearn prediction test using a model saved under a name specified through model_name option on new data with numeric labels +../../src/run_smlp.py -model_name "../models/test26_model" -out_dir ./ -pref Test26 -mode predict -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -save_model f -use_model t -mrmr_pred 2 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 32 test type: unknown, description: test reusing saved model by using configuration file +../../src/run_smlp.py -model_name "../models/test20_model" -out_dir ./ -pref Test32 -config 
../models/test20_model_rerun_model_config.json -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 39 test type: doe, description: doe test with four levels with latin_hypercube +../../src/run_smlp.py -doe_spec "../grids/doe_two_levels.csv" -out_dir ./ -pref Test39 -mode doe -doe_algo latin_hypercube -doe_prob_distr Exponential -doe_samples 30 -log_time f + + +Running test 45 test type: doe, description: doe test with four levels with fractional_factorial +../../src/run_smlp.py -doe_spec "../grids/doe_two_levels_real.csv" -out_dir ./ -pref Test45 -mode doe -doe_algo fractional_factorial -doe_resolution 5 -log_time f + + +Running test 53 test type: discretization, description: tests discretization options +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test53 -mode discretize -resp "PF,PF1" -discr_algo ordinals -discr_bins 6 -discr_labels f -discr_type integer -data_scaler none -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + +spec_fn smlp_toy_num_resp_mult_y1_verify.spec +specs_path ../specs + +Running test 64 test type: verify, description: basic dt_sklearn assertion verification test on data with one numeric response +../../src/run_smlp.py -model_name "../models/test63_model" -out_dir ./ -pref Test64 -mode verify -resp y1 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model f -use_model t -spec ../specs/smlp_toy_num_resp_mult_y1_verify.spec -asrt_names asrt1,asrt2 -asrt_exprs "x/2+y1>4.3;(y1+p2)/2<6" -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_noknobs_verify.spec +specs_path ../specs + +Running test 72 test type: verify, description: nn_keras verification test with re-using saved model_per_response trained model +../../src/run_smlp.py -model_name "../models/test71_model" -out_dir ./ -pref Test72 -mode verify -resp y1,y2 -feat x0,x1,x2 -model nn_keras -nnet_encoding nested 
-save_model f -use_model t -model_per_response t -spec ../specs/smlp_toy_num_resp_noknobs_verify.spec -asrt_names asrt1 -asrt_exprs "(y2**3+x2)/2<6" -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat + +spec_fn smlp_toy_num_resp_mult_alpha_asrt_verify.spec +specs_path ../specs + +Running test 87 test type: verify, description: tests global alpha constraints and assertions specified in spec file +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test87 -mode verify -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model t -mrmr_pred 2 -model_per_response f -spec ../specs/smlp_toy_num_resp_mult_alpha_asrt_verify.spec -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 96 test type: optsyn, description: basic test for rf_sklearn in model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test96 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model rf_caret -save_model f -use_model f -tree_encoding nested -compress_rules f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_basic.spec +specs_path ../specs + +Running test 113 test type: optimize, description: smlp toy basic test for mode optimize from SMLP manual +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test113 -mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x1,x2,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -mrmr_pred 0 -epsilon 0.05 -delta_rel 0.01 -save_model t -model_name test113_model 
-save_model_config t -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -spec ../specs/smlp_toy_basic.spec + +spec_fn smlp_toy_system_stable_constant_query.spec +specs_path ../specs + +Running test 119 test type: query, description: query test with knobs only where query is satisfiable without stability and fails with stability +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test119 -mode query -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_query.spec -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_constant_synth_feasible.spec +specs_path ../specs + +Running test 125 test type: optsyn, description: optimized synthesis test with constant knob and no inputs where synthesis is feasible and optimization is performed +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test125 -mode optsyn -pareto t -opt_strategy lazy -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_synth_feasible.spec -epsilon 0.00000001 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system.spec +specs_path ../specs + +Running test 146 test type: optimize, description: optimization test with constant knob and no inputs where synthesis is feasible and optimization is performed +../../src/run_smlp.py -out_dir ./ -pref Test146 -mode optimize -pareto t -opt_strategy lazy -model poly_sklearn -resp y1,y2 -feat p1,p2,x1,x2 -save_model t -use_model f -mrmr_pred 0 -model_per_response t -split 1 -spec ../specs/smlp_toy_system.spec -doe_spec ../grids/explore_doe_two_levels.csv -doe_algo latin_hypercube -epsilon 0.99999999 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 152 test type: prediction, description: tests the huber loss function Huber and sample 
weights +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test152 -mode predict -resp y1,y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -nn_keras_loss huber -sw_coef 8 -sw_exp 5 -sw_int 0.5 -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 160 test type: prediction, description: tests nn keras tuner bayesian +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test160 -mode predict -resp y1,y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -nn_keras_loss mape -nn_keras_metrics msle -nn_keras_tuner bayesian -nn_keras_layers_grid "2,3" -nn_keras_losses_grid "mse,mae,huber" -model_per_response f -sw_coef 8 -sw_exp 5 -sw_int 0.5 -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + +Running test 8 test type: prediction, description: basic nn_keras prediction test on labeled and new data with numeric labels and two responses +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test8 -mode predict -resp y1,y2 -feat x,p1,p2 -model nn_keras -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -nn_keras_epochs 20 -nn_keras_seq_api f -log_time f -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 14 test type: train, description: EV-SI real life poly_sklearn prediction test on labeled and new data with numeric labels +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test14 -mode train -resp y1,y2 -feat x1,x2,p1,p2 -model poly_sklearn -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 21 test type: prediction, description: test for illegal 
symbols in column names +../../src/run_smlp.py -data "../data/smlp_toy_num_metasymbol_mult_reg.csv" -out_dir ./ -pref Test21 -mode predict -resp "PF ,|PF |" -model poly_sklearn -save_model t -use_model f -model_name test22_model -pred_plots t -resp_plots t -data_scaler none -mrmr_pred 0 -plots f -seed 10 -log_time f -new_dat "../data/smlp_toy_num_metasymbol_mult_reg_pred_labeled.csv" + + +Running test 33 test type: unknown, description: testing -config option with subgroups mode +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test33 -config ../models/Test31_smlp_toy_num_resp_mult_args_config.json + + +Running test 40 test type: doe, description: doe test with four levels with latin_hypercube_space_filling +../../src/run_smlp.py -doe_spec "../grids/doe_two_levels.csv" -out_dir ./ -pref Test40 -mode doe -doe_algo latin_hypercube_sf -doe_samples 20 -log_time f + + +Running test 48 test type: discretization, description: tests discretization options +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test48 -mode discretize -resp "PF,PF1" -discr_algo uniform -discr_bins 6 -discr_labels t -discr_type object -data_scaler none -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + + +Running test 56 test type: discretization, description: tests discretization options +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test56 -mode discretize -resp "PF,PF1" -discr_algo ranks -discr_bins 6 -discr_labels f -discr_type object -data_scaler none -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + +spec_fn smlp_toy_num_resp_mult_y2_verify.spec +specs_path ../specs + +Running test 69 test type: verify, description: nn_keras verification test with model_per_response training +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test69 -mode verify -resp y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -save_model t 
-use_model f -model_name test69_model -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_y2_verify.spec -asrt_names asrt1 -asrt_exprs "(y2**3+p2)/2<6" -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat + +spec_fn smlp_toy_num_resp_mult.spec +specs_path ../specs + +Running test 85 test type: optimize, description: tests alpha and eta constraints specified in command line in optimization mode +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test85 -mode optimize -pareto f -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -spec ../specs/smlp_toy_num_resp_mult.spec -data_scaler min_max -objv_names obj1,objv2 -objv_exprs "(y1+y2)/2;y1" -alpha "p2<5 and x==10 and x<12" -eta "p1==4" -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn_vacuous.spec +specs_path ../specs + +Running test 90 test type: optsyn, description: test to detect contradictory constraints in optsyn mode +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test90 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -tree_encoding nested -compress_rules f -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_optsyn_vacuous.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 98 test type: optsyn, description: basic test for et_caret in model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test98 -mode optsyn -opt_strategy lazy -resp y1,y2 
-feat x,p1,p2 -model et_caret -save_model f -use_model f -tree_encoding nested -compress_rules f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system.spec +specs_path ../specs + +Running test 116 test type: certify, description: basic test in certify mode when system is specified and is used as the model; p2 rel-rad needs to be 0 or very close to it the witness to first query to be stable +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test116 -mode certify -resp y1,y2 -feat x1,x2,p1,p2 -model system -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_system.spec -quer_names query1,query2 -quer_exprs "y1>0;y2<=0" -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_configuration_verify.spec +specs_path ../specs + +Running test 129 test type: verify, description: verification example with demonstrating all basic result scenarious for assertions +../../src/run_smlp.py -data "../data/smlp_toy_ctg_num_resp.csv" -out_dir ./ -pref Test129 -mode verify -resp y1,y2 -feat x,p1,p2 -model poly_sklearn -save_model f -use_model f -model_per_response f -spec ../specs/smlp_toy_configuration_verify.spec -asrt_names assert_stable_config,assert_grid_conflict,assert_unstable_config,assert_infeasible -asrt_exprs "y2<=90;y1>=9;y1>=(-10);y1>20" -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 150 test type: prediction, description: tests the mape loss function MeanAbsolutePercentageError and sample weights +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test150 -mode predict -resp y1,y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -nn_keras_loss mape -sw_coef 
0.8 -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 158 test type: prediction, description: tests the mape loss function and sample weights with model_per_response t +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test158 -mode predict -resp y1,y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -nn_keras_loss mape -model_per_response t -sw_coef 8 -sw_exp 5 -sw_int 0.5 -nn_keras_metrics rmse -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 167 test type: optsyn, description: basic flat tree encoding test with model_per_response t for rf_sklearn in model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test167 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model rf_sklearn -rf_sklearn_max_depth 4 -rf_sklearn_n_estimators 3 -tree_encoding flat -compress_rules t -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_constant_synth_feasible.spec +specs_path ../specs + +Running test 121 test type: synthesize, description: synthesis test with constant knob and no inputs where synthesis is feasible +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test121 -mode synthesize -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_synth_feasible.spec -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_verify.spec +specs_path ../specs + +Running test 126 test type: verify, description: 
verification example with knobs only and fictitious inputs that have no effect where proparty is valid without stability and fails with stability +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test126 -mode verify -model system -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_verify.spec -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_query.spec +specs_path ../specs + +Running test 143 test type: query, description: basic test for compress_rules for et_sklearn in mode query +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test143 -mode query -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_bootstrap f -tree_encoding nested -compress_rules t -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_query.spec -epsilon 0.1 -delta_rel 0.05 -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 159 test type: prediction, description: tests the msle loss function and sample weights with model_per_response t +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test159 -mode predict -resp y1,y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api t -nn_keras_loss msle -model_per_response t -sw_coef 4 -sw_exp 5 -sw_int 0.5 -nn_keras_metrics mae,cosine -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 168 test type: optimize, description: basic test for rf_caret with flat tree_encoding and modelper_response in model exploration mode optimize +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref 
Test168 -mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model rf_caret -model_per_response t -compress_rules t -tree_encoding flat -save_model f -use_model f -mrmr_pred 2 -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 174 test type: optsyn, description: basic layered nn_keras encoding test with model_per_response f nn_keras_seq_api f for nn_keras in model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test174 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model nn_keras -nn_keras_epochs 20 -nn_keras_seq_api f -nnet_encoding layered -save_model f -use_model f -mrmr_pred 2 -model_per_response f -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_free_inps_beta_objv.spec +specs_path ../specs + +Running test 183 test type: optimize, description: basic flat tree encoding test for dt_sklearn multi objective pareto optimization when features and responses are not scaled modifies test 164 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test183 -mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding flat -scale_resp f -scale_feat f -spec ../specs/smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 189 test type: optsyn, description: basic branched tree encoding test with model_per_response f for rf_sklearn in model exploration mode optsyn adapts test 166 
+../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test189 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model rf_sklearn -rf_sklearn_max_depth 4 -rf_sklearn_n_estimators 3 -tree_encoding branched -compress_rules t -save_model f -use_model f -compress_rules t -mrmr_pred 2 -model_per_response f -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 192 test type: optimize, description: basic test for et_sklearn with branched tree_encoding and model_per_response f in model exploration mode optimize adapts test 170 !!!!!!!!! in this test z3 result differs from mathsat and yices results (the latter two give sma results +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test192 -mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_max_depth 2 -et_sklearn_n_estimators 100 -et_sklearn_bootstrap f -tree_encoding branched -model_per_response f -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_free_inps_beta_objv.spec +specs_path ../specs + +Running test 198 test type: optimize, description: basic branched tree encoding test for dt_sklearn multi objective pareto optimization when features and responses are not scaled modifies test 164 and test 183 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test198 -mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding branched -scale_resp f -scale_feat f -spec 
../specs/smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat + +spec_fn smlp_toy_num_resp_mult_no_input_beta.spec +specs_path ../specs + +Running test 201 test type: optimize, description: basic dt_sklearn single objective optimization with the eager algorithm when there are no inputs and there are beta constraints +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test201 -mode optimize -pareto t -opt_strategy eager -resp y1,y2 -feat p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -spec ../specs/smlp_toy_num_resp_mult_no_input_beta.spec -data_scaler min_max -objv_names obj1 -objv_exprs "(y1+y2)/2" -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_constant_synth_feasible.spec +specs_path ../specs + +Running test 204 test type: optimize, description: optimization test with eager strategy and with constant knob and no inputs where synthesis is feasible and optimization is performed adapts test 123 +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test204 -mode optimize -pareto t -opt_strategy eager -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_synth_feasible.spec -epsilon 0.00000001 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + smlp_toy_num_resp_mult_verify.spec +specs_path ../specs + +Running test 157 test type: verify, description: basic nn_keras assertion verification test that uses keras tuner with sequrntial models for model training; adapts test 155 by consdering multiple responses +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" 
-out_dir ./ -pref Test157 -mode verify -resp y1,y2 -feat x,p1,p2 --model nn_keras -nnet_encoding nested -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api t -nn_keras_tuner hyperband -nn_keras_layers_grid "2,2;3" -save_model_config f -spec ../specs/smlp_toy_num_resp_mult_verify.spec -asrt_names asrt1 -asrt_exprs "2*y2>1" -sw_coef 4 -sw_exp 5 -sw_int 0.5 -nn_keras_metrics rmse,logcosh + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 166 test type: optsyn, description: basic flat tree encoding test with model_per_response f for rf_sklearn in model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test166 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model rf_sklearn -rf_sklearn_max_depth 4 -rf_sklearn_n_estimators 3 -tree_encoding flat -compress_rules t -save_model f -use_model f -compress_rules t -mrmr_pred 2 -model_per_response f -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 170 test type: optimize, description: basic test for et_sklearn with flat tree_encoding and model_per_response f in model exploration mode optimize +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test170 -mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_max_depth 2 -rf_sklearn_n_estimators 3 -et_sklearn_bootstrap f -tree_encoding flat -model_per_response f -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path 
../specs + +Running test 176 test type: optsyn, description: basic layered nn_keras encoding test with model_per_response t nn_keras_seq_api f for nn_keras in model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test176 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model nn_keras -nn_keras_epochs 20 -nn_keras_seq_api f -nnet_encoding layered -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 179 test type: optsyn, description: basic layered nn_keras encoding test with model_per_response f nn_keras_seq_api f for nn_keras in model exploration mode optsyn when resposes are not scaled adapts test 174 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test179 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model nn_keras -nn_keras_epochs 20 -nn_keras_seq_api f -nnet_encoding layered -save_model f -use_model f -mrmr_pred 2 -model_per_response f -scale_resp f -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_free_inps_beta_objv.spec +specs_path ../specs + +Running test 182 test type: optimize, description: basic flat tree encoding test for dt_sklearn multi objective pareto optimization when responses are not scaled modifies test 164 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test182 -mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding flat -scale_resp f -spec ../specs/smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 
-save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat"" + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 190 test type: optimize, description: basic test for rf_caret with branched tree_encoding and modelper_response in model exploration mode optimize adapts test 168 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test190 -mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model rf_caret -model_per_response t -compress_rules t -tree_encoding branched -save_model f -use_model f -mrmr_pred 2 -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 195 test type: optimize, description: basic test for et_sklearn with branched tree_encoding and model_per_response f in model exploration mode optimize adapts test 192 by setting n_estimators 3 and then discrepancy between z3 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test195 -mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_max_depth 2 -et_sklearn_n_estimators 3 -et_sklearn_bootstrap f -tree_encoding branched -model_per_response f -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 199 test type: optimize, description: test to demonstrate that in pareto optimization and optsyn modes with multiple objectives when beta constraints are not present SMLP results are not consistent when different 
solvers are used; this is due to fact that when a subset of objectoves are exemined in pareto algo +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test199 -mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_max_depth 2 -et_sklearn_n_estimators 100 -et_sklearn_bootstrap f -tree_encoding branched -model_per_response f -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_constant_synth_fail.spec +specs_path ../specs + +Running test 203 test type: optimize, description: optimization test with eager strategy and with constant knob and no inputs where synthesis is not feasible because the assertion is not feasible but beta constraint is feasible therefore optimization is performed adapts test 122 +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test203 -mode optimize -pareto f -opt_strategy eager -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_synth_fail.spec -epsilon 0.00000001 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_constant_synth_feasible.spec +specs_path ../specs + +Running test 206 test type: optsyn, description: optimized synthesis test with eager strategy and with constant knob and no inputs where synthesis is feasible and optimization is performed adapts test 125 +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test206 -mode optsyn -pareto t -opt_strategy eager -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_synth_feasible.spec -epsilon 0.00000001 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn 
smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 171 test type: optimize, description: basic test for et_caret with flat tree_encoding in model exploration mode optimize +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test171 -mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_caret -tree_encoding flat -model_per_response t -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 177 test type: optsyn, description: basic layered nn_keras encoding test with model_per_response t nn_keras_seq_api t for nn_keras in model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test177 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model nn_keras -nn_keras_epochs 20 -nn_keras_seq_api t -nnet_encoding layered -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 180 test type: optsyn, description: basic layered nn_keras encoding test with model_per_response f nn_keras_seq_api t for nn_keras in model exploration mode optsyn when features and responses are not scaled adapts test 175 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test180 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model nn_keras -nn_keras_epochs 20 -nn_keras_seq_api t -nnet_encoding layered -save_model f -use_model f -mrmr_pred 2 -model_per_response f -scale_feat f -scale_resp f -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f 
-resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 188 test type: optsyn, description: basic branched tree encoding test for dt_caretin model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test188 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_caret -tree_encoding branched -save_model f -use_model f -compress_rules f -mrmr_pred 2 -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 193 test type: optimize, description: basic test for et_caret with branched tree_encoding in model exploration mode optimize adapts test 171 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test193 -mode optimize -resp y1,y2 -feat x,p1,p2 -model et_caret -tree_encoding branched -model_per_response t -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_free_inps_beta_objv.spec +specs_path ../specs + +Running test 197 test type: optimize, description: basic branched tree encoding test for dt_sklearn multi objective pareto optimization when responses are not scaled modifies test 164 and test 182 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test197 -mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding branched -scale_resp f -spec ../specs/smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -solver_path 
../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat"" + +spec_fn smlp_toy_num_resp_mult_no_input.spec +specs_path ../specs + +Running test 202 test type: optimize, description: basic dt_sklearn single objective optimization with the eager algorithm when there are no inputs and no beta constraints +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test202 -mode optimize -pareto t -opt_strategy eager -resp y1,y2 -feat p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -spec ../specs/smlp_toy_num_resp_mult_no_input.spec -data_scaler min_max -objv_names obj1 -objv_exprs "(y1+y2)/2" -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_system_stable_constant_synth_feasible.spec +specs_path ../specs + +Running test 205 test type: optimize, description: optimization test with eager strategy and with constant knob and no inputs where synthesis is feasible and optimization is performed adapts test 145 +../../src/run_smlp.py -out_dir ./ -pref Test205 -mode optimize -pareto t -opt_strategy eager -model system -resp y1,y2 -feat p1,p2 -save_model f -use_model f -mrmr_pred 0 -model_per_response t -spec ../specs/smlp_toy_system_stable_constant_synth_feasible.spec -doe_spec ../grids/doe_two_levels_opt.csv -doe_algo latin_hypercube -epsilon 0.00000001 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +Running test 216 test type: correlate, description: basic test for correlate mode +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test216 -mode correlate -resp y1,y2 -discr_algo uniform -discr_bins 6 -discr_labels t -discr_type object -data_scaler none -cont_est pearson,spearman,kendall -mi_method correlation -mrmr_pred 0 -plots f -seed 10 -log_time f + + +Running test 218 test type: correlate, description: basic test for correlate mode +../../src/run_smlp.py -data 
"../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test218 -mode correlate -resp "PF,PF1" -discr_algo uniform -discr_bins 6 -discr_labels t -discr_type ordered -data_scaler none -cont_est pearson,spearman,kendall -mi_method correlation -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + + +Running test 222 test type: correlate, description: basic test for correlate mode +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test222 -mode correlate -resp "PF,PF1" -discr_algo uniform -discr_bins 6 -discr_labels t -discr_type object -data_scaler none -cont_est pearson,spearman,kendall -mi_method adjusted -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + + +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test147 -mode predict -resp y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api t -sw_coef 0.8 -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 153 test type: prediction, description: tests the logcosh loss function LogCosh and sample weights +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test153 -mode predict -resp y1,y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api t -nn_keras_loss logcosh -sw_coef 4 -sw_exp 5 -sw_int 0.5 -nn_keras_metrics mse -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + + +Running test 161 test type: prediction, description: tests nn keras tuner bayesian +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test161 -mode predict -resp y1,y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 
-log_time f -nn_keras_epochs 20 -nn_keras_seq_api t -nn_keras_loss msle -nn_keras_metrics mape,logcosh -nn_keras_tuner random -nn_keras_lrates_grid "0.01,0.001" -nn_keras_batches_grid "32,64" -model_per_response f -sw_coef 4 -sw_exp 5 -sw_int 0.5 -new_dat "../data/smlp_toy_num_resp_mult_pred_labeled.csv" + +spec_fn smlp_toy_num_resp_mult_y2_verify.spec +specs_path ../specs + +Running test 172 test type: verify, description: basic test for nn_keras flat encoding for functional api +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test172 -mode verify -resp y2 -feat x,p1,p2 -model nn_keras -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -nnet_encoding layered -nn_keras_tuner hyperband -nn_keras_layers_grid "2,2;3,3,3" -save_model_config f -spec ../specs/smlp_toy_num_resp_mult_y2_verify.spec -asrt_names asrt1 -asrt_exprs "2*y2>1" -sw_coef 4 -sw_exp 5 -sw_int 0.5 -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat"" + + + +Running test 217 test type: correlate, description: basic test for correlate mode +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test217 -mode correlate -resp "PF,PF1" -discr_algo uniform -discr_bins 6 -discr_labels t -discr_type category -data_scaler none -cont_est pearson,spearman,kendall -mi_method correlation -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + + +Running test 220 test type: correlate, description: basic test for correlate mode +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test220 -mode correlate -resp "PF,PF1" -discr_algo uniform -discr_bins 6 -discr_labels t -discr_type object -data_scaler none -cont_est pearson,spearman,kendall -mi_method normalized -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + + +Running test 224 test type: correlate, description: basic test for correlate mode and tests 
the Shannon mutual information +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test224 -mode correlate -resp y1,y2 -discr_algo uniform -discr_bins 6 -discr_labels t -discr_type object -data_scaler none -cont_est pearson,spearman,kendall -mi_method shannon -mrmr_pred 0 -plots f -seed 10 -log_time f + + +spec_fn smlp_toy_num_resp_mult_y2_verify.spec +specs_path ../specs + +Running test 154 test type: verify, description: basic nn_keras assertion verification test that uses keras tuner for functional model training +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test154 -mode verify -resp y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -nn_keras_tuner hyperband -nn_keras_layers_grid "2,2;3,3,3" -save_model_config f -spec ../specs/smlp_toy_num_resp_mult_y2_verify.spec -asrt_names asrt1 -asrt_exprs "2*y2>1" -sw_coef 4 -sw_exp 5 -sw_int 0.5 + + +Running test 225 test type: correlate, description: basic test for correlate mode and tests the adjusted mutual information +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test225 -mode correlate -resp y1,y2 -discr_algo uniform -discr_bins 6 -discr_labels t -discr_type object -data_scaler none -cont_est pearson,spearman,kendall -mi_method adjusted -mrmr_pred 0 -plots f -seed 10 -log_time f + + + +Running test 215 test type: correlate, description: basic test for correlate mode +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test215 -mode correlate -resp "PF,PF1" -discr_algo uniform -discr_bins 6 -discr_labels t -discr_type object -data_scaler none -cont_est pearson,spearman,kendall -mi_method correlation -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + + +Running test 219 test type: correlate, description: basic test for correlate mode +../../src/run_smlp.py -data 
"../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test219 -mode correlate -resp "PF,PF1" -discr_algo uniform -discr_bins 6 -discr_labels t -discr_type integer -data_scaler none -cont_est pearson,spearman,kendall -mi_method correlation -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + + +Running test 223 test type: correlate, description: basic test for correlate mode and tests the normalized mutual information +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test223 -mode correlate -resp y1,y2 -discr_algo uniform -discr_bins 6 -discr_labels t -discr_type object -data_scaler none -cont_est pearson,spearman,kendall -mi_method normalized -mrmr_pred 0 -plots f -seed 10 -log_time f + + +Running test 226 test type: correlate, description: basic test for correlate mode +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test226 -mode correlate -resp y1,y2 -discr_algo uniform -discret_num t -discr_bins 6 -discr_labels t -discr_type object -data_scaler none -cont_est pearson,spearman,kendall -mi_method correlation -mrmr_pred 0 -plots f -seed 10 -log_time f + +../specs + +Running test 156 test type: verify, description: basic nn_keras assertion verification test that uses keras tuner for functional model training; adapts test 154 by consdering multiple responses +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test156 -mode verify -resp y1,y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api f -nn_keras_tuner hyperband -nn_keras_layers_grid "2,2;3" -save_model_config f -spec ../specs/smlp_toy_num_resp_mult_verify.spec -asrt_names asrt1 -asrt_exprs "2*y2>1" -sw_coef 4 -sw_exp 5 -sw_int 0.5 -nn_keras_metrics rmse + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 165 test type: optsyn, description: basic flat tree encoding test 
for dt_caretin model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test165 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_caret -tree_encoding flat -save_model f -use_model f -compress_rules f -mrmr_pred 2 -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_y2_verify.spec +specs_path ../specs + +Running test 173 test type: verify, description: basic test for nn_keras flat encoding for sequential api +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test173 -mode verify -resp y2 -feat x,p1,p2 -model nn_keras -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 -nn_keras_seq_api t -nnet_encoding layered -nn_keras_tuner hyperband -nn_keras_layers_grid "2,2;3,3,3" -save_model_config f -spec ../specs/smlp_toy_num_resp_mult_y2_verify.spec -asrt_names asrt1 -asrt_exprs "2*y2>1" -sw_coef 4 -sw_exp 5 -sw_int 0.5 -nn_keras_metrics mae -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat"" + + +Running test 221 test type: correlate, description: basic test for correlate mode +../../src/run_smlp.py -data "../data/smlp_toy_mult_discr.csv" -out_dir ./ -pref Test221 -mode correlate -resp "PF,PF1" -discr_algo uniform -discr_bins 6 -discr_labels t -discr_type object -data_scaler none -cont_est pearson,spearman,kendall -mi_method shannon -mrmr_pred 0 -plots f -seed 10 -log_time f -pos_val fail -neg_val pass + + +Running test 227 test type: correlate, description: basic test for correlate mode and tests the normalized mutual information +../../src/run_smlp.py -data "../data/smlp_toy_basic.csv" -out_dir ./ -pref Test227 -mode correlate -resp y1,y2 -discr_algo uniform -discret_num t -discr_bins 6 -discr_labels t -discr_type object -data_scaler none -cont_est pearson,spearman,kendall 
-mi_method normalized -mrmr_pred 0 -plots f -seed 10 -log_time f + + +spec_fn smlp_toy_num_resp_mult_free_inps_beta_objv.spec +specs_path ../specs + +Running test 164 test type: optimize, description: basic flat tree encoding test for dt_sklearn multi objective pareto optimization +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test164 -mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding flat -spec ../specs/smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 169 test type: optimize, description: basic test for et_sklearn with flat tree_encoding and model_per_response t in model exploration mode optimize +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test169 -mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_max_depth 2 -rf_sklearn_n_estimators 3 -et_sklearn_bootstrap f -tree_encoding flat -model_per_response t -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 175 test type: optsyn, description: basic layered nn_keras encoding test with model_per_response f nn_keras_seq_api t for nn_keras in model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test175 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model nn_keras -nn_keras_epochs 20 -nn_keras_seq_api t -nnet_encoding layered 
-save_model f -use_model f -mrmr_pred 2 -model_per_response f -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 178 test type: optsyn, description: basic layered nn_keras encoding test with model_per_response t nn_keras_seq_api t for nn_keras in model exploration mode optsyn when features are not scaled adapts test 177 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test178 -mode optsyn -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model nn_keras -nn_keras_epochs 20 -nn_keras_seq_api t -nnet_encoding layered -save_model f -use_model f -mrmr_pred 2 -model_per_response t -scale_feat f -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_free_inps_beta_objv.spec +specs_path ../specs + +Running test 181 test type: optimize, description: basic flat tree encoding test for dt_sklearn multi objective pareto optimization when features are not scaled modifies test 164 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test181 -mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding flat -scale_feat f -spec ../specs/smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_free_inps_beta_objv.spec +specs_path ../specs + +Running test 187 test type: optimize, description: basic branched tree encoding test for dt_sklearn multi objective pareto optimization adapts test 164 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test187 -mode optimize 
-pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding branched -spec ../specs/smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 191 test type: optimize, description: basic test for et_sklearn with branched tree_encoding and model_per_response t in model exploration mode optimize adapts test 169 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test191 -mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_max_depth 2 -et_sklearn_n_estimators 3 -et_sklearn_bootstrap t -tree_encoding branched -model_per_response t -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 194 test type: optsyn, description: basic branched tree encoding test with model_per_response t for rf_sklearn in model exploration mode optsyn +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test194 -mode optsyn -resp y1,y2 -feat x,p1,p2 -model rf_sklearn -rf_sklearn_max_depth 4 -rf_sklearn_n_estimators 3 -tree_encoding branched -compress_rules t -save_model f -use_model f -mrmr_pred 2 -model_per_response t -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0.05 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_free_inps_beta_objv.spec +specs_path ../specs + +Running test 196 test type: optimize, description: basic branched tree encoding test 
for dt_sklearn multi objective pareto optimization when features are not scaled modifies test 164 and test 181 +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test196 -mode optimize -pareto t -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model dt_sklearn -dt_sklearn_max_depth 15 -compress_rules f -tree_encoding branched -scale_feat f -spec ../specs/smlp_toy_num_resp_mult_free_inps_beta_objv.spec -data_scaler min_max -epsilon 0.05 -delta_rel 0.01 -save_model_config f -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + +spec_fn smlp_toy_num_resp_mult_optsyn.spec +specs_path ../specs + +Running test 200 test type: optimize, description: basic test for et_sklearn with branched tree_encoding and model_per_response f in model exploration mode optimize adapts test 170 !!!!!!!!! in this test z3 result differs from mathsat and yices results (the latter two give sma results +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test200 -mode optimize -opt_strategy lazy -resp y1,y2 -feat x,p1,p2 -model et_sklearn -et_sklearn_max_depth 2 -et_sklearn_n_estimators 100 -et_sklearn_bootstrap f -tree_encoding branched -model_per_response f -compress_rules t -save_model f -use_model f -mrmr_pred 2 -spec ../specs/smlp_toy_num_resp_mult_optsyn.spec -epsilon 0.1 -delta_rel 0 -solver_path ../../../external/mathsat-5.6.8-linux-x86_64-reentrant/bin/mathsat -plots f -pred_plots f -resp_plots f -seed 10 -log_time f + + +spec_fn smlp_toy_num_resp_mult_y2_verify.spec +specs_path ../specs + +Running test 155 test type: verify, description: basic nn_keras assertion verification test that uses keras tuner with sequrntial models for model training +../../src/run_smlp.py -data "../data/smlp_toy_num_resp_mult.csv" -out_dir ./ -pref Test155 -mode verify -resp y2 -feat x,p1,p2 -model nn_keras -nnet_encoding nested -mrmr_pred 0 -plots f -pred_plots f -resp_plots f -seed 10 -log_time f -nn_keras_epochs 20 
-nn_keras_seq_api t -nn_keras_tuner hyperband -nn_keras_layers_grid "2,2;3,3,3" -save_model_config f -spec ../specs/smlp_toy_num_resp_mult_y2_verify.spec -asrt_names asrt1 -asrt_exprs "2*y2>1" -sw_coef 4 -sw_exp 5 -sw_int 0.5 -nn_keras_metrics mae + +Initiating 7 worker... +comparing Test1_smlp_toy_num_resp_mult_y1_dt_caret_tree_rules.txt to master +Passed! +comparing Test1_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test1_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +File master Test1_smlp_toy_num_resp_mult_eval_dt_caret_labeled-col-y1.png does not exist +File master Test1_smlp_toy_num_resp_mult_eval_dt_caret_test-col-y1.png does not exist +File master Test1_smlp_toy_num_resp_mult_eval_dt_caret_training-col-y1.png does not exist +comparing Test1_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test1_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test1_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test1_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test1_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +File master Test1_smlp_toy_num_resp_mult_resp-distr.png does not exist +comparing Test1_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test1_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +comparing Test1_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test1_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +File master Test2_smlp_toy_num_resp_mult_rf_sklearn_y1_tree_rules.txt does not exist +comparing Test2_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test2_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test2_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! 
+comparing Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +File master Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_eval_rf_sklearn_labeled-col-y1.png does not exist +File master Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_eval_rf_sklearn_new-col-y1.png does not exist +File master Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_eval_rf_sklearn_test-col-y1.png does not exist +File master Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_eval_rf_sklearn_training-col-y1.png does not exist +comparing Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +File master Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_resp-distr.png does not exist +comparing Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! +comparing Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test2_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! +comparing Test3_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_unlabeled.txt to master +Passed! 
+comparing Test3_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test3_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test3_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test3_smlp_toy_num_resp_mult_poly_sklearn_formula.txt to master +Passed! +comparing Test3_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_unlabeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test3_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_unlabeled_labeled_predictions_summary.csv to master +Passed! +comparing Test3_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_unlabeled_missing_values_dict.json to master +Passed! +comparing Test3_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_unlabeled_new_predictions_summary.csv to master +Passed! +comparing Test3_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_unlabeled_test_prediction_precisions.csv to master +Passed! +comparing Test3_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_unlabeled_test_predictions_summary.csv to master +Passed! +comparing Test3_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_unlabeled_training_prediction_precisions.csv to master +Passed! +comparing Test3_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_unlabeled_training_predictions_summary.csv to master +Passed! +comparing Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test4_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test4_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test4_smlp_toy_num_resp_mult_model_gen.json to master +Passed! +comparing Test4_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! 
+comparing Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! +File master Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y2_mse.png does not exist +comparing Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test4_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! +comparing Test5_smlp_toy_num_resp_mult_y1_dt_caret_tree_rules.txt to master +Passed! +comparing Test5_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test5_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test5_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! 
+comparing Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! +comparing Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! +comparing Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test6_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test6_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test6_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test6_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! 
+comparing Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! +comparing Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test6_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_rf_sklearn_tree_rules.txt to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test7_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! 
+comparing Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test8_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test8_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test8_smlp_toy_num_resp_mult_model_gen.json to master +Passed! +comparing Test8_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! +File master Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y1_mse.png does not exist +File master Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y2_mse.png does not exist +comparing Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! +comparing Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_args_config.json to master +Passed! 
+comparing Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! +comparing Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test9_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! +comparing test20_model_data_bounds.json to master +Passed! +comparing test20_model_dt_sklearn_tree_rules.txt to master +Passed! +comparing test20_model_model_features_dict.json to master +Passed! +comparing test20_model_model_levels_dict.json to master +Passed! +comparing test20_model_rerun_model_config.json to master +Passed! +comparing Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test10_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test10_smlp_toy_num_resp_mult_et_sklearn_tree_rules.txt to master +Passed! +comparing Test10_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test10_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! 
+comparing Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! +comparing Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test10_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! +comparing Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test11_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test11_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test11_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test11_smlp_toy_num_resp_mult_poly_sklearn_formula.txt to master +Passed! +comparing Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! 
+comparing Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! +comparing Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! +comparing Test12_smlp_toy_basic.txt to master +Passed! +comparing Test12_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test12_smlp_toy_basic_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test12_smlp_toy_basic_labeled_prediction_precisions.csv to master +Passed! +comparing Test12_smlp_toy_basic_labeled_predictions_summary.csv to master +Passed! +comparing Test12_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test12_smlp_toy_basic_model_levels_dict.json to master +Passed! +comparing Test12_smlp_toy_basic_test_prediction_precisions.csv to master +Passed! +comparing Test12_smlp_toy_basic_test_predictions_summary.csv to master +Passed! +comparing Test12_smlp_toy_basic_training_prediction_precisions.csv to master +Passed! +comparing Test12_smlp_toy_basic_training_predictions_summary.csv to master +Passed! +comparing Test13_smlp_toy_basic.txt to master +Passed! +comparing Test13_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test13_smlp_toy_basic_labeled_prediction_precisions.csv to master +Passed! +comparing Test13_smlp_toy_basic_labeled_predictions_summary.csv to master +Passed! +comparing Test13_smlp_toy_basic_model_features_dict.json to master +Passed! 
+comparing Test13_smlp_toy_basic_model_gen.json to master +Passed! +comparing Test13_smlp_toy_basic_model_levels_dict.json to master +Passed! +comparing Test13_smlp_toy_basic_test_prediction_precisions.csv to master +Passed! +comparing Test13_smlp_toy_basic_test_predictions_summary.csv to master +Passed! +File master Test13_smlp_toy_basic_train-reg_y1_mse.png does not exist +File master Test13_smlp_toy_basic_train-reg_y2_mse.png does not exist +comparing Test13_smlp_toy_basic_training_prediction_precisions.csv to master +Passed! +comparing Test13_smlp_toy_basic_training_predictions_summary.csv to master +Passed! +comparing Test14_smlp_toy_basic.txt to master +Passed! +comparing Test14_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test14_smlp_toy_basic_labeled_prediction_precisions.csv to master +Passed! +comparing Test14_smlp_toy_basic_labeled_predictions_summary.csv to master +Passed! +comparing Test14_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test14_smlp_toy_basic_model_levels_dict.json to master +Passed! +comparing Test14_smlp_toy_basic_poly_sklearn_formula.txt to master +Passed! +comparing Test14_smlp_toy_basic_test_prediction_precisions.csv to master +Passed! +comparing Test14_smlp_toy_basic_test_predictions_summary.csv to master +Passed! +comparing Test14_smlp_toy_basic_training_prediction_precisions.csv to master +Passed! +comparing Test14_smlp_toy_basic_training_predictions_summary.csv to master +Passed! +comparing Test15_Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test15_Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test15_Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test15_Test5_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! 
+comparing Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +comparing Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +File new Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv does not exist +File new Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv does not exist +comparing Test17_Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test17_Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test17_Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test17_Test11_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! 
+comparing Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test18_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! +comparing test19_model_data_bounds.json to master +Passed! +comparing test19_model_dt_sklearn_tree_rules.txt to master +Passed! +comparing test19_model_model_features_dict.json to master +Passed! +comparing test19_model_model_levels_dict.json to master +Passed! +comparing test19_model_rerun_model_config.json to master +Passed! +comparing Test19_test19_model_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test19_test19_model_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test19_test19_model_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test19_test19_model_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test20_test20_model_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test20_test20_model_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test20_test20_model_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test20_test20_model_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled.txt to master +Passed! 
+File master Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_eval_poly_sklearn_labeled-col-PF .png does not exist +File master Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_eval_poly_sklearn_labeled-col-|PF |.png does not exist +File master Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_eval_poly_sklearn_new-col-PF .png does not exist +File master Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_eval_poly_sklearn_new-col-|PF |.png does not exist +File master Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_eval_poly_sklearn_test-col-PF .png does not exist +File master Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_eval_poly_sklearn_test-col-|PF |.png does not exist +File master Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_eval_poly_sklearn_training-col-PF .png does not exist +File master Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_eval_poly_sklearn_training-col-|PF |.png does not exist +comparing Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_new_predictions_summary.csv to master +Passed! 
+File master Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_resp-distr.png does not exist +comparing Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_test_predictions_summary.csv to master +Passed! +comparing Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test21_smlp_toy_num_metasymbol_mult_reg_smlp_toy_num_metasymbol_mult_reg_pred_labeled_training_predictions_summary.csv to master +Passed! +comparing test22_model_data_bounds.json to master +Passed! +comparing test22_model_model_features_dict.json to master +Passed! +comparing test22_model_model_levels_dict.json to master +Passed! +comparing test22_model_poly_sklearn_formula.txt to master +Passed! +comparing test22_model_rerun_model_config.json to master +Passed! +comparing Test22_test22_model_smlp_toy_num_metasymbol_mult_reg_pred_labeled.txt to master +Passed! +File master Test22_test22_model_smlp_toy_num_metasymbol_mult_reg_pred_labeled_eval_poly_sklearn_new-col-PF .png does not exist +File master Test22_test22_model_smlp_toy_num_metasymbol_mult_reg_pred_labeled_eval_poly_sklearn_new-col-|PF |.png does not exist +comparing Test22_test22_model_smlp_toy_num_metasymbol_mult_reg_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test22_test22_model_smlp_toy_num_metasymbol_mult_reg_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test22_test22_model_smlp_toy_num_metasymbol_mult_reg_pred_labeled_new_predictions_summary.csv to master +Passed! 
+File master Test22_test22_model_smlp_toy_num_metasymbol_mult_reg_pred_labeled_resp-distr.png does not exist +File master test24_model_dt_sklearn_y1_tree_rules.txt does not exist +comparing Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! +comparing Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test23_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! +comparing test24_model_data_bounds.json to master +Passed! +File master test24_model_dt_sklearn_y2_tree_rules.txt does not exist +comparing test24_model_model_features_dict.json to master +Passed! +comparing test24_model_model_levels_dict.json to master +Passed! +comparing test24_model_rerun_model_config.json to master +Passed! +comparing Test24_test24_model_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test24_test24_model_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! 
+comparing Test24_test24_model_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test24_test24_model_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing test26_model_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! +comparing Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test25_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! +comparing test26_model_data_bounds.json to master +Passed! +comparing test26_model_model_features_dict.json to master +Passed! +comparing test26_model_model_levels_dict.json to master +Passed! +comparing test26_model_rerun_model_config.json to master +Passed! +comparing Test26_test26_model_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! 
+comparing Test26_test26_model_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test26_test26_model_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test26_test26_model_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test27_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test27_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test27_smlp_toy_num_resp_mult_model_gen.json to master +Passed! +comparing Test27_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! +File master Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y2_mse.png does not exist +comparing Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! 
+comparing Test27_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! +comparing Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test28_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test28_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test28_smlp_toy_num_resp_mult_model_gen.json to master +Passed! +comparing Test28_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv to master +Passed! +comparing Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv to master +Passed! +comparing Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv to master +Passed! +File master Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y2_mse.png does not exist +comparing Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv to master +Passed! +comparing Test28_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv to master +Passed! 
+comparing Test29_smlp_toy_cls_metasymbol_colnames_mult.txt to master +comparing Test29_smlp_toy_cls_metasymbol_colnames_mult_features_ranking.csv to master +comparing Test29_smlp_toy_cls_metasymbol_colnames_mult_missing_values_dict.json to master +Passed! +comparing Test29_smlp_toy_cls_metasymbol_colnames_mult_ranking_resp_feat.csv to master +comparing Test30_smlp_toy_num_resp_mult.txt to master +comparing Test30_smlp_toy_num_resp_mult_features_ranking.csv to master +comparing Test30_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test30_smlp_toy_num_resp_mult_ranking_resp_feat.csv to master +comparing Test31_smlp_toy_num_resp_mult.txt to master +comparing Test31_smlp_toy_num_resp_mult_args_config.json to master +Passed! +comparing Test31_smlp_toy_num_resp_mult_features_ranking.csv to master +comparing Test31_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test31_smlp_toy_num_resp_mult_ranking_resp_feat.csv to master +comparing Test32_test20_model_smlp_toy_num_resp_mult_pred_labeled.txt to master +Passed! +comparing Test32_test20_model_smlp_toy_num_resp_mult_pred_labeled_args_config.json to master +Passed! +comparing Test32_test20_model_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json to master +Passed! +comparing Test32_test20_model_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv to master +Passed! +comparing Test32_test20_model_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv to master +Passed! +comparing Test33_smlp_toy_num_resp_mult.txt to master +comparing Test33_smlp_toy_num_resp_mult_features_ranking.csv to master +comparing Test33_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test33_smlp_toy_num_resp_mult_ranking_resp_feat.csv to master +comparing Test34_doe_four_levels_real.txt to master +Passed! +comparing Test34_doe_four_levels_real_doe.csv to master +Passed! 
+comparing Test35_doe_four_levels_real.txt to master +Passed! +comparing Test35_doe_four_levels_real_doe.csv to master +Passed! +comparing Test36_doe_four_levels_real.txt to master +Passed! +comparing Test36_doe_four_levels_real_doe.csv to master +Passed! +comparing Test37_doe_three_levels_real_nan.txt to master +Passed! +comparing Test37_doe_three_levels_real_nan_doe.csv to master +Passed! +comparing Test38_doe_two_levels.txt to master +Passed! +comparing Test38_doe_two_levels_doe.csv to master +Passed! +comparing Test39_doe_two_levels.txt to master +Passed! +comparing Test39_doe_two_levels_doe.csv to master +Passed! +comparing Test40_doe_two_levels.txt to master +Passed! +comparing Test40_doe_two_levels_doe.csv to master +Passed! +comparing Test41_doe_two_levels.txt to master +Passed! +comparing Test42_doe_two_levels.txt to master +Passed! +comparing Test43_doe_two_levels.txt to master +Passed! +comparing Test43_doe_two_levels_doe.csv to master +Passed! +comparing Test44_doe_two_levels.txt to master +Passed! +comparing Test44_doe_two_levels_doe.csv to master +Passed! +comparing Test45_doe_two_levels_real.txt to master +Passed! +comparing Test45_doe_two_levels_real_doe.csv to master +Passed! +comparing Test46_smlp_toy_pf_mult_smlp_toy_pf_mult.txt to master +Passed! +comparing Test46_smlp_toy_pf_mult_smlp_toy_pf_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test46_smlp_toy_pf_mult_smlp_toy_pf_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test46_smlp_toy_pf_mult_smlp_toy_pf_mult_missing_values_dict.json to master +Passed! +comparing Test46_smlp_toy_pf_mult_smlp_toy_pf_mult_new_prediction_precisions.csv to master +Passed! +comparing Test46_smlp_toy_pf_mult_smlp_toy_pf_mult_new_predictions_summary.csv to master +Passed! +comparing Test46_smlp_toy_pf_mult_smlp_toy_pf_mult_test_prediction_precisions.csv to master +Passed! +comparing Test46_smlp_toy_pf_mult_smlp_toy_pf_mult_test_predictions_summary.csv to master +Passed! 
+comparing Test46_smlp_toy_pf_mult_smlp_toy_pf_mult_training_prediction_precisions.csv to master +Passed! +comparing Test46_smlp_toy_pf_mult_smlp_toy_pf_mult_training_predictions_summary.csv to master +Passed! +comparing test47_model_data_bounds.json to master +Passed! +comparing test47_model_model_features_dict.json to master +Passed! +comparing test47_model_model_levels_dict.json to master +Passed! +comparing test47_model_poly_sklearn_formula.txt to master +Passed! +comparing Test47_test47_model_smlp_toy_pf_mult.txt to master +Passed! +comparing Test47_test47_model_smlp_toy_pf_mult_missing_values_dict.json to master +Passed! +comparing Test47_test47_model_smlp_toy_pf_mult_new_prediction_precisions.csv to master +Passed! +comparing Test47_test47_model_smlp_toy_pf_mult_new_predictions_summary.csv to master +Passed! +comparing Test48_smlp_toy_mult_discr.txt to master +Passed! +comparing Test48_smlp_toy_mult_discr_missing_values_dict.json to master +Passed! +comparing Test49_smlp_toy_mult_discr.txt to master +Passed! +comparing Test49_smlp_toy_mult_discr_missing_values_dict.json to master +Passed! +comparing Test50_smlp_toy_mult_discr.txt to master +Passed! +comparing Test50_smlp_toy_mult_discr_missing_values_dict.json to master +Passed! +comparing Test51_smlp_toy_mult_discr.txt to master +Passed! +comparing Test51_smlp_toy_mult_discr_missing_values_dict.json to master +Passed! +comparing Test52_smlp_toy_mult_discr.txt to master +Passed! +comparing Test52_smlp_toy_mult_discr_missing_values_dict.json to master +Passed! +comparing Test53_smlp_toy_mult_discr.txt to master +Passed! +comparing Test53_smlp_toy_mult_discr_missing_values_dict.json to master +Passed! +comparing Test54_smlp_toy_mult_discr.txt to master +Passed! +comparing Test54_smlp_toy_mult_discr_missing_values_dict.json to master +Passed! +comparing Test55_smlp_toy_mult_discr.txt to master +Passed! +comparing Test55_smlp_toy_mult_discr_missing_values_dict.json to master +Passed! 
+comparing Test56_smlp_toy_mult_discr.txt to master +Passed! +comparing Test56_smlp_toy_mult_discr_missing_values_dict.json to master +Passed! +Test 57 Failed: +Error in Build stage: +Data file does not exist +comparing Test58_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test58_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test58_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test58_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test58_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test58_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test58_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test58_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test58_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test58_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! +comparing Test58_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +File master Test58_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +comparing Test58_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test58_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test58_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test58_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test58_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test59_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test59_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test59_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test59_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! 
+comparing Test59_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test59_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test59_smlp_toy_num_resp_mult_model_gen.json to master +Passed! +comparing Test59_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test59_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test59_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test59_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test59_smlp_toy_num_resp_mult_train-reg_y2_mse.png does not exist +comparing Test59_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test59_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test59_smlp_toy_num_resp_mult_verify_results.json to master +Passed! +File master Test59_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test59_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +comparing Test60_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test60_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test60_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test60_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test60_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test60_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test60_smlp_toy_num_resp_mult_model_gen.json to master +Passed! +comparing Test60_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test60_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test60_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! 
+File master Test60_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test60_smlp_toy_num_resp_mult_train-reg_y2_mse.png does not exist +comparing Test60_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test60_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test60_smlp_toy_num_resp_mult_verify_results.json to master +Passed! +File master Test60_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test60_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +Test 61 Failed: +Error in Build stage: +Data file does not exist +Test 62 Failed: +Error in Build stage: +Data file does not exist +File master test63_model_dt_sklearn_y1_tree_rules.txt does not exist +comparing Test63_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test63_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test63_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test63_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test63_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test63_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test63_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test63_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test63_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test63_smlp_toy_num_resp_mult_verify_results.json to master +Passed! +comparing test63_model_data_bounds.json to master +Passed! +comparing test63_model_model_features_dict.json to master +Passed! +comparing test63_model_model_levels_dict.json to master +Passed! +comparing test63_model_rerun_model_config.json to master +Passed! 
+File master test63_model_y1_smlp_full_model_term.json does not exist +File master test63_model_y1_smlp_model_term.json does not exist +comparing Test64_test63_model.txt to master +Passed! +File master Test64_test63_model_trace.csv does not exist +comparing Test64_test63_model_verify_results.json to master +Passed! +Test 65 Failed: +Error in Build stage: +Data file does not exist +comparing Test66_test65_model.txt to master +File new Test66_test65_model_verify_results.json does not exist +Test 67 Failed: +Error in Build stage: +Data file does not exist +comparing Test68_test67_model.txt to master +File new Test68_test67_model_verify_results.json does not exist +comparing Test69_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test69_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test69_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test69_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test69_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test69_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test69_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test69_smlp_toy_num_resp_mult_train-reg_y2_mse.png does not exist +comparing Test69_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test69_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test69_smlp_toy_num_resp_mult_verify_results.json to master +comparing test69_model_data_bounds.json to master +Passed! +comparing test69_model_model_features_dict.json to master +Passed! +comparing test69_model_model_gen.json to master +Passed! +comparing test69_model_model_levels_dict.json to master +Passed! +comparing test69_model_rerun_model_config.json to master +Passed! 
+File master test69_model_y2_smlp_full_model_term.json does not exist +File master test69_model_y2_smlp_model_term.json does not exist +comparing Test70_test69_model.txt to master +File master Test70_test69_model_trace.csv does not exist +File new Test70_test69_model_verify_results.json does not exist +Test 71 Failed: +Error in Build stage: +Data file does not exist +comparing Test72_test71_model.txt to master +File new Test72_test71_model_verify_results.json does not exist +Test 73 Failed: +Error in Build stage: +Data file does not exist +Error in Build stage: +New data file does not exist +Test 74 Failed: +Error in Build stage: +New data file does not exist +Test 75 Failed: +Error in Build stage: +New data file does not exist +Test 76 Failed: +Error in Build stage: +Data file does not exist +comparing Test77_test76_model.txt to master +File new Test77_test76_model_verify_results.json does not exist +Test 78 Failed: +Error in Build stage: +Data file does not exist +comparing Test79_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test79_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +File master Test79_smlp_toy_num_resp_mult_dt_sklearn_y1_tree_rules.txt does not exist +File master Test79_smlp_toy_num_resp_mult_dt_sklearn_y2_tree_rules.txt does not exist +comparing Test79_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test79_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test79_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test79_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test79_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test79_smlp_toy_num_resp_mult_query_results.json to master +Passed! +comparing Test79_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test79_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! 
+File master Test79_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test79_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test79_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +File master Test79_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test79_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test79_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test79_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +comparing Test80_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test80_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test80_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test80_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test80_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test80_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test80_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test80_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test80_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test80_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! +comparing Test80_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +File master Test80_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +comparing Test80_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test80_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test80_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test80_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! 
+comparing Test80_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test81_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test81_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test81_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test81_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test81_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test81_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test81_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test81_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test81_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test81_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! +comparing Test81_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +File master Test81_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +comparing Test81_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test81_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test81_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test81_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test81_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test82_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test82_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test82_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test82_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test82_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! 
+comparing Test82_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test82_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test82_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test82_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test82_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! +comparing Test82_smlp_toy_num_resp_mult_optimization_results.csv to master +Passed! +comparing Test82_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +File master Test82_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +comparing Test82_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test82_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test82_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test82_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test82_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test83_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test83_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test83_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test83_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test83_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test83_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test83_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test83_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test83_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test83_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! 
+comparing Test83_smlp_toy_num_resp_mult_optimization_results.csv to master +Passed! +comparing Test83_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +File master Test83_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +comparing Test83_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test83_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test83_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test83_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test83_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +Test 84 Failed: +Error in Build stage: +Data file does not exist +comparing Test85_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test85_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test85_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test85_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test85_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test85_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test85_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test85_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test85_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test85_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! +comparing Test85_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +File master Test85_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +comparing Test85_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test85_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! 
+File master Test85_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test85_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test85_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test86_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test86_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test86_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test86_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test86_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test86_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test86_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test86_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test86_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test86_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! +comparing Test86_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +File master Test86_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +comparing Test86_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test86_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test86_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test86_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test86_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test87_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test87_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test87_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! 
+comparing Test87_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test87_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test87_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test87_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test87_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test87_smlp_toy_num_resp_mult_rerun_model_config.json to master +Passed! +File master Test87_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +comparing Test87_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test87_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test87_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test87_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test87_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test87_smlp_toy_num_resp_mult_verify_results.json to master +Passed! +comparing Test88_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test88_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test88_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test88_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test88_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test88_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test88_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test88_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test88_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test88_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! 
+comparing Test88_smlp_toy_num_resp_mult_optimization_results.csv to master +Passed! +comparing Test88_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +File master Test88_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +comparing Test88_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test88_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test88_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test88_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test88_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +File master Test89_smlp_toy_num_resp_mult_dt_sklearn_y1_tree_rules.txt does not exist +comparing Test89_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test89_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +File master Test89_smlp_toy_num_resp_mult_dt_sklearn_y2_tree_rules.txt does not exist +comparing Test89_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test89_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test89_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test89_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test89_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test89_smlp_toy_num_resp_mult_query_results.json to master +Passed! +comparing Test89_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test89_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test89_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test89_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test89_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! 
+File master Test89_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test89_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test89_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test89_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test90_smlp_toy_num_resp_mult_dt_sklearn_y2_tree_rules.txt does not exist +comparing Test90_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test90_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +File master Test90_smlp_toy_num_resp_mult_dt_sklearn_y1_tree_rules.txt does not exist +comparing Test90_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test90_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test90_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test90_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test90_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +File master Test90_smlp_toy_num_resp_mult_optimization_results.json does not exist +comparing Test90_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test90_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test90_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test90_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test90_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! 
+File master Test90_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test90_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test90_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test90_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test91_smlp_toy_num_resp_mult_dt_sklearn_y1_tree_rules.txt does not exist +comparing Test91_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test91_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +File master Test91_smlp_toy_num_resp_mult_dt_sklearn_y2_tree_rules.txt does not exist +comparing Test91_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test91_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test91_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test91_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test91_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test91_smlp_toy_num_resp_mult_query_results.json to master +Passed! +comparing Test91_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test91_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test91_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test91_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test91_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test92_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test92_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test92_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test92_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! 
+comparing Test92_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test92_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test92_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test92_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test92_smlp_toy_num_resp_mult_rerun_model_config.json to master +Passed! +comparing Test92_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test92_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test92_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test92_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test92_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test92_smlp_toy_num_resp_mult_verify_results.json to master +Passed! +File master Test93_smlp_toy_num_resp_mult_dt_sklearn_y2_tree_rules.txt does not exist +comparing Test93_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test93_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +File master Test93_smlp_toy_num_resp_mult_dt_sklearn_y1_tree_rules.txt does not exist +comparing Test93_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test93_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test93_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test93_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test93_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test93_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test93_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! +comparing Test93_smlp_toy_num_resp_mult_optimization_results.csv to master +Passed! 
+comparing Test93_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +comparing Test93_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test93_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test93_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test93_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test93_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +File master Test93_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test93_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test93_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test93_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test94_smlp_toy_num_resp_mult_rf_sklearn_y2_tree_rules.txt does not exist +comparing Test94_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test94_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test94_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test94_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test94_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test94_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test94_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test94_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test94_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! +comparing Test94_smlp_toy_num_resp_mult_optimization_results.csv to master +Passed! +comparing Test94_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! 
+File master Test94_smlp_toy_num_resp_mult_rf_sklearn_y1_tree_rules.txt does not exist +comparing Test94_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test94_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test94_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test94_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test94_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +File master Test94_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test94_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test94_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test94_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +comparing Test95_smlp_toy_num_resp_mult_y2_dt_caret_tree_rules.txt to master +Passed! +comparing Test95_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test95_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test95_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test95_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test95_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test95_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test95_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test95_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test95_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! +comparing Test95_smlp_toy_num_resp_mult_optimization_results.csv to master +Passed! +comparing Test95_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +comparing Test95_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! 
+comparing Test95_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test95_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test95_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test95_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test95_smlp_toy_num_resp_mult_y1_dt_caret_tree_rules.txt to master +Passed! +File master Test95_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test95_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test95_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test95_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +comparing Test96_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_optimization_results.csv to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! 
+File master Test96_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test96_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test96_smlp_toy_num_resp_mult_y1_rf_caret_tree_rules.txt to master +Passed! +File master Test96_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test96_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +comparing Test96_smlp_toy_num_resp_mult_y2_rf_caret_tree_rules.txt to master +Passed! +File master Test96_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test96_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +comparing Test97_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test97_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +File master Test97_smlp_toy_num_resp_mult_et_sklearn_y1_tree_rules.txt does not exist +File master Test97_smlp_toy_num_resp_mult_et_sklearn_y2_tree_rules.txt does not exist +comparing Test97_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test97_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test97_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test97_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test97_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test97_smlp_toy_num_resp_mult_query_results.json to master +Passed! +comparing Test97_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test97_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test97_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test97_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! 
+comparing Test97_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +File master Test97_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test97_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test97_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test97_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +comparing Test98_smlp_toy_num_resp_mult_y2_et_caret_tree_rules.txt to master +Passed! +comparing Test98_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_optimization_results.csv to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test98_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test98_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test98_smlp_toy_num_resp_mult_y1_et_caret_tree_rules.txt to master +Passed! 
+File master Test98_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test98_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test98_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test98_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +comparing Test99_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test99_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test99_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test99_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test99_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test99_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test99_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test99_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test99_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test99_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! +comparing Test99_smlp_toy_num_resp_mult_optimization_results.csv to master +Passed! +comparing Test99_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +File master Test99_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +comparing Test99_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test99_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test99_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test99_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test99_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test100_smlp_toy_num_resp_mult.txt to master +Passed! 
+comparing Test100_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test100_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test100_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test100_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test100_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test100_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test100_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test100_smlp_toy_num_resp_mult_optimization_progress.csv to master +Passed! +comparing Test100_smlp_toy_num_resp_mult_optimization_progress.json to master +Passed! +comparing Test100_smlp_toy_num_resp_mult_optimization_results.csv to master +Passed! +comparing Test100_smlp_toy_num_resp_mult_optimization_results.json to master +Passed! +File master Test100_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +comparing Test100_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test100_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test100_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test100_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test100_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +File master test101_model_dt_sklearn_y2_tree_rules.txt does not exist +comparing Test101_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test101_smlp_toy_num_resp_mult_certify_results.json to master +Passed! +comparing Test101_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test101_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test101_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! 
+comparing Test101_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test101_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test101_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test101_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test101_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing test101_model_data_bounds.json to master +Passed! +File master test101_model_dt_sklearn_y1_tree_rules.txt does not exist +comparing test101_model_model_features_dict.json to master +Passed! +comparing test101_model_model_levels_dict.json to master +Passed! +comparing test101_model_rerun_model_config.json to master +Passed! +File master test101_model_y1_smlp_full_model_term.json does not exist +File master test101_model_y1_smlp_model_term.json does not exist +File master test101_model_y2_smlp_full_model_term.json does not exist +File master test101_model_y2_smlp_model_term.json does not exist +comparing Test102_test101_model.txt to master +Passed! +comparing Test102_test101_model_certify_results.json to master +Passed! +File master Test102_test101_model_trace.csv does not exist +comparing test103_model_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test103_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test103_smlp_toy_num_resp_mult_certify_results.json to master +Passed! +comparing Test103_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test103_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test103_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test103_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test103_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! 
+File master Test103_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test103_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test103_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing test103_model_data_bounds.json to master +Passed! +comparing test103_model_model_features_dict.json to master +Passed! +comparing test103_model_model_levels_dict.json to master +Passed! +comparing test103_model_rerun_model_config.json to master +Passed! +File master test103_model_smlp_full_model_term.json does not exist +comparing Test104_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test104_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test104_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test104_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test104_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test104_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test104_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test104_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test104_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test104_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test104_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test104_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test104_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test105_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test105_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test105_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! 
+comparing Test105_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test105_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test105_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test105_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test105_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +File master Test105_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +comparing Test105_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test105_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test105_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test105_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test105_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test105_smlp_toy_num_resp_mult_verify_results.json to master +Passed! +comparing Test106_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test106_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test106_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test106_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test106_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test106_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test106_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test106_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test106_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test106_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! 
+File master Test106_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test106_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test106_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +comparing Test106_smlp_toy_num_resp_mult_verify_results.json to master +Passed! +comparing Test107_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test107_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +comparing Test107_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test107_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test107_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test107_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test107_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test107_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test107_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test107_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test107_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test107_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test107_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +File master Test108_smlp_toy_num_resp_mult_dt_sklearn_y1_tree_rules.txt does not exist +comparing Test108_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test108_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +File master Test108_smlp_toy_num_resp_mult_dt_sklearn_y2_tree_rules.txt does not exist +comparing Test108_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test108_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! 
+comparing Test108_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test108_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test108_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test108_smlp_toy_num_resp_mult_synthesize_results.json to master +Passed! +comparing Test108_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test108_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test108_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test108_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test108_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +File master Test108_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test108_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test108_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test108_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test109_smlp_toy_num_resp_mult_dt_sklearn_y2_tree_rules.txt does not exist +comparing Test109_smlp_toy_num_resp_mult.txt to master +Passed! +comparing Test109_smlp_toy_num_resp_mult_data_bounds.json to master +Passed! +File master Test109_smlp_toy_num_resp_mult_dt_sklearn_y1_tree_rules.txt does not exist +comparing Test109_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv to master +Passed! +comparing Test109_smlp_toy_num_resp_mult_labeled_predictions_summary.csv to master +Passed! +comparing Test109_smlp_toy_num_resp_mult_missing_values_dict.json to master +Passed! +comparing Test109_smlp_toy_num_resp_mult_model_features_dict.json to master +Passed! +comparing Test109_smlp_toy_num_resp_mult_model_levels_dict.json to master +Passed! +comparing Test109_smlp_toy_num_resp_mult_synthesize_results.json to master +Passed! 
+comparing Test109_smlp_toy_num_resp_mult_test_prediction_precisions.csv to master +Passed! +comparing Test109_smlp_toy_num_resp_mult_test_predictions_summary.csv to master +Passed! +File master Test109_smlp_toy_num_resp_mult_trace.csv does not exist +comparing Test109_smlp_toy_num_resp_mult_training_prediction_precisions.csv to master +Passed! +comparing Test109_smlp_toy_num_resp_mult_training_predictions_summary.csv to master +Passed! +File master Test109_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test109_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test109_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test109_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +comparing Test110_smlp_toy_basic_smlp_toy_basic_pred_unlabeled.txt to master +Passed! +comparing Test110_smlp_toy_basic_smlp_toy_basic_pred_unlabeled_labeled_prediction_precisions.csv to master +Passed! +comparing Test110_smlp_toy_basic_smlp_toy_basic_pred_unlabeled_labeled_predictions_summary.csv to master +Passed! +comparing Test110_smlp_toy_basic_smlp_toy_basic_pred_unlabeled_new_predictions_summary.csv to master +Passed! +comparing Test110_smlp_toy_basic_smlp_toy_basic_pred_unlabeled_test_prediction_precisions.csv to master +Passed! +comparing Test110_smlp_toy_basic_smlp_toy_basic_pred_unlabeled_test_predictions_summary.csv to master +Passed! +comparing Test110_smlp_toy_basic_smlp_toy_basic_pred_unlabeled_training_prediction_precisions.csv to master +Passed! +comparing Test110_smlp_toy_basic_smlp_toy_basic_pred_unlabeled_training_predictions_summary.csv to master +Passed! +comparing test110_model_data_bounds.json to master +Passed! +comparing test110_model_model_features_dict.json to master +Passed! +comparing test110_model_model_levels_dict.json to master +Passed! +comparing test110_model_poly_sklearn_formula.txt to master +comparing test110_model_rerun_model_config.json to master +Passed! 
+comparing Test111_test110_model_smlp_toy_basic_pred_unlabeled.txt to master +Passed! +comparing Test111_test110_model_smlp_toy_basic_pred_unlabeled_new_predictions_summary.csv to master +Passed! +comparing Test112_test110_model_smlp_toy_basic_pred_unlabeled.txt to master +Passed! +comparing Test112_test110_model_smlp_toy_basic_pred_unlabeled_new_predictions_summary.csv to master +Passed! +comparing test113_model_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test113_smlp_toy_basic.txt to master +Passed! +comparing Test113_smlp_toy_basic_labeled_prediction_precisions.csv to master +Passed! +comparing Test113_smlp_toy_basic_labeled_predictions_summary.csv to master +Passed! +comparing Test113_smlp_toy_basic_optimization_progress.csv to master +Passed! +comparing Test113_smlp_toy_basic_optimization_progress.json to master +Passed! +comparing Test113_smlp_toy_basic_optimization_results.csv to master +Passed! +comparing Test113_smlp_toy_basic_optimization_results.json to master +Passed! +comparing Test113_smlp_toy_basic_test_prediction_precisions.csv to master +Passed! +comparing Test113_smlp_toy_basic_test_predictions_summary.csv to master +Passed! +File master Test113_smlp_toy_basic_trace.csv does not exist +comparing Test113_smlp_toy_basic_training_prediction_precisions.csv to master +Passed! +comparing Test113_smlp_toy_basic_training_predictions_summary.csv to master +Passed! +comparing test113_model_data_bounds.json to master +Passed! +comparing test113_model_model_features_dict.json to master +Passed! +comparing test113_model_model_levels_dict.json to master +Passed! +comparing test113_model_rerun_model_config.json to master +Passed! +File master test113_model_smlp_full_model_term.json does not exist +comparing Test114_smlp_toy_basic_dt_sklearn_tree_rules.txt to master +Passed! +comparing Test114_smlp_toy_basic.txt to master +Passed! +comparing Test114_smlp_toy_basic_data_bounds.json to master +Passed! 
+comparing Test114_smlp_toy_basic_labeled_prediction_precisions.csv to master +Passed! +comparing Test114_smlp_toy_basic_labeled_predictions_summary.csv to master +Passed! +comparing Test114_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test114_smlp_toy_basic_model_levels_dict.json to master +Passed! +comparing Test114_smlp_toy_basic_optimization_progress.csv to master +Passed! +comparing Test114_smlp_toy_basic_optimization_progress.json to master +Passed! +comparing Test114_smlp_toy_basic_optimization_results.csv to master +Passed! +comparing Test114_smlp_toy_basic_optimization_results.json to master +Passed! +File master Test114_smlp_toy_basic_smlp_full_model_term.json does not exist +comparing Test114_smlp_toy_basic_test_prediction_precisions.csv to master +Passed! +comparing Test114_smlp_toy_basic_test_predictions_summary.csv to master +Passed! +File master Test114_smlp_toy_basic_trace.csv does not exist +comparing Test114_smlp_toy_basic_training_prediction_precisions.csv to master +Passed! +comparing Test114_smlp_toy_basic_training_predictions_summary.csv to master +Passed! +File master Test115_smlp_toy_basic_dt_sklearn_y2_tree_rules.txt does not exist +comparing Test115_smlp_toy_basic.txt to master +Passed! +comparing Test115_smlp_toy_basic_certify_results.json to master +Passed! +comparing Test115_smlp_toy_basic_data_bounds.json to master +Passed! +File master Test115_smlp_toy_basic_dt_sklearn_y1_tree_rules.txt does not exist +comparing Test115_smlp_toy_basic_labeled_prediction_precisions.csv to master +Passed! +comparing Test115_smlp_toy_basic_labeled_predictions_summary.csv to master +Passed! +comparing Test115_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test115_smlp_toy_basic_model_levels_dict.json to master +Passed! +comparing Test115_smlp_toy_basic_test_prediction_precisions.csv to master +Passed! +comparing Test115_smlp_toy_basic_test_predictions_summary.csv to master +Passed! 
+File master Test115_smlp_toy_basic_trace.csv does not exist +comparing Test115_smlp_toy_basic_training_prediction_precisions.csv to master +Passed! +comparing Test115_smlp_toy_basic_training_predictions_summary.csv to master +Passed! +File master Test115_smlp_toy_basic_y1_smlp_full_model_term.json does not exist +File master Test115_smlp_toy_basic_y1_smlp_model_term.json does not exist +File master Test115_smlp_toy_basic_y2_smlp_full_model_term.json does not exist +File master Test115_smlp_toy_basic_y2_smlp_model_term.json does not exist +comparing Test116_smlp_toy_basic.txt to master +Passed! +comparing Test116_smlp_toy_basic_certify_results.json to master +Passed! +comparing Test116_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test116_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test116_smlp_toy_basic_model_levels_dict.json to master +Passed! +File master Test116_smlp_toy_basic_trace.csv does not exist +comparing Test117_smlp_toy_basic.txt to master +Passed! +comparing Test117_smlp_toy_basic_certify_results.json to master +Passed! +comparing Test117_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test117_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test117_smlp_toy_basic_model_levels_dict.json to master +Passed! +File master Test117_smlp_toy_basic_trace.csv does not exist +comparing Test118_smlp_toy_basic.txt to master +Passed! +comparing Test118_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test118_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test118_smlp_toy_basic_model_levels_dict.json to master +Passed! +File master Test118_smlp_toy_basic_trace.csv does not exist +comparing Test118_smlp_toy_basic_verify_results.json to master +Passed! +comparing Test119_smlp_toy_basic.txt to master +Passed! +comparing Test119_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test119_smlp_toy_basic_model_features_dict.json to master +Passed! 
+comparing Test119_smlp_toy_basic_model_levels_dict.json to master +Passed! +comparing Test119_smlp_toy_basic_query_results.json to master +Passed! +File master Test119_smlp_toy_basic_trace.csv does not exist +comparing Test120_smlp_toy_basic.txt to master +Passed! +comparing Test120_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test120_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test120_smlp_toy_basic_model_levels_dict.json to master +Passed! +comparing Test120_smlp_toy_basic_synthesize_results.json to master +Passed! +File master Test120_smlp_toy_basic_trace.csv does not exist +comparing Test121_smlp_toy_basic.txt to master +Passed! +comparing Test121_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test121_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test121_smlp_toy_basic_model_levels_dict.json to master +Passed! +comparing Test121_smlp_toy_basic_synthesize_results.json to master +Passed! +File master Test121_smlp_toy_basic_trace.csv does not exist +comparing Test122_smlp_toy_basic.txt to master +Passed! +comparing Test122_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test122_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test122_smlp_toy_basic_model_levels_dict.json to master +Passed! +comparing Test122_smlp_toy_basic_optimization_progress.csv to master +Passed! +comparing Test122_smlp_toy_basic_optimization_progress.json to master +Passed! +comparing Test122_smlp_toy_basic_optimization_results.json to master +Passed! +File master Test122_smlp_toy_basic_trace.csv does not exist +comparing Test123_smlp_toy_basic.txt to master +Passed! +comparing Test123_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test123_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test123_smlp_toy_basic_model_levels_dict.json to master +Passed! +comparing Test123_smlp_toy_basic_optimization_progress.csv to master +Passed! 
+comparing Test123_smlp_toy_basic_optimization_progress.json to master +Passed! +comparing Test123_smlp_toy_basic_optimization_results.csv to master +Passed! +comparing Test123_smlp_toy_basic_optimization_results.json to master +Passed! +File master Test123_smlp_toy_basic_sampling_prediction_precisions.csv does not exist +File master Test123_smlp_toy_basic_sampling_predictions_summary.csv does not exist +File master Test123_smlp_toy_basic_trace.csv does not exist +comparing Test124_smlp_toy_basic.txt to master +Passed! +comparing Test124_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test124_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test124_smlp_toy_basic_model_levels_dict.json to master +Passed! +File master Test124_smlp_toy_basic_optimization_results.json does not exist +File master Test124_smlp_toy_basic_trace.csv does not exist +comparing Test125_smlp_toy_basic.txt to master +Passed! +comparing Test125_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test125_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test125_smlp_toy_basic_model_levels_dict.json to master +Passed! +comparing Test125_smlp_toy_basic_optimization_progress.csv to master +Passed! +comparing Test125_smlp_toy_basic_optimization_progress.json to master +Passed! +comparing Test125_smlp_toy_basic_optimization_results.csv to master +Passed! +comparing Test125_smlp_toy_basic_optimization_results.json to master +Passed! +File master Test125_smlp_toy_basic_trace.csv does not exist +comparing Test126_smlp_toy_basic.txt to master +Passed! +comparing Test126_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test126_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test126_smlp_toy_basic_model_levels_dict.json to master +Passed! +File master Test126_smlp_toy_basic_trace.csv does not exist +comparing Test126_smlp_toy_basic_verify_results.json to master +Passed! 
+comparing Test127_smlp_toy_basic.txt to master +Passed! +comparing Test127_smlp_toy_basic_certify_results.json to master +Passed! +comparing Test127_smlp_toy_basic_data_bounds.json to master +Passed! +comparing Test127_smlp_toy_basic_model_features_dict.json to master +Passed! +comparing Test127_smlp_toy_basic_model_levels_dict.json to master +Passed! +File master Test127_smlp_toy_basic_trace.csv does not exist +comparing Test128_smlp_toy_ctg_num_resp.txt to master +Passed! +comparing Test128_smlp_toy_ctg_num_resp_certify_results.json to master +Passed! +comparing Test128_smlp_toy_ctg_num_resp_data_bounds.json to master +Passed! +comparing Test128_smlp_toy_ctg_num_resp_labeled_prediction_precisions.csv to master +Passed! +comparing Test128_smlp_toy_ctg_num_resp_labeled_predictions_summary.csv to master +Passed! +comparing Test128_smlp_toy_ctg_num_resp_missing_values_dict.json to master +Passed! +comparing Test128_smlp_toy_ctg_num_resp_model_features_dict.json to master +Passed! +comparing Test128_smlp_toy_ctg_num_resp_model_levels_dict.json to master +Passed! +comparing Test128_smlp_toy_ctg_num_resp_poly_sklearn_formula.txt to master +Passed! +File master Test128_smlp_toy_ctg_num_resp_smlp_full_model_term.json does not exist +comparing Test128_smlp_toy_ctg_num_resp_test_prediction_precisions.csv to master +Passed! +comparing Test128_smlp_toy_ctg_num_resp_test_predictions_summary.csv to master +Passed! +File master Test128_smlp_toy_ctg_num_resp_trace.csv does not exist +comparing Test128_smlp_toy_ctg_num_resp_training_prediction_precisions.csv to master +Passed! +comparing Test128_smlp_toy_ctg_num_resp_training_predictions_summary.csv to master +Passed! +comparing Test129_smlp_toy_ctg_num_resp.txt to master +Passed! +comparing Test129_smlp_toy_ctg_num_resp_data_bounds.json to master +Passed! +comparing Test129_smlp_toy_ctg_num_resp_labeled_prediction_precisions.csv to master +Passed! 
+comparing Test129_smlp_toy_ctg_num_resp_labeled_predictions_summary.csv to master +Passed! +comparing Test129_smlp_toy_ctg_num_resp_missing_values_dict.json to master +Passed! +comparing Test129_smlp_toy_ctg_num_resp_model_features_dict.json to master +Passed! +comparing Test129_smlp_toy_ctg_num_resp_model_levels_dict.json to master +Passed! +comparing Test129_smlp_toy_ctg_num_resp_poly_sklearn_formula.txt to master +Passed! +File master Test129_smlp_toy_ctg_num_resp_smlp_full_model_term.json does not exist +comparing Test129_smlp_toy_ctg_num_resp_test_prediction_precisions.csv to master +Passed! +comparing Test129_smlp_toy_ctg_num_resp_test_predictions_summary.csv to master +Passed! +File master Test129_smlp_toy_ctg_num_resp_trace.csv does not exist +comparing Test129_smlp_toy_ctg_num_resp_training_prediction_precisions.csv to master +Passed! +comparing Test129_smlp_toy_ctg_num_resp_training_predictions_summary.csv to master +Passed! +comparing Test129_smlp_toy_ctg_num_resp_verify_results.json to master +Passed! 
+Test 130 Failed: +Error in Build stage: +Data file does not exist +Test 131 Failed: +Error in Build stage: +Data file does not exist +Test 132 Failed: +Error in Build stage: +Data file does not exist +Test 133 Failed: +Error in Build stage: +Data file does not exist +Test 134 Failed: +Error in Build stage: +Data file does not exist +Test 135 Failed: +Error in Build stage: +Data file does not exist +Test 136 Failed: +Error in Build stage: +Data file does not exist +Test 137 Failed: +Error in Build stage: +Data file does not exist +Error in Build stage: +New data file does not exist +Test 138 Failed: +Error in Build stage: +Data file does not exist +Test 139 Failed: +Error in Build stage: +Data file does not exist +Error in Build stage: +New data file does not exist +File master Test140_smlp_toy_basic.txt does not exist +File master Test140_smlp_toy_basic_data_bounds.json does not exist +File master Test140_smlp_toy_basic_features_scaler.pkl does not exist +File master Test140_smlp_toy_basic_model_features_dict.json does not exist +File master Test140_smlp_toy_basic_model_levels_dict.json does not exist +File master Test140_smlp_toy_basic_responses_scaler.pkl does not exist +File master Test140_smlp_toy_basic_trace.csv does not exist +File master Test140_smlp_toy_basic_verify_results.json does not exist +File master Test141_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt does not exist +File master Test141_smlp_toy_num_resp_mult.txt does not exist +File master Test141_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test141_smlp_toy_num_resp_mult_dt_sklearn_model_complete.pkl does not exist +File master Test141_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test141_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test141_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test141_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master 
Test141_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test141_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test141_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test141_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test141_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test141_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test141_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test141_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test141_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test141_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test141_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test141_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test141_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test142_smlp_toy_num_resp_mult_rf_sklearn_y1_tree_rules.txt does not exist +File master Test142_smlp_toy_num_resp_mult.txt does not exist +File master Test142_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test142_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test142_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test142_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test142_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test142_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test142_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test142_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test142_smlp_toy_num_resp_mult_optimization_progress.json does not exist 
+File master Test142_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test142_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test142_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test142_smlp_toy_num_resp_mult_rf_sklearn_y2_tree_rules.txt does not exist +File master Test142_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test142_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test142_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test142_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test142_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test142_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test142_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test142_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test142_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test143_smlp_toy_num_resp_mult_et_sklearn_y2_tree_rules.txt does not exist +File master Test143_smlp_toy_num_resp_mult.txt does not exist +File master Test143_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test143_smlp_toy_num_resp_mult_et_sklearn_y1_tree_rules.txt does not exist +File master Test143_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test143_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test143_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test143_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test143_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test143_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master 
Test143_smlp_toy_num_resp_mult_query_results.json does not exist +File master Test143_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test143_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test143_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test143_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test143_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test143_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test143_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test143_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test143_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test143_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +Test 144 Failed: +Error in Build stage: +Data file does not exist +File master Test145_doe_two_levels_opt.txt does not exist +File master Test145_doe_two_levels_opt_trace.csv does not exist +File master Test146_explore_doe_two_levels.txt does not exist +File master Test146_explore_doe_two_levels_trace.csv does not exist +File master Test147_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt does not exist +File master Test147_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test147_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test147_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test147_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test147_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test147_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test147_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does not exist +File master Test147_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master 
Test147_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv does not exist +File master Test147_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv does not exist +File master Test147_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json does not exist +File master Test147_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv does not exist +File master Test147_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv does not exist +File master Test147_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv does not exist +File master Test147_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv does not exist +File master Test147_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y2_mse.png does not exist +File master Test147_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv does not exist +File master Test147_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv does not exist +File master Test148_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt does not exist +File master Test148_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test148_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test148_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test148_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test148_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test148_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test148_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does not exist +File master Test148_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master 
Test148_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv does not exist +File master Test148_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv does not exist +File master Test148_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json does not exist +File master Test148_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv does not exist +File master Test148_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv does not exist +File master Test148_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv does not exist +File master Test148_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv does not exist +File master Test148_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_all_responses_mse.png does not exist +File master Test148_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv does not exist +File master Test148_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv does not exist +File master Test149_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt does not exist +File master Test149_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test149_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test149_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test149_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test149_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test149_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test149_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does not exist +File master Test149_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master 
Test149_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv does not exist +File master Test149_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv does not exist +File master Test149_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json does not exist +File master Test149_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv does not exist +File master Test149_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv does not exist +File master Test149_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv does not exist +File master Test149_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv does not exist +File master Test149_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y2_mse.png does not exist +File master Test149_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv does not exist +File master Test149_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv does not exist +File master Test150_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt does not exist +File master Test150_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test150_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test150_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test150_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test150_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test150_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test150_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does not exist +File master Test150_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master 
Test150_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv does not exist +File master Test150_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv does not exist +File master Test150_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json does not exist +File master Test150_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv does not exist +File master Test150_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv does not exist +File master Test150_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv does not exist +File master Test150_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv does not exist +File master Test150_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y1_mse.png does not exist +File master Test150_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y2_mse.png does not exist +File master Test150_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv does not exist +File master Test150_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv does not exist +File master Test151_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt does not exist +File master Test151_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test151_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test151_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test151_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test151_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test151_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test151_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does 
not exist +File master Test151_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test151_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv does not exist +File master Test151_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv does not exist +File master Test151_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json does not exist +File master Test151_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv does not exist +File master Test151_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv does not exist +File master Test151_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv does not exist +File master Test151_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv does not exist +File master Test151_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y2_mse.png does not exist +File master Test151_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv does not exist +File master Test151_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv does not exist +File master Test152_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt does not exist +File master Test152_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test152_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test152_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test152_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test152_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test152_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test152_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does not exist 
+File master Test152_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test152_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv does not exist +File master Test152_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv does not exist +File master Test152_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json does not exist +File master Test152_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv does not exist +File master Test152_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv does not exist +File master Test152_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv does not exist +File master Test152_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv does not exist +File master Test152_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y1_mse.png does not exist +File master Test152_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y2_mse.png does not exist +File master Test152_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv does not exist +File master Test152_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv does not exist +File master Test153_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt does not exist +File master Test153_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test153_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test153_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test153_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test153_smlp_toy_num_resp_mult_model_gen.json does not exist +File master 
Test153_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test153_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does not exist +File master Test153_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test153_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv does not exist +File master Test153_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv does not exist +File master Test153_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json does not exist +File master Test153_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv does not exist +File master Test153_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv does not exist +File master Test153_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv does not exist +File master Test153_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv does not exist +File master Test153_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_all_responses_mse.png does not exist +File master Test153_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv does not exist +File master Test153_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv does not exist +File master Test154_smlp_toy_num_resp_mult.txt does not exist +File master Test154_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test154_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test154_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test154_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test154_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master 
Test154_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test154_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test154_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does not exist +File master Test154_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test154_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test154_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test154_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test154_smlp_toy_num_resp_mult_train-reg_y2_mse.png does not exist +File master Test154_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test154_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test154_smlp_toy_num_resp_mult_verify_results.json does not exist +File master Test154_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test154_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test155_smlp_toy_num_resp_mult.txt does not exist +File master Test155_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test155_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test155_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test155_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test155_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test155_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test155_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test155_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does not exist +File master Test155_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test155_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File 
master Test155_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test155_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test155_smlp_toy_num_resp_mult_train-reg_y2_mae.png does not exist +File master Test155_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test155_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test155_smlp_toy_num_resp_mult_verify_results.json does not exist +File master Test155_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test155_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test156_smlp_toy_num_resp_mult.txt does not exist +File master Test156_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test156_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test156_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test156_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test156_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test156_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test156_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test156_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does not exist +File master Test156_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test156_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test156_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test156_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test156_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test156_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test156_smlp_toy_num_resp_mult_train-reg_y1_rmse.png does not exist +File master 
Test156_smlp_toy_num_resp_mult_train-reg_y2_rmse.png does not exist +File master Test156_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test156_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test156_smlp_toy_num_resp_mult_verify_results.json does not exist +File master Test157_smlp_toy_num_resp_mult.txt does not exist +File master Test157_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test157_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test157_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test157_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test157_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test157_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test157_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test157_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does not exist +File master Test157_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test157_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test157_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test157_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test157_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test157_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test157_smlp_toy_num_resp_mult_train-reg_all_responses_logcosh.png does not exist +File master Test157_smlp_toy_num_resp_mult_train-reg_all_responses_rmse.png does not exist +File master Test157_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test157_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test157_smlp_toy_num_resp_mult_verify_results.json 
does not exist +File master Test158_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt does not exist +File master Test158_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test158_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test158_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test158_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test158_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test158_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test158_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test158_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv does not exist +File master Test158_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv does not exist +File master Test158_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json does not exist +File master Test158_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv does not exist +File master Test158_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv does not exist +File master Test158_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv does not exist +File master Test158_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv does not exist +File master Test158_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y1_rmse.png does not exist +File master Test158_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y2_rmse.png does not exist +File master Test158_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv does not exist +File master 
Test158_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv does not exist +File master Test158_smlp_toy_num_resp_mult_y1_nn_keras_model_complete.h5 does not exist +File master Test158_smlp_toy_num_resp_mult_y2_nn_keras_model_complete.h5 does not exist +File master Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt does not exist +File master Test159_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test159_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test159_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test159_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test159_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test159_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test159_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv does not exist +File master Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv does not exist +File master Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json does not exist +File master Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv does not exist +File master Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv does not exist +File master Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv does not exist +File master Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv does not exist +File master Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y1_cosine.png does not exist +File master 
Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y1_mae.png does not exist +File master Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y2_cosine.png does not exist +File master Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_y2_mae.png does not exist +File master Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv does not exist +File master Test159_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv does not exist +File master Test159_smlp_toy_num_resp_mult_y1_nn_keras_model_complete.h5 does not exist +File master Test159_smlp_toy_num_resp_mult_y2_nn_keras_model_complete.h5 does not exist +File master Test160_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt does not exist +File master Test160_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test160_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test160_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test160_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test160_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test160_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json does not exist +File master Test161_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt does not exist +File master Test161_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test161_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test161_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test161_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test161_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does not exist +File master Test161_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master 
Test161_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_prediction_precisions.csv does not exist +File master Test161_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_labeled_predictions_summary.csv does not exist +File master Test161_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_missing_values_dict.json does not exist +File master Test161_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv does not exist +File master Test161_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv does not exist +File master Test161_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_prediction_precisions.csv does not exist +File master Test161_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_test_predictions_summary.csv does not exist +File master Test161_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_all_responses_logcosh.png does not exist +File master Test161_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_train-reg_all_responses_mape.png does not exist +File master Test161_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_prediction_precisions.csv does not exist +File master Test161_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_training_predictions_summary.csv does not exist +Test 162 Failed: +Error in Build stage: +Data file does not exist +Error in Build stage: +New data file does not exist +Test 163 Failed: +Error in Build stage: +Data file does not exist +Error in Build stage: +New data file does not exist +File master Test164_smlp_toy_num_resp_mult.txt does not exist +File master Test164_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test164_smlp_toy_num_resp_mult_dt_sklearn_model_complete.pkl does not exist +File master Test164_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt does not exist +File master Test164_smlp_toy_num_resp_mult_features_scaler.pkl does 
not exist +File master Test164_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test164_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test164_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test164_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test164_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test164_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test164_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test164_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test164_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test164_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test164_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test164_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test164_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test164_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test164_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test164_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test164_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test165_smlp_toy_num_resp_mult_y2_dt_caret_tree_rules.txt does not exist +File master Test165_smlp_toy_num_resp_mult.txt does not exist +File master Test165_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test165_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test165_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test165_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master 
Test165_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test165_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test165_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test165_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test165_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test165_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test165_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test165_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test165_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test165_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test165_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test165_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test165_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test165_smlp_toy_num_resp_mult_y1_dt_caret_tree_rules.txt does not exist +File master Test165_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test165_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test165_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test165_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test166_smlp_toy_num_resp_mult_rf_sklearn_tree_rules.txt does not exist +File master Test166_smlp_toy_num_resp_mult.txt does not exist +File master Test166_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test166_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test166_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test166_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not 
exist +File master Test166_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test166_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test166_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test166_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test166_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test166_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test166_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test166_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test166_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test166_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test166_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test166_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test166_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test166_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test166_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test167_smlp_toy_num_resp_mult_rf_sklearn_y1_tree_rules.txt does not exist +File master Test167_smlp_toy_num_resp_mult.txt does not exist +File master Test167_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test167_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test167_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test167_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test167_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test167_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test167_smlp_toy_num_resp_mult_model_levels_dict.json 
does not exist +File master Test167_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test167_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test167_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test167_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test167_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test167_smlp_toy_num_resp_mult_rf_sklearn_y2_tree_rules.txt does not exist +File master Test167_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test167_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test167_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test167_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test167_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test167_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test167_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test167_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test167_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test168_smlp_toy_num_resp_mult_y2_rf_caret_tree_rules.txt does not exist +File master Test168_smlp_toy_num_resp_mult.txt does not exist +File master Test168_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test168_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test168_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test168_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test168_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test168_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master 
Test168_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test168_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test168_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test168_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test168_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test168_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test168_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test168_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test168_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test168_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test168_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test168_smlp_toy_num_resp_mult_y1_rf_caret_tree_rules.txt does not exist +File master Test168_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test168_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test168_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test168_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test169_smlp_toy_num_resp_mult_et_sklearn_y2_tree_rules.txt does not exist +File master Test169_smlp_toy_num_resp_mult.txt does not exist +File master Test169_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test169_smlp_toy_num_resp_mult_et_sklearn_y1_tree_rules.txt does not exist +File master Test169_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test169_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test169_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test169_smlp_toy_num_resp_mult_missing_values_dict.json 
does not exist +File master Test169_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test169_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test169_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test169_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test169_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test169_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test169_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test169_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test169_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test169_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test169_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test169_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test169_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test169_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test169_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test169_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test170_smlp_toy_num_resp_mult.txt does not exist +File master Test170_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test170_smlp_toy_num_resp_mult_et_sklearn_tree_rules.txt does not exist +File master Test170_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test170_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test170_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test170_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master 
Test170_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test170_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test170_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test170_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test170_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test170_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test170_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test170_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test170_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test170_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test170_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test170_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test170_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test170_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test171_smlp_toy_num_resp_mult_y2_et_caret_tree_rules.txt does not exist +File master Test171_smlp_toy_num_resp_mult.txt does not exist +File master Test171_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test171_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test171_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test171_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test171_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test171_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test171_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test171_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File 
master Test171_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test171_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test171_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test171_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test171_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test171_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test171_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test171_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test171_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test171_smlp_toy_num_resp_mult_y1_et_caret_tree_rules.txt does not exist +File master Test171_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test171_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test171_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test171_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test172_smlp_toy_num_resp_mult.txt does not exist +File master Test172_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test172_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test172_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test172_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test172_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test172_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test172_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test172_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does not exist +File master Test172_smlp_toy_num_resp_mult_responses_scaler.pkl does 
not exist +File master Test172_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test172_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test172_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test172_smlp_toy_num_resp_mult_train-reg_y2_mse.png does not exist +File master Test172_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test172_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test172_smlp_toy_num_resp_mult_verify_results.json does not exist +File master Test172_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test172_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test173_smlp_toy_num_resp_mult.txt does not exist +File master Test173_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test173_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test173_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test173_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test173_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test173_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test173_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test173_smlp_toy_num_resp_mult_nn_keras_model_complete.h5 does not exist +File master Test173_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test173_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test173_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test173_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test173_smlp_toy_num_resp_mult_train-reg_y2_mae.png does not exist +File master Test173_smlp_toy_num_resp_mult_training_prediction_precisions.csv 
does not exist +File master Test173_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test173_smlp_toy_num_resp_mult_verify_results.json does not exist +File master Test173_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test173_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test174_smlp_toy_num_resp_mult.txt does not exist +File master Test174_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test174_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test174_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test174_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test174_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test174_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test174_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test174_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test174_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test174_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test174_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test174_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test174_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test174_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test174_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test174_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test174_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test174_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test174_smlp_toy_num_resp_mult_trace.csv does not exist +File 
master Test174_smlp_toy_num_resp_mult_train-reg_y1_mse.png does not exist +File master Test174_smlp_toy_num_resp_mult_train-reg_y2_mse.png does not exist +File master Test174_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test174_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test175_smlp_toy_num_resp_mult.txt does not exist +File master Test175_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test175_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test175_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test175_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test175_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test175_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test175_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test175_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test175_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test175_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test175_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test175_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test175_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test175_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test175_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test175_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test175_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test175_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test175_smlp_toy_num_resp_mult_trace.csv does not exist +File master 
Test175_smlp_toy_num_resp_mult_train-reg_all_responses_mse.png does not exist +File master Test175_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test175_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test176_smlp_toy_num_resp_mult.txt does not exist +File master Test176_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test176_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test176_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test176_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test176_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test176_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test176_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test176_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test176_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test176_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test176_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test176_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test176_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test176_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test176_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test176_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test176_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test176_smlp_toy_num_resp_mult_train-reg_y1_mse.png does not exist +File master Test176_smlp_toy_num_resp_mult_train-reg_y2_mse.png does not exist +File master Test176_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File 
master Test176_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test176_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test176_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test176_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test176_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test177_smlp_toy_num_resp_mult.txt does not exist +File master Test177_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test177_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test177_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test177_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test177_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test177_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test177_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test177_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test177_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test177_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test177_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test177_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test177_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test177_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test177_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test177_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test177_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test177_smlp_toy_num_resp_mult_train-reg_y1_mse.png does not exist +File master 
Test177_smlp_toy_num_resp_mult_train-reg_y2_mse.png does not exist +File master Test177_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test177_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test177_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test177_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test177_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test177_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test178_smlp_toy_num_resp_mult.txt does not exist +File master Test178_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test178_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test178_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test178_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test178_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test178_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test178_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test178_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test178_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test178_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test178_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test178_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test178_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test178_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test178_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test178_smlp_toy_num_resp_mult_trace.csv does not exist +File 
master Test178_smlp_toy_num_resp_mult_train-reg_y1_mse.png does not exist +File master Test178_smlp_toy_num_resp_mult_train-reg_y2_mse.png does not exist +File master Test178_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test178_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test178_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test178_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test178_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test178_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test179_smlp_toy_num_resp_mult.txt does not exist +File master Test179_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test179_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test179_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test179_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test179_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test179_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test179_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test179_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test179_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test179_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test179_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test179_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test179_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test179_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test179_smlp_toy_num_resp_mult_trace.csv does not exist +File master 
Test179_smlp_toy_num_resp_mult_train-reg_y1_mse.png does not exist +File master Test179_smlp_toy_num_resp_mult_train-reg_y2_mse.png does not exist +File master Test179_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test179_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test180_smlp_toy_num_resp_mult.txt does not exist +File master Test180_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test180_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test180_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test180_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test180_smlp_toy_num_resp_mult_model_checkpoint.h5 does not exist +File master Test180_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test180_smlp_toy_num_resp_mult_model_gen.json does not exist +File master Test180_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test180_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test180_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test180_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test180_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test180_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test180_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test180_smlp_toy_num_resp_mult_train-reg_all_responses_mse.png does not exist +File master Test180_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test180_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test181_smlp_toy_num_resp_mult.txt does not exist +File master Test181_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master 
Test181_smlp_toy_num_resp_mult_dt_sklearn_model_complete.pkl does not exist +File master Test181_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt does not exist +File master Test181_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test181_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test181_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test181_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test181_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test181_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test181_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test181_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test181_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test181_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test181_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test181_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test181_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test181_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test181_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test181_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test181_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test182_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt does not exist +File master Test182_smlp_toy_num_resp_mult.txt does not exist +File master Test182_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test182_smlp_toy_num_resp_mult_dt_sklearn_model_complete.pkl does not exist +File master Test182_smlp_toy_num_resp_mult_features_scaler.pkl does not 
exist +File master Test182_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test182_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test182_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test182_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test182_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test182_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test182_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test182_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test182_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test182_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test182_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test182_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test182_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test182_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test182_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test182_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test183_smlp_toy_num_resp_mult.txt does not exist +File master Test183_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test183_smlp_toy_num_resp_mult_dt_sklearn_model_complete.pkl does not exist +File master Test183_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt does not exist +File master Test183_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test183_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test183_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master 
Test183_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test183_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test183_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test183_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test183_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test183_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test183_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test183_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test183_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test183_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test183_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test183_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test183_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +Test 184 Failed: +Error in Build stage: +Data file does not exist +Error in Build stage: +New data file does not exist +Test 185 Failed: +Error in Build stage: +Data file does not exist +Error in Build stage: +New data file does not exist +Test 186 Failed: +Error in Build stage: +Data file does not exist +Error in Build stage: +New data file does not exist +File master Test187_smlp_toy_num_resp_mult.txt does not exist +File master Test187_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test187_smlp_toy_num_resp_mult_dt_sklearn_model_complete.pkl does not exist +File master Test187_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt does not exist +File master Test187_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test187_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master 
Test187_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test187_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test187_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test187_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test187_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test187_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test187_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test187_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test187_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test187_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test187_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test187_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test187_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test187_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test187_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test187_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test188_smlp_toy_num_resp_mult_y2_dt_caret_tree_rules.txt does not exist +File master Test188_smlp_toy_num_resp_mult.txt does not exist +File master Test188_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test188_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test188_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test188_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test188_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test188_smlp_toy_num_resp_mult_model_features_dict.json does not 
exist +File master Test188_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test188_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test188_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test188_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test188_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test188_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test188_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test188_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test188_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test188_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test188_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test188_smlp_toy_num_resp_mult_y1_dt_caret_tree_rules.txt does not exist +File master Test188_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test188_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test188_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test188_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test189_smlp_toy_num_resp_mult.txt does not exist +File master Test189_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test189_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test189_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test189_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test189_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test189_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master 
Test189_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test189_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test189_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test189_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test189_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test189_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test189_smlp_toy_num_resp_mult_rf_sklearn_tree_rules.txt does not exist +File master Test189_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test189_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test189_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test189_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test189_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test189_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test189_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test190_smlp_toy_num_resp_mult_y2_rf_caret_tree_rules.txt does not exist +File master Test190_smlp_toy_num_resp_mult.txt does not exist +File master Test190_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test190_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test190_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test190_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test190_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test190_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test190_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test190_smlp_toy_num_resp_mult_optimization_progress.csv does not exist 
+File master Test190_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test190_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test190_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test190_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test190_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test190_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test190_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test190_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test190_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test190_smlp_toy_num_resp_mult_y1_rf_caret_tree_rules.txt does not exist +File master Test190_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test190_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test190_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test190_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test191_smlp_toy_num_resp_mult_et_sklearn_y1_tree_rules.txt does not exist +File master Test191_smlp_toy_num_resp_mult.txt does not exist +File master Test191_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test191_smlp_toy_num_resp_mult_et_sklearn_y2_tree_rules.txt does not exist +File master Test191_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test191_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test191_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test191_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test191_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master 
Test191_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test191_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test191_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test191_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test191_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test191_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test191_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test191_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test191_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test191_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test191_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test191_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test191_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test191_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test191_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test192_smlp_toy_num_resp_mult.txt does not exist +File master Test192_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test192_smlp_toy_num_resp_mult_et_sklearn_tree_rules.txt does not exist +File master Test192_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test192_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test192_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test192_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test192_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test192_smlp_toy_num_resp_mult_model_levels_dict.json does not 
exist +File master Test192_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test192_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test192_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test192_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test192_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test192_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test192_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test192_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test192_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test192_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test192_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test192_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test193_smlp_toy_num_resp_mult_y2_et_caret_tree_rules.txt does not exist +File master Test193_smlp_toy_num_resp_mult.txt does not exist +File master Test193_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test193_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test193_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test193_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test193_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test193_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test193_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test193_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test193_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master 
Test193_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test193_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test193_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test193_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test193_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test193_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test193_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test193_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test193_smlp_toy_num_resp_mult_y1_et_caret_tree_rules.txt does not exist +File master Test193_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test193_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test193_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test193_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test194_smlp_toy_num_resp_mult_rf_sklearn_y1_tree_rules.txt does not exist +File master Test194_smlp_toy_num_resp_mult.txt does not exist +File master Test194_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test194_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test194_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test194_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test194_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test194_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test194_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test194_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test194_smlp_toy_num_resp_mult_optimization_progress.json does 
not exist +File master Test194_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test194_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test194_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test194_smlp_toy_num_resp_mult_rf_sklearn_y2_tree_rules.txt does not exist +File master Test194_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test194_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test194_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test194_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test194_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test194_smlp_toy_num_resp_mult_y1_smlp_full_model_term.json does not exist +File master Test194_smlp_toy_num_resp_mult_y1_smlp_model_term.json does not exist +File master Test194_smlp_toy_num_resp_mult_y2_smlp_full_model_term.json does not exist +File master Test194_smlp_toy_num_resp_mult_y2_smlp_model_term.json does not exist +File master Test195_smlp_toy_num_resp_mult_et_sklearn_tree_rules.txt does not exist +File master Test195_smlp_toy_num_resp_mult.txt does not exist +File master Test195_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test195_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test195_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test195_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test195_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test195_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test195_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test195_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master 
Test195_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test195_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test195_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test195_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test195_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test195_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test195_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test195_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test195_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test195_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test195_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test196_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt does not exist +File master Test196_smlp_toy_num_resp_mult.txt does not exist +File master Test196_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test196_smlp_toy_num_resp_mult_dt_sklearn_model_complete.pkl does not exist +File master Test196_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test196_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test196_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test196_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test196_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test196_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test196_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test196_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test196_smlp_toy_num_resp_mult_optimization_results.json does 
not exist +File master Test196_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test196_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test196_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test196_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test196_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test196_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test196_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test196_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test197_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt does not exist +File master Test197_smlp_toy_num_resp_mult.txt does not exist +File master Test197_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test197_smlp_toy_num_resp_mult_dt_sklearn_model_complete.pkl does not exist +File master Test197_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test197_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test197_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test197_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test197_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test197_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test197_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test197_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test197_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test197_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test197_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master 
Test197_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test197_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test197_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test197_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test197_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test197_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test198_smlp_toy_num_resp_mult.txt does not exist +File master Test198_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test198_smlp_toy_num_resp_mult_dt_sklearn_model_complete.pkl does not exist +File master Test198_smlp_toy_num_resp_mult_dt_sklearn_tree_rules.txt does not exist +File master Test198_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test198_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test198_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test198_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test198_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test198_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test198_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test198_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test198_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test198_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test198_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test198_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test198_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test198_smlp_toy_num_resp_mult_trace.csv does not exist 
+File master Test198_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test198_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test199_smlp_toy_num_resp_mult.txt does not exist +File master Test199_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test199_smlp_toy_num_resp_mult_et_sklearn_tree_rules.txt does not exist +File master Test199_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test199_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test199_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test199_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test199_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test199_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test199_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test199_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test199_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test199_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test199_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test199_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test199_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test199_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test199_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test199_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test199_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test199_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test200_smlp_toy_num_resp_mult.txt does not 
exist +File master Test200_smlp_toy_num_resp_mult_data_bounds.json does not exist +File master Test200_smlp_toy_num_resp_mult_et_sklearn_tree_rules.txt does not exist +File master Test200_smlp_toy_num_resp_mult_features_scaler.pkl does not exist +File master Test200_smlp_toy_num_resp_mult_labeled_prediction_precisions.csv does not exist +File master Test200_smlp_toy_num_resp_mult_labeled_predictions_summary.csv does not exist +File master Test200_smlp_toy_num_resp_mult_missing_values_dict.json does not exist +File master Test200_smlp_toy_num_resp_mult_model_features_dict.json does not exist +File master Test200_smlp_toy_num_resp_mult_model_levels_dict.json does not exist +File master Test200_smlp_toy_num_resp_mult_optimization_progress.csv does not exist +File master Test200_smlp_toy_num_resp_mult_optimization_progress.json does not exist +File master Test200_smlp_toy_num_resp_mult_optimization_results.csv does not exist +File master Test200_smlp_toy_num_resp_mult_optimization_results.json does not exist +File master Test200_smlp_toy_num_resp_mult_responses_scaler.pkl does not exist +File master Test200_smlp_toy_num_resp_mult_smlp_full_model_term.json does not exist +File master Test200_smlp_toy_num_resp_mult_smlp_model_term.json does not exist +File master Test200_smlp_toy_num_resp_mult_test_prediction_precisions.csv does not exist +File master Test200_smlp_toy_num_resp_mult_test_predictions_summary.csv does not exist +File master Test200_smlp_toy_num_resp_mult_trace.csv does not exist +File master Test200_smlp_toy_num_resp_mult_training_prediction_precisions.csv does not exist +File master Test200_smlp_toy_num_resp_mult_training_predictions_summary.csv does not exist +File master Test201_smlp_toy_num_resp_mult.txt does not exist +File master Test202_smlp_toy_num_resp_mult.txt does not exist +File master Test203_smlp_toy_basic.txt does not exist +File master Test203_smlp_toy_basic_data_bounds.json does not exist +File master 
Test203_smlp_toy_basic_features_scaler.pkl does not exist +File master Test203_smlp_toy_basic_model_features_dict.json does not exist +File master Test203_smlp_toy_basic_model_levels_dict.json does not exist +File master Test203_smlp_toy_basic_optimization_progress.csv does not exist +File master Test203_smlp_toy_basic_optimization_progress.json does not exist +File master Test203_smlp_toy_basic_optimization_results.json does not exist +File master Test203_smlp_toy_basic_responses_scaler.pkl does not exist +File master Test203_smlp_toy_basic_trace.csv does not exist +File master Test204_smlp_toy_basic.txt does not exist +File master Test204_smlp_toy_basic_data_bounds.json does not exist +File master Test204_smlp_toy_basic_features_scaler.pkl does not exist +File master Test204_smlp_toy_basic_model_features_dict.json does not exist +File master Test204_smlp_toy_basic_model_levels_dict.json does not exist +File master Test204_smlp_toy_basic_optimization_progress.csv does not exist +File master Test204_smlp_toy_basic_optimization_progress.json does not exist +File master Test204_smlp_toy_basic_optimization_results.csv does not exist +File master Test204_smlp_toy_basic_optimization_results.json does not exist +File master Test204_smlp_toy_basic_responses_scaler.pkl does not exist +File master Test204_smlp_toy_basic_sampling_prediction_precisions.csv does not exist +File master Test204_smlp_toy_basic_sampling_predictions_summary.csv does not exist +File master Test204_smlp_toy_basic_trace.csv does not exist +File master Test205_doe_two_levels_opt.txt does not exist +File master Test205_doe_two_levels_opt_trace.csv does not exist +File master Test206_smlp_toy_basic.txt does not exist +File master Test206_smlp_toy_basic_data_bounds.json does not exist +File master Test206_smlp_toy_basic_features_scaler.pkl does not exist +File master Test206_smlp_toy_basic_model_features_dict.json does not exist +File master Test206_smlp_toy_basic_model_levels_dict.json does not exist 
+File master Test206_smlp_toy_basic_optimization_progress.csv does not exist +File master Test206_smlp_toy_basic_optimization_progress.json does not exist +File master Test206_smlp_toy_basic_optimization_results.csv does not exist +File master Test206_smlp_toy_basic_optimization_results.json does not exist +File master Test206_smlp_toy_basic_responses_scaler.pkl does not exist +File master Test206_smlp_toy_basic_trace.csv does not exist +Test 207 Failed: +Error in Build stage: +Data file does not exist +Test 208 Failed: +Error in Build stage: +Data file does not exist +Test 209 Failed: +Error in Build stage: +Data file does not exist +Test 210 Failed: +Error in Build stage: +Data file does not exist +Test 211 Failed: +Error in Build stage: +Data file does not exist +Test 212 Failed: +Error in Build stage: +Data file does not exist +Test 213 Failed: +Error in Build stage: +Data file does not exist +Test 214 Failed: +Error in Build stage: +Data file does not exist +File master Test215_smlp_toy_mult_discr.txt does not exist +File master Test215_smlp_toy_mult_discr_features_summary.csv does not exist +File master Test215_smlp_toy_mult_discr_missing_values_dict.json does not exist +File master Test216_smlp_toy_basic.txt does not exist +File master Test216_smlp_toy_basic_features_summary.csv does not exist +File master Test217_smlp_toy_mult_discr.txt does not exist +File master Test217_smlp_toy_mult_discr_features_summary.csv does not exist +File master Test217_smlp_toy_mult_discr_missing_values_dict.json does not exist +File master Test218_smlp_toy_mult_discr.txt does not exist +File master Test218_smlp_toy_mult_discr_features_summary.csv does not exist +File master Test218_smlp_toy_mult_discr_missing_values_dict.json does not exist +File master Test219_smlp_toy_mult_discr.txt does not exist +File master Test219_smlp_toy_mult_discr_features_summary.csv does not exist +File master Test219_smlp_toy_mult_discr_missing_values_dict.json does not exist +File master 
Test220_smlp_toy_mult_discr.txt does not exist +File master Test220_smlp_toy_mult_discr_features_summary.csv does not exist +File master Test220_smlp_toy_mult_discr_missing_values_dict.json does not exist +File master Test221_smlp_toy_mult_discr.txt does not exist +File master Test221_smlp_toy_mult_discr_features_summary.csv does not exist +File master Test221_smlp_toy_mult_discr_missing_values_dict.json does not exist +File master Test222_smlp_toy_mult_discr.txt does not exist +File master Test222_smlp_toy_mult_discr_features_summary.csv does not exist +File master Test222_smlp_toy_mult_discr_missing_values_dict.json does not exist +File master Test223_smlp_toy_basic.txt does not exist +File master Test223_smlp_toy_basic_features_summary.csv does not exist +File master Test224_smlp_toy_basic.txt does not exist +File master Test224_smlp_toy_basic_features_summary.csv does not exist +File master Test225_smlp_toy_basic.txt does not exist +File master Test225_smlp_toy_basic_features_summary.csv does not exist +File master Test226_smlp_toy_basic.txt does not exist +File master Test226_smlp_toy_basic_features_summary.csv does not exist +File master Test227_smlp_toy_basic.txt does not exist +File master Test227_smlp_toy_basic_features_summary.csv does not exist +master log file does not exist! +Do you wish to copy the new log file to master? 
+(yes/no|y/n): No new tests crashed (not in the masters) +Time: 37.54873522520065 minutes +End of regression diff --git a/smlp_regression/run_smlp_regression_expected_diff_report.log b/smlp_regression/run_smlp_regression_expected_diff_report.log new file mode 100644 index 00000000..31607643 --- /dev/null +++ b/smlp_regression/run_smlp_regression_expected_diff_report.log @@ -0,0 +1,816 @@ +=================== Diff report for: Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt ================================== +87a88,111 +> +> smlp_logger - INFO - PREDICT ON NEW DATA +> +> smlp_logger - INFO - Model prediction: start +> +> smlp_logger - INFO - Model prediction: end +> +> smlp_logger - INFO - Reporting prediction results: start +> +> smlp_logger - INFO - Saving predictions summary into file: +> ./Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv +> +> smlp_logger - INFO - Saving prediction precisions into file: +> ./Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv +> +> smlp_logger - INFO - Prediction on new data -- msqe: 8.026 +> +> smlp_logger - INFO - Prediction on new data -- r2_score: -1.032 +> +> smlp_logger - INFO - Reporting prediction results: end +> +> smlp_logger - INFO - Running SMLP in mode "predict": End +> +> smlp_logger - INFO - Executing run_smlp.py script: End +=================== End of Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled.txt diff report ================================ +=================== Diff report for: Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv ================================== +diff: /home/mdmitry/github/smlp_python313/regr_smlp/code/Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv: No such file or directory +=================== End of 
Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_predictions_summary.csv diff report ================================ +=================== Diff report for: Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv ================================== +diff: /home/mdmitry/github/smlp_python313/regr_smlp/code/Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv: No such file or directory +=================== End of Test16_Test8_smlp_toy_num_resp_mult_smlp_toy_num_resp_mult_pred_labeled_new_prediction_precisions.csv diff report ================================ +=================== Diff report for: Test29_smlp_toy_cls_metasymbol_colnames_mult.txt ================================== +95,96d94 +< smlp_logger - WARNING - Range plots are not supported in this version of SMLP +< +=================== End of Test29_smlp_toy_cls_metasymbol_colnames_mult.txt diff report ================================ +=================== Diff report for: Test29_smlp_toy_cls_metasymbol_colnames_mult_features_ranking.csv ================================== +2,3c2,8 +< PF 1,0,0,1,0.36363636363636365,0.48104569292083466,FMAX(xyz)_9.0_9.0_Bin_0__FMAX.xyz._4.0_4.0_Bin_0,FMAX(xyz),9.0:9.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,FMAX.xyz.,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,5,none,0~~4~~9,1~~2~~3~~6~~7~~8~~10 +< PF 1,0,0,1,0.36363636363636365,0.48104569292083466,FMAX(xyz)_9.0_9.0_Bin_0__categ__c10,FMAX(xyz),9.0:9.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,categ,c10,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,5,none,0~~4~~9,1~~2~~3~~6~~7~~8~~10 +--- +> PF 
1,0,0,1,0.36363636363636365,0.48104569292083466,p-3_8.0_8.0_Bin_0,p-3,8.0:8.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,4,none,0~~5~~9,1~~2~~3~~6~~7~~8~~10 +> PF 1,0,0,1,0.36363636363636365,0.48104569292083466,p-3_3.0_3.0_Bin_0,p-3,3.0:3.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,0,none,4~~5~~9,1~~2~~3~~6~~7~~8~~10 +> PF 1,0,0,1,0.36363636363636365,0.48104569292083466,categ__c5,categ,c5,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,4,none,0~~5~~9,1~~2~~3~~6~~7~~8~~10 +> PF 1,0,0,1,0.36363636363636365,0.48104569292083466,categ__c14,categ,c14,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,0,none,4~~5~~9,1~~2~~3~~6~~7~~8~~10 +> PF 1,0,0,1,0.36363636363636365,0.48104569292083466,categ__c11,categ,c11,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,9,none,0~~4~~5,1~~2~~3~~6~~7~~8~~10 +> PF 1,0,0,1,0.36363636363636365,0.48104569292083466,categ__c10,categ,c10,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,5,none,0~~4~~9,1~~2~~3~~6~~7~~8~~10 +> PF 1,0,0,1,0.36363636363636365,0.48104569292083466,FMAX.xyz._4.0_4.0_Bin_0__categ__c10,FMAX.xyz.,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,categ,c10,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,5,none,0~~4~~9,1~~2~~3~~6~~7~~8~~10 +5,6c10,17 +< 
PF#,0,0,1,0.45454545454545453,0.49792959773196915,FMAX.xyz._3.0_3.0_Bin_0__categ__c19,FMAX.xyz.,3.0:3.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,categ,c19,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,10,none,0~~1~~5~~7,2~~3~~4~~6~~8~~9 +< PF#,0,0,1,0.45454545454545453,0.49792959773196915,FMAX.xyz._3.0_3.0_Bin_0__categ__c4,FMAX.xyz.,3.0:3.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,categ,c4,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,7,none,0~~1~~5~~10,2~~3~~4~~6~~8~~9 +--- +> PF 1,0,0,1,0.36363636363636365,0.48104569292083466,FMAX(xyz)_9.0_9.0_Bin_0__categ__c10,FMAX(xyz),9.0:9.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,categ,c10,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,5,none,0~~4~~9,1~~2~~3~~6~~7~~8~~10 +> PF 1,0,0,1,0.36363636363636365,0.48104569292083466,FMAX(xyz)_9.0_9.0_Bin_0__FMAX.xyz._4.0_4.0_Bin_0,FMAX(xyz),9.0:9.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,FMAX.xyz.,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,5,none,0~~4~~9,1~~2~~3~~6~~7~~8~~10 +> PF#,0,0,1,0.45454545454545453,0.49792959773196915,p-3_3.0_3.0_Bin_0,p-3,3.0:3.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~5~~7~~10,2~~3~~4~~6~~8~~9 +> PF#,0,0,1,0.45454545454545453,0.49792959773196915,categ__c4,categ,c4,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,7,none,0~~1~~5~~10,2~~3~~4~~6~~8~~9 +> 
PF#,0,0,1,0.45454545454545453,0.49792959773196915,categ__c19,categ,c19,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,10,none,0~~1~~5~~7,2~~3~~4~~6~~8~~9 +> PF#,0,0,1,0.45454545454545453,0.49792959773196915,categ__c14,categ,c14,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~5~~7~~10,2~~3~~4~~6~~8~~9 +> PF#,0,0,1,0.45454545454545453,0.49792959773196915,categ__c10,categ,c10,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,5,none,0~~1~~7~~10,2~~3~~4~~6~~8~~9 +> PF#,0,0,1,0.45454545454545453,0.49792959773196915,FMAX.xyz._4.0_4.0_Bin_0__categ__c10,FMAX.xyz.,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,categ,c10,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,5,none,0~~1~~7~~10,2~~3~~4~~6~~8~~9 +7a19,20 +> PF#,0,0,1,0.45454545454545453,0.49792959773196915,FMAX.xyz._3.0_3.0_Bin_0__categ__c4,FMAX.xyz.,3.0:3.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,categ,c4,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,7,none,0~~1~~5~~10,2~~3~~4~~6~~8~~9 +> PF#,0,0,1,0.45454545454545453,0.49792959773196915,FMAX.xyz._3.0_3.0_Bin_0__categ__c19,FMAX.xyz.,3.0:3.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,categ,c19,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,10,none,0~~1~~5~~7,2~~3~~4~~6~~8~~9 +9,21d21 +< PF 
1,0,0,1,0.36363636363636365,0.48104569292083466,FMAX.xyz._4.0_4.0_Bin_0__categ__c10,FMAX.xyz.,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,categ,c10,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,5,none,0~~4~~9,1~~2~~3~~6~~7~~8~~10 +< PF#,0,0,1,0.45454545454545453,0.49792959773196915,FMAX.xyz._4.0_4.0_Bin_0__categ__c10,FMAX.xyz.,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,categ,c10,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,5,none,0~~1~~7~~10,2~~3~~4~~6~~8~~9 +< PF 1,0,0,1,0.36363636363636365,0.48104569292083466,categ__c10,categ,c10,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,5,none,0~~4~~9,1~~2~~3~~6~~7~~8~~10 +< PF#,0,0,1,0.45454545454545453,0.49792959773196915,categ__c10,categ,c10,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,5,none,0~~1~~7~~10,2~~3~~4~~6~~8~~9 +< PF 1,0,0,1,0.36363636363636365,0.48104569292083466,categ__c11,categ,c11,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,9,none,0~~4~~5,1~~2~~3~~6~~7~~8~~10 +< PF 1,0,0,1,0.36363636363636365,0.48104569292083466,categ__c14,categ,c14,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,0,none,4~~5~~9,1~~2~~3~~6~~7~~8~~10 +< PF#,0,0,1,0.45454545454545453,0.49792959773196915,categ__c14,categ,c14,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~5~~7~~10,2~~3~~4~~6~~8~~9 +< 
PF#,0,0,1,0.45454545454545453,0.49792959773196915,categ__c19,categ,c19,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,10,none,0~~1~~5~~7,2~~3~~4~~6~~8~~9 +< PF#,0,0,1,0.45454545454545453,0.49792959773196915,categ__c4,categ,c4,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,7,none,0~~1~~5~~10,2~~3~~4~~6~~8~~9 +< PF 1,0,0,1,0.36363636363636365,0.48104569292083466,categ__c5,categ,c5,NA:NA,NA,,,,,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,4,none,0~~5~~9,1~~2~~3~~6~~7~~8~~10 +< PF 1,0,0,1,0.36363636363636365,0.48104569292083466,p-3_3.0_3.0_Bin_0,p-3,3.0:3.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,0,none,4~~5~~9,1~~2~~3~~6~~7~~8~~10 +< PF#,0,0,1,0.45454545454545453,0.49792959773196915,p-3_3.0_3.0_Bin_0,p-3,3.0:3.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~5~~7~~10,2~~3~~4~~6~~8~~9 +< PF 1,0,0,1,0.36363636363636365,0.48104569292083466,p-3_8.0_8.0_Bin_0,p-3,8.0:8.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.75,PSG,3.0,7.0,1.0,0.0,,,0.625,2.75,1.0,0.5289,0.625,0.4,0.34,0.7273,0.6066,4,none,0~~5~~9,1~~2~~3~~6~~7~~8~~10 +=================== End of Test29_smlp_toy_cls_metasymbol_colnames_mult_features_ranking.csv diff report ================================ +=================== Diff report for: Test29_smlp_toy_cls_metasymbol_colnames_mult_ranking_resp_feat.csv ================================== +1,12c1,12 +< 
FMAX(xyz),FMAX.xyz.,categ,p-3,PF 1,PF# +< 10.0,2.0,c14,3,1,1 +< 12.0,2.0,c15,4,0,1 +< 10.0,3.0,c1,4,0,0 +< 11.0,2.0,c9,6,0,0 +< 10.0,2.0,c5,8,1,0 +< 9.0,4.0,c10,7,1,1 +< 9.0,3.0,c13,6,0,0 +< 10.0,3.0,c4,4,0,1 +< 11.0,4.0,c15,4,0,0 +< 12.0,2.0,c11,7,1,0 +< 10.0,3.0,c19,7,0,1 +--- +> p-3,categ,FMAX.xyz.,FMAX(xyz),PF 1,PF# +> 3,c14,2.0,10.0,1,1 +> 4,c15,2.0,12.0,0,1 +> 4,c1,3.0,10.0,0,0 +> 6,c9,2.0,11.0,0,0 +> 8,c5,2.0,10.0,1,0 +> 7,c10,4.0,9.0,1,1 +> 6,c13,3.0,9.0,0,0 +> 4,c4,3.0,10.0,0,1 +> 4,c15,4.0,11.0,0,0 +> 7,c11,2.0,12.0,1,0 +> 7,c19,3.0,10.0,0,1 +=================== End of Test29_smlp_toy_cls_metasymbol_colnames_mult_ranking_resp_feat.csv diff report ================================ +=================== Diff report for: Test30_smlp_toy_num_resp_mult.txt ================================== +95,96d94 +< smlp_logger - WARNING - Range plots are not supported in this version of SMLP +< +=================== End of Test30_smlp_toy_num_resp_mult.txt diff report ================================ +=================== Diff report for: Test30_smlp_toy_num_resp_mult_features_ranking.csv ================================== +2,13d1 +< y2,0,5,9,6.818181818181818,1.9917183909278766,p1_2.0_2.0_Bin_0__p2_3.0_3.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +< y1,0,5,9,6.818181818181818,1.9917183909278766,p1_2.0_2.0_Bin_0__p2_4.0_4.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,4.0:4.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +< 
y2,0,5,9,6.818181818181818,1.9917183909278766,p1_2.0_2.0_Bin_0__p2_4.0_4.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,4.0:4.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +< y1,0,5,9,6.818181818181818,1.9917183909278766,p1_2.0_2.0_Bin_0__p2_8.0_8.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,8.0:8.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +< y1,0,5,9,6.818181818181818,1.9917183909278766,p1_4.0_4.0_Bin_0,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,1.32,PSG,3.0,6.0,2.0,0.0,,,0.7,2.2,1.0,0.5496,0.7,0.5714,0.4762,0.7273,0.6749 +< y2,0,5,9,6.818181818181818,1.9917183909278766,p1_4.0_4.0_Bin_0,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,1.32,PSG,3.0,6.0,2.0,0.0,,,0.7,2.2,1.0,0.5496,0.7,0.5714,0.4762,0.7273,0.6749 +< y2,0,5,9,6.818181818181818,1.9917183909278766,p2_3.0_3.0_Bin_0,p2,3.0:3.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +< y1,0,5,9,6.818181818181818,1.9917183909278766,p2_4.0_4.0_Bin_0__p1_4.0_4.0_Bin_0,p2,4.0:4.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +< 
y1,0,5,9,6.818181818181818,1.9917183909278766,p2_4.0_4.0_Bin_0__x_11.0_11.0_Bin_0,p2,4.0:4.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,11.0:11.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +< y1,0,5,9,6.818181818181818,1.9917183909278766,p2_4.0_4.0_Bin_0__x_12.0_12.0_Bin_0,p2,4.0:4.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,12.0:12.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +< y2,0,5,9,6.818181818181818,1.9917183909278766,p2_7.0_7.0_Bin_0__p1_4.0_4.0_Bin_0,p2,7.0:7.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +< y2,0,5,9,6.818181818181818,1.9917183909278766,p2_7.0_7.0_Bin_0__x_9.0_9.0_Bin_0,p2,7.0:7.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,9.0:9.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +15,16d2 +< y2,0,5,9,6.818181818181818,1.9917183909278766,x_10.0_10.0_Bin_0__p2_3.0_3.0_Bin_0,x,10.0:10.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +< y1,0,5,9,6.818181818181818,1.9917183909278766,x_10.0_10.0_Bin_0__p2_7.0_7.0_Bin_0,x,10.0:10.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,7.0:7.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +17a4,7 +> 
y1,0,5,9,6.818181818181818,1.9917183909278766,x_10.0_10.0_Bin_0__p2_7.0_7.0_Bin_0,x,10.0:10.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,7.0:7.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +> y1,0,5,9,6.818181818181818,1.9917183909278766,p2_4.0_4.0_Bin_0__x_12.0_12.0_Bin_0,p2,4.0:4.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,12.0:12.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +> y1,0,5,9,6.818181818181818,1.9917183909278766,p2_4.0_4.0_Bin_0__x_11.0_11.0_Bin_0,p2,4.0:4.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,11.0:11.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +> y1,0,5,9,6.818181818181818,1.9917183909278766,p1_4.0_4.0_Bin_0,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,1.32,PSG,3.0,6.0,2.0,0.0,,,0.7,2.2,1.0,0.5496,0.7,0.5714,0.4762,0.7273,0.6749 +19c9,12 +< y2,0,5,9,6.818181818181818,1.9917183909278766,x_11.0_11.0_Bin_0__p1_4.0_4.0_Bin_0,x,11.0:11.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +--- +> y1,0,5,9,6.818181818181818,1.9917183909278766,p2_4.0_4.0_Bin_0__p1_4.0_4.0_Bin_0,p2,4.0:4.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +> 
y1,0,5,9,6.818181818181818,1.9917183909278766,p1_2.0_2.0_Bin_0__p2_8.0_8.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,8.0:8.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +> y1,0,5,9,6.818181818181818,1.9917183909278766,p1_2.0_2.0_Bin_0__p2_4.0_4.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,4.0:4.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +> y2,0,5,9,6.818181818181818,1.9917183909278766,p2_7.0_7.0_Bin_0__x_9.0_9.0_Bin_0,p2,7.0:7.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,9.0:9.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +20a14,16 +> y2,0,5,9,6.818181818181818,1.9917183909278766,p2_3.0_3.0_Bin_0,p2,3.0:3.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +> y2,0,5,9,6.818181818181818,1.9917183909278766,x_10.0_10.0_Bin_0__p2_3.0_3.0_Bin_0,x,10.0:10.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +> y2,0,5,9,6.818181818181818,1.9917183909278766,p1_4.0_4.0_Bin_0,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,1.32,PSG,3.0,6.0,2.0,0.0,,,0.7,2.2,1.0,0.5496,0.7,0.5714,0.4762,0.7273,0.6749 +21a18,21 +> 
y2,0,5,9,6.818181818181818,1.9917183909278766,x_11.0_11.0_Bin_0__p1_4.0_4.0_Bin_0,x,11.0:11.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +> y2,0,5,9,6.818181818181818,1.9917183909278766,p2_7.0_7.0_Bin_0__p1_4.0_4.0_Bin_0,p2,7.0:7.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +> y2,0,5,9,6.818181818181818,1.9917183909278766,p1_2.0_2.0_Bin_0__p2_4.0_4.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,4.0:4.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +> y2,0,5,9,6.818181818181818,1.9917183909278766,p1_2.0_2.0_Bin_0__p2_3.0_3.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.32,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659 +=================== End of Test30_smlp_toy_num_resp_mult_features_ranking.csv diff report ================================ +=================== Diff report for: Test30_smlp_toy_num_resp_mult_ranking_resp_feat.csv ================================== +1,12c1,12 +< p1,p2,x,y1,y2 +< 2.0,3,10.0,5,9 +< 2.0,4,12.0,9,9 +< 3.0,4,10.0,5,9 +< 2.0,6,11.0,5,5 +< 2.0,8,10.0,9,5 +< 4.0,7,9.0,9,9 +< 3.0,6,9.0,5,5 +< 3.0,4,10.0,5,5 +< 4.0,4,11.0,9,9 +< 2.0,7,12.0,5,5 +< 3.0,7,10.0,9,5 +--- +> p2,x,p1,y1,y2 +> 3,10.0,2.0,5,9 +> 4,12.0,2.0,9,9 +> 4,10.0,3.0,5,9 +> 6,11.0,2.0,5,5 +> 8,10.0,2.0,9,5 +> 7,9.0,4.0,9,9 +> 6,9.0,3.0,5,5 +> 4,10.0,3.0,5,5 +> 4,11.0,4.0,9,9 +> 7,12.0,2.0,5,5 +> 7,10.0,3.0,9,5 +=================== End of 
Test30_smlp_toy_num_resp_mult_ranking_resp_feat.csv diff report ================================ +=================== Diff report for: Test31_smlp_toy_num_resp_mult.txt ================================== +95,96d94 +< smlp_logger - WARNING - Range plots are not supported in this version of SMLP +< +=================== End of Test31_smlp_toy_num_resp_mult.txt diff report ================================ +=================== Diff report for: Test31_smlp_toy_num_resp_mult_features_ranking.csv ================================== +2,7c2,5 +< y1,0,0,1,0.5454545454545454,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_3.0_3.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,0,none,2~~3~~6~~7~~9,1~~4~~5~~8~~10 +< y2,0,0,1,0.45454545454545453,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_3.0_3.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~2~~5~~8,3~~4~~6~~7~~9~~10 +< y2,0,0,1,0.45454545454545453,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_4.0_4.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,4.0:4.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,1,none,0~~2~~5~~8,3~~4~~6~~7~~9~~10 +< y1,0,0,1,0.5454545454545454,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_6.0_6.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,6.0:6.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,3,none,0~~2~~6~~7~~9,1~~4~~5~~8~~10 +< 
y1,0,0,1,0.5454545454545454,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_7.0_7.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,7.0:7.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,9,none,0~~2~~3~~6~~7,1~~4~~5~~8~~10 +< y2,0,0,1,0.45454545454545453,0.49792959773196915,p1_4.0_4.0_Bin_0,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,3.0,6.0,2.0,0.0,,,0.7,2.2,1.0,0.5496,0.7,0.5714,0.4762,0.7273,0.6749,5~~8,none,0~~1~~2,3~~4~~6~~7~~9~~10 +--- +> y1,0,0,1,0.5454545454545454,0.49792959773196915,x_12.0_12.0_Bin_0__p2_7.0_7.0_Bin_0,x,12.0:12.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,7.0:7.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,9,none,0~~2~~3~~6~~7,1~~4~~5~~8~~10 +> y1,0,0,1,0.5454545454545454,0.49792959773196915,p2_6.0_6.0_Bin_0,p2,6.0:6.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,1.8333333333333335,PSG,4.0,5.0,2.0,0.0,,,0.6666,1.8333,1.0,0.5413,0.6667,0.5,0.3889,0.6364,0.6286,3~~6,none,0~~2~~7~~9,1~~4~~5~~8~~10 +> y1,0,0,1,0.5454545454545454,0.49792959773196915,x_11.0_11.0_Bin_0__p2_6.0_6.0_Bin_0,x,11.0:11.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,6.0:6.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,3,none,0~~2~~6~~7~~9,1~~4~~5~~8~~10 +> 
y1,0,0,1,0.5454545454545454,0.49792959773196915,p2_4.0_4.0_Bin_0__x_10.0_10.0_Bin_0,p2,4.0:4.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,10.0:10.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,4.0,5.0,2.0,0.0,,,0.6666,1.8333,1.0,0.5413,0.6667,0.5,0.3889,0.6364,0.6286,2~~7,none,0~~3~~6~~9,1~~4~~5~~8~~10 +9d6 +< y2,0,0,1,0.45454545454545453,0.49792959773196915,p2_3.0_3.0_Bin_0,p2,3.0:3.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~2~~5~~8,3~~4~~6~~7~~9~~10 +11,15d7 +< y1,0,0,1,0.5454545454545454,0.49792959773196915,p2_4.0_4.0_Bin_0__x_10.0_10.0_Bin_0,p2,4.0:4.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,10.0:10.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,4.0,5.0,2.0,0.0,,,0.6666,1.8333,1.0,0.5413,0.6667,0.5,0.3889,0.6364,0.6286,2~~7,none,0~~3~~6~~9,1~~4~~5~~8~~10 +< y1,0,0,1,0.5454545454545454,0.49792959773196915,p2_6.0_6.0_Bin_0,p2,6.0:6.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,1.8333333333333335,PSG,4.0,5.0,2.0,0.0,,,0.6666,1.8333,1.0,0.5413,0.6667,0.5,0.3889,0.6364,0.6286,3~~6,none,0~~2~~7~~9,1~~4~~5~~8~~10 +< y2,0,0,1,0.45454545454545453,0.49792959773196915,p2_7.0_7.0_Bin_0__p1_4.0_4.0_Bin_0,p2,7.0:7.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,5,none,0~~1~~2~~8,3~~4~~6~~7~~9~~10 +< 
y2,0,0,1,0.45454545454545453,0.49792959773196915,p2_7.0_7.0_Bin_0__x_9.0_9.0_Bin_0,p2,7.0:7.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,9.0:9.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,5,none,0~~1~~2~~8,3~~4~~6~~7~~9~~10 +< y2,0,0,1,0.45454545454545453,0.49792959773196915,x_10.0_10.0_Bin_0__p2_3.0_3.0_Bin_0,x,10.0:10.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~2~~5~~8,3~~4~~6~~7~~9~~10 +17c9,12 +< y2,0,0,1,0.45454545454545453,0.49792959773196915,x_11.0_11.0_Bin_0__p1_4.0_4.0_Bin_0,x,11.0:11.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,8,none,0~~1~~2~~5,3~~4~~6~~7~~9~~10 +--- +> y1,0,0,1,0.5454545454545454,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_7.0_7.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,7.0:7.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,9,none,0~~2~~3~~6~~7,1~~4~~5~~8~~10 +> y1,0,0,1,0.5454545454545454,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_6.0_6.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,6.0:6.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,3,none,0~~2~~6~~7~~9,1~~4~~5~~8~~10 +> 
y1,0,0,1,0.5454545454545454,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_3.0_3.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,0,none,2~~3~~6~~7~~9,1~~4~~5~~8~~10 +> y2,0,0,1,0.45454545454545453,0.49792959773196915,p2_7.0_7.0_Bin_0__x_9.0_9.0_Bin_0,p2,7.0:7.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,9.0:9.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,5,none,0~~1~~2~~8,3~~4~~6~~7~~9~~10 +19,20c14,16 +< y1,0,0,1,0.5454545454545454,0.49792959773196915,x_11.0_11.0_Bin_0__p2_6.0_6.0_Bin_0,x,11.0:11.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,6.0:6.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,3,none,0~~2~~6~~7~~9,1~~4~~5~~8~~10 +< y1,0,0,1,0.5454545454545454,0.49792959773196915,x_12.0_12.0_Bin_0__p2_7.0_7.0_Bin_0,x,12.0:12.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,7.0:7.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,9,none,0~~2~~3~~6~~7,1~~4~~5~~8~~10 +--- +> y2,0,0,1,0.45454545454545453,0.49792959773196915,p2_3.0_3.0_Bin_0,p2,3.0:3.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~2~~5~~8,3~~4~~6~~7~~9~~10 +> 
y2,0,0,1,0.45454545454545453,0.49792959773196915,x_10.0_10.0_Bin_0__p2_3.0_3.0_Bin_0,x,10.0:10.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~2~~5~~8,3~~4~~6~~7~~9~~10 +> y2,0,0,1,0.45454545454545453,0.49792959773196915,p1_4.0_4.0_Bin_0,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,3.0,6.0,2.0,0.0,,,0.7,2.2,1.0,0.5496,0.7,0.5714,0.4762,0.7273,0.6749,5~~8,none,0~~1~~2,3~~4~~6~~7~~9~~10 +21a18,21 +> y2,0,0,1,0.45454545454545453,0.49792959773196915,x_11.0_11.0_Bin_0__p1_4.0_4.0_Bin_0,x,11.0:11.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,8,none,0~~1~~2~~5,3~~4~~6~~7~~9~~10 +> y2,0,0,1,0.45454545454545453,0.49792959773196915,p2_7.0_7.0_Bin_0__p1_4.0_4.0_Bin_0,p2,7.0:7.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,5,none,0~~1~~2~~8,3~~4~~6~~7~~9~~10 +> y2,0,0,1,0.45454545454545453,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_4.0_4.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,4.0:4.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,1,none,0~~2~~5~~8,3~~4~~6~~7~~9~~10 +> 
y2,0,0,1,0.45454545454545453,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_3.0_3.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~2~~5~~8,3~~4~~6~~7~~9~~10 +=================== End of Test31_smlp_toy_num_resp_mult_features_ranking.csv diff report ================================ +=================== Diff report for: Test31_smlp_toy_num_resp_mult_ranking_resp_feat.csv ================================== +1,12c1,12 +< p1,p2,x,y1,y2 +< 2.0,3,10.0,1,1 +< 2.0,4,12.0,0,1 +< 3.0,4,10.0,1,1 +< 2.0,6,11.0,1,0 +< 2.0,8,10.0,0,0 +< 4.0,7,9.0,0,1 +< 3.0,6,9.0,1,0 +< 3.0,4,10.0,1,0 +< 4.0,4,11.0,0,1 +< 2.0,7,12.0,1,0 +< 3.0,7,10.0,0,0 +--- +> x,p2,p1,y1,y2 +> 10.0,3,2.0,1,1 +> 12.0,4,2.0,0,1 +> 10.0,4,3.0,1,1 +> 11.0,6,2.0,1,0 +> 10.0,8,2.0,0,0 +> 9.0,7,4.0,0,1 +> 9.0,6,3.0,1,0 +> 10.0,4,3.0,1,0 +> 11.0,4,4.0,0,1 +> 12.0,7,2.0,1,0 +> 10.0,7,3.0,0,0 +=================== End of Test31_smlp_toy_num_resp_mult_ranking_resp_feat.csv diff report ================================ +=================== Diff report for: Test33_smlp_toy_num_resp_mult.txt ================================== +95,96d94 +< smlp_logger - WARNING - Range plots are not supported in this version of SMLP +< +=================== End of Test33_smlp_toy_num_resp_mult.txt diff report ================================ +=================== Diff report for: Test33_smlp_toy_num_resp_mult_features_ranking.csv ================================== +2,7c2,5 +< y1,0,0,1,0.5454545454545454,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_3.0_3.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,0,none,2~~3~~6~~7~~9,1~~4~~5~~8~~10 +< 
y2,0,0,1,0.45454545454545453,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_3.0_3.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~2~~5~~8,3~~4~~6~~7~~9~~10 +< y2,0,0,1,0.45454545454545453,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_4.0_4.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,4.0:4.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,1,none,0~~2~~5~~8,3~~4~~6~~7~~9~~10 +< y1,0,0,1,0.5454545454545454,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_6.0_6.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,6.0:6.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,3,none,0~~2~~6~~7~~9,1~~4~~5~~8~~10 +< y1,0,0,1,0.5454545454545454,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_7.0_7.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,7.0:7.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,9,none,0~~2~~3~~6~~7,1~~4~~5~~8~~10 +< y2,0,0,1,0.45454545454545453,0.49792959773196915,p1_4.0_4.0_Bin_0,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,3.0,6.0,2.0,0.0,,,0.7,2.2,1.0,0.5496,0.7,0.5714,0.4762,0.7273,0.6749,5~~8,none,0~~1~~2,3~~4~~6~~7~~9~~10 +--- +> 
y1,0,0,1,0.5454545454545454,0.49792959773196915,x_12.0_12.0_Bin_0__p2_7.0_7.0_Bin_0,x,12.0:12.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,7.0:7.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,9,none,0~~2~~3~~6~~7,1~~4~~5~~8~~10 +> y1,0,0,1,0.5454545454545454,0.49792959773196915,p2_6.0_6.0_Bin_0,p2,6.0:6.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,1.8333333333333335,PSG,4.0,5.0,2.0,0.0,,,0.6666,1.8333,1.0,0.5413,0.6667,0.5,0.3889,0.6364,0.6286,3~~6,none,0~~2~~7~~9,1~~4~~5~~8~~10 +> y1,0,0,1,0.5454545454545454,0.49792959773196915,x_11.0_11.0_Bin_0__p2_6.0_6.0_Bin_0,x,11.0:11.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,6.0:6.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,3,none,0~~2~~6~~7~~9,1~~4~~5~~8~~10 +> y1,0,0,1,0.5454545454545454,0.49792959773196915,p2_4.0_4.0_Bin_0__x_10.0_10.0_Bin_0,p2,4.0:4.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,10.0:10.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,4.0,5.0,2.0,0.0,,,0.6666,1.8333,1.0,0.5413,0.6667,0.5,0.3889,0.6364,0.6286,2~~7,none,0~~3~~6~~9,1~~4~~5~~8~~10 +9d6 +< y2,0,0,1,0.45454545454545453,0.49792959773196915,p2_3.0_3.0_Bin_0,p2,3.0:3.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~2~~5~~8,3~~4~~6~~7~~9~~10 +11,15d7 +< 
y1,0,0,1,0.5454545454545454,0.49792959773196915,p2_4.0_4.0_Bin_0__x_10.0_10.0_Bin_0,p2,4.0:4.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,10.0:10.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,4.0,5.0,2.0,0.0,,,0.6666,1.8333,1.0,0.5413,0.6667,0.5,0.3889,0.6364,0.6286,2~~7,none,0~~3~~6~~9,1~~4~~5~~8~~10 +< y1,0,0,1,0.5454545454545454,0.49792959773196915,p2_6.0_6.0_Bin_0,p2,6.0:6.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,1.8333333333333335,PSG,4.0,5.0,2.0,0.0,,,0.6666,1.8333,1.0,0.5413,0.6667,0.5,0.3889,0.6364,0.6286,3~~6,none,0~~2~~7~~9,1~~4~~5~~8~~10 +< y2,0,0,1,0.45454545454545453,0.49792959773196915,p2_7.0_7.0_Bin_0__p1_4.0_4.0_Bin_0,p2,7.0:7.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,5,none,0~~1~~2~~8,3~~4~~6~~7~~9~~10 +< y2,0,0,1,0.45454545454545453,0.49792959773196915,p2_7.0_7.0_Bin_0__x_9.0_9.0_Bin_0,p2,7.0:7.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,9.0:9.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,5,none,0~~1~~2~~8,3~~4~~6~~7~~9~~10 +< y2,0,0,1,0.45454545454545453,0.49792959773196915,x_10.0_10.0_Bin_0__p2_3.0_3.0_Bin_0,x,10.0:10.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~2~~5~~8,3~~4~~6~~7~~9~~10 +17c9,12 +< 
y2,0,0,1,0.45454545454545453,0.49792959773196915,x_11.0_11.0_Bin_0__p1_4.0_4.0_Bin_0,x,11.0:11.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,8,none,0~~1~~2~~5,3~~4~~6~~7~~9~~10 +--- +> y1,0,0,1,0.5454545454545454,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_7.0_7.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,7.0:7.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,9,none,0~~2~~3~~6~~7,1~~4~~5~~8~~10 +> y1,0,0,1,0.5454545454545454,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_6.0_6.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,6.0:6.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,3,none,0~~2~~6~~7~~9,1~~4~~5~~8~~10 +> y1,0,0,1,0.5454545454545454,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_3.0_3.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,0,none,2~~3~~6~~7~~9,1~~4~~5~~8~~10 +> y2,0,0,1,0.45454545454545453,0.49792959773196915,p2_7.0_7.0_Bin_0__x_9.0_9.0_Bin_0,p2,7.0:7.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,x,9.0:9.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,5,none,0~~1~~2~~8,3~~4~~6~~7~~9~~10 +19,20c14,16 +< 
y1,0,0,1,0.5454545454545454,0.49792959773196915,x_11.0_11.0_Bin_0__p2_6.0_6.0_Bin_0,x,11.0:11.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,6.0:6.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,3,none,0~~2~~6~~7~~9,1~~4~~5~~8~~10 +< y1,0,0,1,0.5454545454545454,0.49792959773196915,x_12.0_12.0_Bin_0__p2_7.0_7.0_Bin_0,x,12.0:12.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,7.0:7.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,1.8333333333333335,PSG,5.0,5.0,1.0,0.0,,,0.5834,1.8333,1.0,0.5207,0.5833,0.2858,0.2143,0.5455,0.5333,9,none,0~~2~~3~~6~~7,1~~4~~5~~8~~10 +--- +> y2,0,0,1,0.45454545454545453,0.49792959773196915,p2_3.0_3.0_Bin_0,p2,3.0:3.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~2~~5~~8,3~~4~~6~~7~~9~~10 +> y2,0,0,1,0.45454545454545453,0.49792959773196915,x_10.0_10.0_Bin_0__p2_3.0_3.0_Bin_0,x,10.0:10.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~2~~5~~8,3~~4~~6~~7~~9~~10 +> y2,0,0,1,0.45454545454545453,0.49792959773196915,p1_4.0_4.0_Bin_0,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,NA,,NA:NA,NA:NA,NA,NA,NA,NA,2.2,PSG,3.0,6.0,2.0,0.0,,,0.7,2.2,1.0,0.5496,0.7,0.5714,0.4762,0.7273,0.6749,5~~8,none,0~~1~~2,3~~4~~6~~7~~9~~10 +21a18,21 +> 
y2,0,0,1,0.45454545454545453,0.49792959773196915,x_11.0_11.0_Bin_0__p1_4.0_4.0_Bin_0,x,11.0:11.0,1:1,1,9.0,12.0,10.363636363636363,0.9791208740244552,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,8,none,0~~1~~2~~5,3~~4~~6~~7~~9~~10 +> y2,0,0,1,0.45454545454545453,0.49792959773196915,p2_7.0_7.0_Bin_0__p1_4.0_4.0_Bin_0,p2,7.0:7.0,1:1,1,3.0,8.0,5.454545454545454,1.6160353486028343,p1,4.0:4.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,5,none,0~~1~~2~~8,3~~4~~6~~7~~9~~10 +> y2,0,0,1,0.45454545454545453,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_4.0_4.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,4.0:4.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,1,none,0~~2~~5~~8,3~~4~~6~~7~~9~~10 +> y2,0,0,1,0.45454545454545453,0.49792959773196915,p1_2.0_2.0_Bin_0__p2_3.0_3.0_Bin_0,p1,2.0:2.0,1:1,1,2.0,4.0,2.727272727272727,0.7496555682941201,p2,3.0:3.0,1:1,1,3,8,5.454545454545454,1.6160353486028343,,NA:NA,NA:NA,NA,NA,NA,NA,NA,2.2,PSG,4.0,6.0,1.0,0.0,,,0.6,2.2,1.0,0.5248,0.6,0.3333,0.2667,0.6364,0.5659,0,none,1~~2~~5~~8,3~~4~~6~~7~~9~~10 +=================== End of Test33_smlp_toy_num_resp_mult_features_ranking.csv diff report ================================ +=================== Diff report for: Test33_smlp_toy_num_resp_mult_ranking_resp_feat.csv ================================== +1,12c1,12 +< p1,p2,x,y1,y2 +< 2.0,3,10.0,1,1 +< 2.0,4,12.0,0,1 +< 3.0,4,10.0,1,1 +< 2.0,6,11.0,1,0 +< 2.0,8,10.0,0,0 +< 4.0,7,9.0,0,1 +< 3.0,6,9.0,1,0 +< 3.0,4,10.0,1,0 +< 4.0,4,11.0,0,1 +< 2.0,7,12.0,1,0 +< 3.0,7,10.0,0,0 +--- +> x,p2,p1,y1,y2 +> 10.0,3,2.0,1,1 +> 12.0,4,2.0,0,1 +> 10.0,4,3.0,1,1 +> 11.0,6,2.0,1,0 +> 10.0,8,2.0,0,0 +> 
9.0,7,4.0,0,1 +> 9.0,6,3.0,1,0 +> 10.0,4,3.0,1,0 +> 11.0,4,4.0,0,1 +> 12.0,7,2.0,1,0 +> 10.0,7,3.0,0,0 +=================== End of Test33_smlp_toy_num_resp_mult_ranking_resp_feat.csv diff report ================================ +=================== Diff report for: Test66_test65_model.txt ================================== +0a1,97 +> +> smlp_logger - INFO - Model exploration specification: +> {'version': '1.1', 'spec': [{'label': 'y1', 'type': 'response', 'range': 'float'}, {'label': 'y2', 'type': 'response', 'range': 'float'}, {'label': 'x0', 'type': 'input', 'range': 'float', 'bounds': [0, 10]}, {'label': 'x1', 'type': 'input', 'range': 'float', 'bounds': [0, 10]}, {'label': 'x2', 'type': 'input', 'range': 'float', 'bounds': [3, 7]}], 'alpha': 'x1==1 or x1==4 or x1==7'} +> +> smlp_logger - INFO - Executing run_smlp.py script: Start +> +> smlp_logger - INFO - Running SMLP in mode "verify": Start +> +> smlp_logger - INFO - Computed spec global constraint expressions: +> +> smlp_logger - INFO - Global alpha : x1==1 or x1==4 or x1==7 +> +> smlp_logger - INFO - Global beta : None +> +> smlp_logger - INFO - Radii theta : {} +> +> smlp_logger - INFO - Delta const : {'delta_abs': 0.0, 'delta_rel': 0.01} +> +> smlp_logger - INFO - Assertion asrt1: x0**2+y1>4.3 +> +> smlp_logger - INFO - Assertion asrt2: (y1+x2)/2<6 +> +> smlp_logger - INFO - PREPARE DATA FOR MODELING +> +> smlp_logger - INFO - LOAD TRAINED MODEL +> +> smlp_logger - INFO - Seving model rerun configuration in file ./../models/test65_model_rerun_model_config.json +> +> smlp_logger - INFO - Creating model exploration base components: Start +> +> smlp_logger - INFO - Parsing the SPEC: Start +> +> smlp_logger - INFO - Parsing the SPEC: End +> +> smlp_logger - INFO - Variable domains (alpha): {'y1': {'range': 'float', 'interval': None}, 'y2': {'range': 'float', 'interval': None}, 'x0': {'range': 'float', 'interval': [0, 10]}, 'x1': {'range': 'float', 'interval': [0, 10]}, 'x2': {'range': 'float', 'interval': [3, 
7]}} +> +> smlp_logger - INFO - Input bounds (alpha): {'x0': {'min': 0, 'max': 10}, 'x1': {'min': 0, 'max': 10}, 'x2': {'min': 3, 'max': 7}} +> +> smlp_logger - INFO - Knob bounds (eta): {} +> +> smlp_logger - INFO - Knob grids (eta): {} +> +> smlp_logger - INFO - Alpha global constraints: (or (or (= x1 1) (= x1 4)) (= x1 7)) +> +> smlp_logger - INFO - Alpha ranges constraints: (and (and (and true (and (>= x0 0) (<= x0 10))) (and (>= x1 0) (<= x1 10))) (and (>= x2 3) (<= x2 7))) +> +> smlp_logger - INFO - Alpha combined constraints: (and (and (and (and true (and (>= x0 0) (<= x0 10))) (and (>= x1 0) (<= x1 10))) (and (>= x2 3) (<= x2 7))) (or (or (= x1 1) (= x1 4)) (= x1 7))) +> +> smlp_logger - INFO - Beta global constraints: true +> +> smlp_logger - INFO - Eta ranges constraints: true +> +> smlp_logger - INFO - Eta grid constraints: true +> +> smlp_logger - INFO - Eta global constraints: true +> +> smlp_logger - INFO - Eta combined constraints: true +> +> smlp_logger - INFO - Creating model exploration base components: End +> +> smlp_logger - INFO - Input and knob interface constraints are consistent +> +> smlp_logger - INFO - Building model terms: Start +> +> smlp_logger - INFO - Model operator counts for y1: {'add': 1, 'mul': 15, 'ite': 5, 'and': 9, 'prop': 14, 'const': 50, 'sub': 14, 'var': 14} +> +> smlp_logger - INFO - Model operator counts for y2: {'add': 1, 'mul': 15, 'ite': 5, 'and': 9, 'prop': 14, 'const': 50, 'sub': 14, 'var': 14} +> +> smlp_logger - INFO - Building model terms: End +> +> smlp_logger - INFO - Model interface constraints are consistent +> +> smlp_logger - INFO - Verifying consistency of configuration for assertion asrt1: +> true +> +> smlp_logger - INFO - Input, knob and configuration constraints are consistent +> +> smlp_logger - INFO - Verifying consistency of configuration for assertion asrt2: +> true +> +> smlp_logger - INFO - Input, knob and configuration constraints are consistent +> +> smlp_logger - INFO - Verifying assertion 
asrt1 <-> x0**2+y1>4.3 +> +> smlp_logger - INFO - The configuration is consistent with assertion asrt1 +> +> smlp_logger - INFO - Completed with result: PASS +> +> smlp_logger - INFO - Verifying assertion asrt2 <-> (y1+x2)/2<6 +> +> smlp_logger - INFO - The configuration is consistent with assertion asrt2 +> +> smlp_logger - INFO - Completed with result: FAIL +> +> smlp_logger - INFO - Running SMLP in mode "verify": End +> +> smlp_logger - INFO - Executing run_smlp.py script: End +=================== End of Test66_test65_model.txt diff report ================================ +=================== Diff report for: Test66_test65_model_verify_results.json ================================== +diff: /home/mdmitry/github/smlp_python313/regr_smlp/code/Test66_test65_model_verify_results.json: No such file or directory +=================== End of Test66_test65_model_verify_results.json diff report ================================ +=================== Diff report for: Test68_test67_model.txt ================================== +0a1,97 +> +> smlp_logger - INFO - Model exploration specification: +> {'version': '1.1', 'spec': [{'label': 'y1', 'type': 'response', 'range': 'float'}, {'label': 'y2', 'type': 'response', 'range': 'float'}, {'label': 'x0', 'type': 'input', 'range': 'float', 'bounds': [0, 10]}, {'label': 'x1', 'type': 'input', 'range': 'float', 'bounds': [0, 10]}, {'label': 'x2', 'type': 'input', 'range': 'float', 'bounds': [3, 7]}], 'alpha': 'x1==1 or x1==4 or x1==7'} +> +> smlp_logger - INFO - Executing run_smlp.py script: Start +> +> smlp_logger - INFO - Running SMLP in mode "verify": Start +> +> smlp_logger - INFO - Computed spec global constraint expressions: +> +> smlp_logger - INFO - Global alpha : x1==1 or x1==4 or x1==7 +> +> smlp_logger - INFO - Global beta : None +> +> smlp_logger - INFO - Radii theta : {} +> +> smlp_logger - INFO - Delta const : {'delta_abs': 0.0, 'delta_rel': 0.01} +> +> smlp_logger - INFO - Assertion asrt1: x0**2+y1>4.3 +> +> smlp_logger - 
INFO - Assertion asrt2: (y1+x2)/2<6 +> +> smlp_logger - INFO - PREPARE DATA FOR MODELING +> +> smlp_logger - INFO - LOAD TRAINED MODEL +> +> smlp_logger - INFO - Seving model rerun configuration in file ./../models/test67_model_rerun_model_config.json +> +> smlp_logger - INFO - Creating model exploration base components: Start +> +> smlp_logger - INFO - Parsing the SPEC: Start +> +> smlp_logger - INFO - Parsing the SPEC: End +> +> smlp_logger - INFO - Variable domains (alpha): {'y1': {'range': 'float', 'interval': None}, 'y2': {'range': 'float', 'interval': None}, 'x0': {'range': 'float', 'interval': [0, 10]}, 'x1': {'range': 'float', 'interval': [0, 10]}, 'x2': {'range': 'float', 'interval': [3, 7]}} +> +> smlp_logger - INFO - Input bounds (alpha): {'x0': {'min': 0, 'max': 10}, 'x1': {'min': 0, 'max': 10}, 'x2': {'min': 3, 'max': 7}} +> +> smlp_logger - INFO - Knob bounds (eta): {} +> +> smlp_logger - INFO - Knob grids (eta): {} +> +> smlp_logger - INFO - Alpha global constraints: (or (or (= x1 1) (= x1 4)) (= x1 7)) +> +> smlp_logger - INFO - Alpha ranges constraints: (and (and (and true (and (>= x0 0) (<= x0 10))) (and (>= x1 0) (<= x1 10))) (and (>= x2 3) (<= x2 7))) +> +> smlp_logger - INFO - Alpha combined constraints: (and (and (and (and true (and (>= x0 0) (<= x0 10))) (and (>= x1 0) (<= x1 10))) (and (>= x2 3) (<= x2 7))) (or (or (= x1 1) (= x1 4)) (= x1 7))) +> +> smlp_logger - INFO - Beta global constraints: true +> +> smlp_logger - INFO - Eta ranges constraints: true +> +> smlp_logger - INFO - Eta grid constraints: true +> +> smlp_logger - INFO - Eta global constraints: true +> +> smlp_logger - INFO - Eta combined constraints: true +> +> smlp_logger - INFO - Creating model exploration base components: End +> +> smlp_logger - INFO - Input and knob interface constraints are consistent +> +> smlp_logger - INFO - Building model terms: Start +> +> smlp_logger - INFO - Model operator counts for y1: {'add': 1, 'mul': 7, 'ite': 3, 'and': 3, 'prop': 6, 'const': 
24, 'sub': 6, 'var': 6} +> +> smlp_logger - INFO - Model operator counts for y2: {'add': 1, 'mul': 4, 'ite': 2, 'and': 1, 'prop': 3, 'const': 14, 'sub': 3, 'var': 3} +> +> smlp_logger - INFO - Building model terms: End +> +> smlp_logger - INFO - Model interface constraints are consistent +> +> smlp_logger - INFO - Verifying consistency of configuration for assertion asrt1: +> true +> +> smlp_logger - INFO - Input, knob and configuration constraints are consistent +> +> smlp_logger - INFO - Verifying consistency of configuration for assertion asrt2: +> true +> +> smlp_logger - INFO - Input, knob and configuration constraints are consistent +> +> smlp_logger - INFO - Verifying assertion asrt1 <-> x0**2+y1>4.3 +> +> smlp_logger - INFO - The configuration is consistent with assertion asrt1 +> +> smlp_logger - INFO - Completed with result: PASS +> +> smlp_logger - INFO - Verifying assertion asrt2 <-> (y1+x2)/2<6 +> +> smlp_logger - INFO - The configuration is consistent with assertion asrt2 +> +> smlp_logger - INFO - Completed with result: FAIL +> +> smlp_logger - INFO - Running SMLP in mode "verify": End +> +> smlp_logger - INFO - Executing run_smlp.py script: End +=================== End of Test68_test67_model.txt diff report ================================ +=================== Diff report for: Test68_test67_model_verify_results.json ================================== +diff: /home/mdmitry/github/smlp_python313/regr_smlp/code/Test68_test67_model_verify_results.json: No such file or directory +=================== End of Test68_test67_model_verify_results.json diff report ================================ +=================== Diff report for: Test69_smlp_toy_num_resp_mult_verify_results.json ================================== +6c6 +< "x": 8.601911912781919, +--- +> "x": 8.601911912575982, +9c9 +< "y2": 5.078784555196762 +--- +> "y2": 5.078784562647343 +=================== End of Test69_smlp_toy_num_resp_mult_verify_results.json diff report 
================================ +=================== Diff report for: Test70_test69_model.txt ================================== +25a26,82 +> +> smlp_logger - INFO - Creating model exploration base components: Start +> +> smlp_logger - INFO - Parsing the SPEC: Start +> +> smlp_logger - INFO - Parsing the SPEC: End +> +> smlp_logger - INFO - Variable domains (alpha): {'y2': {'range': 'float', 'interval': None}, 'x': {'range': 'float', 'interval': [0, 10]}, 'p1': {'range': 'float', 'interval': [0, 10]}, 'p2': {'range': 'float', 'interval': [3, 7]}} +> +> smlp_logger - INFO - Input bounds (alpha): {'x': {'min': 0, 'max': 10}, 'p1': {'min': 0, 'max': 10}, 'p2': {'min': 3, 'max': 7}} +> +> smlp_logger - INFO - Knob bounds (eta): {} +> +> smlp_logger - INFO - Knob grids (eta): {} +> +> smlp_logger - INFO - Alpha global constraints: (or (or (= p1 1) (= p1 4)) (= p1 7)) +> +> smlp_logger - INFO - Alpha ranges constraints: (and (and (and true (and (>= x 0) (<= x 10))) (and (>= p1 0) (<= p1 10))) (and (>= p2 3) (<= p2 7))) +> +> smlp_logger - INFO - Alpha combined constraints: (and (and (and (and true (and (>= x 0) (<= x 10))) (and (>= p1 0) (<= p1 10))) (and (>= p2 3) (<= p2 7))) (or (or (= p1 1) (= p1 4)) (= p1 7))) +> +> smlp_logger - INFO - Beta global constraints: true +> +> smlp_logger - INFO - Eta ranges constraints: true +> +> smlp_logger - INFO - Eta grid constraints: true +> +> smlp_logger - INFO - Eta global constraints: true +> +> smlp_logger - INFO - Eta combined constraints: true +> +> smlp_logger - INFO - Creating model exploration base components: End +> +> smlp_logger - INFO - Input and knob interface constraints are consistent +> +> smlp_logger - INFO - Building model terms: Start +> +> smlp_logger - INFO - Model operator counts for y2: {'add': 256, 'mul': 472, 'ite': 39, 'prop': 39, 'const': 846, 'sub': 216, 'var': 216} +> +> smlp_logger - INFO - Building model terms: End +> +> smlp_logger - INFO - Model interface constraints are consistent +> +> 
smlp_logger - INFO - Verifying consistency of configuration for assertion asrt1: +> true +> +> smlp_logger - INFO - Input, knob and configuration constraints are consistent +> +> smlp_logger - INFO - Verifying assertion asrt1 <-> (y2**3+p2)/2<6 +> +> smlp_logger - INFO - The configuration is consistent with assertion asrt1 +> +> smlp_logger - INFO - Completed with result: FAIL +> +> smlp_logger - INFO - Running SMLP in mode "verify": End +> +> smlp_logger - INFO - Executing run_smlp.py script: End +=================== End of Test70_test69_model.txt diff report ================================ +=================== Diff report for: Test70_test69_model_verify_results.json ================================== +diff: /home/mdmitry/github/smlp_python313/regr_smlp/code/Test70_test69_model_verify_results.json: No such file or directory +=================== End of Test70_test69_model_verify_results.json diff report ================================ +=================== Diff report for: Test72_test71_model.txt ================================== +0a1,84 +> +> smlp_logger - INFO - Model exploration specification: +> {'version': '1.1', 'spec': [{'label': 'y1', 'type': 'response', 'range': 'float'}, {'label': 'y2', 'type': 'response', 'range': 'float'}, {'label': 'x0', 'type': 'input', 'range': 'float', 'bounds': [0, 10]}, {'label': 'x1', 'type': 'input', 'range': 'float', 'bounds': [0, 10]}, {'label': 'x2', 'type': 'input', 'range': 'float', 'bounds': [3, 7]}], 'alpha': 'x1==1 or x1==4 or x1==7'} +> +> smlp_logger - INFO - Executing run_smlp.py script: Start +> +> smlp_logger - INFO - Running SMLP in mode "verify": Start +> +> smlp_logger - INFO - Computed spec global constraint expressions: +> +> smlp_logger - INFO - Global alpha : x1==1 or x1==4 or x1==7 +> +> smlp_logger - INFO - Global beta : None +> +> smlp_logger - INFO - Radii theta : {} +> +> smlp_logger - INFO - Delta const : {'delta_abs': 0.0, 'delta_rel': 0.01} +> +> smlp_logger - INFO - Assertion asrt1: (y2**3+x2)/2<6 
+> +> smlp_logger - INFO - PREPARE DATA FOR MODELING +> +> smlp_logger - INFO - LOAD TRAINED MODEL +> +> smlp_logger - INFO - Seving model rerun configuration in file ./../models/test71_model_rerun_model_config.json +> +> smlp_logger - INFO - Creating model exploration base components: Start +> +> smlp_logger - INFO - Parsing the SPEC: Start +> +> smlp_logger - INFO - Parsing the SPEC: End +> +> smlp_logger - INFO - Variable domains (alpha): {'y1': {'range': 'float', 'interval': None}, 'y2': {'range': 'float', 'interval': None}, 'x0': {'range': 'float', 'interval': [0, 10]}, 'x1': {'range': 'float', 'interval': [0, 10]}, 'x2': {'range': 'float', 'interval': [3, 7]}} +> +> smlp_logger - INFO - Input bounds (alpha): {'x0': {'min': 0, 'max': 10}, 'x1': {'min': 0, 'max': 10}, 'x2': {'min': 3, 'max': 7}} +> +> smlp_logger - INFO - Knob bounds (eta): {} +> +> smlp_logger - INFO - Knob grids (eta): {} +> +> smlp_logger - INFO - Alpha global constraints: (or (or (= x1 1) (= x1 4)) (= x1 7)) +> +> smlp_logger - INFO - Alpha ranges constraints: (and (and (and true (and (>= x0 0) (<= x0 10))) (and (>= x1 0) (<= x1 10))) (and (>= x2 3) (<= x2 7))) +> +> smlp_logger - INFO - Alpha combined constraints: (and (and (and (and true (and (>= x0 0) (<= x0 10))) (and (>= x1 0) (<= x1 10))) (and (>= x2 3) (<= x2 7))) (or (or (= x1 1) (= x1 4)) (= x1 7))) +> +> smlp_logger - INFO - Beta global constraints: true +> +> smlp_logger - INFO - Eta ranges constraints: true +> +> smlp_logger - INFO - Eta grid constraints: true +> +> smlp_logger - INFO - Eta global constraints: true +> +> smlp_logger - INFO - Eta combined constraints: true +> +> smlp_logger - INFO - Creating model exploration base components: End +> +> smlp_logger - INFO - Input and knob interface constraints are consistent +> +> smlp_logger - INFO - Building model terms: Start +> +> smlp_logger - INFO - Model operator counts for y1: {'add': 256, 'mul': 472, 'ite': 39, 'prop': 39, 'const': 846, 'sub': 216, 'var': 216} +> +> 
smlp_logger - INFO - Model operator counts for y2: {'add': 256, 'mul': 472, 'ite': 39, 'prop': 39, 'const': 846, 'sub': 216, 'var': 216} +> +> smlp_logger - INFO - Building model terms: End +> +> smlp_logger - INFO - Model interface constraints are consistent +> +> smlp_logger - INFO - Verifying consistency of configuration for assertion asrt1: +> true +> +> smlp_logger - INFO - Input, knob and configuration constraints are consistent +> +> smlp_logger - INFO - Verifying assertion asrt1 <-> (y2**3+x2)/2<6 +> +> smlp_logger - INFO - The configuration is consistent with assertion asrt1 +> +> smlp_logger - INFO - Completed with result: FAIL +> +> smlp_logger - INFO - Running SMLP in mode "verify": End +> +> smlp_logger - INFO - Executing run_smlp.py script: End +=================== End of Test72_test71_model.txt diff report ================================ +=================== Diff report for: Test72_test71_model_verify_results.json ================================== +diff: /home/mdmitry/github/smlp_python313/regr_smlp/code/Test72_test71_model_verify_results.json: No such file or directory +=================== End of Test72_test71_model_verify_results.json diff report ================================ +=================== Diff report for: Test77_test76_model.txt ================================== +0a1,110 +> +> smlp_logger - INFO - Model exploration specification: +> {'version': '1.1', 'spec': [{'label': 'y1', 'type': 'response', 'range': 'float'}, {'label': 'y2', 'type': 'response', 'range': 'float'}, {'label': 'x0', 'type': 'input', 'range': 'float', 'bounds': [0, 10]}, {'label': 'x1', 'type': 'input', 'range': 'float', 'bounds': [0, 10]}, {'label': 'x2', 'type': 'input', 'range': 'float', 'bounds': [3, 7]}], 'alpha': 'x1==1 or x1==4 or x1==7'} +> +> smlp_logger - INFO - Executing run_smlp.py script: Start +> +> smlp_logger - INFO - Running SMLP in mode "verify": Start +> +> smlp_logger - INFO - Computed spec global constraint expressions: +> +> smlp_logger - INFO - 
Global alpha : x1==1 or x1==4 or x1==7 +> +> smlp_logger - INFO - Global beta : None +> +> smlp_logger - INFO - Radii theta : {} +> +> smlp_logger - INFO - Delta const : {'delta_abs': 0.0, 'delta_rel': 0.01} +> +> smlp_logger - INFO - Assertion asrt1: (y2**3+x2)/2<6 +> +> smlp_logger - INFO - Assertion asrt2: y1>=9 +> +> smlp_logger - INFO - Assertion asrt3: y2<0 +> +> smlp_logger - INFO - PREPARE DATA FOR MODELING +> +> smlp_logger - INFO - LOAD TRAINED MODEL +> +> smlp_logger - INFO - Seving model rerun configuration in file ./../models/test76_model_rerun_model_config.json +> +> smlp_logger - INFO - Creating model exploration base components: Start +> +> smlp_logger - INFO - Parsing the SPEC: Start +> +> smlp_logger - INFO - Parsing the SPEC: End +> +> smlp_logger - INFO - Variable domains (alpha): {'y1': {'range': 'float', 'interval': None}, 'y2': {'range': 'float', 'interval': None}, 'x0': {'range': 'float', 'interval': [0, 10]}, 'x1': {'range': 'float', 'interval': [0, 10]}, 'x2': {'range': 'float', 'interval': [3, 7]}} +> +> smlp_logger - INFO - Input bounds (alpha): {'x0': {'min': 0, 'max': 10}, 'x1': {'min': 0, 'max': 10}, 'x2': {'min': 3, 'max': 7}} +> +> smlp_logger - INFO - Knob bounds (eta): {} +> +> smlp_logger - INFO - Knob grids (eta): {} +> +> smlp_logger - INFO - Alpha global constraints: (or (or (= x1 1) (= x1 4)) (= x1 7)) +> +> smlp_logger - INFO - Alpha ranges constraints: (and (and true (and (>= x1 0) (<= x1 10))) (and (>= x2 3) (<= x2 7))) +> +> smlp_logger - INFO - Alpha combined constraints: (and (and (and true (and (>= x1 0) (<= x1 10))) (and (>= x2 3) (<= x2 7))) (or (or (= x1 1) (= x1 4)) (= x1 7))) +> +> smlp_logger - INFO - Beta global constraints: true +> +> smlp_logger - INFO - Eta ranges constraints: true +> +> smlp_logger - INFO - Eta grid constraints: true +> +> smlp_logger - INFO - Eta global constraints: true +> +> smlp_logger - INFO - Eta combined constraints: true +> +> smlp_logger - INFO - Creating model exploration base 
components: End +> +> smlp_logger - INFO - Input and knob interface constraints are consistent +> +> smlp_logger - INFO - Building model terms: Start +> +> smlp_logger - INFO - Model operator counts for y1: {'add': 1, 'mul': 21, 'ite': 6, 'and': 14, 'prop': 20, 'const': 69, 'sub': 20, 'var': 20} +> +> smlp_logger - INFO - Model operator counts for y2: {'add': 1, 'mul': 21, 'ite': 6, 'and': 14, 'prop': 20, 'const': 69, 'sub': 20, 'var': 20} +> +> smlp_logger - INFO - Building model terms: End +> +> smlp_logger - INFO - Model interface constraints are consistent +> +> smlp_logger - INFO - Verifying consistency of configuration for assertion asrt1: +> true +> +> smlp_logger - INFO - Input, knob and configuration constraints are consistent +> +> smlp_logger - INFO - Verifying consistency of configuration for assertion asrt2: +> true +> +> smlp_logger - INFO - Input, knob and configuration constraints are consistent +> +> smlp_logger - INFO - Verifying consistency of configuration for assertion asrt3: +> true +> +> smlp_logger - INFO - Input, knob and configuration constraints are consistent +> +> smlp_logger - INFO - Verifying assertion asrt1 <-> (y2**3+x2)/2<6 +> +> smlp_logger - INFO - The configuration is inconsistent with assertion asrt1 +> +> smlp_logger - INFO - Completed with result: FAIL +> +> smlp_logger - INFO - Verifying assertion asrt2 <-> y1>=9 +> +> smlp_logger - INFO - The configuration is consistent with assertion asrt2 +> +> smlp_logger - INFO - Completed with result: FAIL +> +> smlp_logger - INFO - Verifying assertion asrt3 <-> y2<0 +> +> smlp_logger - INFO - The configuration is inconsistent with assertion asrt3 +> +> smlp_logger - INFO - Completed with result: FAIL +> +> smlp_logger - INFO - Running SMLP in mode "verify": End +> +> smlp_logger - INFO - Executing run_smlp.py script: End +=================== End of Test77_test76_model.txt diff report ================================ +=================== Diff report for: 
Test77_test76_model_verify_results.json ================================== +diff: /home/mdmitry/github/smlp_python313/regr_smlp/code/Test77_test76_model_verify_results.json: No such file or directory +=================== End of Test77_test76_model_verify_results.json diff report ================================ +=================== Diff report for: test110_model_poly_sklearn_formula.txt ================================== +diff: /home/mdmitry/github/smlp_python313/regr_smlp/code/test110_model_poly_sklearn_formula.txt: No such file or directory +=================== End of test110_model_poly_sklearn_formula.txt diff report ================================ diff --git a/src/smlp_py/smlp_models.py b/src/smlp_py/smlp_models.py index 72b655f6..c012ac28 100644 --- a/src/smlp_py/smlp_models.py +++ b/src/smlp_py/smlp_models.py @@ -6,17 +6,19 @@ import pickle import json import os +from sys import version_info from sklearn.metrics import mean_squared_error, r2_score -from pycaret.regression import predict_model as caret_predict_model -from pycaret.regression import save_model as caret_save_model -from pycaret.regression import load_model as caret_load_model +if version_info.major < 4 and version_info.minor < 14: + from pycaret.regression import predict_model as caret_predict_model + from pycaret.regression import save_model as caret_save_model + from pycaret.regression import load_model as caret_load_model + from smlp_py.train_caret import ModelCaret from keras.models import load_model as keras_load_model from smlp_py.smlp_plots import evaluate_prediction from smlp_py.train_keras import ModelKeras -from smlp_py.train_caret import ModelCaret from smlp_py.train_sklearn import ModelSklearn from smlp_py.smlp_utils import str_to_bool @@ -75,18 +77,23 @@ def __init__(self): } self._instKeras = ModelKeras() self._instSklearn = ModelSklearn() - self._instCaret = ModelCaret() + if version_info.major < 4 and version_info.minor < 14: + self._instCaret = ModelCaret() + self._caret_dict = 
self._instCaret.get_caret_hparam_default_dict() self._sklearn_dict = self._instSklearn.get_sklearn_hparam_default_dict() - self._caret_dict = self._instCaret.get_caret_hparam_default_dict() self._keras_dict = self._instKeras.get_keras_hparam_default_dict() - self.model_params_dict = self._model_params_common_dict | self._keras_dict | self._sklearn_dict | self._caret_dict + if version_info.major < 4 and version_info.minor < 14: + self.model_params_dict = self._model_params_common_dict | self._keras_dict | self._sklearn_dict | self._caret_dict + else: + self.model_params_dict = self._model_params_common_dict | self._keras_dict | self._sklearn_dict # report_file_prefix is a string used as prefix in all report files of SMLP def set_report_file_prefix(self, report_file_prefix): self.report_file_prefix = report_file_prefix self._instKeras.report_file_prefix = report_file_prefix self._instSklearn.report_file_prefix = report_file_prefix - self._instCaret.report_file_prefix = report_file_prefix + if version_info.major < 4 and version_info.minor < 14: + self._instCaret.report_file_prefix = report_file_prefix # model_file_prefix is a string used as prefix in all outut files of SMLP that are used to # save a trained ML model and to re-run the model on new data (without need for re-training) @@ -94,7 +101,8 @@ def set_model_file_prefix(self, model_file_prefix): self.model_file_prefix = model_file_prefix self._instKeras.model_file_prefix = model_file_prefix self._instSklearn.model_file_prefix = model_file_prefix - self._instCaret.model_file_prefix = model_file_prefix + if version_info.major < 4 and version_info.minor < 14: + self._instCaret.model_file_prefix = model_file_prefix # required for generating file names of the reports containing model prediction results; # might cover multiple models (algorithms like NN, DT, RF) as well as multiple responses @@ -254,7 +262,8 @@ def _compute_sample_weights_vect(self, y_train, sw_coef, sw_exp, sw_int): def set_logger(self, logger): 
self._model_logger = logger self._instKeras.set_logger(logger) - self._instCaret.set_logger(logger) + if version_info.major < 4 and version_info.minor < 14: + self._instCaret.set_logger(logger) self._instSklearn.set_logger(logger) # generate out_dir/prefix_data_{train/test/labeled/new/}_prediction_precision.csv and diff --git a/src/smlp_py/smlp_terms.py b/src/smlp_py/smlp_terms.py index 5f5f02d6..a6dbdf9f 100644 --- a/src/smlp_py/smlp_terms.py +++ b/src/smlp_py/smlp_terms.py @@ -1308,22 +1308,21 @@ def _nn_dense_layer_terms(self, last_layer_terms, layer_weights, layer_biases, a return curr_layer_terms def _nn_keras_is_sequential(self, model): - try: - # v2.9 has this API - cl = keras.engine.sequential.Sequential - except AttributeError: - # v2.14+ has this API - cl = keras.src.engine.sequential.Sequential - return isinstance(model, cl) + """ + Check if a Keras model is Sequential. + For Keras 3.x versions. + """ + from keras.models import Sequential + return isinstance(model, Sequential) def _nn_keras_is_functional(self, model): - try: - # v2.9 has this API - cl = keras.engine.functional.Functional - except AttributeError: - # v2.14+ has this API - cl = keras.src.engine.functional.Functional - return isinstance(model, cl) + """ + Check if a Keras model is Functional. + For Keras 3.x versions. 
+ """ + from keras.models import Model, Sequential + # Functional models are Model instances but not Sequential + return isinstance(model, Model) and not isinstance(model, Sequential) # determine the model type -- sequential vs functional def get_nn_keras_model_type(self, model): @@ -2286,7 +2285,23 @@ def declare_iternal_node_vars(model, resp_name, resp_names): continue else: curr_layer_nodes_count = getattr(layer, 'units', None) - assert curr_layer_nodes_count == len(list(layer.weights[1])); + + # Get weights properly using get_weights() method + # This returns [weight_matrix, bias_vector] if layer has bias, or [weight_matrix] if not + layer_weights_list = layer.get_weights() + + if len(layer_weights_list) >= 2: + # Layer has biases - use bias vector length + biases = layer_weights_list[1] + assert curr_layer_nodes_count == len(biases) + elif len(layer_weights_list) == 1: + # Layer has no biases - use weight matrix output dimension + weights_matrix = layer_weights_list[0] + assert curr_layer_nodes_count == weights_matrix.shape[1] + else: + # Layer has no weights at all - skip it + continue + for node in range(curr_layer_nodes_count): domain_dict[self._nnKerasTermsInst._nn_keras_node_name(resp_name, l, node)] = smlp.component(self.smlp_real) diff --git a/src/smlp_py/train_keras.py b/src/smlp_py/train_keras.py index cd4424f4..7774f71e 100644 --- a/src/smlp_py/train_keras.py +++ b/src/smlp_py/train_keras.py @@ -303,7 +303,16 @@ def _nn_init_model_functional(self, resp_names:list[str], input_dim:int, optimiz # Initialize the Functional model model = keras.Model(inputs=inputs, outputs=outputs) - model.compile(optimizer=optimizer, loss=loss_function, metrics=metrics) + + # For multi-output models, metrics must be a dict or list of lists + if len(resp_names) > 1: + # Create metrics as a dict mapping each output name to the same metrics + metrics_dict = {resp: metrics for resp in resp_names} + model.compile(optimizer=optimizer, loss=loss_function, metrics=metrics_dict) + 
else: + # Single output - metrics can be a simple list + model.compile(optimizer=optimizer, loss=loss_function, metrics=metrics) + return model # function for comparing model configurations model.get_config() for sequential vs functional models @@ -364,17 +373,33 @@ def _log_model_summary(self, model, epochs, batch_size, sample_weights, callback # Print optimizer details, Learning rate, Loss function, metrics, model configuration, sample weights self._keras_logger.info("Optimizer: " + str(model.optimizer.get_config())) self._keras_logger.info("Learning rate: " + str(model.optimizer.learning_rate.numpy())) - if isinstance(model.loss, dict): # functiona API, and NN Keras Tuner is not used + if isinstance(model.loss, dict): # functional API, and NN Keras Tuner is not used self._keras_logger.info("Loss function: " + str(model.loss)) else: # sequential API or when NN Keras tuner is used for k, v in self._loss_functions.items(): if str(v) in str(model.loss) or str(k) in str(model.loss): self._keras_logger.info("Loss function: " + str(k)) - if hasattr(model, 'compiled_metrics'): - compiled_metrics = model.compiled_metrics._metrics # Access the private _metrics attribute - self._keras_logger.info("Metrics: " + str([m.name for m in compiled_metrics])) - else: - self._keras_logger.info("Metrics: " + str([])) + + # Fixed metrics logging - compatible with all TensorFlow versions + try: + if hasattr(model, 'compiled_metrics') and model.compiled_metrics is not None: + # Try to get metrics from the compiled_metrics object + if hasattr(model.compiled_metrics, '_metrics'): + # Older TensorFlow versions + compiled_metrics = model.compiled_metrics._metrics + self._keras_logger.info("Metrics: " + str([m.name for m in compiled_metrics])) + elif hasattr(model.compiled_metrics, 'metrics'): + # Newer TensorFlow versions - use public API + compiled_metrics = model.compiled_metrics.metrics + self._keras_logger.info("Metrics: " + str([m.name for m in compiled_metrics])) + else: + # Fallback 
to model.metrics + self._keras_logger.info("Metrics: " + str([m.name for m in model.metrics if hasattr(m, 'name')])) + else: + # No compiled_metrics available + self._keras_logger.info("Metrics: " + str([])) + except Exception as e: + self._keras_logger.warning(f"Could not retrieve metrics: {str(e)}") #self._keras_logger.info("Metrics: " + str(model.metrics)) self._keras_logger.info("Model configuration: " + str(model.get_config())) self._keras_logger.info("Epochs: " + str(epochs)) @@ -402,7 +427,7 @@ def round_model_weights(self, model:keras.Model, num_decimal_places:int): # Set the rounded weights back to the layer layer.set_weights(rounded_weights) - # train keras NN model + # train keras NN model - FIXED for symbolic tensor issues def _nn_train(self, model, epochs, batch_size, weights_precision, model_checkpoint_path, X_train, X_test, y_train, y_test, sample_weights_dict, sequential_api): checkpointer = None @@ -426,52 +451,84 @@ def _nn_train(self, model, epochs, batch_size, weights_precision, model_checkpoi lr=0.000001, factor=0.1, patience=100) callbacks = [c for c in (checkpointer,earlyStopping,rlrop) if c is not None] - # log model details - #self._log_model_summary(model, epochs, batch_size, sample_weights) + + # Convert DataFrames to numpy arrays to avoid symbolic tensor issues + X_train_array = X_train.to_numpy() if isinstance(X_train, pd.DataFrame) else np.array(X_train) + X_test_array = X_test.to_numpy() if isinstance(X_test, pd.DataFrame) else np.array(X_test) + y_train_array = y_train.to_numpy() if isinstance(y_train, pd.DataFrame) else np.array(y_train) + y_test_array = y_test.to_numpy() if isinstance(y_test, pd.DataFrame) else np.array(y_test) + # train model with sequential or functional API if sequential_api: #SEQUENTIAL_MODEL if sample_weights_dict is not None: sample_weights_df = pd.DataFrame.from_dict(sample_weights_dict) - sample_weights_vect = np.array(list(sample_weights_df.agg('mean', axis=1))) + sample_weights_vect = 
np.array(list(sample_weights_df.agg('mean', axis=1)), dtype=np.float32) else: sample_weights_vect = None # log model details self._log_model_summary(model, epochs, batch_size, sample_weights_vect, callbacks) - history = model.fit(X_train, y_train, + history = model.fit(X_train_array, y_train_array, epochs=epochs, - validation_data=(X_test, y_test), - #steps_per_epoch=10, + validation_data=(X_test_array, y_test_array), sample_weight=sample_weights_vect, callbacks=callbacks, - batch_size=batch_size) + batch_size=batch_size, + verbose=1) else: - ''' - # this code is for debugging only - sample_weights_df = pd.DataFrame.from_dict(sample_weights_dict) - sample_weights_vect = None if sample_weights_dict is None else np.array(list(sample_weights_df.agg('mean', axis=1))) - #for k in sample_weights_dict.keys(): - # sample_weights_dict[k] = sample_weights_vect - # log model details - self._log_model_summary(model, epochs, batch_size, sample_weights_vect, callbacks) - history = model.fit(X_train, y_train, - epochs=epochs, - validation_data=(X_test, y_test), - #steps_per_epoch=10, - sample_weight=sample_weights_vect, - callbacks=callbacks, #[c for c in (checkpointer,earlyStopping,rlrop) if c is not None], - batch_size=batch_size) - ''' + # CRITICAL FIX: For functional API with multiple outputs, sample_weight + # must be a LIST in the same order as outputs, NOT a dictionary + sample_weights_for_fit = None + if sample_weights_dict is not None: + # Get the output names from the model + output_names = [output.name.split('/')[0] for output in model.outputs] + + # Get the number of training samples + n_samples = len(X_train_array) + + # Create a list of sample weights in the same order as model outputs + sample_weights_list = [] + for output_name in output_names: + if output_name in sample_weights_dict: + weight_data = sample_weights_dict[output_name] + # Convert to numpy array with explicit dtype + if isinstance(weight_data, pd.Series): + 
sample_weights_list.append(weight_data.to_numpy().astype(np.float32)) + elif isinstance(weight_data, (list, np.ndarray)): + sample_weights_list.append(np.array(weight_data, dtype=np.float32)) + else: + sample_weights_list.append(weight_data) + else: + # If weight not provided for this output, use array of ones + # Keras does NOT accept None in a list, must be an actual array + sample_weights_list.append(np.ones(n_samples, dtype=np.float32)) + + sample_weights_for_fit = sample_weights_list + # log model details - self._log_model_summary(model, epochs, batch_size, sample_weights_dict, callbacks) - history = model.fit(X_train, y_train, + self._log_model_summary(model, epochs, batch_size, sample_weights_for_fit, callbacks) + + # For functional API, y_train should also be a list if multiple outputs + if len(model.outputs) > 1: + # Split y_train into list of arrays, one per output + if isinstance(y_train_array, np.ndarray) and y_train_array.ndim == 2: + y_train_list = [y_train_array[:, i:i+1] for i in range(y_train_array.shape[1])] + y_test_list = [y_test_array[:, i:i+1] for i in range(y_test_array.shape[1])] + else: + y_train_list = y_train_array + y_test_list = y_test_array + else: + y_train_list = y_train_array + y_test_list = y_test_array + + history = model.fit(X_train_array, y_train_list, epochs=epochs, - validation_data=(X_test, y_test), - #steps_per_epoch=10, - sample_weight=sample_weights_dict, + validation_data=(X_test_array, y_test_list), + sample_weight=sample_weights_for_fit, callbacks=callbacks, - batch_size=batch_size) - #''' + batch_size=batch_size, + verbose=1) + if weights_precision is not None: self.round_model_weights(model, int(weights_precision)) return history @@ -661,7 +718,7 @@ def search(self, X_train:pd.DataFrame, y_train:pd.DataFrame, X_val:pd.DataFrame, self._keras_logger.info('Best hyperparameters found: end') self._keras_logger.info('Tuning model hyperparameters using Keras Tuner algorithm ' + str(tuner_algo) + ': end') - # Fit / train 
model with tuned values of hyperparameters (obtained using Keras Tuner search() and strored within self) + # Fit / train model with tuned values of hyperparameters (obtained using Keras Tuner search() and stored within self) def get_best_model(self, X_train, X_test, y_train, y_test, epochs, weights_coef, batch_size, loss_function_str, learning_rate, sequential_api): best_hps = self.tuner.get_best_hyperparameters(num_trials=1)[0] best_model = self.tuner.hypermodel.build(best_hps) @@ -671,40 +728,70 @@ def get_best_model(self, X_train, X_test, y_train, y_test, epochs, weights_coef, best_batch_size = best_hps.get('batch_size') - ''' - # this code is for debugging - override_best_params = False - if override_best_params: - new_optimizer = keras.optimizers.Adam(learning_rate=learning_rate) - best_model.compile(optimizer=new_optimizer, loss=loss_function_str, metrics=self._DEF_METRICS) - history = best_model.fit( - x=X_train, - y=y_train, - epochs=epochs, - validation_data=(X_test, y_test), - batch_size=batch_size, - sample_weight=weights_coef, - callbacks=callbacks - ) - return best_model - ''' - if sequential_api: #SEQUENTIAL_MODEL + # Convert DataFrames to numpy arrays + X_train_array = X_train.to_numpy() if isinstance(X_train, pd.DataFrame) else X_train + X_test_array = X_test.to_numpy() if isinstance(X_test, pd.DataFrame) else X_test + y_train_array = y_train.to_numpy() if isinstance(y_train, pd.DataFrame) else y_train + y_test_array = y_test.to_numpy() if isinstance(y_test, pd.DataFrame) else y_test + + if sequential_api: if weights_coef is not None: sample_weights_df = pd.DataFrame.from_dict(weights_coef) - sample_weights = np.array(list(sample_weights_df.agg('mean', axis=1))) + sample_weights = np.array(list(sample_weights_df.agg('mean', axis=1)), dtype=np.float32) else: sample_weights = None else: - sample_weights = weights_coef + # CRITICAL FIX: For functional API with multiple outputs, sample_weight + # must be a LIST in the same order as outputs, NOT a 
dictionary + sample_weights = None + if weights_coef is not None: + # Get the output names from the model + output_names = [output.name.split('/')[0] for output in best_model.outputs] + + # Get the number of training samples + n_samples = len(X_train_array) + + # Create a list of sample weights in the same order as model outputs + sample_weights_list = [] + for output_name in output_names: + if output_name in weights_coef: + weight_data = weights_coef[output_name] + # Convert to numpy array with explicit dtype + if isinstance(weight_data, pd.Series): + sample_weights_list.append(weight_data.to_numpy().astype(np.float32)) + elif isinstance(weight_data, (list, np.ndarray)): + sample_weights_list.append(np.array(weight_data, dtype=np.float32)) + else: + sample_weights_list.append(weight_data) + else: + # If weight not provided for this output, use array of ones + # Keras does NOT accept None in a list, must be an actual array + sample_weights_list.append(np.ones(n_samples, dtype=np.float32)) + + sample_weights = sample_weights_list + + # For functional API with multiple outputs, y must also be a list + if not sequential_api and len(best_model.outputs) > 1: + # Split y_train into list of arrays, one per output + if isinstance(y_train_array, np.ndarray) and y_train_array.ndim == 2: + y_train_list = [y_train_array[:, i:i+1] for i in range(y_train_array.shape[1])] + y_test_list = [y_test_array[:, i:i+1] for i in range(y_test_array.shape[1])] + else: + y_train_list = y_train_array + y_test_list = y_test_array + else: + y_train_list = y_train_array + y_test_list = y_test_array history = best_model.fit( - x=X_train.to_numpy(), - y=y_train.to_numpy(), + x=X_train_array, + y=y_train_list, epochs=epochs, - validation_data=(X_test.to_numpy(), y_test.to_numpy()), + validation_data=(X_test_array, y_test_list), batch_size=best_batch_size, - sample_weight=sample_weights, #weights_coef, - callbacks=None #[keras.callbacks.EarlyStopping(patience=5)] + sample_weight=sample_weights, + 
callbacks=None, + verbose=1 ) return best_model, history