SCNet/05_network_parameter_optimization.ipynb

{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"# Network Parameter Optimization\n",
"\n",
"Sweep the `network_parameter` of `ShortCut11` (recorded as `neuron num` in the results) on the mango DM dataset and log the test-set R² and RMSE for each setting."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"shape of data:\n",
"x_train: (5728, 1, 102), y_train: (5728, 1),\n",
"x_val: (2455, 1, 102), y_val: (2455, 1)\n",
"x_test: (3508, 1, 102), y_test: (3508, 1)\n"
]
}
],
"source": [
"import numpy as np\n",
"import pandas as pd\n",
"from keras.models import load_model\n",
"from sklearn.metrics import r2_score, mean_squared_error\n",
"from sklearn.model_selection import train_test_split\n",
"from scipy.io import loadmat\n",
"from models import ShortCut11\n",
"from numpy.random import seed\n",
"import tensorflow\n",
"import time\n",
"\n",
"# Fix the NumPy and TensorFlow seeds so the runs are reproducible.\n",
"seed(4750)\n",
"tensorflow.random.set_seed(4750)\n",
"time1 = time.time()\n",
"\n",
"# Load the pre-split mango DM dataset and carve a validation set out of the training portion.\n",
"data = loadmat('./dataset/mango/mango_dm_split.mat')\n",
"x_train, y_train, x_test, y_test = data['x_train'], data['y_train'], data['x_test'], data['y_test']\n",
"x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.3, random_state=12, shuffle=True)\n",
"\n",
"# Add a channel axis so each sample has shape (1, 102), matching the model's input_shape below.\n",
"x_train, x_val, x_test = x_train[:, np.newaxis, :], x_val[:, np.newaxis, :], x_test[:, np.newaxis, :]\n",
"print(f\"shape of data:\\n\"\n",
"      f\"x_train: {x_train.shape}, y_train: {y_train.shape},\\n\"\n",
"      f\"x_val: {x_val.shape}, y_val: {y_val.shape}\\n\"\n",
"      f\"x_test: {x_test.shape}, y_test: {y_test.shape}\")"
]
},
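{
"cell_type": "markdown",
"metadata": {
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"As a quick sanity check (a minimal sketch, not part of the original workflow), the next cell plots a few raw training spectra to confirm that each sample is a 102-point spectrum after the channel axis is added. It assumes `matplotlib` is available in this environment; nothing later depends on this cell."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"# Minimal sketch (assumption: matplotlib is installed; it is not used elsewhere in this notebook).\n",
"# Plot a few training spectra as a sanity check on the (samples, 1, 102) layout.\n",
"import matplotlib.pyplot as plt\n",
"\n",
"for spectrum in x_train[:5, 0, :]:\n",
"    plt.plot(spectrum)\n",
"plt.xlabel(\"wavelength index\")\n",
"plt.ylabel(\"spectral value (a.u.)\")\n",
"plt.title(\"Example mango spectra from the training set\")\n",
"plt.show()"
]
},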
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2022-05-28 22:54:57.730239: W tensorflow/core/platform/profile_utils/cpu_utils.cc:128] Failed to get CPU frequency: 0 Hz\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/1024\n",
"90/90 [==============================] - 1s 5ms/step - loss: 0.0262 - val_loss: 0.0274 - lr: 0.0025\n",
"Epoch 2/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0220 - val_loss: 0.0284 - lr: 0.0025\n",
"Epoch 3/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0166 - val_loss: 0.0279 - lr: 0.0025\n",
"Epoch 4/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0120 - val_loss: 0.0358 - lr: 0.0025\n",
"Epoch 5/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0103 - val_loss: 0.0847 - lr: 0.0025\n",
"Epoch 6/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0092 - val_loss: 0.1446 - lr: 0.0025\n",
"Epoch 7/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0085 - val_loss: 0.0410 - lr: 0.0025\n",
"Epoch 8/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0082 - val_loss: 0.2241 - lr: 0.0025\n",
"Epoch 9/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0076 - val_loss: 0.0755 - lr: 0.0025\n",
"Epoch 10/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0071 - val_loss: 0.2266 - lr: 0.0025\n",
"Epoch 11/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0066 - val_loss: 0.1989 - lr: 0.0025\n",
"Epoch 12/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0057 - val_loss: 0.0612 - lr: 0.0025\n",
"Epoch 13/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0053 - val_loss: 0.2283 - lr: 0.0025\n",
"Epoch 14/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0051 - val_loss: 0.0494 - lr: 0.0025\n",
"Epoch 15/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0045 - val_loss: 0.0220 - lr: 0.0025\n",
"Epoch 16/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0048 - val_loss: 0.2283 - lr: 0.0025\n",
"Epoch 17/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0045 - val_loss: 0.2282 - lr: 0.0025\n",
"Epoch 18/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0047 - val_loss: 0.2219 - lr: 0.0025\n",
"Epoch 19/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0044 - val_loss: 0.2074 - lr: 0.0025\n",
"Epoch 20/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0044 - val_loss: 0.1128 - lr: 0.0025\n",
"Epoch 21/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0043 - val_loss: 0.1590 - lr: 0.0025\n",
"Epoch 22/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0045 - val_loss: 0.2283 - lr: 0.0025\n",
"Epoch 23/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0043 - val_loss: 0.1145 - lr: 0.0025\n",
"Epoch 24/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0041 - val_loss: 0.0923 - lr: 0.0025\n",
"Epoch 25/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0041 - val_loss: 0.2192 - lr: 0.0025\n",
"Epoch 26/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0042 - val_loss: 0.1295 - lr: 0.0025\n",
"Epoch 27/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0040 - val_loss: 0.0876 - lr: 0.0025\n",
"Epoch 28/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0040 - val_loss: 0.1489 - lr: 0.0025\n",
"Epoch 29/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0042 - val_loss: 0.1198 - lr: 0.0025\n",
"Epoch 30/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0040 - val_loss: 0.2951 - lr: 0.0025\n",
"Epoch 31/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0043 - val_loss: 0.1440 - lr: 0.0025\n",
"Epoch 32/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0041 - val_loss: 0.2407 - lr: 0.0025\n",
"Epoch 33/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0042 - val_loss: 0.2239 - lr: 0.0025\n",
"Epoch 34/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0039 - val_loss: 0.2283 - lr: 0.0025\n",
"Epoch 35/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0040 - val_loss: 0.1126 - lr: 0.0025\n",
"Epoch 36/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0042 - val_loss: 0.1264 - lr: 0.0025\n",
"Epoch 37/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0039 - val_loss: 0.1036 - lr: 0.0025\n",
"Epoch 38/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0039 - val_loss: 0.2206 - lr: 0.0025\n",
"Epoch 39/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0040 - val_loss: 0.1827 - lr: 0.0025\n",
"Epoch 40/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0039 - val_loss: 0.0397 - lr: 0.0025\n",
"Epoch 41/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0040 - val_loss: 0.1369 - lr: 0.0012\n",
"Epoch 42/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0038 - val_loss: 0.1498 - lr: 0.0012\n",
"Epoch 43/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0038 - val_loss: 0.0496 - lr: 0.0012\n",
"Epoch 44/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0038 - val_loss: 0.0279 - lr: 0.0012\n",
"Epoch 45/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0037 - val_loss: 0.2063 - lr: 0.0012\n",
"Epoch 46/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0038 - val_loss: 0.2871 - lr: 0.0012\n",
"Epoch 47/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.1589 - lr: 0.0012\n",
"Epoch 48/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.0689 - lr: 0.0012\n",
"Epoch 49/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0038 - val_loss: 0.2208 - lr: 0.0012\n",
"Epoch 50/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.0737 - lr: 0.0012\n",
"Epoch 51/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0037 - val_loss: 0.1130 - lr: 0.0012\n",
"Epoch 52/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.1367 - lr: 0.0012\n",
"Epoch 53/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.2286 - lr: 0.0012\n",
"Epoch 54/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0037 - val_loss: 0.1944 - lr: 0.0012\n",
"Epoch 55/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.0737 - lr: 0.0012\n",
"Epoch 56/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.2995 - lr: 0.0012\n",
"Epoch 57/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.1348 - lr: 0.0012\n",
"Epoch 58/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.0215 - lr: 0.0012\n",
"Epoch 59/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.2241 - lr: 0.0012\n",
"Epoch 60/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.1410 - lr: 0.0012\n",
"Epoch 61/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.3292 - lr: 0.0012\n",
"Epoch 62/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.1807 - lr: 0.0012\n",
"Epoch 63/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.1881 - lr: 0.0012\n",
"Epoch 64/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.2342 - lr: 0.0012\n",
"Epoch 65/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.3063 - lr: 0.0012\n",
"Epoch 66/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.1621 - lr: 0.0012\n",
"Epoch 67/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.3250 - lr: 0.0012\n",
"Epoch 68/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.1367 - lr: 0.0012\n",
"Epoch 69/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.0997 - lr: 0.0012\n",
"Epoch 70/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.1286 - lr: 0.0012\n",
"Epoch 71/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.0901 - lr: 0.0012\n",
"Epoch 72/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.2270 - lr: 0.0012\n",
"Epoch 73/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.0607 - lr: 0.0012\n",
"Epoch 74/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.2278 - lr: 0.0012\n",
"Epoch 75/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.3097 - lr: 0.0012\n",
"Epoch 76/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.3258 - lr: 0.0012\n",
"Epoch 77/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.0706 - lr: 0.0012\n",
"Epoch 78/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2239 - lr: 0.0012\n",
"Epoch 79/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.0187 - lr: 0.0012\n",
"Epoch 80/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.0448 - lr: 0.0012\n",
"Epoch 81/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.2271 - lr: 0.0012\n",
"Epoch 82/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.0075 - lr: 0.0012\n",
"Epoch 83/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.0744 - lr: 0.0012\n",
"Epoch 84/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.0631 - lr: 0.0012\n",
"Epoch 85/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.3098 - lr: 0.0012\n",
"Epoch 86/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.3298 - lr: 0.0012\n",
"Epoch 87/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.1986 - lr: 0.0012\n",
"Epoch 88/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.2268 - lr: 0.0012\n",
"Epoch 89/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.2252 - lr: 0.0012\n",
"Epoch 90/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.2258 - lr: 0.0012\n",
"Epoch 91/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.3283 - lr: 0.0012\n",
"Epoch 92/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.0965 - lr: 0.0012\n",
"Epoch 93/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.3204 - lr: 0.0012\n",
"Epoch 94/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.2080 - lr: 0.0012\n",
"Epoch 95/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.3149 - lr: 0.0012\n",
"Epoch 96/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.0237 - lr: 0.0012\n",
"Epoch 97/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.2182 - lr: 0.0012\n",
"Epoch 98/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.1821 - lr: 0.0012\n",
"Epoch 99/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2133 - lr: 0.0012\n",
"Epoch 100/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.0865 - lr: 0.0012\n",
"Epoch 101/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2045 - lr: 0.0012\n",
"Epoch 102/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.1170 - lr: 0.0012\n",
"Epoch 103/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.2241 - lr: 0.0012\n",
"Epoch 104/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.3278 - lr: 0.0012\n",
"Epoch 105/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2543 - lr: 0.0012\n",
"Epoch 106/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.1133 - lr: 0.0012\n",
"Epoch 107/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2277 - lr: 0.0012\n",
"Epoch 108/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.1973 - lr: 6.2500e-04\n",
"Epoch 109/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0032 - val_loss: 0.0904 - lr: 6.2500e-04\n",
"Epoch 110/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.3171 - lr: 6.2500e-04\n",
"Epoch 111/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.0444 - lr: 6.2500e-04\n",
"Epoch 112/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.1970 - lr: 6.2500e-04\n",
"Epoch 113/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.1268 - lr: 6.2500e-04\n",
"Epoch 114/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0032 - val_loss: 0.1479 - lr: 6.2500e-04\n",
"Epoch 115/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0032 - val_loss: 0.0683 - lr: 6.2500e-04\n",
"Epoch 116/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0032 - val_loss: 0.0963 - lr: 6.2500e-04\n",
"Epoch 117/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.0153 - lr: 6.2500e-04\n",
"Epoch 118/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0032 - val_loss: 0.1128 - lr: 6.2500e-04\n",
"Epoch 119/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0032 - val_loss: 0.3028 - lr: 6.2500e-04\n",
"Epoch 120/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.0946 - lr: 6.2500e-04\n",
"Epoch 121/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.3050 - lr: 6.2500e-04\n",
"Epoch 122/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.2079 - lr: 6.2500e-04\n",
"Epoch 123/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.2074 - lr: 6.2500e-04\n",
"Epoch 124/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.0567 - lr: 6.2500e-04\n",
"Epoch 125/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.1866 - lr: 6.2500e-04\n",
"Epoch 126/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0030 - val_loss: 0.1740 - lr: 6.2500e-04\n",
"Epoch 127/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.0270 - lr: 6.2500e-04\n",
"Epoch 128/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0032 - val_loss: 0.0321 - lr: 6.2500e-04\n",
"Epoch 129/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0030 - val_loss: 0.1747 - lr: 6.2500e-04\n",
"Epoch 130/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.0297 - lr: 6.2500e-04\n",
"Epoch 131/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0032 - val_loss: 0.0329 - lr: 6.2500e-04\n",
"Epoch 132/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.0515 - lr: 6.2500e-04\n",
"Epoch 133/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0030 - val_loss: 0.0786 - lr: 3.1250e-04\n",
"Epoch 134/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.0983 - lr: 3.1250e-04\n",
"Epoch 135/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0030 - val_loss: 0.1133 - lr: 3.1250e-04\n",
"Epoch 136/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.0323 - lr: 3.1250e-04\n",
"Epoch 137/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0029 - val_loss: 0.0484 - lr: 3.1250e-04\n",
"Epoch 138/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.0828 - lr: 3.1250e-04\n",
"Epoch 139/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.0304 - lr: 3.1250e-04\n",
"Epoch 140/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.0792 - lr: 3.1250e-04\n",
"Epoch 141/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.2074 - lr: 3.1250e-04\n",
"Epoch 142/1024\n",
"83/90 [==========================>...] - ETA: 0s - loss: 0.0030"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
"\u001B[0;31mKeyboardInterrupt\u001B[0m Traceback (most recent call last)",
"\u001B[0;32m/var/folders/wh/kr5c3dr12834pfk3j7yqnrq40000gn/T/ipykernel_68464/326725923.py\u001B[0m in \u001B[0;36m<module>\u001B[0;34m\u001B[0m\n\u001B[1;32m 4\u001B[0m \u001B[0;32mfor\u001B[0m \u001B[0mi\u001B[0m \u001B[0;32min\u001B[0m \u001B[0mrange\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;36m2\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;36m1000\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 5\u001B[0m \u001B[0mmodel\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mShortCut11\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mnetwork_parameter\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mi\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0minput_shape\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;36m1\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;36m102\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m----> 6\u001B[0;31m \u001B[0mhistory_shortcut_11\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mmodel\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mfit\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mx_train\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0my_train\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mx_val\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0my_val\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mepoch\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mepoch\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mbatch_size\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mbatch_size\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0msave\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0;34m\"/tmp/temp.hdf5\"\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 7\u001B[0m \u001B[0mmodel\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mload_model\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m\"/tmp/temp.hdf5\"\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 8\u001B[0m \u001B[0my_pred\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mmodel\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mpredict\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mx_test\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mreshape\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m-\u001B[0m\u001B[0;36m1\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m)\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/PycharmProjects/sccnn/models.py\u001B[0m in \u001B[0;36mfit\u001B[0;34m(self, x, y, x_val, y_val, epoch, batch_size, save)\u001B[0m\n\u001B[1;32m 197\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 198\u001B[0m history = self.model.fit(x, y, validation_data=(x_val, y_val), epochs=epoch, verbose=1,\n\u001B[0;32m--> 199\u001B[0;31m callbacks=callbacks, batch_size=batch_size)\n\u001B[0m\u001B[1;32m 200\u001B[0m \u001B[0;32mreturn\u001B[0m \u001B[0mhistory\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 201\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/keras/utils/traceback_utils.py\u001B[0m in \u001B[0;36merror_handler\u001B[0;34m(*args, **kwargs)\u001B[0m\n\u001B[1;32m 62\u001B[0m \u001B[0mfiltered_tb\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 63\u001B[0m \u001B[0;32mtry\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m---> 64\u001B[0;31m \u001B[0;32mreturn\u001B[0m \u001B[0mfn\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m*\u001B[0m\u001B[0margs\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m**\u001B[0m\u001B[0mkwargs\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 65\u001B[0m \u001B[0;32mexcept\u001B[0m \u001B[0mException\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0me\u001B[0m\u001B[0;34m:\u001B[0m \u001B[0;31m# pylint: disable=broad-except\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 66\u001B[0m \u001B[0mfiltered_tb\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0m_process_traceback_frames\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0me\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__traceback__\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/keras/engine/training.py\u001B[0m in \u001B[0;36mfit\u001B[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)\u001B[0m\n\u001B[1;32m 1214\u001B[0m _r=1):\n\u001B[1;32m 1215\u001B[0m \u001B[0mcallbacks\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mon_train_batch_begin\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mstep\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m-> 1216\u001B[0;31m \u001B[0mtmp_logs\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mtrain_function\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0miterator\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 1217\u001B[0m \u001B[0;32mif\u001B[0m \u001B[0mdata_handler\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mshould_sync\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 1218\u001B[0m \u001B[0mcontext\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0masync_wait\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/util/traceback_utils.py\u001B[0m in \u001B[0;36merror_handler\u001B[0;34m(*args, **kwargs)\u001B[0m\n\u001B[1;32m 148\u001B[0m \u001B[0mfiltered_tb\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 149\u001B[0m \u001B[0;32mtry\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 150\u001B[0;31m \u001B[0;32mreturn\u001B[0m \u001B[0mfn\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m*\u001B[0m\u001B[0margs\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m**\u001B[0m\u001B[0mkwargs\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 151\u001B[0m \u001B[0;32mexcept\u001B[0m \u001B[0mException\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0me\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 152\u001B[0m \u001B[0mfiltered_tb\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0m_process_traceback_frames\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0me\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__traceback__\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py\u001B[0m in \u001B[0;36m__call__\u001B[0;34m(self, *args, **kwds)\u001B[0m\n\u001B[1;32m 908\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 909\u001B[0m \u001B[0;32mwith\u001B[0m \u001B[0mOptionalXlaContext\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_jit_compile\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 910\u001B[0;31m \u001B[0mresult\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_call\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m*\u001B[0m\u001B[0margs\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m**\u001B[0m\u001B[0mkwds\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 911\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 912\u001B[0m \u001B[0mnew_tracing_count\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mexperimental_get_tracing_count\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py\u001B[0m in \u001B[0;36m_call\u001B[0;34m(self, *args, **kwds)\u001B[0m\n\u001B[1;32m 940\u001B[0m \u001B[0;31m# In this case we have created variables on the first call, so we run the\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 941\u001B[0m \u001B[0;31m# defunned version which is guaranteed to never create variables.\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 942\u001B[0;31m \u001B[0;32mreturn\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_stateless_fn\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m*\u001B[0m\u001B[0margs\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m**\u001B[0m\u001B[0mkwds\u001B[0m\u001B[0;34m)\u001B[0m \u001B[0;31m# pylint: disable=not-callable\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 943\u001B[0m \u001B[0;32melif\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_stateful_fn\u001B[0m \u001B[0;32mis\u001B[0m \u001B[0;32mnot\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 944\u001B[0m \u001B[0;31m# Release the lock early so that multiple threads can perform the call\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/function.py\u001B[0m in \u001B[0;36m__call__\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m 3128\u001B[0m (graph_function,\n\u001B[1;32m 3129\u001B[0m filtered_flat_args) = self._maybe_define_function(args, kwargs)\n\u001B[0;32m-> 3130\u001B[0;31m return graph_function._call_flat(\n\u001B[0m\u001B[1;32m 3131\u001B[0m filtered_flat_args, captured_inputs=graph_function.captured_inputs) # pylint: disable=protected-access\n\u001B[1;32m 3132\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/function.py\u001B[0m in \u001B[0;36m_call_flat\u001B[0;34m(self, args, captured_inputs, cancellation_manager)\u001B[0m\n\u001B[1;32m 1957\u001B[0m and executing_eagerly):\n\u001B[1;32m 1958\u001B[0m \u001B[0;31m# No tape is watching; skip to running the function.\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m-> 1959\u001B[0;31m return self._build_call_outputs(self._inference_function.call(\n\u001B[0m\u001B[1;32m 1960\u001B[0m ctx, args, cancellation_manager=cancellation_manager))\n\u001B[1;32m 1961\u001B[0m forward_backward = self._select_forward_and_backward_functions(\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/function.py\u001B[0m in \u001B[0;36mcall\u001B[0;34m(self, ctx, args, cancellation_manager)\u001B[0m\n\u001B[1;32m 596\u001B[0m \u001B[0;32mwith\u001B[0m \u001B[0m_InterpolateFunctionError\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 597\u001B[0m \u001B[0;32mif\u001B[0m \u001B[0mcancellation_manager\u001B[0m \u001B[0;32mis\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 598\u001B[0;31m outputs = execute.execute(\n\u001B[0m\u001B[1;32m 599\u001B[0m \u001B[0mstr\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0msignature\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mname\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m,\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 600\u001B[0m \u001B[0mnum_outputs\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_num_outputs\u001B[0m\u001B[0;34m,\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/execute.py\u001B[0m in \u001B[0;36mquick_execute\u001B[0;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001B[0m\n\u001B[1;32m 56\u001B[0m \u001B[0;32mtry\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 57\u001B[0m \u001B[0mctx\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mensure_initialized\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m---> 58\u001B[0;31m tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,\n\u001B[0m\u001B[1;32m 59\u001B[0m inputs, attrs, num_outputs)\n\u001B[1;32m 60\u001B[0m \u001B[0;32mexcept\u001B[0m \u001B[0mcore\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_NotOkStatusException\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0me\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;31mKeyboardInterrupt\u001B[0m: "
]
}
],
"source": [
"# Sweep the network parameter (neuron number) of ShortCut11 and record test-set performance for each setting.\n",
"model_parameter_optimization = {\"neuron num\": [], \"r2\": [], \"rmse\": []}\n",
"epoch, batch_size = 1024, 64\n",
"\n",
"for i in range(2, 500):\n",
"    model = ShortCut11(network_parameter=i, input_shape=(1, 102))\n",
"    history_shortcut_11 = model.fit(x_train, y_train, x_val, y_val, epoch=epoch, batch_size=batch_size, save=\"/tmp/temp.hdf5\")\n",
"    # Reload the checkpoint written by fit() before evaluating on the held-out test set.\n",
"    model = load_model(\"/tmp/temp.hdf5\")\n",
"    y_pred = model.predict(x_test).reshape((-1, ))\n",
"    model_parameter_optimization['neuron num'].append(i)\n",
"    model_parameter_optimization['r2'].append(r2_score(y_test, y_pred))\n",
"    model_parameter_optimization['rmse'].append(np.sqrt(mean_squared_error(y_test, y_pred)))\n",
"pd.DataFrame(model_parameter_optimization).to_csv(\"./dataset/test_result.csv\")"
]
},
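{
"cell_type": "markdown",
"metadata": {
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"Once the sweep has finished (or gone as far as it ran before being interrupted), the results file can be read back to pick the best-performing setting. The cell below is a minimal sketch that only assumes the `./dataset/test_result.csv` file and the columns written by the loop above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"# Minimal sketch: read back the sweep results and report the setting with the best test-set R2.\n",
"# Assumes the previous cell has written ./dataset/test_result.csv.\n",
"results = pd.read_csv(\"./dataset/test_result.csv\", index_col=0)\n",
"best = results.loc[results[\"r2\"].idxmax()]\n",
"print(f\"best neuron num: {int(best['neuron num'])}, \"\n",
"      f\"R2: {best['r2']:.4f}, RMSE: {best['rmse']:.4f}\")"
]
},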
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Deepo",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.0"
}
},
"nbformat": 4,
"nbformat_minor": 4
}