evaluation complete

This commit is contained in:
karllzy 2022-06-13 00:51:34 +08:00
parent b53a4d7e84
commit 879b8f7d71
9 changed files with 4229 additions and 2900 deletions

4
.gitignore vendored
View File

@ -1,6 +1,8 @@
preprocess/dataset/*
checkpoints/*
dataset/*
.idea
.DS_Store
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -14,7 +14,13 @@
},
{
"cell_type": "code",
"execution_count": 29,
"execution_count": 16,
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"import numpy as np\n",
@ -25,39 +31,39 @@
"from sklearn.metrics import mean_squared_error\n",
"import matplotlib.pyplot as plt\n",
"%matplotlib inline"
],
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"In this experiment, we load model weights from the experiment1 and evaluate them on test dataset."
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
}
},
"source": []
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "markdown",
"source": [
"In this experiment, we load model weights from the experiment1 and evaluate them on test dataset."
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
}
}
},
{
"cell_type": "markdown",
"source": [],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
}
}
},
{
"cell_type": "code",
"execution_count": 30,
},
"outputs": [
{
"name": "stdout",
@ -71,7 +77,10 @@
}
],
"source": [
"data = loadmat('./preprocess/dataset/mango/mango_dm_split.mat')\n",
"data = loadmat('./dataset/mango/mango_dm_split.mat')\n",
"\n",
"min_value, max_value = data['min_y'][-1][-1], data['max_y'][-1][-1]\n",
"retransform = lambda x: x * (max_value - min_value)\n",
"x_train, y_train, x_test, y_test = data['x_train'], data['y_train'], data['x_test'], data['y_test']\n",
"x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.3, random_state=12, shuffle=True)\n",
"x_train, x_val, x_test = x_train[:, np.newaxis, :], x_val[:, np.newaxis, :], x_test[:, np.newaxis, :]\n",
@ -79,75 +88,85 @@
" f\"x_train: {x_train.shape}, y_train: {y_train.shape},\\n\"\n",
" f\"x_val: {x_val.shape}, y_val: {y_val.shape}\\n\"\n",
" f\"x_test: {x_test.shape}, y_test: {y_test.shape}\")"
],
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"plain 5 mse : 0.05133910188824081\n",
"plain 5 Dry matter content error 0.7758644362065223\n",
"plain 5 r^2 : 0.902928516828363\n",
"plain 11 mse : 0.05200769624271875\n",
"plain 11 Dry matter content error 0.7859685978067217\n",
"plain 11 r^2 : 0.9003837097594369\n",
"shortcut 5 mse : 0.051382735052895194\n",
"shortcut 5 Dry matter content error 0.7765238443272209\n",
"shortcut 5 r^2 : 0.9027634443691182\n",
"shortcut11 mse : 0.05078784364469306\n",
"shortcut11 Dry matter content error 0.7675335217455442\n",
"shortcut11 r^2 : 0.9050019525259844\n"
]
}
],
"source": [
"from sklearn.metrics import r2_score\n",
"\n",
"## Build model and load weights\n",
"plain_5, plain_11 = load_model('./checkpoints/plain5.hdf5'), load_model('./checkpoints/plain11.hdf5')\n",
"shortcut5, shortcut11 = load_model('./checkpoints/shortcut5.hdf5'), load_model('./checkpoints/shortcut11.hdf5')\n",
"models = {'plain 5': plain_5, 'plain 11': plain_11, 'shortcut 5': shortcut5, 'shortcut11': shortcut11}\n",
"results = {model_name: model.predict(x_test).reshape((-1, )) for model_name, model in models.items()}\n",
"for model_name, model_result in results.items():\n",
" print(model_name, \" : \", mean_squared_error(y_test, model_result)*100, \"%\")"
],
" rmse = np.sqrt(mean_squared_error(y_test, model_result))\n",
" print(model_name, \"mse : \", rmse)\n",
" print(model_name, \"Dry matter content error\", retransform(rmse))\n",
" print(model_name, \"r^2 :\", r2_score(y_test, model_result))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
},
"execution_count": 31,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"plain 5 : 0.2707851525589865 %\n",
"plain 11 : 0.26240810192725905 %\n",
"shortcut 5 : 0.28330442301217196 %\n",
"shortcut11 : 0.25743312483685266 %\n"
]
}
]
},
{
"cell_type": "code",
"execution_count": 31,
"outputs": [],
"source": [],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
"source": []
}
],
"metadata": {
"interpreter": {
"hash": "7f619fc91ee8bdab81d49e7c14228037474662e3f2d607687ae505108922fa06"
},
"kernelspec": {
"display_name": "Python 3",
"display_name": "Python 3.9.7 ('base')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
"pygments_lexer": "ipython3",
"version": "3.9.7"
}
},
"nbformat": 4,

152
04_model_comparision.ipynb Normal file
View File

@ -0,0 +1,152 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"collapsed": true,
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"# Model comparison"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"## PLS"
]
},
{
"cell_type": "code",
"execution_count": 55,
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": [
"from sklearn.neural_network import MLPRegressor\n",
"from sklearn.svm import SVR\n",
"import numpy as np\n",
"from scipy.io import loadmat\n",
"from sklearn.cross_decomposition import PLSRegression\n",
"from sklearn.metrics import mean_squared_error, r2_score\n",
"\n",
"data = loadmat('./dataset/mango/mango_dm_split.mat')\n",
"min_value, max_value = data['min_y'][-1][-1], data['max_y'][-1][-1]\n",
"retransform = lambda x: x * (max_value - min_value)"
]
},
{
"cell_type": "code",
"execution_count": 56,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"shape of data:\n",
"x_train: (8183, 102), y_train: (8183, 1),\n",
"x_test: (3508, 102), y_test: (3508, 1)\n"
]
}
],
"source": [
"x_train, y_train, x_test, y_test = data['x_train'], data['y_train'], data['x_test'], data['y_test']\n",
"print(f\"shape of data:\\n\"\n",
" f\"x_train: {x_train.shape}, y_train: {y_train.shape},\\n\"\n",
" f\"x_test: {x_test.shape}, y_test: {y_test.shape}\")"
]
},
{
"cell_type": "code",
"execution_count": 57,
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"PLS RMSE: 0.05722520296881164\n",
"PLS Dry matter content error 0.8648183977750965\n",
"PLS R^2: 0.8793937498230511\n",
"SVR RMSE: 0.1139650997574326\n",
"SVR Dry matter content error 1.7223025845485895\n",
"SVR R^2: 0.5216575965112935\n",
"MLP RMSE: 0.15508626630172465\n",
"MLP Dry matter content error 2.343748023280531\n",
"MLP R^2: 0.11418748397100065\n"
]
}
],
"source": [
"pls = PLSRegression(n_components=90)\n",
"svr = SVR(kernel=\"rbf\", degree=30, gamma=\"scale\")\n",
"mlp = MLPRegressor(hidden_layer_sizes=(60, 50, ))\n",
"pls = pls.fit(x_train, y_train.ravel())\n",
"svr = svr.fit(x_train, y_train.ravel())\n",
"mlp = mlp.fit(x_train, y_train.ravel())\n",
"\n",
"models = {'PLS': pls, \"SVR\": svr, \"MLP\": mlp}\n",
"results = {model_name: model.predict(x_test).reshape((-1, )) for model_name, model in models.items()}\n",
"for model_name, model_result in results.items():\n",
" rmse = np.sqrt(mean_squared_error(y_test, model_result))\n",
" print(model_name, \"RMSE: \", rmse)\n",
" print(model_name, \"Dry matter content error\", retransform(rmse))\n",
" print(model_name, \"R^2: \", r2_score(y_test, model_result))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": []
}
],
"metadata": {
"interpreter": {
"hash": "7f619fc91ee8bdab81d49e7c14228037474662e3f2d607687ae505108922fa06"
},
"kernelspec": {
"display_name": "Python 3.9.7 ('base')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
}
},
"nbformat": 4,
"nbformat_minor": 0
}

View File

@ -0,0 +1,446 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"# Network Parameter Optimization"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"shape of data:\n",
"x_train: (5728, 1, 102), y_train: (5728, 1),\n",
"x_val: (2455, 1, 102), y_val: (2455, 1)\n",
"x_test: (3508, 1, 102), y_test: (3508, 1)\n"
]
}
],
"source": [
"import numpy as np\n",
"import pandas as pd\n",
"from keras.models import load_model\n",
"from sklearn.metrics import r2_score, mean_squared_error\n",
"from sklearn.model_selection import train_test_split\n",
"from scipy.io import loadmat\n",
"from models import ShortCut11\n",
"from numpy.random import seed\n",
"import tensorflow\n",
"import time\n",
"seed(4750)\n",
"tensorflow.random.set_seed(4750)\n",
"time1 = time.time()\n",
"data = loadmat('./dataset/mango/mango_dm_split.mat')\n",
"x_train, y_train, x_test, y_test = data['x_train'], data['y_train'], data['x_test'], data['y_test']\n",
"x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.3, random_state=12, shuffle=True)\n",
"x_train, x_val, x_test = x_train[:, np.newaxis, :], x_val[:, np.newaxis, :], x_test[:, np.newaxis, :]\n",
"print(f\"shape of data:\\n\"\n",
" f\"x_train: {x_train.shape}, y_train: {y_train.shape},\\n\"\n",
" f\"x_val: {x_val.shape}, y_val: {y_val.shape}\\n\"\n",
" f\"x_test: {x_test.shape}, y_test: {y_test.shape}\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2022-05-28 22:54:57.730239: W tensorflow/core/platform/profile_utils/cpu_utils.cc:128] Failed to get CPU frequency: 0 Hz\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/1024\n",
"90/90 [==============================] - 1s 5ms/step - loss: 0.0262 - val_loss: 0.0274 - lr: 0.0025\n",
"Epoch 2/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0220 - val_loss: 0.0284 - lr: 0.0025\n",
"Epoch 3/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0166 - val_loss: 0.0279 - lr: 0.0025\n",
"Epoch 4/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0120 - val_loss: 0.0358 - lr: 0.0025\n",
"Epoch 5/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0103 - val_loss: 0.0847 - lr: 0.0025\n",
"Epoch 6/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0092 - val_loss: 0.1446 - lr: 0.0025\n",
"Epoch 7/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0085 - val_loss: 0.0410 - lr: 0.0025\n",
"Epoch 8/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0082 - val_loss: 0.2241 - lr: 0.0025\n",
"Epoch 9/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0076 - val_loss: 0.0755 - lr: 0.0025\n",
"Epoch 10/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0071 - val_loss: 0.2266 - lr: 0.0025\n",
"Epoch 11/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0066 - val_loss: 0.1989 - lr: 0.0025\n",
"Epoch 12/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0057 - val_loss: 0.0612 - lr: 0.0025\n",
"Epoch 13/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0053 - val_loss: 0.2283 - lr: 0.0025\n",
"Epoch 14/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0051 - val_loss: 0.0494 - lr: 0.0025\n",
"Epoch 15/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0045 - val_loss: 0.0220 - lr: 0.0025\n",
"Epoch 16/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0048 - val_loss: 0.2283 - lr: 0.0025\n",
"Epoch 17/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0045 - val_loss: 0.2282 - lr: 0.0025\n",
"Epoch 18/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0047 - val_loss: 0.2219 - lr: 0.0025\n",
"Epoch 19/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0044 - val_loss: 0.2074 - lr: 0.0025\n",
"Epoch 20/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0044 - val_loss: 0.1128 - lr: 0.0025\n",
"Epoch 21/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0043 - val_loss: 0.1590 - lr: 0.0025\n",
"Epoch 22/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0045 - val_loss: 0.2283 - lr: 0.0025\n",
"Epoch 23/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0043 - val_loss: 0.1145 - lr: 0.0025\n",
"Epoch 24/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0041 - val_loss: 0.0923 - lr: 0.0025\n",
"Epoch 25/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0041 - val_loss: 0.2192 - lr: 0.0025\n",
"Epoch 26/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0042 - val_loss: 0.1295 - lr: 0.0025\n",
"Epoch 27/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0040 - val_loss: 0.0876 - lr: 0.0025\n",
"Epoch 28/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0040 - val_loss: 0.1489 - lr: 0.0025\n",
"Epoch 29/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0042 - val_loss: 0.1198 - lr: 0.0025\n",
"Epoch 30/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0040 - val_loss: 0.2951 - lr: 0.0025\n",
"Epoch 31/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0043 - val_loss: 0.1440 - lr: 0.0025\n",
"Epoch 32/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0041 - val_loss: 0.2407 - lr: 0.0025\n",
"Epoch 33/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0042 - val_loss: 0.2239 - lr: 0.0025\n",
"Epoch 34/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0039 - val_loss: 0.2283 - lr: 0.0025\n",
"Epoch 35/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0040 - val_loss: 0.1126 - lr: 0.0025\n",
"Epoch 36/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0042 - val_loss: 0.1264 - lr: 0.0025\n",
"Epoch 37/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0039 - val_loss: 0.1036 - lr: 0.0025\n",
"Epoch 38/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0039 - val_loss: 0.2206 - lr: 0.0025\n",
"Epoch 39/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0040 - val_loss: 0.1827 - lr: 0.0025\n",
"Epoch 40/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0039 - val_loss: 0.0397 - lr: 0.0025\n",
"Epoch 41/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0040 - val_loss: 0.1369 - lr: 0.0012\n",
"Epoch 42/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0038 - val_loss: 0.1498 - lr: 0.0012\n",
"Epoch 43/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0038 - val_loss: 0.0496 - lr: 0.0012\n",
"Epoch 44/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0038 - val_loss: 0.0279 - lr: 0.0012\n",
"Epoch 45/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0037 - val_loss: 0.2063 - lr: 0.0012\n",
"Epoch 46/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0038 - val_loss: 0.2871 - lr: 0.0012\n",
"Epoch 47/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.1589 - lr: 0.0012\n",
"Epoch 48/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.0689 - lr: 0.0012\n",
"Epoch 49/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0038 - val_loss: 0.2208 - lr: 0.0012\n",
"Epoch 50/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.0737 - lr: 0.0012\n",
"Epoch 51/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0037 - val_loss: 0.1130 - lr: 0.0012\n",
"Epoch 52/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.1367 - lr: 0.0012\n",
"Epoch 53/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.2286 - lr: 0.0012\n",
"Epoch 54/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0037 - val_loss: 0.1944 - lr: 0.0012\n",
"Epoch 55/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.0737 - lr: 0.0012\n",
"Epoch 56/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.2995 - lr: 0.0012\n",
"Epoch 57/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.1348 - lr: 0.0012\n",
"Epoch 58/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.0215 - lr: 0.0012\n",
"Epoch 59/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.2241 - lr: 0.0012\n",
"Epoch 60/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.1410 - lr: 0.0012\n",
"Epoch 61/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.3292 - lr: 0.0012\n",
"Epoch 62/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.1807 - lr: 0.0012\n",
"Epoch 63/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.1881 - lr: 0.0012\n",
"Epoch 64/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.2342 - lr: 0.0012\n",
"Epoch 65/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.3063 - lr: 0.0012\n",
"Epoch 66/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.1621 - lr: 0.0012\n",
"Epoch 67/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.3250 - lr: 0.0012\n",
"Epoch 68/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.1367 - lr: 0.0012\n",
"Epoch 69/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.0997 - lr: 0.0012\n",
"Epoch 70/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.1286 - lr: 0.0012\n",
"Epoch 71/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.0901 - lr: 0.0012\n",
"Epoch 72/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.2270 - lr: 0.0012\n",
"Epoch 73/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.0607 - lr: 0.0012\n",
"Epoch 74/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.2278 - lr: 0.0012\n",
"Epoch 75/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.3097 - lr: 0.0012\n",
"Epoch 76/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.3258 - lr: 0.0012\n",
"Epoch 77/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.0706 - lr: 0.0012\n",
"Epoch 78/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2239 - lr: 0.0012\n",
"Epoch 79/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.0187 - lr: 0.0012\n",
"Epoch 80/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.0448 - lr: 0.0012\n",
"Epoch 81/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.2271 - lr: 0.0012\n",
"Epoch 82/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.0075 - lr: 0.0012\n",
"Epoch 83/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.0744 - lr: 0.0012\n",
"Epoch 84/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.0631 - lr: 0.0012\n",
"Epoch 85/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.3098 - lr: 0.0012\n",
"Epoch 86/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.3298 - lr: 0.0012\n",
"Epoch 87/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.1986 - lr: 0.0012\n",
"Epoch 88/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.2268 - lr: 0.0012\n",
"Epoch 89/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.2252 - lr: 0.0012\n",
"Epoch 90/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.2258 - lr: 0.0012\n",
"Epoch 91/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.3283 - lr: 0.0012\n",
"Epoch 92/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.0965 - lr: 0.0012\n",
"Epoch 93/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.3204 - lr: 0.0012\n",
"Epoch 94/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.2080 - lr: 0.0012\n",
"Epoch 95/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.3149 - lr: 0.0012\n",
"Epoch 96/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.0237 - lr: 0.0012\n",
"Epoch 97/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.2182 - lr: 0.0012\n",
"Epoch 98/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.1821 - lr: 0.0012\n",
"Epoch 99/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2133 - lr: 0.0012\n",
"Epoch 100/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.0865 - lr: 0.0012\n",
"Epoch 101/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2045 - lr: 0.0012\n",
"Epoch 102/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.1170 - lr: 0.0012\n",
"Epoch 103/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.2241 - lr: 0.0012\n",
"Epoch 104/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.3278 - lr: 0.0012\n",
"Epoch 105/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2543 - lr: 0.0012\n",
"Epoch 106/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.1133 - lr: 0.0012\n",
"Epoch 107/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2277 - lr: 0.0012\n",
"Epoch 108/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.1973 - lr: 6.2500e-04\n",
"Epoch 109/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0032 - val_loss: 0.0904 - lr: 6.2500e-04\n",
"Epoch 110/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.3171 - lr: 6.2500e-04\n",
"Epoch 111/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.0444 - lr: 6.2500e-04\n",
"Epoch 112/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.1970 - lr: 6.2500e-04\n",
"Epoch 113/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.1268 - lr: 6.2500e-04\n",
"Epoch 114/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0032 - val_loss: 0.1479 - lr: 6.2500e-04\n",
"Epoch 115/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0032 - val_loss: 0.0683 - lr: 6.2500e-04\n",
"Epoch 116/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0032 - val_loss: 0.0963 - lr: 6.2500e-04\n",
"Epoch 117/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.0153 - lr: 6.2500e-04\n",
"Epoch 118/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0032 - val_loss: 0.1128 - lr: 6.2500e-04\n",
"Epoch 119/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0032 - val_loss: 0.3028 - lr: 6.2500e-04\n",
"Epoch 120/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.0946 - lr: 6.2500e-04\n",
"Epoch 121/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.3050 - lr: 6.2500e-04\n",
"Epoch 122/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.2079 - lr: 6.2500e-04\n",
"Epoch 123/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.2074 - lr: 6.2500e-04\n",
"Epoch 124/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.0567 - lr: 6.2500e-04\n",
"Epoch 125/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.1866 - lr: 6.2500e-04\n",
"Epoch 126/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0030 - val_loss: 0.1740 - lr: 6.2500e-04\n",
"Epoch 127/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.0270 - lr: 6.2500e-04\n",
"Epoch 128/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0032 - val_loss: 0.0321 - lr: 6.2500e-04\n",
"Epoch 129/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0030 - val_loss: 0.1747 - lr: 6.2500e-04\n",
"Epoch 130/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.0297 - lr: 6.2500e-04\n",
"Epoch 131/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0032 - val_loss: 0.0329 - lr: 6.2500e-04\n",
"Epoch 132/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.0515 - lr: 6.2500e-04\n",
"Epoch 133/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0030 - val_loss: 0.0786 - lr: 3.1250e-04\n",
"Epoch 134/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.0983 - lr: 3.1250e-04\n",
"Epoch 135/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0030 - val_loss: 0.1133 - lr: 3.1250e-04\n",
"Epoch 136/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.0323 - lr: 3.1250e-04\n",
"Epoch 137/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0029 - val_loss: 0.0484 - lr: 3.1250e-04\n",
"Epoch 138/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.0828 - lr: 3.1250e-04\n",
"Epoch 139/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.0304 - lr: 3.1250e-04\n",
"Epoch 140/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.0792 - lr: 3.1250e-04\n",
"Epoch 141/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.2074 - lr: 3.1250e-04\n",
"Epoch 142/1024\n",
"83/90 [==========================>...] - ETA: 0s - loss: 0.0030"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
"\u001B[0;31mKeyboardInterrupt\u001B[0m Traceback (most recent call last)",
"\u001B[0;32m/var/folders/wh/kr5c3dr12834pfk3j7yqnrq40000gn/T/ipykernel_68464/326725923.py\u001B[0m in \u001B[0;36m<module>\u001B[0;34m\u001B[0m\n\u001B[1;32m 4\u001B[0m \u001B[0;32mfor\u001B[0m \u001B[0mi\u001B[0m \u001B[0;32min\u001B[0m \u001B[0mrange\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;36m2\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;36m1000\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 5\u001B[0m \u001B[0mmodel\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mShortCut11\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mnetwork_parameter\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mi\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0minput_shape\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;36m1\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;36m102\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m----> 6\u001B[0;31m \u001B[0mhistory_shortcut_11\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mmodel\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mfit\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mx_train\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0my_train\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mx_val\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0my_val\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mepoch\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mepoch\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mbatch_size\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mbatch_size\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0msave\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0;34m\"/tmp/temp.hdf5\"\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 7\u001B[0m \u001B[0mmodel\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mload_model\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m\"/tmp/temp.hdf5\"\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 8\u001B[0m \u001B[0my_pred\u001B[0m \u001B[0;34m=\u001B[0m 
\u001B[0mmodel\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mpredict\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mx_test\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mreshape\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m-\u001B[0m\u001B[0;36m1\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m)\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/PycharmProjects/sccnn/models.py\u001B[0m in \u001B[0;36mfit\u001B[0;34m(self, x, y, x_val, y_val, epoch, batch_size, save)\u001B[0m\n\u001B[1;32m 197\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 198\u001B[0m history = self.model.fit(x, y, validation_data=(x_val, y_val), epochs=epoch, verbose=1,\n\u001B[0;32m--> 199\u001B[0;31m callbacks=callbacks, batch_size=batch_size)\n\u001B[0m\u001B[1;32m 200\u001B[0m \u001B[0;32mreturn\u001B[0m \u001B[0mhistory\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 201\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/keras/utils/traceback_utils.py\u001B[0m in \u001B[0;36merror_handler\u001B[0;34m(*args, **kwargs)\u001B[0m\n\u001B[1;32m 62\u001B[0m \u001B[0mfiltered_tb\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 63\u001B[0m \u001B[0;32mtry\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m---> 64\u001B[0;31m \u001B[0;32mreturn\u001B[0m \u001B[0mfn\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m*\u001B[0m\u001B[0margs\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m**\u001B[0m\u001B[0mkwargs\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 65\u001B[0m \u001B[0;32mexcept\u001B[0m \u001B[0mException\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0me\u001B[0m\u001B[0;34m:\u001B[0m \u001B[0;31m# pylint: disable=broad-except\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 66\u001B[0m \u001B[0mfiltered_tb\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0m_process_traceback_frames\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0me\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__traceback__\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/keras/engine/training.py\u001B[0m in \u001B[0;36mfit\u001B[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)\u001B[0m\n\u001B[1;32m 1214\u001B[0m _r=1):\n\u001B[1;32m 1215\u001B[0m \u001B[0mcallbacks\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mon_train_batch_begin\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mstep\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m-> 1216\u001B[0;31m \u001B[0mtmp_logs\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mtrain_function\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0miterator\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 1217\u001B[0m \u001B[0;32mif\u001B[0m \u001B[0mdata_handler\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mshould_sync\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 1218\u001B[0m \u001B[0mcontext\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0masync_wait\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/util/traceback_utils.py\u001B[0m in \u001B[0;36merror_handler\u001B[0;34m(*args, **kwargs)\u001B[0m\n\u001B[1;32m 148\u001B[0m \u001B[0mfiltered_tb\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 149\u001B[0m \u001B[0;32mtry\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 150\u001B[0;31m \u001B[0;32mreturn\u001B[0m \u001B[0mfn\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m*\u001B[0m\u001B[0margs\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m**\u001B[0m\u001B[0mkwargs\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 151\u001B[0m \u001B[0;32mexcept\u001B[0m \u001B[0mException\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0me\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 152\u001B[0m \u001B[0mfiltered_tb\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0m_process_traceback_frames\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0me\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__traceback__\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py\u001B[0m in \u001B[0;36m__call__\u001B[0;34m(self, *args, **kwds)\u001B[0m\n\u001B[1;32m 908\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 909\u001B[0m \u001B[0;32mwith\u001B[0m \u001B[0mOptionalXlaContext\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_jit_compile\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 910\u001B[0;31m \u001B[0mresult\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_call\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m*\u001B[0m\u001B[0margs\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m**\u001B[0m\u001B[0mkwds\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 911\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 912\u001B[0m \u001B[0mnew_tracing_count\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mexperimental_get_tracing_count\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py\u001B[0m in \u001B[0;36m_call\u001B[0;34m(self, *args, **kwds)\u001B[0m\n\u001B[1;32m 940\u001B[0m \u001B[0;31m# In this case we have created variables on the first call, so we run the\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 941\u001B[0m \u001B[0;31m# defunned version which is guaranteed to never create variables.\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 942\u001B[0;31m \u001B[0;32mreturn\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_stateless_fn\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m*\u001B[0m\u001B[0margs\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m**\u001B[0m\u001B[0mkwds\u001B[0m\u001B[0;34m)\u001B[0m \u001B[0;31m# pylint: disable=not-callable\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 943\u001B[0m \u001B[0;32melif\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_stateful_fn\u001B[0m \u001B[0;32mis\u001B[0m \u001B[0;32mnot\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 944\u001B[0m \u001B[0;31m# Release the lock early so that multiple threads can perform the call\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/function.py\u001B[0m in \u001B[0;36m__call__\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m 3128\u001B[0m (graph_function,\n\u001B[1;32m 3129\u001B[0m filtered_flat_args) = self._maybe_define_function(args, kwargs)\n\u001B[0;32m-> 3130\u001B[0;31m return graph_function._call_flat(\n\u001B[0m\u001B[1;32m 3131\u001B[0m filtered_flat_args, captured_inputs=graph_function.captured_inputs) # pylint: disable=protected-access\n\u001B[1;32m 3132\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/function.py\u001B[0m in \u001B[0;36m_call_flat\u001B[0;34m(self, args, captured_inputs, cancellation_manager)\u001B[0m\n\u001B[1;32m 1957\u001B[0m and executing_eagerly):\n\u001B[1;32m 1958\u001B[0m \u001B[0;31m# No tape is watching; skip to running the function.\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m-> 1959\u001B[0;31m return self._build_call_outputs(self._inference_function.call(\n\u001B[0m\u001B[1;32m 1960\u001B[0m ctx, args, cancellation_manager=cancellation_manager))\n\u001B[1;32m 1961\u001B[0m forward_backward = self._select_forward_and_backward_functions(\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/function.py\u001B[0m in \u001B[0;36mcall\u001B[0;34m(self, ctx, args, cancellation_manager)\u001B[0m\n\u001B[1;32m 596\u001B[0m \u001B[0;32mwith\u001B[0m \u001B[0m_InterpolateFunctionError\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 597\u001B[0m \u001B[0;32mif\u001B[0m \u001B[0mcancellation_manager\u001B[0m \u001B[0;32mis\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 598\u001B[0;31m outputs = execute.execute(\n\u001B[0m\u001B[1;32m 599\u001B[0m \u001B[0mstr\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0msignature\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mname\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m,\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 600\u001B[0m \u001B[0mnum_outputs\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_num_outputs\u001B[0m\u001B[0;34m,\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/execute.py\u001B[0m in \u001B[0;36mquick_execute\u001B[0;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001B[0m\n\u001B[1;32m 56\u001B[0m \u001B[0;32mtry\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 57\u001B[0m \u001B[0mctx\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mensure_initialized\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m---> 58\u001B[0;31m tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,\n\u001B[0m\u001B[1;32m 59\u001B[0m inputs, attrs, num_outputs)\n\u001B[1;32m 60\u001B[0m \u001B[0;32mexcept\u001B[0m \u001B[0mcore\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_NotOkStatusException\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0me\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;31mKeyboardInterrupt\u001B[0m: "
]
}
],
"source": [
"model_parameter_optimization = {\"neuron num\":[], \"r2\":[], \"rmse\":[]}\n",
"epoch, batch_size = 1024, 64\n",
"\n",
"for i in range(2, 500):\n",
" model = ShortCut11(network_parameter=i, input_shape=(1, 102))\n",
" history_shortcut_11 = model.fit(x_train, y_train, x_val, y_val, epoch=epoch, batch_size=batch_size, save=\"/tmp/temp.hdf5\")\n",
" model = load_model(\"/tmp/temp.hdf5\")\n",
" y_pred = model.predict(x_test).reshape((-1, ))\n",
" model_parameter_optimization['neuron num'].append(i)\n",
" model_parameter_optimization['r2'].append(r2_score(y_test, y_pred))\n",
"model_parameter_optimization['rmse'].append(mean_squared_error(y_test, y_pred, squared=False))\n",
"pd.DataFrame(model_parameter_optimization).to_csv(\"./dataset/test_result.csv\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
},
"pycharm": {
"name": "#%%\n"
}
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Deepo",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.0"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@ -1,24 +1,25 @@
# SCNet: A deep learning network framework for analyzing near-infrared spectroscopy using short-cut
## Pre-processing
Since the method we propose is a regression model, the classification dataset (wheat kernel) is not used in this work.
The other three dataset (corn, marzipan, soil) were preprocessed manually with Matlab and saved in the sub dictionary of `./preprocess` dir. The original dataset of these three dataset were stored in the `./preprocess/dataset/`.
The other three datasets (corn, marzipan, soil) were preprocessed manually with Matlab and saved in subdirectories of the `./dataset` dir. The original versions of these three datasets are stored in `./dataset/`. The data are shared on Google Drive at this [link](https://drive.google.com/drive/folders/1RFREskNcI2sDv6p7lvLhxFRLUgVTwho6?usp=sharing)
The mango dataset is not in Matlab .m file format, so we save them with the `process.py`.
Meanwhile, we drop the useless part and only save the data between  684 and 900 nm.
All these datasets are available at this [link](https://drive.google.com/drive/folders/1RFREskNcI2sDv6p7lvLhxFRLUgVTwho6?usp=sharing)
Meanwhile, we drop the useless part and only save the data between 684 and 900 nm.
> The data set used in this study comprises a total of 11,691 NIR spectra (684–990 nm in 3 nm sampling with a total of 103 variables) and DM measurements performed on 4675 mango fruit across 4 harvest seasons 2015, 2016, 2017 and 2018 [24].
The detailed preprocessing progress can be found in [./preprocess.ipynb](./preprocess.ipynb)
The detailed preprocessing progress can be found in [./preprocess.ipynb](./01_preprocess.ipynb)
## Network Training
In order to show our network can prevent degration problem, we hold the experiment which contains the training loss curve of four models. The detailed information can be found in [model_training.ipynb](./model_training.ipynb).
In order to show that our network can prevent the degradation problem, we conducted an experiment that compares the training loss curves of four models. Detailed information can be found in [model_training.ipynb](./02_model_training.ipynb).
The training results were saved on the google drive, here is the [link](https://drive.google.com/drive/folders/1-p1SPg-6lt7i6NkgzUOf5GDhh0cDePsr?usp=sharing])
## Network evaluation
After training our model on training set, we evaluate the models on testing dataset that spared before. The evaluation is done with [model_evaluation.ipynb](model_evaluating.ipynb).
After training our models on the training set, we evaluate them on the testing dataset that was set aside beforehand. The evaluation is done with [model_evaluation.ipynb](03_model_evaluating.ipynb).

View File

@ -1,3 +1,4 @@
from tkinter import N
import keras.callbacks
import keras.layers as KL
from keras import Model
@ -9,7 +10,6 @@ class Plain5(object):
self.model = None
self.input_shape = input_shape
if model_path is not None:
# TODO: loading from the file
pass
else:
self.model = self.build_model()
@ -135,9 +135,10 @@ class ShortCut5(object):
class ShortCut11(object):
def __init__(self, model_path=None, input_shape=None):
def __init__(self, model_path=None, input_shape=None, network_parameter=200):
self.model = None
self.input_shape = input_shape
self.network_parameter = network_parameter
if model_path is not None:
# TODO: loading from the file
pass
@ -177,21 +178,25 @@ class ShortCut11(object):
fx3 = KL.Activation('relu')(x)
x = KL.Concatenate(axis=2)([x_raw, fx1, fx2, fx3])
x = KL.Dense(200, activation='relu', name='dense1')(x)
x = KL.Dense(self.network_parameter, activation='relu', name='dense1')(x)
x = KL.Dense(1, activation='sigmoid', name='output')(x)
model = Model(input_layer, x)
return model
def fit(self, x, y, x_val, y_val, epoch, batch_size):
def fit(self, x, y, x_val, y_val, epoch, batch_size, save='checkpoints/shortcut11.hdf5', is_show=True):
self.model.compile(loss='mse', optimizer=adam_v2.Adam(learning_rate=0.01 * (batch_size / 256)))
checkpoint = keras.callbacks.ModelCheckpoint(filepath='checkpoints/shortcut11.hdf5', monitor='val_loss',
mode="min", save_best_only=True)
callbacks = []
checkpoint = keras.callbacks.ModelCheckpoint(filepath=save, monitor='val_loss',
mode="min", save_best_only=True)
callbacks.append(checkpoint)
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1e-6,
patience=200, verbose=0, mode='auto')
lr_decay = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=25, min_delta=1e-6)
callbacks = [checkpoint, early_stop, lr_decay]
history = self.model.fit(x, y, validation_data=(x_val, y_val), epochs=epoch, verbose=1,
callbacks.append(early_stop)
callbacks.append(lr_decay)
verbose_num = 1 if is_show else 0
history = self.model.fit(x, y, validation_data=(x_val, y_val), epochs=epoch, verbose=verbose_num,
callbacks=callbacks, batch_size=batch_size)
return history
@ -257,8 +262,56 @@ class Plain11(object):
return history
class SimpleCNN(object):
    """Baseline plain 1-D CNN regressor (no short-cut connections), used for comparison."""

    def __init__(self, model_path=None, input_shape=None):
        # input_shape: shape of one sample, e.g. (1, 102) — TODO confirm (rows, bands) layout
        self.model = None
        self.input_shape = input_shape
        if model_path is not None:
            # TODO: loading from the file (not implemented yet, mirrors the other model classes)
            pass
        else:
            self.model = self.build_model()

    def build_model(self):
        """Build the 4-conv-layer network and return the uncompiled Keras Model."""
        input_layer = KL.Input(self.input_shape, name='input')
        x = KL.Conv1D(8, 7, padding='same', strides=3, name='Conv1')(input_layer)
        x = KL.BatchNormalization()(x)
        x = KL.Activation('relu')(x)
        x = KL.Conv1D(8, 3, padding='same', strides=3, name='Conv2')(x)
        x = KL.BatchNormalization()(x)
        x = KL.Activation('relu')(x)
        x = KL.Conv1D(8, 3, padding='same', strides=1, name='Conv3')(x)
        x = KL.BatchNormalization()(x)
        x = KL.Activation('relu')(x)
        x = KL.Conv1D(8, 9, padding='same', strides=3, name='Conv4')(x)
        x = KL.BatchNormalization()(x)
        x = KL.Activation('relu')(x)
        x = KL.Dense(20, activation='relu', name='dense')(x)
        # Sigmoid output: targets are assumed scaled to [0, 1] — TODO confirm against preprocessing
        x = KL.Dense(1, activation='sigmoid', name='output')(x)
        model = Model(input_layer, x)
        return model

    def fit(self, x, y, x_val, y_val, epoch, batch_size, save='checkpoints/simplecnn.hdf5', is_show=True):
        """Compile and train the model.

        The best weights (lowest val_loss) are checkpointed to `save`.
        Returns the Keras History object.
        """
        # Learning rate scales linearly with batch size (base 0.01 at batch size 256),
        # matching the other model classes in this file.
        self.model.compile(loss='mse', optimizer=adam_v2.Adam(learning_rate=0.01 * (batch_size / 256)))
        # BUG FIX: the checkpoint previously wrote to 'checkpoints/plain5.hdf5',
        # clobbering the Plain5 model's saved weights. Use a model-specific,
        # caller-overridable path instead (consistent with ShortCut11.fit).
        checkpoint = keras.callbacks.ModelCheckpoint(filepath=save, monitor='val_loss',
                                                     mode="min", save_best_only=True)
        early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
                                                   patience=1000, verbose=0, mode='auto')
        lr_decay = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                                                     patience=25, min_delta=1e-6)
        callbacks = [checkpoint, early_stop, lr_decay]
        # is_show toggles Keras progress output (consistent with ShortCut11.fit).
        verbose_num = 1 if is_show else 0
        history = self.model.fit(x, y, validation_data=(x_val, y_val), epochs=epoch, verbose=verbose_num,
                                 callbacks=callbacks, batch_size=batch_size)
        return history
if __name__ == '__main__':
# plain5 = Plain5(model_path=None, input_shape=(1, 102))
# plain11 = Plain11(model_path=None, input_shape=(1, 102))
residual5 = Residual5(model_path=None, input_shape=(1, 102))
short5 = ShortCut5(model_path=None, input_shape=(1, 102))
sample = SimpleCNN(model_path=None, input_shape=(1, 102))