repair commit

This commit is contained in:
karllzy 2022-05-28 23:03:14 +08:00
commit ec4296de9b
16 changed files with 8721 additions and 0 deletions

148
.gitignore vendored Normal file
View File

@ -0,0 +1,148 @@
dataset/*
checkpoints/*
.idea
.ipynb_checkpoints
.DS_Store
### Python template
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
!/dataset/

127
01_preprocess.ipynb Normal file
View File

@ -0,0 +1,127 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "dd2c8c55",
"metadata": {},
"source": [
"# Preprocessing"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "716880ac",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from sklearn.model_selection import train_test_split\n",
"from scipy.io import savemat, loadmat\n",
"import os"
]
},
{
"cell_type": "markdown",
"id": "4d7dc4a0",
"metadata": {},
"source": [
"## Step 1: \n",
"Convert the dataset to mat format for Matlab."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "711356a2",
"metadata": {},
"outputs": [],
"source": [
"dataset = pd.read_csv('preprocess/dataset/mango/NAnderson2020MendeleyMangoNIRData.csv')\n",
"y = dataset.DM\n",
"x = dataset.loc[:, '684': '990']\n",
"savemat('preprocess/dataset/mango/mango_origin.mat', {'x': x.values, 'y': y.values})"
]
},
{
"cell_type": "markdown",
"id": "3e41e8e6",
"metadata": {},
"source": []
},
{
"cell_type": "markdown",
"id": "ea5e54fd",
"metadata": {},
"source": [
"## Step 3:\n",
"Data split with train test split."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "6eac026e",
"metadata": {},
"outputs": [],
"source": [
"data = loadmat('preprocess/dataset/mango/mango_preprocessed.mat')\n",
"x, y = data['x'], data['y']\n",
"x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=24)\n",
"if not os.path.exists('mango'):\n",
" os.makedirs('mango')\n",
"savemat('preprocess/dataset/mango/mango_dm_split.mat',{'x_train':x_train, 'y_train':y_train, 'x_test':x_test, 'y_test':y_test,\n",
" 'max_y': data['max_y'], 'min_y': data['min_y'],\n",
" 'min_x':data['min_x'], 'max_x':data['max_x']})"
]
},
{
"cell_type": "markdown",
"id": "b2977dae",
"metadata": {},
"source": [
"## Step 4:\n",
"Show data with pictures\n",
"use `draw_pics_origin` to draw original spectra\n",
"![img](./preprocess/pics/raw.png)"
]
},
{
"cell_type": "markdown",
"source": [
"use `draw_pics_preprocessed.m` to draw preprocessed spectra\n",
"![img](./preprocess/pics/preprocessed.png)"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
}
}
}
],
"metadata": {
"interpreter": {
"hash": "7f619fc91ee8bdab81d49e7c14228037474662e3f2d607687ae505108922fa06"
},
"kernelspec": {
"display_name": "Python 3.9.7 64-bit ('base': conda)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

7135
02_model_training.ipynb Normal file

File diff suppressed because one or more lines are too long

162
03_model_evaluating.ipynb Normal file
View File

@ -0,0 +1,162 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"collapsed": true,
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"# Experiment 2: Model Evaluation"
]
},
{
"cell_type": "code",
"execution_count": 11,
"outputs": [],
"source": [
"import numpy as np\n",
"from keras.models import load_model\n",
"from matplotlib import ticker\n",
"from scipy.io import loadmat\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.metrics import mean_squared_error\n",
"import matplotlib.pyplot as plt\n",
"%matplotlib inline"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "markdown",
"source": [
"In this experiment, we load the model weights saved in experiment 1 and evaluate them on the test dataset."
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
}
}
},
{
"cell_type": "markdown",
"source": [],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
}
}
},
{
"cell_type": "code",
"execution_count": 12,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"shape of data:\n",
"x_train: (5728, 1, 102), y_train: (5728, 1),\n",
"x_val: (2455, 1, 102), y_val: (2455, 1)\n",
"x_test: (3508, 1, 102), y_test: (3508, 1)\n"
]
}
],
"source": [
"data = loadmat('./dataset/mango/mango_dm_split.mat')\n",
"x_train, y_train, x_test, y_test = data['x_train'], data['y_train'], data['x_test'], data['y_test']\n",
"x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.3, random_state=12, shuffle=True)\n",
"x_train, x_val, x_test = x_train[:, np.newaxis, :], x_val[:, np.newaxis, :], x_test[:, np.newaxis, :]\n",
"print(f\"shape of data:\\n\"\n",
" f\"x_train: {x_train.shape}, y_train: {y_train.shape},\\n\"\n",
" f\"x_val: {x_val.shape}, y_val: {y_val.shape}\\n\"\n",
" f\"x_test: {x_test.shape}, y_test: {y_test.shape}\")"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
"source": [
"from sklearn.metrics import r2_score\n",
"\n",
"## Build model and load weights\n",
"plain_5, plain_11 = load_model('./checkpoints/plain5.hdf5'), load_model('./checkpoints/plain11.hdf5')\n",
"shortcut5, shortcut11 = load_model('./checkpoints/shortcut5.hdf5'), load_model('./checkpoints/shortcut11.hdf5')\n",
"models = {'plain 5': plain_5, 'plain 11': plain_11, 'shortcut 5': shortcut5, 'shortcut11': shortcut11}\n",
"results = {model_name: model.predict(x_test).reshape((-1, )) for model_name, model in models.items()}\n",
"for model_name, model_result in results.items():\n",
" print(model_name, \" : \", (1 - mean_squared_error(y_test, model_result)/np.mean(y_test))*100, \"%\")\n",
" print(model_name, \":\", r2_score(y_test, model_result))"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
},
"execution_count": 13,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"plain 5 : 99.41745314181642 %\n",
"plain 5 : 0.902928516828363\n",
"plain 11 : 99.4021812070087 %\n",
"plain 11 : 0.9003837097594369\n",
"shortcut 5 : 99.41646250646849 %\n",
"shortcut 5 : 0.9027634443691182\n",
"shortcut11 : 99.42989627559609 %\n",
"shortcut11 : 0.9050019525259844\n"
]
}
]
},
{
"cell_type": "code",
"execution_count": 13,
"outputs": [],
"source": [],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}

134
04_model_comparision.ipynb Normal file
View File

@ -0,0 +1,134 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"collapsed": true,
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"# Model comparison"
]
},
{
"cell_type": "markdown",
"source": [
"## PLS"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
}
}
},
{
"cell_type": "code",
"execution_count": 44,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"shape of data:\n",
"x_train: (8183, 102), y_train: (8183, 1),\n",
"x_test: (3508, 102), y_test: (3508, 1)\n"
]
}
],
"source": [
"from sklearn.neural_network import MLPRegressor\n",
"from sklearn.svm import SVR\n",
"import numpy as np\n",
"from scipy.io import loadmat\n",
"from sklearn.cross_decomposition import PLSRegression\n",
"from sklearn.metrics import mean_squared_error, r2_score\n",
"\n",
"data = loadmat('./dataset/mango/mango_dm_split.mat')\n",
"x_train, y_train, x_test, y_test = data['x_train'], data['y_train'], data['x_test'], data['y_test']\n",
"print(f\"shape of data:\\n\"\n",
" f\"x_train: {x_train.shape}, y_train: {y_train.shape},\\n\"\n",
" f\"x_test: {x_test.shape}, y_test: {y_test.shape}\")"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
"execution_count": 45,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"PLS RMSE: 0.7512262994028881 %\n",
"PLS R^2: 0.8748209692384972\n",
"SVR RMSE: 2.870635692210643 %\n",
"SVR R^2: 0.5216575965112935\n",
"MLP RMSE: 4.919371298214537 %\n",
"MLP R^2: 0.18027080314424337\n"
]
}
],
"source": [
"pls = PLSRegression(n_components=20)\n",
"svr = SVR(kernel=\"rbf\", degree=30, gamma=\"scale\")\n",
"mlp = MLPRegressor(hidden_layer_sizes=(60, 50, ))\n",
"pls = pls.fit(x_train, y_train.ravel())\n",
"svr = svr.fit(x_train, y_train.ravel())\n",
"mlp = mlp.fit(x_train, y_train.ravel())\n",
"\n",
"models = {'PLS': pls, \"SVR\": svr, \"MLP\": mlp}\n",
"results = {model_name: model.predict(x_test).reshape((-1, )) for model_name, model in models.items()}\n",
"for model_name, model_result in results.items():\n",
" print(model_name, \"RMSE: \", mean_squared_error(y_test, model_result)/np.mean(y_test)*100, \"%\")\n",
" print(model_name, \"R^2: \", r2_score(y_test, model_result))"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
"execution_count": 45,
"outputs": [],
"source": [],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}

View File

@ -0,0 +1,438 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"collapsed": true,
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"# Network Parameter Optimization"
]
},
{
"cell_type": "code",
"execution_count": 2,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"shape of data:\n",
"x_train: (5728, 1, 102), y_train: (5728, 1),\n",
"x_val: (2455, 1, 102), y_val: (2455, 1)\n",
"x_test: (3508, 1, 102), y_test: (3508, 1)\n"
]
}
],
"source": [
"import numpy as np\n",
"import pandas as pd\n",
"from keras.models import load_model\n",
"from sklearn.metrics import r2_score, mean_squared_error\n",
"from sklearn.model_selection import train_test_split\n",
"from scipy.io import loadmat\n",
"from models import ShortCut11\n",
"from numpy.random import seed\n",
"import tensorflow\n",
"import time\n",
"seed(4750)\n",
"tensorflow.random.set_seed(4750)\n",
"time1 = time.time()\n",
"data = loadmat('./dataset/mango/mango_dm_split.mat')\n",
"x_train, y_train, x_test, y_test = data['x_train'], data['y_train'], data['x_test'], data['y_test']\n",
"x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.3, random_state=12, shuffle=True)\n",
"x_train, x_val, x_test = x_train[:, np.newaxis, :], x_val[:, np.newaxis, :], x_test[:, np.newaxis, :]\n",
"print(f\"shape of data:\\n\"\n",
" f\"x_train: {x_train.shape}, y_train: {y_train.shape},\\n\"\n",
" f\"x_val: {x_val.shape}, y_val: {y_val.shape}\\n\"\n",
" f\"x_test: {x_test.shape}, y_test: {y_test.shape}\")"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
"execution_count": 4,
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2022-05-28 22:54:57.730239: W tensorflow/core/platform/profile_utils/cpu_utils.cc:128] Failed to get CPU frequency: 0 Hz\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/1024\n",
"90/90 [==============================] - 1s 5ms/step - loss: 0.0262 - val_loss: 0.0274 - lr: 0.0025\n",
"Epoch 2/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0220 - val_loss: 0.0284 - lr: 0.0025\n",
"Epoch 3/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0166 - val_loss: 0.0279 - lr: 0.0025\n",
"Epoch 4/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0120 - val_loss: 0.0358 - lr: 0.0025\n",
"Epoch 5/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0103 - val_loss: 0.0847 - lr: 0.0025\n",
"Epoch 6/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0092 - val_loss: 0.1446 - lr: 0.0025\n",
"Epoch 7/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0085 - val_loss: 0.0410 - lr: 0.0025\n",
"Epoch 8/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0082 - val_loss: 0.2241 - lr: 0.0025\n",
"Epoch 9/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0076 - val_loss: 0.0755 - lr: 0.0025\n",
"Epoch 10/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0071 - val_loss: 0.2266 - lr: 0.0025\n",
"Epoch 11/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0066 - val_loss: 0.1989 - lr: 0.0025\n",
"Epoch 12/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0057 - val_loss: 0.0612 - lr: 0.0025\n",
"Epoch 13/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0053 - val_loss: 0.2283 - lr: 0.0025\n",
"Epoch 14/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0051 - val_loss: 0.0494 - lr: 0.0025\n",
"Epoch 15/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0045 - val_loss: 0.0220 - lr: 0.0025\n",
"Epoch 16/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0048 - val_loss: 0.2283 - lr: 0.0025\n",
"Epoch 17/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0045 - val_loss: 0.2282 - lr: 0.0025\n",
"Epoch 18/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0047 - val_loss: 0.2219 - lr: 0.0025\n",
"Epoch 19/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0044 - val_loss: 0.2074 - lr: 0.0025\n",
"Epoch 20/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0044 - val_loss: 0.1128 - lr: 0.0025\n",
"Epoch 21/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0043 - val_loss: 0.1590 - lr: 0.0025\n",
"Epoch 22/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0045 - val_loss: 0.2283 - lr: 0.0025\n",
"Epoch 23/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0043 - val_loss: 0.1145 - lr: 0.0025\n",
"Epoch 24/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0041 - val_loss: 0.0923 - lr: 0.0025\n",
"Epoch 25/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0041 - val_loss: 0.2192 - lr: 0.0025\n",
"Epoch 26/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0042 - val_loss: 0.1295 - lr: 0.0025\n",
"Epoch 27/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0040 - val_loss: 0.0876 - lr: 0.0025\n",
"Epoch 28/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0040 - val_loss: 0.1489 - lr: 0.0025\n",
"Epoch 29/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0042 - val_loss: 0.1198 - lr: 0.0025\n",
"Epoch 30/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0040 - val_loss: 0.2951 - lr: 0.0025\n",
"Epoch 31/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0043 - val_loss: 0.1440 - lr: 0.0025\n",
"Epoch 32/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0041 - val_loss: 0.2407 - lr: 0.0025\n",
"Epoch 33/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0042 - val_loss: 0.2239 - lr: 0.0025\n",
"Epoch 34/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0039 - val_loss: 0.2283 - lr: 0.0025\n",
"Epoch 35/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0040 - val_loss: 0.1126 - lr: 0.0025\n",
"Epoch 36/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0042 - val_loss: 0.1264 - lr: 0.0025\n",
"Epoch 37/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0039 - val_loss: 0.1036 - lr: 0.0025\n",
"Epoch 38/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0039 - val_loss: 0.2206 - lr: 0.0025\n",
"Epoch 39/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0040 - val_loss: 0.1827 - lr: 0.0025\n",
"Epoch 40/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0039 - val_loss: 0.0397 - lr: 0.0025\n",
"Epoch 41/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0040 - val_loss: 0.1369 - lr: 0.0012\n",
"Epoch 42/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0038 - val_loss: 0.1498 - lr: 0.0012\n",
"Epoch 43/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0038 - val_loss: 0.0496 - lr: 0.0012\n",
"Epoch 44/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0038 - val_loss: 0.0279 - lr: 0.0012\n",
"Epoch 45/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0037 - val_loss: 0.2063 - lr: 0.0012\n",
"Epoch 46/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0038 - val_loss: 0.2871 - lr: 0.0012\n",
"Epoch 47/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.1589 - lr: 0.0012\n",
"Epoch 48/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.0689 - lr: 0.0012\n",
"Epoch 49/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0038 - val_loss: 0.2208 - lr: 0.0012\n",
"Epoch 50/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.0737 - lr: 0.0012\n",
"Epoch 51/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0037 - val_loss: 0.1130 - lr: 0.0012\n",
"Epoch 52/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.1367 - lr: 0.0012\n",
"Epoch 53/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.2286 - lr: 0.0012\n",
"Epoch 54/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0037 - val_loss: 0.1944 - lr: 0.0012\n",
"Epoch 55/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.0737 - lr: 0.0012\n",
"Epoch 56/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.2995 - lr: 0.0012\n",
"Epoch 57/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.1348 - lr: 0.0012\n",
"Epoch 58/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.0215 - lr: 0.0012\n",
"Epoch 59/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.2241 - lr: 0.0012\n",
"Epoch 60/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.1410 - lr: 0.0012\n",
"Epoch 61/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.3292 - lr: 0.0012\n",
"Epoch 62/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0037 - val_loss: 0.1807 - lr: 0.0012\n",
"Epoch 63/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.1881 - lr: 0.0012\n",
"Epoch 64/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.2342 - lr: 0.0012\n",
"Epoch 65/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.3063 - lr: 0.0012\n",
"Epoch 66/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.1621 - lr: 0.0012\n",
"Epoch 67/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.3250 - lr: 0.0012\n",
"Epoch 68/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.1367 - lr: 0.0012\n",
"Epoch 69/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.0997 - lr: 0.0012\n",
"Epoch 70/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.1286 - lr: 0.0012\n",
"Epoch 71/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.0901 - lr: 0.0012\n",
"Epoch 72/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.2270 - lr: 0.0012\n",
"Epoch 73/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.0607 - lr: 0.0012\n",
"Epoch 74/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.2278 - lr: 0.0012\n",
"Epoch 75/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.3097 - lr: 0.0012\n",
"Epoch 76/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.3258 - lr: 0.0012\n",
"Epoch 77/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.0706 - lr: 0.0012\n",
"Epoch 78/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2239 - lr: 0.0012\n",
"Epoch 79/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.0187 - lr: 0.0012\n",
"Epoch 80/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.0448 - lr: 0.0012\n",
"Epoch 81/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0036 - val_loss: 0.2271 - lr: 0.0012\n",
"Epoch 82/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.0075 - lr: 0.0012\n",
"Epoch 83/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.0744 - lr: 0.0012\n",
"Epoch 84/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.0631 - lr: 0.0012\n",
"Epoch 85/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.3098 - lr: 0.0012\n",
"Epoch 86/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0036 - val_loss: 0.3298 - lr: 0.0012\n",
"Epoch 87/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.1986 - lr: 0.0012\n",
"Epoch 88/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.2268 - lr: 0.0012\n",
"Epoch 89/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.2252 - lr: 0.0012\n",
"Epoch 90/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0035 - val_loss: 0.2258 - lr: 0.0012\n",
"Epoch 91/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.3283 - lr: 0.0012\n",
"Epoch 92/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.0965 - lr: 0.0012\n",
"Epoch 93/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.3204 - lr: 0.0012\n",
"Epoch 94/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.2080 - lr: 0.0012\n",
"Epoch 95/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.3149 - lr: 0.0012\n",
"Epoch 96/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.0237 - lr: 0.0012\n",
"Epoch 97/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0034 - val_loss: 0.2182 - lr: 0.0012\n",
"Epoch 98/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.1821 - lr: 0.0012\n",
"Epoch 99/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2133 - lr: 0.0012\n",
"Epoch 100/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.0865 - lr: 0.0012\n",
"Epoch 101/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2045 - lr: 0.0012\n",
"Epoch 102/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.1170 - lr: 0.0012\n",
"Epoch 103/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.2241 - lr: 0.0012\n",
"Epoch 104/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.3278 - lr: 0.0012\n",
"Epoch 105/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2543 - lr: 0.0012\n",
"Epoch 106/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0035 - val_loss: 0.1133 - lr: 0.0012\n",
"Epoch 107/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0034 - val_loss: 0.2277 - lr: 0.0012\n",
"Epoch 108/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.1973 - lr: 6.2500e-04\n",
"Epoch 109/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0032 - val_loss: 0.0904 - lr: 6.2500e-04\n",
"Epoch 110/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.3171 - lr: 6.2500e-04\n",
"Epoch 111/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.0444 - lr: 6.2500e-04\n",
"Epoch 112/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0033 - val_loss: 0.1970 - lr: 6.2500e-04\n",
"Epoch 113/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.1268 - lr: 6.2500e-04\n",
"Epoch 114/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0032 - val_loss: 0.1479 - lr: 6.2500e-04\n",
"Epoch 115/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0032 - val_loss: 0.0683 - lr: 6.2500e-04\n",
"Epoch 116/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0032 - val_loss: 0.0963 - lr: 6.2500e-04\n",
"Epoch 117/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.0153 - lr: 6.2500e-04\n",
"Epoch 118/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0032 - val_loss: 0.1128 - lr: 6.2500e-04\n",
"Epoch 119/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0032 - val_loss: 0.3028 - lr: 6.2500e-04\n",
"Epoch 120/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.0946 - lr: 6.2500e-04\n",
"Epoch 121/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.3050 - lr: 6.2500e-04\n",
"Epoch 122/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.2079 - lr: 6.2500e-04\n",
"Epoch 123/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.2074 - lr: 6.2500e-04\n",
"Epoch 124/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.0567 - lr: 6.2500e-04\n",
"Epoch 125/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.1866 - lr: 6.2500e-04\n",
"Epoch 126/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0030 - val_loss: 0.1740 - lr: 6.2500e-04\n",
"Epoch 127/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0033 - val_loss: 0.0270 - lr: 6.2500e-04\n",
"Epoch 128/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0032 - val_loss: 0.0321 - lr: 6.2500e-04\n",
"Epoch 129/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0030 - val_loss: 0.1747 - lr: 6.2500e-04\n",
"Epoch 130/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.0297 - lr: 6.2500e-04\n",
"Epoch 131/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0032 - val_loss: 0.0329 - lr: 6.2500e-04\n",
"Epoch 132/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0031 - val_loss: 0.0515 - lr: 6.2500e-04\n",
"Epoch 133/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0030 - val_loss: 0.0786 - lr: 3.1250e-04\n",
"Epoch 134/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.0983 - lr: 3.1250e-04\n",
"Epoch 135/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0030 - val_loss: 0.1133 - lr: 3.1250e-04\n",
"Epoch 136/1024\n",
"90/90 [==============================] - 0s 2ms/step - loss: 0.0031 - val_loss: 0.0323 - lr: 3.1250e-04\n",
"Epoch 137/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0029 - val_loss: 0.0484 - lr: 3.1250e-04\n",
"Epoch 138/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.0828 - lr: 3.1250e-04\n",
"Epoch 139/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.0304 - lr: 3.1250e-04\n",
"Epoch 140/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.0792 - lr: 3.1250e-04\n",
"Epoch 141/1024\n",
"90/90 [==============================] - 0s 1ms/step - loss: 0.0030 - val_loss: 0.2074 - lr: 3.1250e-04\n",
"Epoch 142/1024\n",
"83/90 [==========================>...] - ETA: 0s - loss: 0.0030"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
"\u001B[0;31mKeyboardInterrupt\u001B[0m Traceback (most recent call last)",
"\u001B[0;32m/var/folders/wh/kr5c3dr12834pfk3j7yqnrq40000gn/T/ipykernel_68464/326725923.py\u001B[0m in \u001B[0;36m<module>\u001B[0;34m\u001B[0m\n\u001B[1;32m 4\u001B[0m \u001B[0;32mfor\u001B[0m \u001B[0mi\u001B[0m \u001B[0;32min\u001B[0m \u001B[0mrange\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;36m2\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;36m1000\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 5\u001B[0m \u001B[0mmodel\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mShortCut11\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mnetwork_parameter\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mi\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0minput_shape\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;36m1\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;36m102\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m----> 6\u001B[0;31m \u001B[0mhistory_shortcut_11\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mmodel\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mfit\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mx_train\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0my_train\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mx_val\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0my_val\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mepoch\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mepoch\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mbatch_size\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mbatch_size\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0msave\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0;34m\"/tmp/temp.hdf5\"\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 7\u001B[0m \u001B[0mmodel\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mload_model\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m\"/tmp/temp.hdf5\"\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 8\u001B[0m \u001B[0my_pred\u001B[0m \u001B[0;34m=\u001B[0m 
\u001B[0mmodel\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mpredict\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mx_test\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mreshape\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m-\u001B[0m\u001B[0;36m1\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m)\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/PycharmProjects/sccnn/models.py\u001B[0m in \u001B[0;36mfit\u001B[0;34m(self, x, y, x_val, y_val, epoch, batch_size, save)\u001B[0m\n\u001B[1;32m 197\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 198\u001B[0m history = self.model.fit(x, y, validation_data=(x_val, y_val), epochs=epoch, verbose=1,\n\u001B[0;32m--> 199\u001B[0;31m callbacks=callbacks, batch_size=batch_size)\n\u001B[0m\u001B[1;32m 200\u001B[0m \u001B[0;32mreturn\u001B[0m \u001B[0mhistory\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 201\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/keras/utils/traceback_utils.py\u001B[0m in \u001B[0;36merror_handler\u001B[0;34m(*args, **kwargs)\u001B[0m\n\u001B[1;32m 62\u001B[0m \u001B[0mfiltered_tb\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 63\u001B[0m \u001B[0;32mtry\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m---> 64\u001B[0;31m \u001B[0;32mreturn\u001B[0m \u001B[0mfn\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m*\u001B[0m\u001B[0margs\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m**\u001B[0m\u001B[0mkwargs\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 65\u001B[0m \u001B[0;32mexcept\u001B[0m \u001B[0mException\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0me\u001B[0m\u001B[0;34m:\u001B[0m \u001B[0;31m# pylint: disable=broad-except\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 66\u001B[0m \u001B[0mfiltered_tb\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0m_process_traceback_frames\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0me\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__traceback__\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/keras/engine/training.py\u001B[0m in \u001B[0;36mfit\u001B[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)\u001B[0m\n\u001B[1;32m 1214\u001B[0m _r=1):\n\u001B[1;32m 1215\u001B[0m \u001B[0mcallbacks\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mon_train_batch_begin\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mstep\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m-> 1216\u001B[0;31m \u001B[0mtmp_logs\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mtrain_function\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0miterator\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 1217\u001B[0m \u001B[0;32mif\u001B[0m \u001B[0mdata_handler\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mshould_sync\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 1218\u001B[0m \u001B[0mcontext\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0masync_wait\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/util/traceback_utils.py\u001B[0m in \u001B[0;36merror_handler\u001B[0;34m(*args, **kwargs)\u001B[0m\n\u001B[1;32m 148\u001B[0m \u001B[0mfiltered_tb\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 149\u001B[0m \u001B[0;32mtry\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 150\u001B[0;31m \u001B[0;32mreturn\u001B[0m \u001B[0mfn\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m*\u001B[0m\u001B[0margs\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m**\u001B[0m\u001B[0mkwargs\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 151\u001B[0m \u001B[0;32mexcept\u001B[0m \u001B[0mException\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0me\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 152\u001B[0m \u001B[0mfiltered_tb\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0m_process_traceback_frames\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0me\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__traceback__\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py\u001B[0m in \u001B[0;36m__call__\u001B[0;34m(self, *args, **kwds)\u001B[0m\n\u001B[1;32m 908\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 909\u001B[0m \u001B[0;32mwith\u001B[0m \u001B[0mOptionalXlaContext\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_jit_compile\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 910\u001B[0;31m \u001B[0mresult\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_call\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m*\u001B[0m\u001B[0margs\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m**\u001B[0m\u001B[0mkwds\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 911\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 912\u001B[0m \u001B[0mnew_tracing_count\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mexperimental_get_tracing_count\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py\u001B[0m in \u001B[0;36m_call\u001B[0;34m(self, *args, **kwds)\u001B[0m\n\u001B[1;32m 940\u001B[0m \u001B[0;31m# In this case we have created variables on the first call, so we run the\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 941\u001B[0m \u001B[0;31m# defunned version which is guaranteed to never create variables.\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 942\u001B[0;31m \u001B[0;32mreturn\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_stateless_fn\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m*\u001B[0m\u001B[0margs\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m**\u001B[0m\u001B[0mkwds\u001B[0m\u001B[0;34m)\u001B[0m \u001B[0;31m# pylint: disable=not-callable\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 943\u001B[0m \u001B[0;32melif\u001B[0m \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_stateful_fn\u001B[0m \u001B[0;32mis\u001B[0m \u001B[0;32mnot\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 944\u001B[0m \u001B[0;31m# Release the lock early so that multiple threads can perform the call\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/function.py\u001B[0m in \u001B[0;36m__call__\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m 3128\u001B[0m (graph_function,\n\u001B[1;32m 3129\u001B[0m filtered_flat_args) = self._maybe_define_function(args, kwargs)\n\u001B[0;32m-> 3130\u001B[0;31m return graph_function._call_flat(\n\u001B[0m\u001B[1;32m 3131\u001B[0m filtered_flat_args, captured_inputs=graph_function.captured_inputs) # pylint: disable=protected-access\n\u001B[1;32m 3132\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/function.py\u001B[0m in \u001B[0;36m_call_flat\u001B[0;34m(self, args, captured_inputs, cancellation_manager)\u001B[0m\n\u001B[1;32m 1957\u001B[0m and executing_eagerly):\n\u001B[1;32m 1958\u001B[0m \u001B[0;31m# No tape is watching; skip to running the function.\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m-> 1959\u001B[0;31m return self._build_call_outputs(self._inference_function.call(\n\u001B[0m\u001B[1;32m 1960\u001B[0m ctx, args, cancellation_manager=cancellation_manager))\n\u001B[1;32m 1961\u001B[0m forward_backward = self._select_forward_and_backward_functions(\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/function.py\u001B[0m in \u001B[0;36mcall\u001B[0;34m(self, ctx, args, cancellation_manager)\u001B[0m\n\u001B[1;32m 596\u001B[0m \u001B[0;32mwith\u001B[0m \u001B[0m_InterpolateFunctionError\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 597\u001B[0m \u001B[0;32mif\u001B[0m \u001B[0mcancellation_manager\u001B[0m \u001B[0;32mis\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 598\u001B[0;31m outputs = execute.execute(\n\u001B[0m\u001B[1;32m 599\u001B[0m \u001B[0mstr\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0msignature\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mname\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m,\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 600\u001B[0m \u001B[0mnum_outputs\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_num_outputs\u001B[0m\u001B[0;34m,\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;32m~/miniforge3/lib/python3.9/site-packages/tensorflow/python/eager/execute.py\u001B[0m in \u001B[0;36mquick_execute\u001B[0;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001B[0m\n\u001B[1;32m 56\u001B[0m \u001B[0;32mtry\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 57\u001B[0m \u001B[0mctx\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mensure_initialized\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m---> 58\u001B[0;31m tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,\n\u001B[0m\u001B[1;32m 59\u001B[0m inputs, attrs, num_outputs)\n\u001B[1;32m 60\u001B[0m \u001B[0;32mexcept\u001B[0m \u001B[0mcore\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_NotOkStatusException\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0me\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
"\u001B[0;31mKeyboardInterrupt\u001B[0m: "
]
}
],
"source": [
"model_parameter_optimization = {\"neuron num\":[], \"r2\":[], \"rmse\":[]}\n",
"epoch, batch_size = 1024, 64\n",
"\n",
"for i in range(2, 500):\n",
" model = ShortCut11(network_parameter=i, input_shape=(1, 102))\n",
" history_shortcut_11 = model.fit(x_train, y_train, x_val, y_val, epoch=epoch, batch_size=batch_size, save=\"/tmp/temp.hdf5\")\n",
" model = load_model(\"/tmp/temp.hdf5\")\n",
" y_pred = model.predict(x_test).reshape((-1, ))\n",
" model_parameter_optimization['neuron num'].append(i)\n",
" model_parameter_optimization['r2'].append(r2_score(y_test, y_pred))\n",
" model_parameter_optimization['rmse'].append(mean_squared_error(y_test, y_pred))\n",
"pd.DataFrame(model_parameter_optimization).to_csv(\"./dataset/test_result.csv\")"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%%\n"
}
}
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}

21
README.md Normal file
View File

@ -0,0 +1,21 @@
# SCNet: A deep learning network framework for analyzing near-infrared spectroscopy using short-cut
## Pre-processing
Since the method we propose is a regression model, the classification dataset (wheat kernel) is not used in this work.
The other three datasets (corn, marzipan, soil) were preprocessed manually with Matlab and saved in the subdirectories of the `./preprocess` dir. The original versions of these three datasets are stored in `./preprocess/dataset/`.
The mango dataset is not in Matlab .mat file format, so we converted and saved it with `process.py`.
Meanwhile, we drop the useless part and only keep the data between 684 and 990 nm.
> The data set used in this study comprises a total of 11,691 NIR spectra (684–990 nm in 3 nm sampling with a total 103 variables) and DM measurements performed on 4675 mango fruit across 4 harvest seasons 2015, 2016, 2017 and 2018 [24].
The detailed preprocessing procedure can be found in [./preprocess.ipynb](./01_preprocess.ipynb)
## Network Training
In order to show that our network can prevent the degradation problem, we conducted an experiment comparing the training loss curves of the four models. The detailed information can be found in [model_training.ipynb](./02_model_training.ipynb).
## Network evaluation
After training our models on the training set, we evaluate them on the test set that was set aside beforehand. The evaluation is done with [model_evaluation.ipynb](03_model_evaluating.ipynb).

272
models.py Normal file
View File

@ -0,0 +1,272 @@
import keras.callbacks
import keras.layers as KL
from keras import Model
from keras.optimizers import adam_v2
class Plain5(object):
    """Plain 5-layer 1-D CNN baseline (three Conv-BN-ReLU blocks + dense head).

    Serves as the no-shortcut reference model in the degradation-problem study.
    """

    def __init__(self, model_path=None, input_shape=None):
        """
        :param model_path: path of a saved model to load; loading is not yet
            implemented (TODO), so a fresh model is built only when it is None.
        :param input_shape: shape of one input sample, e.g. (1, 102).
        """
        self.model = None
        self.input_shape = input_shape
        if model_path is not None:
            # TODO: loading from the file
            pass
        else:
            self.model = self.build_model()

    def build_model(self):
        """Assemble the plain (no shortcut) network and return an uncompiled Model."""
        input_layer = KL.Input(self.input_shape, name='input')
        x = input_layer
        # three identical Conv-BN-ReLU blocks named Conv1..Conv3
        for idx in range(1, 4):
            x = KL.Conv1D(8, 3, padding='same', name='Conv%d' % idx)(x)
            x = KL.BatchNormalization()(x)
            x = KL.Activation('relu')(x)
        x = KL.Dense(20, activation='relu', name='dense')(x)
        x = KL.Dense(1, activation='sigmoid', name='output')(x)
        model = Model(input_layer, x)
        return model

    def fit(self, x, y, x_val, y_val, epoch, batch_size, save='checkpoints/plain5.hdf5'):
        """Compile and train the model.

        :param x: training spectra.
        :param y: training targets (scaled to [0, 1] to match the sigmoid output).
        :param x_val: validation spectra used for checkpointing / LR decay.
        :param y_val: validation targets.
        :param epoch: maximum number of epochs.
        :param batch_size: mini-batch size; the learning rate is scaled
            linearly with it (0.01 * batch_size / 256).
        :param save: checkpoint path for the best (lowest val_loss) model;
            defaults to the original hardcoded location for backward compatibility.
        :return: the Keras History object from model.fit.
        """
        self.model.compile(loss='mse', optimizer=adam_v2.Adam(learning_rate=0.01 * (batch_size / 256)))
        checkpoint = keras.callbacks.ModelCheckpoint(filepath=save, monitor='val_loss',
                                                     mode="min", save_best_only=True)
        early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
                                                   patience=1000, verbose=0, mode='auto')
        lr_decay = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=25, min_delta=1e-6)
        callbacks = [checkpoint, early_stop, lr_decay]
        history = self.model.fit(x, y, validation_data=(x_val, y_val), epochs=epoch, verbose=1,
                                 callbacks=callbacks, batch_size=batch_size)
        return history
class Residual5(object):
    """5-layer 1-D CNN with residual (additive) connections around Conv2 and Conv3."""

    def __init__(self, model_path=None, input_shape=None):
        """
        :param model_path: path of a saved model to load; loading is not yet
            implemented (TODO), so a fresh model is built only when it is None.
        :param input_shape: shape of one input sample, e.g. (1, 102).
        """
        self.model = None
        self.input_shape = input_shape
        if model_path is not None:
            # TODO: loading from the file
            pass
        else:
            self.model = self.build_model()

    def build_model(self):
        """Assemble the residual network and return an uncompiled Model."""
        input_layer = KL.Input(self.input_shape, name='input')
        # stem: Conv1-BN-ReLU (its output is the first residual identity)
        fx = KL.Conv1D(8, 3, padding='same', name='Conv1')(input_layer)
        fx = KL.BatchNormalization()(fx)
        x = KL.Activation('relu')(fx)
        # residual block 1: x + F(x)
        fx = KL.Conv1D(8, 3, padding='same', name='Conv2')(x)
        fx = KL.BatchNormalization()(fx)
        fx = KL.Activation('relu')(fx)
        x = KL.Add()([fx, x])  # idiomatic Keras layer instead of raw tensor `fx + x`
        # residual block 2: x + F(x)
        fx = KL.Conv1D(8, 3, padding='same', name='Conv3')(x)
        fx = KL.BatchNormalization()(fx)
        fx = KL.Activation('relu')(fx)
        x = KL.Add()([fx, x])
        x = KL.Dense(20, activation='relu', name='dense')(x)
        x = KL.Dense(1, activation='sigmoid', name='output')(x)
        model = Model(input_layer, x)
        return model

    def fit(self, x, y, x_val, y_val, epoch, batch_size, save='checkpoints/res5.hdf5'):
        """Compile and train the model.

        :param x: training spectra.
        :param y: training targets (scaled to [0, 1]).
        :param x_val: validation spectra.
        :param y_val: validation targets.
        :param epoch: maximum number of epochs.
        :param batch_size: mini-batch size; learning rate scales with it.
        :param save: checkpoint path for the best model; defaults to the
            original hardcoded location for backward compatibility.
        :return: the Keras History object.
        """
        self.model.compile(loss='mse', optimizer=adam_v2.Adam(learning_rate=0.01 * (batch_size / 256)))
        checkpoint = keras.callbacks.ModelCheckpoint(filepath=save, monitor='val_loss',
                                                     mode="min", save_best_only=True)
        early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
                                                   patience=1000, verbose=0, mode='auto')
        lr_decay = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=25, min_delta=1e-6)
        callbacks = [checkpoint, early_stop, lr_decay]
        history = self.model.fit(x, y, validation_data=(x_val, y_val), epochs=epoch, verbose=1,
                                 callbacks=callbacks, batch_size=batch_size)
        return history
class ShortCut5(object):
    """5-layer 1-D CNN with shortcut (concatenation) connections.

    The raw Conv1 output and each subsequent block's activation are
    concatenated along the channel axis before the dense head.
    """

    def __init__(self, model_path=None, input_shape=None):
        """
        :param model_path: path of a saved model to load; loading is not yet
            implemented (TODO), so a fresh model is built only when it is None.
        :param input_shape: shape of one input sample, e.g. (1, 102).
        """
        self.model = None
        self.input_shape = input_shape
        if model_path is not None:
            # TODO: loading from the file
            pass
        else:
            self.model = self.build_model()

    def build_model(self):
        """Assemble the shortcut network and return an uncompiled Model."""
        input_layer = KL.Input(self.input_shape, name='input')
        # x_raw is the Conv1 output BEFORE BN/ReLU — it joins the concat below
        x_raw = KL.Conv1D(8, 3, padding='same', name='Conv1')(input_layer)
        fx1 = KL.BatchNormalization()(x_raw)
        fx1 = KL.Activation('relu')(fx1)
        fx2 = KL.Conv1D(8, 3, padding='same', name='Conv2')(fx1)
        fx2 = KL.BatchNormalization()(fx2)
        fx2 = KL.Activation('relu')(fx2)
        fx3 = KL.Conv1D(8, 3, padding='same', name='Conv3')(fx2)
        fx3 = KL.BatchNormalization()(fx3)
        fx3 = KL.Activation('relu')(fx3)
        # shortcut: concatenate all intermediate feature maps channel-wise
        x = KL.Concatenate(axis=2)([x_raw, fx1, fx2, fx3])
        x = KL.Dense(20, activation='relu', name='dense')(x)
        x = KL.Dense(1, activation='sigmoid', name='output')(x)
        model = Model(input_layer, x)
        return model

    def fit(self, x, y, x_val, y_val, epoch, batch_size, save='checkpoints/shortcut5.hdf5'):
        """Compile and train the model.

        :param x: training spectra.
        :param y: training targets (scaled to [0, 1]).
        :param x_val: validation spectra.
        :param y_val: validation targets.
        :param epoch: maximum number of epochs.
        :param batch_size: mini-batch size; learning rate scales with it.
        :param save: checkpoint path for the best model; defaults to the
            original hardcoded location for backward compatibility.
        :return: the Keras History object.
        """
        self.model.compile(loss='mse', optimizer=adam_v2.Adam(learning_rate=0.01 * (batch_size / 256)))
        checkpoint = keras.callbacks.ModelCheckpoint(filepath=save, monitor='val_loss',
                                                     mode="min", save_best_only=True)
        early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
                                                   patience=1000, verbose=0, mode='auto')
        lr_decay = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=25, min_delta=1e-6)
        callbacks = [checkpoint, early_stop, lr_decay]
        history = self.model.fit(x, y, validation_data=(x_val, y_val), epochs=epoch, verbose=1,
                                 callbacks=callbacks, batch_size=batch_size)
        return history
class ShortCut11(object):
    """11-layer 1-D CNN with shortcut (concatenation) connections.

    Three stacks of three Conv-BN-ReLU layers; the raw Conv1_1 output and
    each stack's activation are concatenated along the channel axis before
    a dense head whose width is controlled by ``network_parameter``.
    """

    def __init__(self, model_path=None, input_shape=None, network_parameter=200):
        """
        :param model_path: path of a saved model to load; loading is not yet
            implemented (TODO), so a fresh model is built only when it is None.
        :param input_shape: shape of one input sample, e.g. (1, 102).
        :param network_parameter: neuron count of the hidden dense layer.
        """
        self.model = None
        self.input_shape = input_shape
        self.network_parameter = network_parameter
        if model_path is not None:
            # TODO: loading from the file
            pass
        else:
            self.model = self.build_model()

    def build_model(self):
        """Assemble the network graph and return an uncompiled Keras Model."""
        spectra_in = KL.Input(self.input_shape, name='input')

        # stack 1 — keep the raw Conv1_1 output for the concatenation below
        raw_features = KL.Conv1D(8, 3, padding='same', name='Conv1_1')(spectra_in)
        net = KL.BatchNormalization()(raw_features)
        net = KL.Activation('relu')(net)
        net = KL.Conv1D(8, 3, padding='same', name='Conv1_2')(net)
        net = KL.BatchNormalization()(net)
        net = KL.Activation('relu')(net)
        net = KL.Conv1D(8, 3, padding='same', name='Conv1_3')(net)
        net = KL.BatchNormalization()(net)
        stack1 = KL.Activation('relu')(net)

        # stack 2
        net = KL.Conv1D(8, 3, padding='same', name='Conv2_1')(stack1)
        net = KL.BatchNormalization()(net)
        net = KL.Activation('relu')(net)
        net = KL.Conv1D(8, 3, padding='same', name='Conv2_2')(net)
        net = KL.BatchNormalization()(net)
        net = KL.Activation('relu')(net)
        net = KL.Conv1D(8, 3, padding='same', name='Conv2_3')(net)
        net = KL.BatchNormalization()(net)
        stack2 = KL.Activation('relu')(net)

        # stack 3
        net = KL.Conv1D(8, 3, padding='same', name='Conv3_1')(stack2)
        net = KL.BatchNormalization()(net)
        net = KL.Activation('relu')(net)
        net = KL.Conv1D(8, 3, padding='same', name='Conv3_2')(net)
        net = KL.BatchNormalization()(net)
        net = KL.Activation('relu')(net)
        net = KL.Conv1D(8, 3, padding='same', name='Conv3_3')(net)
        net = KL.BatchNormalization()(net)
        stack3 = KL.Activation('relu')(net)

        # shortcut: merge every stage's features channel-wise
        merged = KL.Concatenate(axis=2)([raw_features, stack1, stack2, stack3])
        head = KL.Dense(self.network_parameter, activation='relu', name='dense1')(merged)
        prediction = KL.Dense(1, activation='sigmoid', name='output')(head)
        return Model(spectra_in, prediction)

    def fit(self, x, y, x_val, y_val, epoch, batch_size, save='checkpoints/shortcut11.hdf5'):
        """Compile and train the model.

        :param x: training spectra.
        :param y: training targets (scaled to [0, 1]).
        :param x_val: validation spectra used for checkpointing / early stop.
        :param y_val: validation targets.
        :param epoch: maximum number of epochs.
        :param batch_size: mini-batch size; the learning rate is scaled
            linearly with it (0.01 * batch_size / 256).
        :param save: checkpoint path for the best (lowest val_loss) model.
        :return: the Keras History object.
        """
        self.model.compile(loss='mse', optimizer=adam_v2.Adam(learning_rate=0.01 * (batch_size / 256)))
        callbacks = [
            keras.callbacks.ModelCheckpoint(filepath=save, monitor='val_loss',
                                            mode="min", save_best_only=True),
            keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1e-6,
                                          patience=200, verbose=0, mode='auto'),
            keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                                              patience=25, min_delta=1e-6),
        ]
        return self.model.fit(x, y, validation_data=(x_val, y_val), epochs=epoch, verbose=1,
                              callbacks=callbacks, batch_size=batch_size)
class Plain11(object):
    """Plain 11-layer 1-D CNN (nine Conv-BN-ReLU layers + dense head), no shortcuts.

    Deep no-shortcut counterpart of ShortCut11 used to exhibit the
    degradation problem.
    """

    def __init__(self, model_path=None, input_shape=None):
        """
        :param model_path: path of a saved model to load; loading is not yet
            implemented (TODO), so a fresh model is built only when it is None.
        :param input_shape: shape of one input sample, e.g. (1, 102).
        """
        self.model = None
        self.input_shape = input_shape
        if model_path is not None:
            # TODO: loading from the file
            pass
        else:
            self.model = self.build_model()

    def build_model(self):
        """Assemble the plain deep network and return an uncompiled Model."""
        input_layer = KL.Input(self.input_shape, name='input')
        x = input_layer
        # 3 stages x 3 layers, named Conv{stage}_{layer} exactly as before
        for stage in range(1, 4):
            for layer in range(1, 4):
                x = KL.Conv1D(8, 3, padding='same', name='Conv%d_%d' % (stage, layer))(x)
                x = KL.BatchNormalization()(x)
                x = KL.Activation('relu')(x)
        x = KL.Dense(200, activation='relu', name='dense1')(x)
        x = KL.Dense(1, activation='sigmoid', name='output')(x)
        model = Model(input_layer, x)
        return model

    def fit(self, x, y, x_val, y_val, epoch, batch_size, save='checkpoints/plain11.hdf5'):
        """Compile and train the model.

        :param x: training spectra.
        :param y: training targets (scaled to [0, 1]).
        :param x_val: validation spectra.
        :param y_val: validation targets.
        :param epoch: maximum number of epochs.
        :param batch_size: mini-batch size; learning rate scales with it.
        :param save: checkpoint path for the best model; defaults to the
            original hardcoded location for backward compatibility.
        :return: the Keras History object.
        """
        self.model.compile(loss='mse', optimizer=adam_v2.Adam(learning_rate=0.01 * (batch_size / 256)))
        checkpoint = keras.callbacks.ModelCheckpoint(filepath=save, monitor='val_loss',
                                                     mode="min", save_best_only=True)
        early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1e-6,
                                                   patience=200, verbose=0, mode='auto')
        lr_decay = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                                                     patience=25, min_delta=1e-6)
        callbacks = [checkpoint, early_stop, lr_decay]
        history = self.model.fit(x, y, validation_data=(x_val, y_val), epochs=epoch, verbose=1,
                                 callbacks=callbacks, batch_size=batch_size)
        return history
if __name__ == '__main__':
    # Smoke test: build each architecture with the mango input shape (1, 102)
    # to verify the graphs assemble without error. No training is performed.
    # plain5 = Plain5(model_path=None, input_shape=(1, 102))
    # plain11 = Plain11(model_path=None, input_shape=(1, 102))
    residual5 = Residual5(model_path=None, input_shape=(1, 102))
    short5 = ShortCut5(model_path=None, input_shape=(1, 102))

45
preprocess/draw_pics_origin.m Executable file
View File

@ -0,0 +1,45 @@
% Plot the raw (unpreprocessed) spectra of the four datasets in a 1x4 panel:
% corn, marzipan, soil, mango. Each section loads its .mat file, plots all
% spectra against wavelength, and clears the workspace for the next section.
set(gca,'LooseInset',get(gca,'TightInset'))
f = figure;
f.Position(3:4) = [1331 331];
%%% draw the pic of corn spectra
load('dataset/corn.mat');
x = m5spec.data;
wave_length = m5spec.axisscale{2, 1};
subplot(1, 4, 1)
plot(wave_length, x');
xlim([wave_length(1) wave_length(end)]);
xlabel('Wavelength(nm)');
ylabel('Absorbance');
clear
%%% draw the pic of marzipan spectra
load('dataset/marzipan.mat');
x = NIRS1;
wave_length = NIRS1_axis;
subplot(1, 4, 2)
plot(wave_length, x');
xlim([wave_length(1) wave_length(end)]);
xlabel('Wavelength(nm)');
ylabel('Absorbance');
clear
%%% draw the pic of soil spectra
load('dataset/soil.mat');
x = soil.data;
wave_length = soil.axisscale{2, 1};
subplot(1, 4, 3)
plot(wave_length, x');
xlim([wave_length(1) wave_length(end)]);
xlabel('Wavelength(nm)');
ylabel('Absorbance');
clear
% draw the pic of mango spectra (sampled 684:3:990 nm, 3 nm steps)
load('dataset/mango/mango_origin.mat');
wave_length = 684: 3: 990;
subplot(1, 4, 4)
plot(wave_length, x');
xlim([wave_length(1) wave_length(end)]);
xlabel('Wavelength(nm)');
ylabel('Signal intensity');
clear

View File

@ -0,0 +1,48 @@
% Plot the PREPROCESSED spectra of the four datasets in a 1x4 panel:
% corn, marzipan, soil, mango. The first three sections run the shared
% `preprocess` script (SG filter + diff + min-max scaling); diff drops one
% variable, hence the wave_length(1, 1:end-1) when plotting.
set(gca,'LooseInset',get(gca,'TightInset'))
f = figure;
f.Position(3:4) = [1331 331];
%%% draw the pic of corn spectra
load('dataset/corn.mat');
x = m5spec.data;
wave_length = m5spec.axisscale{2, 1};
preprocess;
subplot(1, 4, 1)
plot(wave_length(1, 1:end-1), x');
xlim([wave_length(1) wave_length(end)]);
xlabel('Wavelength(nm)');
ylabel('Absorbance');
clear
%%% draw the pic of marzipan spectra
load('dataset/marzipan.mat');
x = NIRS1;
wave_length = NIRS1_axis;
preprocess;
subplot(1, 4, 2)
plot(wave_length(1, 1:end-1), x');
xlim([wave_length(1) wave_length(end)]);
xlabel('Wavelength(nm)');
ylabel('Absorbance');
clear
%%% draw the pic of soil spectra
load('dataset/soil.mat');
x = soil.data;
wave_length = soil.axisscale{2, 1};
preprocess;
subplot(1, 4, 3)
plot(wave_length(1, 1:end-1), x');
xlim([wave_length(1) wave_length(end)]);
xlabel('Wavelength(nm)');
ylabel('Absorbance');
clear
% draw the pic of mango spectra — already preprocessed on disk, so the axis
% starts at 687 nm (the first point was consumed by the diff step)
load('dataset/mango/mango_preprocessed.mat');
wave_length = 687: 3: 990;
subplot(1, 4, 4)
plot(wave_length, x');
xlim([wave_length(1) wave_length(end)]);
xlabel('Wavelength(nm)');
ylabel('Signal intensity');
clear

Binary file not shown.

After

Width:  |  Height:  |  Size: 89 KiB

BIN
preprocess/pics/raw.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 175 KiB

8
preprocess/preprocess.m Executable file
View File

@ -0,0 +1,8 @@
%% x preprocessing
% Shared preprocessing script. Expects x as (samples x variables) in the
% workspace; applies Savitzky-Golay smoothing (order 2, window 17) along
% each spectrum, a first-difference derivative (drops one variable), then
% global min-max scaling to [0, 1]. Overwrites x in place.
x = x';
x = sgolayfilt(x,2,17);
x =diff(x);
max_x=max(max(x));
min_x=min(min(x));
x=(x-min_x)/(max_x-min_x);
x = x';

15
preprocess/preprocess_mango.m Executable file
View File

@ -0,0 +1,15 @@
%% x preprocessing
% Preprocess the mango dataset: Savitzky-Golay smoothing (order 2,
% window 17), first-difference derivative, then global min-max scaling of
% the spectra (x) to [0, 1]. The targets (y) are min-max scaled as well so
% they match the sigmoid output range of the models. The entire workspace
% (including min_y/max_y needed to un-scale predictions) is saved.
clear;
load('dataset/mango/mango_origin.mat')
x = x';
x = sgolayfilt(x,2,17);
x =diff(x);
max_x=max(max(x));
min_x=min(min(x));
x=(x-min_x)/(max_x-min_x);
x = x';
y = y';
min_y = min(min(y));
max_y = max(max(y));
y = (y-min_y)/(max_y-min_y);
save('dataset/mango/mango_preprocessed.mat')

15
preprocess/train_test_split.m Executable file
View File

@ -0,0 +1,15 @@
% Randomly split (x, y) into train/test sets with a 70/30 ratio.
% Leaves x_train/y_train and x_test/y_test in the workspace and clears the
% intermediates — including the original x and y.
data=[x,y];
test_rate = 0.3;
data_num = size(x, 1);
train_num = round((1-test_rate) * data_num);
idx=randperm(data_num);  % random permutation -> unseeded, non-reproducible split
train_idx=idx(1:train_num);
test_idx=idx(train_num+1:data_num);
data_train=data(train_idx,:);
x_train=data_train(:,1:size(x, 2));
y_train=data_train(:,size(x, 2)+1);
test_data=data(test_idx,:);
x_test=test_data(:,1:size(x, 2));
y_test=test_data(:,size(x, 2)+1);
% NOTE(review): 'train_data' below was never defined (the variable is
% 'data_train', cleared on the next line); MATLAB's clear silently ignores
% unknown names, so this is a harmless typo.
clear data_num train_num idx train_idx test_idx test_data train_data x y;
clear data data_train test_rate;

153
utils.py Executable file
View File

@ -0,0 +1,153 @@
from scipy.io import loadmat
import numpy as np
from sklearn.model_selection import train_test_split
import os
import shutil
def load_data(data_path='./pine_water_cc.mat', validation_rate=0.25):
    """Load a spectroscopy dataset from a .mat file and return its splits.

    :param data_path: path of the .mat file. Two paths are special-cased
        because their splits are stored under different key names; any other
        file is assumed to follow the x_train/x_test/y_train/y_test convention.
    :param validation_rate: fraction of the training data held out for
        validation (not used by the N_100_leaf branch, which reuses the test
        set as validation).
    :return: (x_train, x_test, x_validation, y_train, y_test, y_validation,
        y_max_value, y_min_value) — the last two allow un-scaling predictions.
    """
    if data_path == './pine_water_cc.mat':
        # pine-water dataset: splits stored under DL_* / value_* keys.
        # NOTE(review): unlike the other branches, x is NOT expanded with a
        # channel axis here — presumably the data is already 3-D; confirm
        # against the .mat file before relying on this path.
        data = loadmat(data_path)
        y_train, y_test = data['value_train'], data['value_test']
        print('Value train shape: ', y_train.shape, 'Value test shape', y_test.shape)
        y_max_value, y_min_value = data['value_max'], data['value_min']
        x_train, x_test = data['DL_train'], data['DL_test']
    elif data_path == './N_100_leaf_cc.mat':
        # leaf dataset: no separate validation split is carved out; the test
        # set doubles as the validation set and we return early.
        data = loadmat(data_path)
        y_train, y_test = data['y_train'], data['y_test']
        x_train, x_test = data['x_train'], data['x_test']
        y_max_value, y_min_value = data['max_y'], data['min_y']
        x_train = np.expand_dims(x_train, axis=1)  # add the channel axis
        x_test = np.expand_dims(x_test, axis=1)
        x_validation, y_validation = x_test, y_test
        return x_train, x_test, x_validation, y_train, y_test, y_validation, y_max_value, y_min_value
    else:
        # generic SG17-preprocessed dataset (corn / marzipan / soil / mango).
        data = loadmat(data_path)
        y_train, y_test = data['y_train'], data['y_test']
        x_train, x_test = data['x_train'], data['x_test']
        y_max_value, y_min_value = data['max_y'], data['min_y']
        x_train = np.expand_dims(x_train, axis=1)  # add the channel axis
        x_test = np.expand_dims(x_test, axis=1)
        print('SG17 DATA train shape: ', x_train.shape, 'SG17 DATA test shape', x_test.shape)
        print('Mini value: %s, Max value %s.' % (y_min_value, y_max_value))
    # shared tail for the first and third branches: carve the validation
    # split out of the training data with a fixed seed for reproducibility.
    x_train, x_validation, y_train, y_validation = train_test_split(x_train, y_train, test_size=validation_rate,
                                                                    random_state=8)
    return x_train, x_test, x_validation, y_train, y_test, y_validation, y_max_value, y_min_value
def mkdir_if_not_exist(dir_name, is_delete=False):
    """Create a directory (including parents) if it does not already exist.

    :param dir_name: path of the directory to create.
    :param is_delete: when True, remove an existing directory tree first so
        the result is a fresh, empty directory.
    :return: True on success, False if any exception was raised.
    """
    try:
        if is_delete and os.path.exists(dir_name):
            shutil.rmtree(dir_name)
            print('[Info] 文件夹 "%s" 存在, 删除文件夹.' % dir_name)
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)
            print('[Info] 文件夹 "%s" 不存在, 创建文件夹.' % dir_name)
        return True
    except Exception as e:
        # deliberately broad: callers only care about success/failure
        print('[Exception] %s' % e)
        return False
class Config:
    """Experiment configuration for the CNN / Ms-CNN / Ms-Sc-CNN pipelines.

    Holds data-split, training, evaluation and output-directory settings, and
    can render itself as a human-readable report via show_yourself().
    """

    def __init__(self):
        # Data-related parameters
        self.validation_rate = 0.2
        # Training parameters
        self.train_epoch = 20000
        self.batch_size = 20
        # Which models to train
        self.train_cnn = True
        self.train_ms_cnn = True
        self.train_ms_sc_cnn = True
        # Which models to evaluate
        self.evaluate_cnn = True
        self.evaluate_ms_cnn = True
        self.evaluate_ms_sc_cnn = True
        # Lists of saved checkpoint names to evaluate
        self.evaluate_cnn_name_list = []
        self.evaluate_ms_cnn_name_list = []
        self.evaluate_ms_sc_cnn_name_list = []
        # Output directories for trained models and figures
        self.img_dir = './pictures0331'
        self.checkpoint_dir = './check_points0331'
        # Dataset selection
        self.data_set = './dataset_preprocess/corn/corn_mositure.mat'

    @staticmethod
    def _make_header(text, width=36):
        """Return `text` centred in a row of '=' padded to exactly `width` chars.

        Fixes the original bug where the return value of str.ljust() was
        discarded (strings are immutable), leaving odd-length headers one
        character short of `width`.
        """
        pad = (width - len(text)) // 2
        return ('=' * pad + text + '=' * pad).ljust(width, '=')

    @staticmethod
    def _format_name_list(title, names):
        """Format a (possibly empty) list of saved-model names as report lines."""
        if not names:
            return ''
        return title + ':\n' + ''.join(name + '\n' for name in names)

    def show_yourself(self, to_text_file=None):
        """Print a formatted summary of all settings and return it as a string.

        :param to_text_file: optional path; when given, the summary is also
            written (overwriting) to this file.
        :return: the summary text.
        """
        line_width = 36
        content = '\n'
        content += self._make_header('Data Parameters', line_width) + '\n'
        content += 'Validation Rate: ' + str(self.validation_rate) + '\n'
        content += self._make_header('Training Parameters', line_width) + '\n'
        content += 'Train CNN: ' + str(self.train_cnn) + '\n'
        content += 'Train Ms CNN: ' + str(self.train_ms_cnn) + '\n'
        content += 'Train Ms Sc CNN: ' + str(self.train_ms_sc_cnn) + '\n'
        content += self._make_header('Evaluate Parameters', line_width) + '\n'
        content += 'Train Epoch: ' + str(self.train_epoch) + '\n'
        content += 'Train Batch Size: ' + str(self.batch_size) + '\n'
        content += 'Evaluate CNN: ' + str(self.evaluate_cnn) + '\n'
        content += self._format_name_list('Saved CNNs to Evaluate',
                                          self.evaluate_cnn_name_list)
        content += 'Evaluate Ms CNN: ' + str(self.evaluate_ms_cnn) + '\n'
        content += self._format_name_list('Saved Ms CNNs to Evaluate',
                                          self.evaluate_ms_cnn_name_list)
        # BUG FIX: originally printed self.evaluate_ms_cnn under this label.
        content += 'Evaluate Ms Sc CNN: ' + str(self.evaluate_ms_sc_cnn) + '\n'
        content += self._format_name_list('Saved Ms Sc CNNs to Evaluate',
                                          self.evaluate_ms_sc_cnn_name_list)
        content += self._make_header('Saving Dir', line_width) + '\n'
        content += 'Image Dir: ' + str(self.img_dir) + '\n'
        # BUG FIX: originally printed self.img_dir for the checkpoint dir too.
        content += 'Check Point Dir: ' + str(self.checkpoint_dir) + '\n'
        print(content)
        if to_text_file:
            with open(to_text_file, 'w') as f:
                f.write(content)
        return content
if __name__ == '__main__':
    # Smoke test: dump the configuration report, then load one dataset
    # and show the resulting split shapes and normalization bounds.
    cfg = Config()
    cfg.show_yourself(to_text_file='name.txt')
    splits = load_data(data_path='./yaowan_calibrate.mat', validation_rate=0.25)
    (x_train, x_test, x_validation,
     y_train, y_test, y_validation,
     y_max_value, y_min_value) = splits
    print(x_train.shape, x_test.shape, y_train.shape, y_test.shape,
          x_validation.shape, y_validation.shape, y_max_value, y_min_value)