#libraries
import pandas as pd
import numpy as np
import datetime
from datetime import datetime
import csv
import os
from os import listdir
import json
import sys
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import RNN
#from keras.utils.np_utils import to_categorical
import keras.backend as K
from keras import regularizers,optimizers
from keras.models import load_model
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import RepeatedKFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn import tree
#from sklearn.externals.six import StringIO
#import six
from six import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
np.random.seed(2018)
import pymysql
#establish the connection to the mysql database
host = "192.168.88.187"
port = "3306"
user = "backblaze"
password = "Testing.2023"
database = "backblaze_ml_full"
conn = pymysql.connect(
    host=host,
    port=int(port),
    user=user,
    passwd=password,
    db=database,
    charset='utf8mb4')
sqldf = pd.read_sql_query("select date, serial_number, model, capacity_bytes, days_to_failure, failure, smart_1_normalized, smart_3_normalized, smart_5_normalized, smart_7_normalized, smart_9_normalized, smart_187_normalized, smart_189_normalized, smart_194_normalized, smart_197_normalized from drive_stats where date >= '2014-03-01' and serial_number in (select distinct(serial_number) from drive_stats where failure=1 and date >= '2014-03-01')", conn)
sqldf
/tmp/ipykernel_2178234/1261091465.py:1: UserWarning: pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. Please consider using SQLAlchemy. sqldf = pd.read_sql_query("select date, serial_number, model, capacity_bytes, days_to_failure, failure, smart_1_normalized, smart_3_normalized, smart_5_normalized, smart_7_normalized, smart_9_normalized, smart_187_normalized, smart_189_normalized, smart_194_normalized, smart_197_normalized from drive_stats where date >= '2014-03-01' and serial_number in (select distinct(serial_number) from drive_stats where failure=1 and date >= '2014-03-01')", conn)
date | serial_number | model | capacity_bytes | days_to_failure | failure | smart_1_normalized | smart_3_normalized | smart_5_normalized | smart_7_normalized | smart_9_normalized | smart_187_normalized | smart_189_normalized | smart_194_normalized | smart_197_normalized | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 2014-03-01 | MJ1311YNG36USA | Hitachi HDS5C3030ALA630 | 3000592982016 | 991 | 0 | 100.0 | 138.0 | 100.0 | 100.0 | 98.0 | NaN | NaN | 253.0 | 100.0 |
1 | 2014-03-01 | MJ1311YNG733NA | Hitachi HDS5C3030ALA630 | 3000592982016 | 840 | 0 | 100.0 | 100.0 | 100.0 | 100.0 | 98.0 | NaN | NaN | 250.0 | 100.0 |
2 | 2014-03-01 | W3009AX6 | ST4000DM000 | 4000787030016 | 54 | 0 | 119.0 | 91.0 | 100.0 | 87.0 | 93.0 | 100.0 | 99.0 | 26.0 | 100.0 |
3 | 2014-03-01 | WD-WCAV5M690585 | WDC WD10EADS | 1000204886016 | 409 | 0 | 200.0 | 191.0 | 200.0 | 100.0 | 68.0 | NaN | NaN | 127.0 | 200.0 |
4 | 2014-03-01 | S1F0CSW2 | ST3000DM001 | 3000592982016 | 229 | 0 | 114.0 | 92.0 | 100.0 | 89.0 | 84.0 | 100.0 | 100.0 | 23.0 | 100.0 |
... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
14769522 | 2023-03-31 | 7LZ01G30 | Seagate BarraCuda SSD ZA250CM10002 | 250059350016 | 0 | 0 | 100.0 | NaN | NaN | NaN | 100.0 | NaN | NaN | 83.0 | NaN |
14769523 | 2023-03-31 | 9JG4657T | WDC WUH721414ALE6L4 | 14000519643136 | 0 | 0 | 100.0 | 85.0 | 100.0 | 100.0 | 98.0 | NaN | NaN | 55.0 | 100.0 |
14769524 | 2023-03-31 | 6090A00RFVKG | TOSHIBA MG08ACA16TA | 16000900661248 | 0 | 0 | 100.0 | 100.0 | 100.0 | 100.0 | 87.0 | NaN | NaN | 100.0 | 100.0 |
14769525 | 2023-03-31 | 51R0A2Q8FVGG | TOSHIBA MG08ACA16TE | 16000900661248 | 0 | 0 | 100.0 | 100.0 | 100.0 | 100.0 | 70.0 | NaN | NaN | 100.0 | 100.0 |
14769526 | 2023-03-31 | 7QT032NR | Seagate BarraCuda 120 SSD ZA250CM10003 | 250059350016 | 0 | 0 | 100.0 | NaN | NaN | NaN | 100.0 | NaN | NaN | 96.0 | NaN |
14769527 rows × 15 columns
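The UserWarning above can be avoided by handing pandas a SQLAlchemy engine instead of the raw DBAPI connection; a minimal sketch, assuming the sqlalchemy package is installed and reusing the connection variables defined above:
#sketch only: build a SQLAlchemy engine for the same MySQL database
from sqlalchemy import create_engine
engine = create_engine(f"mysql+pymysql://{user}:{password}@{host}:{port}/{database}?charset=utf8mb4")
#sqldf = pd.read_sql_query(<same SELECT as above>, engine)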
def computeDay(group):
    group = group.sort_values('date')  #sort the observations by date (oldest first)
    group['DayToFailure'] = list(range(group.shape[0]-1, -1, -1))  #newest observation gets 0
    return group
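A tiny illustration of what computeDay produces (toy frame, not the Backblaze data): the newest observation of a drive gets DayToFailure 0, the oldest gets n-1.
#illustrative only; _demo is a throwaway frame
_demo = pd.DataFrame({'date': pd.to_datetime(['2023-01-03', '2023-01-01', '2023-01-02'])})
print(computeDay(_demo)['DayToFailure'].tolist())  #expected: [2, 1, 0]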
#override the series_to_supervised method to work without classes
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    n_vars = data.shape[1]
    cols, names = list(), list()
    dataclass = data[data.columns[-1:]]
    data = data.drop(columns=['serial_number', 'DayToFailure'], axis=1)
    columns = data.columns
    # input sequence (t-n, ... t-1) -- stop before the current observation
    for i in range(n_in-1, 0, -1):
        cols.append(data.shift(i))
        names += [(element + '(t-%d)' % (i)) for element in columns]
    # forecast sequence (t, t+1, ... t+n_out-1)
    for i in range(0, n_out):
        cols.append(data.shift(-i))
        if i == 0:
            names += [(element + '(t)') for element in columns]
        else:
            names += [(element + '(t+%d)' % (i)) for element in columns]
    cols.append(dataclass)  #append the label column (DayToFailure)
    names += ['DayToFailure']
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    if dropnan:
        agg.dropna(inplace=True)
    return agg
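A small, hypothetical example of the supervised framing (feature names f1/f2 are made up): with n_in=3 and n_out=1 each surviving row carries the features at t-2, t-1 and t plus the DayToFailure label at t.
#toy data, not the real SMART columns
_toy = pd.DataFrame({'serial_number': ['A'] * 5,
                     'f1': [1, 2, 3, 4, 5],
                     'f2': [10, 20, 30, 40, 50],
                     'DayToFailure': [4, 3, 2, 1, 0]})
print(series_to_supervised(_toy, n_in=3, n_out=1))
#-> 3 rows with columns f1(t-2), f2(t-2), f1(t-1), f2(t-1), f1(t), f2(t), DayToFailure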
#Preprocessing
df = sqldf.copy()
df = df.drop('model', axis=1)
df = df.drop('capacity_bytes', axis=1)
df.date = pd.to_datetime(df.date, format='%Y-%m-%d').dt.date
scaler = MinMaxScaler(feature_range = (-1,1))
df[['smart_1_normalized', 'smart_3_normalized', 'smart_5_normalized', 'smart_7_normalized',
'smart_9_normalized', 'smart_187_normalized', 'smart_189_normalized', 'smart_194_normalized',
'smart_197_normalized']] = scaler.fit_transform(df[['smart_1_normalized', 'smart_3_normalized',
'smart_5_normalized', 'smart_7_normalized', 'smart_9_normalized', 'smart_187_normalized',
'smart_189_normalized', 'smart_194_normalized', 'smart_197_normalized']])
df
date | serial_number | days_to_failure | failure | smart_1_normalized | smart_3_normalized | smart_5_normalized | smart_7_normalized | smart_9_normalized | smart_187_normalized | smart_189_normalized | smart_194_normalized | smart_197_normalized | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 2014-03-01 | MJ1311YNG36USA | 991 | 0 | -0.005025 | -0.256831 | -0.211155 | -0.211155 | -0.227092 | NaN | NaN | 1.000000 | -0.211155 |
1 | 2014-03-01 | MJ1311YNG733NA | 840 | 0 | -0.005025 | -0.672131 | -0.211155 | -0.211155 | -0.227092 | NaN | NaN | 0.975104 | -0.211155 |
2 | 2014-03-01 | W3009AX6 | 54 | 0 | 0.185930 | -0.770492 | -0.211155 | -0.314741 | -0.266932 | 1.0 | 0.979798 | -0.883817 | -0.211155 |
3 | 2014-03-01 | WD-WCAV5M690585 | 409 | 0 | 1.000000 | 0.322404 | 0.585657 | -0.211155 | -0.466135 | NaN | NaN | -0.045643 | 0.585657 |
4 | 2014-03-01 | S1F0CSW2 | 229 | 0 | 0.135678 | -0.759563 | -0.211155 | -0.298805 | -0.338645 | 1.0 | 1.000000 | -0.908714 | -0.211155 |
... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
14769522 | 2023-03-31 | 7LZ01G30 | 0 | 0 | -0.005025 | NaN | NaN | NaN | -0.211155 | NaN | NaN | -0.410788 | NaN |
14769523 | 2023-03-31 | 9JG4657T | 0 | 0 | -0.005025 | -0.836066 | -0.211155 | -0.211155 | -0.227092 | NaN | NaN | -0.643154 | -0.211155 |
14769524 | 2023-03-31 | 6090A00RFVKG | 0 | 0 | -0.005025 | -0.672131 | -0.211155 | -0.211155 | -0.314741 | NaN | NaN | -0.269710 | -0.211155 |
14769525 | 2023-03-31 | 51R0A2Q8FVGG | 0 | 0 | -0.005025 | -0.672131 | -0.211155 | -0.211155 | -0.450199 | NaN | NaN | -0.269710 | -0.211155 |
14769526 | 2023-03-31 | 7QT032NR | 0 | 0 | -0.005025 | NaN | NaN | NaN | -0.211155 | NaN | NaN | -0.302905 | NaN |
14769527 rows × 13 columns
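For reference, the fitted scaler can map the scaled SMART columns back to their original ranges; a sketch (the column list must match the one passed to fit_transform above, smart_cols/restored are illustrative names):
smart_cols = ['smart_1_normalized', 'smart_3_normalized', 'smart_5_normalized',
              'smart_7_normalized', 'smart_9_normalized', 'smart_187_normalized',
              'smart_189_normalized', 'smart_194_normalized', 'smart_197_normalized']
#NaNs stay NaN, exactly as in the forward transform
restored = pd.DataFrame(scaler.inverse_transform(df[smart_cols]), columns=smart_cols)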
#cleanup garbage entries -> some drives report a failure on a given date
#and then keep reporting measurements after that date -> those rows need to be removed
test = df.copy()
#test
test2 = pd.DataFrame({'serial_number':test.loc[test['failure'] == 1]['serial_number'], 'failure_date':test.loc[test['failure'] == 1]['date']})
#test2
test3 = test.join(test2.set_index('serial_number'), on='serial_number')
#test3
clean = test3.drop(test3[test3['date'] > test3['failure_date']].index)
clean = clean.drop('failure_date', axis=1)
clean
date | serial_number | days_to_failure | failure | smart_1_normalized | smart_3_normalized | smart_5_normalized | smart_7_normalized | smart_9_normalized | smart_187_normalized | smart_189_normalized | smart_194_normalized | smart_197_normalized | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 2014-03-01 | MJ1311YNG36USA | 991 | 0 | -0.005025 | -0.256831 | -0.211155 | -0.211155 | -0.227092 | NaN | NaN | 1.000000 | -0.211155 |
1 | 2014-03-01 | MJ1311YNG733NA | 840 | 0 | -0.005025 | -0.672131 | -0.211155 | -0.211155 | -0.227092 | NaN | NaN | 0.975104 | -0.211155 |
2 | 2014-03-01 | W3009AX6 | 54 | 0 | 0.185930 | -0.770492 | -0.211155 | -0.314741 | -0.266932 | 1.000000 | 0.979798 | -0.883817 | -0.211155 |
3 | 2014-03-01 | WD-WCAV5M690585 | 409 | 0 | 1.000000 | 0.322404 | 0.585657 | -0.211155 | -0.466135 | NaN | NaN | -0.045643 | 0.585657 |
4 | 2014-03-01 | S1F0CSW2 | 229 | 0 | 0.135678 | -0.759563 | -0.211155 | -0.298805 | -0.338645 | 1.000000 | 1.000000 | -0.908714 | -0.211155 |
... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
14769333 | 2023-03-30 | 2AGMNB7Y | 0 | 1 | -0.005025 | -0.672131 | -0.211155 | -0.211155 | -0.243028 | NaN | NaN | 0.078838 | -0.211155 |
14769335 | 2023-03-30 | 8HH0KRGH | 0 | 1 | -0.246231 | -0.672131 | -0.211155 | -0.211155 | -0.235060 | NaN | NaN | 0.278008 | -0.211155 |
14769341 | 2023-03-30 | ZLW16KEQ | 0 | 1 | -0.226131 | -0.781421 | -0.211155 | -0.362550 | -0.402390 | 0.979798 | NaN | -0.809129 | -0.211155 |
14769343 | 2023-03-30 | X0GE5KSC | 0 | 1 | -0.005025 | -0.737705 | -0.211155 | -0.211155 | -0.235060 | NaN | NaN | -0.551867 | -0.211155 |
14769346 | 2023-03-30 | 61B0A03NF97G | 0 | 1 | -0.005025 | -0.672131 | -0.211155 | -0.211155 | -0.498008 | NaN | NaN | -0.269710 | -0.211155 |
14442321 rows × 13 columns
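A quick sanity check on the cleanup (illustrative; _failures and _check are throwaway names): no failed drive should still have observations dated after its failure date.
_failures = clean.loc[clean['failure'] == 1, ['serial_number', 'date']].rename(columns={'date': 'failure_date'})
_check = clean.merge(_failures, on='serial_number', how='inner')
assert (_check['date'] > _check['failure_date']).sum() == 0  #should hold after the drop above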
df=clean.copy()
df = df.drop(columns= ['days_to_failure'], axis = 1)
df
date | serial_number | failure | smart_1_normalized | smart_3_normalized | smart_5_normalized | smart_7_normalized | smart_9_normalized | smart_187_normalized | smart_189_normalized | smart_194_normalized | smart_197_normalized | |
---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 2014-03-01 | MJ1311YNG36USA | 0 | -0.005025 | -0.256831 | -0.211155 | -0.211155 | -0.227092 | NaN | NaN | 1.000000 | -0.211155 |
1 | 2014-03-01 | MJ1311YNG733NA | 0 | -0.005025 | -0.672131 | -0.211155 | -0.211155 | -0.227092 | NaN | NaN | 0.975104 | -0.211155 |
2 | 2014-03-01 | W3009AX6 | 0 | 0.185930 | -0.770492 | -0.211155 | -0.314741 | -0.266932 | 1.000000 | 0.979798 | -0.883817 | -0.211155 |
3 | 2014-03-01 | WD-WCAV5M690585 | 0 | 1.000000 | 0.322404 | 0.585657 | -0.211155 | -0.466135 | NaN | NaN | -0.045643 | 0.585657 |
4 | 2014-03-01 | S1F0CSW2 | 0 | 0.135678 | -0.759563 | -0.211155 | -0.298805 | -0.338645 | 1.000000 | 1.000000 | -0.908714 | -0.211155 |
... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
14769333 | 2023-03-30 | 2AGMNB7Y | 1 | -0.005025 | -0.672131 | -0.211155 | -0.211155 | -0.243028 | NaN | NaN | 0.078838 | -0.211155 |
14769335 | 2023-03-30 | 8HH0KRGH | 1 | -0.246231 | -0.672131 | -0.211155 | -0.211155 | -0.235060 | NaN | NaN | 0.278008 | -0.211155 |
14769341 | 2023-03-30 | ZLW16KEQ | 1 | -0.226131 | -0.781421 | -0.211155 | -0.362550 | -0.402390 | 0.979798 | NaN | -0.809129 | -0.211155 |
14769343 | 2023-03-30 | X0GE5KSC | 1 | -0.005025 | -0.737705 | -0.211155 | -0.211155 | -0.235060 | NaN | NaN | -0.551867 | -0.211155 |
14769346 | 2023-03-30 | 61B0A03NF97G | 1 | -0.005025 | -0.672131 | -0.211155 | -0.211155 | -0.498008 | NaN | NaN | -0.269710 | -0.211155 |
14442321 rows × 12 columns
dfHour = df.groupby(['serial_number']).apply(computeDay)
dfHour = dfHour[dfHour.DayToFailure <= 45]
dfHour = dfHour.drop(columns = ['date'])
dfHour= dfHour.drop(columns= ['failure','serial_number'], axis=1)
dfHour=dfHour.reset_index()
dfHour= dfHour.drop(columns= ['level_1'], axis=1)
window=15
print('Creating the sequence')
dfHourSequence = dfHour.groupby(['serial_number']).apply(series_to_supervised, n_in=window, n_out=1, dropnan=True)
dfHourSequence
Creating the sequence
smart_1_normalized(t-14) | smart_3_normalized(t-14) | smart_5_normalized(t-14) | smart_7_normalized(t-14) | smart_9_normalized(t-14) | smart_187_normalized(t-14) | smart_189_normalized(t-14) | smart_194_normalized(t-14) | smart_197_normalized(t-14) | smart_1_normalized(t-13) | ... | smart_1_normalized(t) | smart_3_normalized(t) | smart_5_normalized(t) | smart_7_normalized(t) | smart_9_normalized(t) | smart_187_normalized(t) | smart_189_normalized(t) | smart_194_normalized(t) | smart_197_normalized(t) | DayToFailure | ||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
serial_number | ||||||||||||||||||||||
5VML01P0 | 42815 | 0.195980 | -0.704918 | -0.211155 | -0.314741 | -0.593625 | 1.0 | 1.0 | -0.875519 | -0.211155 | 0.165829 | ... | 0.165829 | -0.704918 | -0.211155 | -0.314741 | -0.601594 | 1.000000 | 1.0 | -0.875519 | -0.211155 | 31 |
42816 | 0.165829 | -0.704918 | -0.211155 | -0.314741 | -0.593625 | 1.0 | 1.0 | -0.875519 | -0.211155 | 0.015075 | ... | 0.105528 | -0.704918 | -0.211155 | -0.314741 | -0.601594 | 1.000000 | 1.0 | -0.875519 | -0.211155 | 30 | |
42817 | 0.015075 | -0.704918 | -0.211155 | -0.314741 | -0.593625 | 1.0 | 1.0 | -0.883817 | -0.211155 | 0.165829 | ... | 0.175879 | -0.704918 | -0.211155 | -0.314741 | -0.601594 | 1.000000 | 1.0 | -0.875519 | -0.211155 | 29 | |
42818 | 0.165829 | -0.704918 | -0.211155 | -0.314741 | -0.593625 | 1.0 | 1.0 | -0.875519 | -0.211155 | 0.095477 | ... | 0.135678 | -0.704918 | -0.211155 | -0.314741 | -0.601594 | 1.000000 | 1.0 | -0.875519 | -0.211155 | 28 | |
42819 | 0.095477 | -0.704918 | -0.211155 | -0.314741 | -0.593625 | 1.0 | 1.0 | -0.875519 | -0.211155 | 0.165829 | ... | 0.175879 | -0.704918 | -0.211155 | -0.314741 | -0.601594 | 1.000000 | 1.0 | -0.875519 | -0.211155 | 27 | |
... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
ZTT3STWF | 785169 | -0.226131 | -0.693989 | -0.211155 | -0.330677 | -0.274900 | 1.0 | 1.0 | -0.941909 | -0.211155 | -0.175879 | ... | -0.507538 | -0.693989 | -0.211155 | -0.330677 | -0.274900 | 0.414141 | 1.0 | -0.950207 | -0.211155 | 4 |
785170 | -0.175879 | -0.693989 | -0.211155 | -0.330677 | -0.274900 | 1.0 | 1.0 | -0.941909 | -0.211155 | -0.185930 | ... | -0.507538 | -0.693989 | -0.211155 | -0.330677 | -0.274900 | 0.414141 | 1.0 | -0.950207 | -0.211155 | 3 | |
785171 | -0.185930 | -0.693989 | -0.211155 | -0.330677 | -0.274900 | 1.0 | 1.0 | -0.941909 | -0.211155 | -0.195980 | ... | -0.507538 | -0.693989 | -0.211155 | -0.330677 | -0.274900 | 0.414141 | 1.0 | -0.950207 | -0.211155 | 2 | |
785172 | -0.195980 | -0.693989 | -0.211155 | -0.330677 | -0.274900 | 1.0 | 1.0 | -0.941909 | -0.211155 | -0.195980 | ... | -0.507538 | -0.693989 | -0.211155 | -0.330677 | -0.274900 | 0.414141 | 1.0 | -0.950207 | -0.211155 | 1 | |
785173 | -0.195980 | -0.693989 | -0.211155 | -0.330677 | -0.274900 | 1.0 | 1.0 | -0.941909 | -0.211155 | -0.185930 | ... | -0.507538 | -0.693989 | -0.211155 | -0.330677 | -0.274900 | 0.414141 | 1.0 | -0.950207 | -0.211155 | 0 |
297652 rows × 136 columns
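A rough upper bound on the sequence count per drive (illustrative; dropna also removes rows with missing SMART values, so the real total is lower):
#each drive keeps at most 46 observations (DayToFailure 0..45), and a 15-step
#window turns n observations into at most n - window + 1 sequences
print('max sequences per drive:', 46 - window + 1)  # 32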
print('Dividing into train test')
X_train, X_rim, y_train, y_rim = train_test_split(dfHourSequence[dfHourSequence.columns[:-1]],
dfHourSequence[dfHourSequence.columns[-1:]] ,
stratify=dfHourSequence[dfHourSequence.columns[-1:]],
test_size=0.30)
Dividing into train test
print(y_train)
print(y_train.columns)
                        DayToFailure
serial_number
W300R8E5      276228               0
S300Z7TB      208856               2
Z303XYVL      423370               7
ZA11TPF5      517945               6
ZA13YP2L      543729               2
...                              ...
Z305D68J      486279               8
ZA180ZZE      587034               2
9XW04MBA      94294                8
ZA13KB6X      538436              14
Z303WPCM      422614              18

[208356 rows x 1 columns]
Index(['DayToFailure'], dtype='object')
X_val, X_test, y_val, y_test = train_test_split(X_rim, y_rim ,stratify=y_rim, test_size=0.50)
X_train = pd.concat([X_train, pd.DataFrame(columns = ['DayToFailure'])], sort = True)
X_val = pd.concat([X_val, pd.DataFrame(columns = ['DayToFailure'])], sort = True)
X_test = pd.concat([X_test, pd.DataFrame(columns = ['DayToFailure'])], sort = True)
X_train[['DayToFailure']] = y_train.values
X_val[['DayToFailure']] = y_val.values
X_test[['DayToFailure']] = y_test.values
X_train
#X_val
#X_test
DayToFailure | smart_187_normalized(t) | smart_187_normalized(t-1) | smart_187_normalized(t-10) | smart_187_normalized(t-11) | smart_187_normalized(t-12) | smart_187_normalized(t-13) | smart_187_normalized(t-14) | smart_187_normalized(t-2) | smart_187_normalized(t-3) | ... | smart_9_normalized(t-13) | smart_9_normalized(t-14) | smart_9_normalized(t-2) | smart_9_normalized(t-3) | smart_9_normalized(t-4) | smart_9_normalized(t-5) | smart_9_normalized(t-6) | smart_9_normalized(t-7) | smart_9_normalized(t-8) | smart_9_normalized(t-9) | ||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
W300R8E5 | 276228 | 0 | 0.939394 | 0.959596 | 0.959596 | 0.959596 | 0.959596 | 0.959596 | 0.959596 | 0.959596 | 0.959596 | ... | -0.338645 | -0.338645 | -0.338645 | -0.338645 | -0.338645 | -0.338645 | -0.338645 | -0.338645 | -0.338645 | -0.338645 |
S300Z7TB | 208856 | 2 | 0.919192 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | ... | -0.386454 | -0.386454 | -0.386454 | -0.386454 | -0.386454 | -0.386454 | -0.386454 | -0.386454 | -0.386454 | -0.386454 |
Z303XYVL | 423370 | 7 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | ... | -0.537849 | -0.537849 | -0.537849 | -0.537849 | -0.537849 | -0.537849 | -0.537849 | -0.537849 | -0.537849 | -0.537849 |
ZA11TPF5 | 517945 | 6 | 0.959596 | 0.979798 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 0.979798 | 1.000000 | ... | -0.482072 | -0.482072 | -0.482072 | -0.482072 | -0.482072 | -0.482072 | -0.482072 | -0.482072 | -0.482072 | -0.482072 |
ZA13YP2L | 543729 | 2 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | ... | -0.617530 | -0.617530 | -0.617530 | -0.617530 | -0.617530 | -0.617530 | -0.617530 | -0.617530 | -0.617530 | -0.617530 |
... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
Z305D68J | 486279 | 8 | 0.393939 | 0.393939 | 0.393939 | 0.393939 | 0.393939 | 0.393939 | 0.393939 | 0.393939 | 0.393939 | ... | -0.322709 | -0.322709 | -0.322709 | -0.322709 | -0.322709 | -0.322709 | -0.322709 | -0.322709 | -0.322709 | -0.322709 |
ZA180ZZE | 587034 | 2 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | ... | -0.513944 | -0.513944 | -0.521912 | -0.521912 | -0.521912 | -0.521912 | -0.521912 | -0.521912 | -0.521912 | -0.521912 |
9XW04MBA | 94294 | 8 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | ... | -0.442231 | -0.442231 | -0.442231 | -0.442231 | -0.442231 | -0.442231 | -0.442231 | -0.442231 | -0.442231 | -0.442231 |
ZA13KB6X | 538436 | 14 | 0.939394 | 0.939394 | 0.939394 | 0.939394 | 0.939394 | 0.939394 | 0.939394 | 0.939394 | 0.939394 | ... | -0.553785 | -0.553785 | -0.553785 | -0.553785 | -0.553785 | -0.553785 | -0.553785 | -0.553785 | -0.553785 | -0.553785 |
Z303WPCM | 422614 | 18 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | 1.000000 | ... | -0.211155 | -0.211155 | -0.211155 | -0.211155 | -0.211155 | -0.211155 | -0.211155 | -0.211155 | -0.211155 | -0.211155 |
208356 rows × 136 columns
X_train.shape
(208356, 136)
X_val.shape
(44648, 136)
Xtrain = X_train.copy()
Xtrain = Xtrain.drop(columns=['DayToFailure'], axis=1 )
Xtrain.shape
#Xtrain
(208356, 135)
Xval = X_val.copy()
Xval = Xval.drop(columns=['DayToFailure'], axis=1 )
Xval.shape
(44648, 135)
yTest = X_test[['DayToFailure']].values
#yTest
Xtest = X_test.drop(columns=['DayToFailure'], axis=1 )
#Xtest
#reshape with window
Xtrain = Xtrain.values.reshape(Xtrain.shape[0], window, int(Xtrain.shape[1]/window))
Xval = Xval.values.reshape(Xval.shape[0], window, int(Xval.shape[1]/window))
Xtest= Xtest.values.reshape(Xtest.shape[0], window, int(Xtest.shape[1]/window))
ytrain = X_train[['DayToFailure']].values
yVal = X_val[['DayToFailure']].values
print(Xtrain.shape)
print(Xval.shape)
print(Xtest.shape)
print(ytrain.shape)
print(yVal.shape)
print(yTest.shape)
(208356, 15, 9)
(44648, 15, 9)
(44648, 15, 9)
(208356, 1)
(44648, 1)
(44648, 1)
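The reshape above assumes row-major order, i.e. each consecutive block of 9 columns becomes one time step; printing the column grouping makes that mapping explicit (flat_cols is just an inspection helper):
flat_cols = X_train.drop(columns=['DayToFailure']).columns
for t in range(window):
    print('time step %d:' % t, list(flat_cols[t*9:(t+1)*9]))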
from tensorflow.keras.layers import Bidirectional
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
import keras
#same as experiment6 - BiLSTM
def build_model():
    dp_lvl = 0.1
    model = Sequential()
    model.add(Bidirectional(LSTM(128, input_shape=(window, 9), return_sequences=True, activation="tanh")))
    model.add(Bidirectional(LSTM(64, return_sequences=True, activation="tanh")))
    model.add(Bidirectional(LSTM(32, activation="tanh")))
    model.add(Dense(96, activation='relu'))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(1))
    return model
#same as experiment6 - BiLSTM training loop
epoch = 150
historyvet = []
model = build_model()
best_acc = 0.0
#adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0, amsgrad=False)
adam = tf.keras.optimizers.legacy.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0, amsgrad=False)
#adam = tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(loss='mse', optimizer=adam)
for epoch in range(0, epoch):
    print('Epoch {%d}' % (epoch))
    #model.fit(Xtrain, ytrain, epochs=1, batch_size=500, validation_data=(Xval, yVal), shuffle=True)
    history = model.fit(Xtrain, ytrain, epochs=1, batch_size=500, validation_data=(Xval, yVal))
    historyvet.append(history.history)
model.save('bilstm_predict_rul_experiment8_extended_full_take1.h5')
model.summary()
/usr/local/lib/python3.10/dist-packages/keras/src/optimizers/legacy/adam.py:118: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead. super().__init__(name, **kwargs)
Epoch {0} 417/417 [==============================] - 77s 152ms/step - loss: 92.7738 - val_loss: 85.0679 Epoch {1} 417/417 [==============================] - 59s 142ms/step - loss: 84.9058 - val_loss: 84.3463 Epoch {2} 417/417 [==============================] - 60s 144ms/step - loss: 84.4310 - val_loss: 84.2781 Epoch {3} 417/417 [==============================] - 59s 142ms/step - loss: 84.3702 - val_loss: 84.1577 Epoch {4} 417/417 [==============================] - 60s 144ms/step - loss: 84.2015 - val_loss: 83.9575 Epoch {5} 417/417 [==============================] - 60s 144ms/step - loss: 84.0918 - val_loss: 84.2412 Epoch {6} 417/417 [==============================] - 60s 143ms/step - loss: 84.1157 - val_loss: 83.4742 Epoch {7} 417/417 [==============================] - 60s 144ms/step - loss: 83.7835 - val_loss: 83.4925 Epoch {8} 417/417 [==============================] - 60s 143ms/step - loss: 83.8147 - val_loss: 84.3453 Epoch {9} 417/417 [==============================] - 61s 145ms/step - loss: 84.3719 - val_loss: 83.6851 Epoch {10} 417/417 [==============================] - 60s 144ms/step - loss: 83.7955 - val_loss: 83.4136 Epoch {11} 417/417 [==============================] - 60s 144ms/step - loss: 83.6265 - val_loss: 83.2850 Epoch {12} 417/417 [==============================] - 60s 143ms/step - loss: 83.6535 - val_loss: 83.3932 Epoch {13} 417/417 [==============================] - 60s 143ms/step - loss: 83.4743 - val_loss: 83.1930 Epoch {14} 417/417 [==============================] - 60s 145ms/step - loss: 83.4758 - val_loss: 83.2547 Epoch {15} 417/417 [==============================] - 60s 144ms/step - loss: 83.3398 - val_loss: 83.1598 Epoch {16} 417/417 [==============================] - 59s 141ms/step - loss: 83.3910 - val_loss: 83.4111 Epoch {17} 417/417 [==============================] - 60s 143ms/step - loss: 83.3572 - val_loss: 83.0150 Epoch {18} 417/417 [==============================] - 60s 144ms/step - loss: 83.3223 - val_loss: 82.9693 Epoch {19} 417/417 [==============================] - 60s 143ms/step - loss: 83.3011 - val_loss: 83.3447 Epoch {20} 417/417 [==============================] - 60s 144ms/step - loss: 83.2284 - val_loss: 82.9084 Epoch {21} 417/417 [==============================] - 60s 144ms/step - loss: 83.2942 - val_loss: 82.9613 Epoch {22} 417/417 [==============================] - 59s 142ms/step - loss: 83.3120 - val_loss: 83.1172 Epoch {23} 417/417 [==============================] - 60s 143ms/step - loss: 83.1626 - val_loss: 82.9673 Epoch {24} 417/417 [==============================] - 60s 143ms/step - loss: 83.1042 - val_loss: 82.8575 Epoch {25} 417/417 [==============================] - 60s 143ms/step - loss: 83.1136 - val_loss: 82.8171 Epoch {26} 417/417 [==============================] - 59s 143ms/step - loss: 83.1160 - val_loss: 82.7070 Epoch {27} 417/417 [==============================] - 59s 142ms/step - loss: 83.2719 - val_loss: 83.0980 Epoch {28} 417/417 [==============================] - 59s 142ms/step - loss: 83.1043 - val_loss: 82.8322 Epoch {29} 417/417 [==============================] - 60s 144ms/step - loss: 83.3038 - val_loss: 83.2858 Epoch {30} 417/417 [==============================] - 60s 144ms/step - loss: 83.2308 - val_loss: 83.0850 Epoch {31} 417/417 [==============================] - 60s 144ms/step - loss: 83.2239 - val_loss: 83.0807 Epoch {32} 417/417 [==============================] - 60s 144ms/step - loss: 83.0301 - val_loss: 83.3579 Epoch {33} 417/417 [==============================] - 68s 162ms/step - loss: 82.9915 - val_loss: 
82.8932 Epoch {34} 417/417 [==============================] - 67s 160ms/step - loss: 83.0412 - val_loss: 82.8101 Epoch {35} 417/417 [==============================] - 67s 161ms/step - loss: 82.9429 - val_loss: 82.7489 Epoch {36} 417/417 [==============================] - 62s 149ms/step - loss: 82.9136 - val_loss: 82.7018 Epoch {37} 417/417 [==============================] - 60s 143ms/step - loss: 83.0506 - val_loss: 83.3598 Epoch {38} 417/417 [==============================] - 61s 145ms/step - loss: 83.2241 - val_loss: 83.1517 Epoch {39} 417/417 [==============================] - 60s 144ms/step - loss: 83.0897 - val_loss: 83.0866 Epoch {40} 417/417 [==============================] - 60s 144ms/step - loss: 82.9377 - val_loss: 82.7396 Epoch {41} 417/417 [==============================] - 60s 143ms/step - loss: 83.1042 - val_loss: 82.9703 Epoch {42} 417/417 [==============================] - 60s 144ms/step - loss: 82.9680 - val_loss: 82.7980 Epoch {43} 417/417 [==============================] - 60s 143ms/step - loss: 82.9955 - val_loss: 82.9725 Epoch {44} 417/417 [==============================] - 60s 143ms/step - loss: 83.0009 - val_loss: 83.3855 Epoch {45} 417/417 [==============================] - 60s 143ms/step - loss: 82.9845 - val_loss: 82.7954 Epoch {46} 417/417 [==============================] - 60s 145ms/step - loss: 82.9332 - val_loss: 82.8449 Epoch {47} 417/417 [==============================] - 59s 143ms/step - loss: 83.0175 - val_loss: 82.7920 Epoch {48} 417/417 [==============================] - 60s 144ms/step - loss: 82.8879 - val_loss: 82.6756 Epoch {49} 417/417 [==============================] - 60s 143ms/step - loss: 82.7670 - val_loss: 82.8262 Epoch {50} 417/417 [==============================] - 60s 144ms/step - loss: 82.9561 - val_loss: 82.7425 Epoch {51} 417/417 [==============================] - 60s 143ms/step - loss: 82.6846 - val_loss: 82.7412 Epoch {52} 417/417 [==============================] - 60s 144ms/step - loss: 83.0076 - val_loss: 82.7753 Epoch {53} 417/417 [==============================] - 60s 143ms/step - loss: 82.9550 - val_loss: 82.7250 Epoch {54} 417/417 [==============================] - 60s 144ms/step - loss: 82.7550 - val_loss: 82.9107 Epoch {55} 417/417 [==============================] - 60s 143ms/step - loss: 82.7874 - val_loss: 82.6132 Epoch {56} 417/417 [==============================] - 60s 143ms/step - loss: 82.8413 - val_loss: 82.9596 Epoch {57} 417/417 [==============================] - 60s 144ms/step - loss: 82.7112 - val_loss: 82.4896 Epoch {58} 417/417 [==============================] - 60s 143ms/step - loss: 82.6846 - val_loss: 82.7633 Epoch {59} 417/417 [==============================] - 59s 142ms/step - loss: 82.7705 - val_loss: 82.7061 Epoch {60} 417/417 [==============================] - 60s 143ms/step - loss: 82.7399 - val_loss: 82.5875 Epoch {61} 417/417 [==============================] - 60s 143ms/step - loss: 82.5609 - val_loss: 83.1517 Epoch {62} 417/417 [==============================] - 60s 143ms/step - loss: 82.5973 - val_loss: 82.6425 Epoch {63} 417/417 [==============================] - 59s 142ms/step - loss: 82.6945 - val_loss: 83.0288 Epoch {64} 417/417 [==============================] - 60s 143ms/step - loss: 82.6523 - val_loss: 82.2977 Epoch {65} 417/417 [==============================] - 59s 142ms/step - loss: 82.5379 - val_loss: 82.6018 Epoch {66} 417/417 [==============================] - 60s 143ms/step - loss: 82.4387 - val_loss: 82.2681 Epoch {67} 417/417 [==============================] - 61s 145ms/step - loss: 
82.3575 - val_loss: 82.5561 Epoch {68} 417/417 [==============================] - 60s 144ms/step - loss: 82.2778 - val_loss: 82.8662 Epoch {69} 417/417 [==============================] - 60s 143ms/step - loss: 82.2054 - val_loss: 82.4280 Epoch {70} 417/417 [==============================] - 59s 143ms/step - loss: 82.6933 - val_loss: 82.2867 Epoch {71} 417/417 [==============================] - 60s 143ms/step - loss: 82.3694 - val_loss: 82.4201 Epoch {72} 417/417 [==============================] - 59s 142ms/step - loss: 82.3686 - val_loss: 82.2500 Epoch {73} 417/417 [==============================] - 59s 142ms/step - loss: 82.2466 - val_loss: 82.2042 Epoch {74} 417/417 [==============================] - 59s 142ms/step - loss: 82.1715 - val_loss: 82.2676 Epoch {75} 417/417 [==============================] - 60s 144ms/step - loss: 82.1705 - val_loss: 82.3283 Epoch {76} 417/417 [==============================] - 60s 145ms/step - loss: 82.0941 - val_loss: 82.1778 Epoch {77} 417/417 [==============================] - 60s 144ms/step - loss: 82.0360 - val_loss: 81.9762 Epoch {78} 417/417 [==============================] - 60s 144ms/step - loss: 81.9613 - val_loss: 82.0638 Epoch {79} 417/417 [==============================] - 60s 144ms/step - loss: 81.9519 - val_loss: 82.3367 Epoch {80} 417/417 [==============================] - 60s 143ms/step - loss: 81.8773 - val_loss: 81.8730 Epoch {81} 417/417 [==============================] - 60s 144ms/step - loss: 81.8347 - val_loss: 81.9873 Epoch {82} 417/417 [==============================] - 61s 146ms/step - loss: 81.8329 - val_loss: 82.0599 Epoch {83} 417/417 [==============================] - 60s 143ms/step - loss: 82.1322 - val_loss: 82.4606 Epoch {84} 417/417 [==============================] - 60s 143ms/step - loss: 81.8003 - val_loss: 82.0190 Epoch {85} 417/417 [==============================] - 60s 144ms/step - loss: 81.6991 - val_loss: 82.1629 Epoch {86} 417/417 [==============================] - 60s 144ms/step - loss: 81.6070 - val_loss: 82.0994 Epoch {87} 417/417 [==============================] - 60s 143ms/step - loss: 81.6543 - val_loss: 81.8484 Epoch {88} 417/417 [==============================] - 60s 144ms/step - loss: 81.5761 - val_loss: 82.1926 Epoch {89} 417/417 [==============================] - 60s 144ms/step - loss: 81.4459 - val_loss: 81.8696 Epoch {90} 417/417 [==============================] - 60s 144ms/step - loss: 81.4564 - val_loss: 81.8460 Epoch {91} 417/417 [==============================] - 60s 143ms/step - loss: 81.4737 - val_loss: 81.7963 Epoch {92} 417/417 [==============================] - 60s 143ms/step - loss: 81.2941 - val_loss: 81.6649 Epoch {93} 417/417 [==============================] - 60s 144ms/step - loss: 81.2785 - val_loss: 81.5960 Epoch {94} 417/417 [==============================] - 60s 144ms/step - loss: 81.1828 - val_loss: 82.0513 Epoch {95} 417/417 [==============================] - 60s 143ms/step - loss: 81.2188 - val_loss: 81.5714 Epoch {96} 417/417 [==============================] - 60s 144ms/step - loss: 81.0063 - val_loss: 81.7776 Epoch {97} 417/417 [==============================] - 60s 144ms/step - loss: 81.1066 - val_loss: 81.6546 Epoch {98} 417/417 [==============================] - 60s 144ms/step - loss: 81.0081 - val_loss: 81.8404 Epoch {99} 417/417 [==============================] - 60s 144ms/step - loss: 80.9043 - val_loss: 81.5661 Epoch {100} 417/417 [==============================] - 60s 144ms/step - loss: 80.8069 - val_loss: 81.6684 Epoch {101} 417/417 [==============================] - 60s 
144ms/step - loss: 80.7730 - val_loss: 81.4647 Epoch {102} 417/417 [==============================] - 60s 143ms/step - loss: 80.7942 - val_loss: 81.3516 Epoch {103} 417/417 [==============================] - 60s 143ms/step - loss: 80.6219 - val_loss: 81.4007 Epoch {104} 417/417 [==============================] - 60s 145ms/step - loss: 80.6730 - val_loss: 81.1922 Epoch {105} 417/417 [==============================] - 60s 144ms/step - loss: 80.5911 - val_loss: 81.7024 Epoch {106} 417/417 [==============================] - 61s 145ms/step - loss: 80.3592 - val_loss: 81.4523 Epoch {107} 417/417 [==============================] - 60s 143ms/step - loss: 80.4429 - val_loss: 81.2451 Epoch {108} 417/417 [==============================] - 61s 145ms/step - loss: 80.3824 - val_loss: 81.1829 Epoch {109} 417/417 [==============================] - 60s 145ms/step - loss: 80.1471 - val_loss: 80.9849 Epoch {110} 417/417 [==============================] - 60s 143ms/step - loss: 80.1414 - val_loss: 81.2149 Epoch {111} 417/417 [==============================] - 60s 143ms/step - loss: 80.1953 - val_loss: 81.3142 Epoch {112} 417/417 [==============================] - 60s 144ms/step - loss: 80.1653 - val_loss: 81.0187 Epoch {113} 417/417 [==============================] - 60s 144ms/step - loss: 79.9270 - val_loss: 81.2120 Epoch {114} 417/417 [==============================] - 60s 143ms/step - loss: 79.9288 - val_loss: 80.8188 Epoch {115} 417/417 [==============================] - 60s 143ms/step - loss: 79.8722 - val_loss: 80.9501 Epoch {116} 417/417 [==============================] - 60s 144ms/step - loss: 80.4031 - val_loss: 81.3811 Epoch {117} 417/417 [==============================] - 60s 144ms/step - loss: 80.6908 - val_loss: 81.3616 Epoch {118} 417/417 [==============================] - 60s 143ms/step - loss: 80.5685 - val_loss: 81.4340 Epoch {119} 417/417 [==============================] - 60s 144ms/step - loss: 80.4992 - val_loss: 81.5162 Epoch {120} 417/417 [==============================] - 60s 144ms/step - loss: 80.4437 - val_loss: 81.0435 Epoch {121} 417/417 [==============================] - 60s 143ms/step - loss: 80.2866 - val_loss: 81.2691 Epoch {122} 417/417 [==============================] - 60s 144ms/step - loss: 79.9083 - val_loss: 80.6481 Epoch {123} 417/417 [==============================] - 60s 143ms/step - loss: 80.0511 - val_loss: 80.9611 Epoch {124} 417/417 [==============================] - 59s 143ms/step - loss: 80.0038 - val_loss: 81.0384 Epoch {125} 417/417 [==============================] - 60s 144ms/step - loss: 79.9394 - val_loss: 80.7872 Epoch {126} 417/417 [==============================] - 61s 146ms/step - loss: 79.8411 - val_loss: 81.2248 Epoch {127} 417/417 [==============================] - 60s 145ms/step - loss: 79.7553 - val_loss: 80.7922 Epoch {128} 417/417 [==============================] - 60s 144ms/step - loss: 79.7102 - val_loss: 80.8904 Epoch {129} 417/417 [==============================] - 60s 143ms/step - loss: 79.5692 - val_loss: 81.0686 Epoch {130} 417/417 [==============================] - 60s 143ms/step - loss: 79.4772 - val_loss: 80.6181 Epoch {131} 417/417 [==============================] - 60s 143ms/step - loss: 79.3856 - val_loss: 80.7893 Epoch {132} 417/417 [==============================] - 60s 143ms/step - loss: 79.2643 - val_loss: 80.4530 Epoch {133} 417/417 [==============================] - 60s 143ms/step - loss: 79.4939 - val_loss: 80.5922 Epoch {134} 417/417 [==============================] - 60s 143ms/step - loss: 79.1094 - val_loss: 80.3704 Epoch 
{135} 417/417 [==============================] - 60s 144ms/step - loss: 78.8106 - val_loss: 80.4387 Epoch {136} 417/417 [==============================] - 60s 144ms/step - loss: 78.7017 - val_loss: 80.3484 Epoch {137} 417/417 [==============================] - 60s 143ms/step - loss: 78.4793 - val_loss: 79.9072 Epoch {138} 417/417 [==============================] - 60s 143ms/step - loss: 78.3490 - val_loss: 80.2861 Epoch {139} 417/417 [==============================] - 60s 144ms/step - loss: 78.1343 - val_loss: 80.2407 Epoch {140} 417/417 [==============================] - 60s 143ms/step - loss: 78.3711 - val_loss: 80.1231 Epoch {141} 417/417 [==============================] - 60s 143ms/step - loss: 77.9826 - val_loss: 79.8767 Epoch {142} 417/417 [==============================] - 60s 143ms/step - loss: 77.9521 - val_loss: 79.5399 Epoch {143} 417/417 [==============================] - 60s 144ms/step - loss: 77.7578 - val_loss: 79.9468 Epoch {144} 417/417 [==============================] - 60s 145ms/step - loss: 77.5250 - val_loss: 79.7818 Epoch {145} 417/417 [==============================] - 61s 145ms/step - loss: 77.3209 - val_loss: 79.3396 Epoch {146} 417/417 [==============================] - 60s 144ms/step - loss: 77.3386 - val_loss: 79.3631 Epoch {147} 417/417 [==============================] - 60s 145ms/step - loss: 77.2172 - val_loss: 79.6813 Epoch {148} 417/417 [==============================] - 60s 145ms/step - loss: 77.1266 - val_loss: 79.0510 Epoch {149} 417/417 [==============================] - 60s 145ms/step - loss: 77.0287 - val_loss: 79.3780
Model: "sequential"
_________________________________________________________________
 Layer (type)                       Output Shape          Param #
=================================================================
 bidirectional (Bidirectional)      (None, 15, 256)       141312
 bidirectional_1 (Bidirectional)    (None, 15, 128)       164352
 bidirectional_2 (Bidirectional)    (None, 64)            41216
 dense (Dense)                      (None, 96)            6240
 dense_1 (Dense)                    (None, 128)           12416
 dense_2 (Dense)                    (None, 1)             129
=================================================================
Total params: 365665 (1.39 MB)
Trainable params: 365665 (1.39 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
/usr/local/lib/python3.10/dist-packages/keras/src/engine/training.py:3000: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`. saving_api.save_model(
lossTrain = []
lossval = []
for element in historyvet:
    lossTrain.append(element['loss'][0])
    lossval.append(element['val_loss'][0])
plt.plot(lossTrain, color='g')
plt.plot(lossval, color='r')
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('Loss')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
#skip this cell if the cell above finished and you are continuing in the same session;
#if the notebook got disconnected, run this cell instead to reload the model it saved overnight
model = load_model( 'bilstm_predict_rul_experiment8_extended_full_take1.h5')
print(model.evaluate(Xtest,yTest))
1396/1396 [==============================] - 33s 22ms/step - loss: 79.4987
79.49868774414062
pred = model.predict(Xtest)
1396/1396 [==============================] - 33s 22ms/step
y = yTest.copy()
yhat = pred.copy()
# calculate manually
d = y - yhat
mse_f = np.mean(d**2)
mae_f = np.mean(abs(d))
rmse_f = np.sqrt(mse_f)
r2_f = 1-(sum(d**2)/sum((y-np.mean(y))**2))
print("Results by manual calculation:")
print("MAE:",mae_f) #mean absolute error - difference between the original and predicted extracted by avg the abs diff over dataset
print("MSE:", mse_f) #mean squared error - diff btw orig and pred extracted by squared the avg diff over the dataset
print("RMSE:", rmse_f) #root mean squared error - is the error rate by the square root of MSE
print("R-Squared:", r2_f) #coefficient of determination - the higher the better
Results by manual calculation:
MAE: 7.590267282593418
MSE: 79.49876356975359
RMSE: 8.916207914228648
R-Squared: [0.06755997]
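The same numbers can be cross-checked with the sklearn metrics already imported at the top of the notebook:
print("MAE:", mean_absolute_error(y, yhat))
print("MSE:", mean_squared_error(y, yhat))
print("RMSE:", np.sqrt(mean_squared_error(y, yhat)))
print("R-Squared:", r2_score(y, yhat))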
More training, as the loss curves suggest the model has not converged yet and is heading somewhere interesting.
#another 150 epochs to train
historyvet = []
model = load_model('bilstm_predict_rul_experiment8_extended_full_take1.h5')
epoch = 150
for epoch in range(0, epoch):
    print('Epoch {%d}' % (epoch))
    history = model.fit(Xtrain, ytrain, epochs=1, batch_size=500, validation_data=(Xval, yVal), shuffle=True)
    historyvet.append(history.history)
model.save('bilstm_predict_rul_experiment8_extended_full_take1_part2.h5')
model.summary()
Epoch {0} 417/417 [==============================] - 70s 152ms/step - loss: 76.8524 - val_loss: 79.0652 Epoch {1} 417/417 [==============================] - 61s 147ms/step - loss: 76.6676 - val_loss: 79.3731 Epoch {2} 417/417 [==============================] - 61s 146ms/step - loss: 76.6653 - val_loss: 78.7874 Epoch {3} 417/417 [==============================] - 61s 146ms/step - loss: 76.4369 - val_loss: 78.9934 Epoch {4} 417/417 [==============================] - 61s 145ms/step - loss: 76.4751 - val_loss: 78.8808 Epoch {5} 417/417 [==============================] - 61s 145ms/step - loss: 76.4544 - val_loss: 78.9301 Epoch {6} 417/417 [==============================] - 61s 146ms/step - loss: 76.3133 - val_loss: 78.7373 Epoch {7} 417/417 [==============================] - 60s 145ms/step - loss: 76.0597 - val_loss: 78.6485 Epoch {8} 417/417 [==============================] - 61s 145ms/step - loss: 75.9886 - val_loss: 78.5359 Epoch {9} 417/417 [==============================] - 61s 146ms/step - loss: 75.8782 - val_loss: 78.7132 Epoch {10} 417/417 [==============================] - 61s 147ms/step - loss: 76.1895 - val_loss: 78.6017 Epoch {11} 417/417 [==============================] - 62s 148ms/step - loss: 75.8478 - val_loss: 78.5068 Epoch {12} 417/417 [==============================] - 61s 145ms/step - loss: 75.6287 - val_loss: 78.6277 Epoch {13} 417/417 [==============================] - 61s 145ms/step - loss: 75.5390 - val_loss: 78.1562 Epoch {14} 417/417 [==============================] - 60s 144ms/step - loss: 75.3792 - val_loss: 77.9676 Epoch {15} 417/417 [==============================] - 60s 144ms/step - loss: 75.6322 - val_loss: 77.9345 Epoch {16} 417/417 [==============================] - 60s 145ms/step - loss: 75.2440 - val_loss: 78.0260 Epoch {17} 417/417 [==============================] - 60s 145ms/step - loss: 75.2111 - val_loss: 78.2623 Epoch {18} 417/417 [==============================] - 61s 146ms/step - loss: 75.0694 - val_loss: 78.1301 Epoch {19} 417/417 [==============================] - 61s 147ms/step - loss: 75.0635 - val_loss: 78.2043 Epoch {20} 417/417 [==============================] - 61s 147ms/step - loss: 75.0756 - val_loss: 81.3052 Epoch {21} 417/417 [==============================] - 61s 147ms/step - loss: 77.9672 - val_loss: 80.5546 Epoch {22} 417/417 [==============================] - 61s 146ms/step - loss: 78.9624 - val_loss: 80.1643 Epoch {23} 417/417 [==============================] - 60s 145ms/step - loss: 78.4155 - val_loss: 79.7000 Epoch {24} 417/417 [==============================] - 61s 146ms/step - loss: 78.1290 - val_loss: 79.6455 Epoch {25} 417/417 [==============================] - 60s 145ms/step - loss: 77.7739 - val_loss: 79.7304 Epoch {26} 417/417 [==============================] - 60s 145ms/step - loss: 77.5086 - val_loss: 79.0950 Epoch {27} 417/417 [==============================] - 61s 146ms/step - loss: 77.2861 - val_loss: 79.7094 Epoch {28} 417/417 [==============================] - 60s 145ms/step - loss: 77.2039 - val_loss: 78.9357 Epoch {29} 417/417 [==============================] - 61s 146ms/step - loss: 76.8295 - val_loss: 79.1521 Epoch {30} 417/417 [==============================] - 61s 146ms/step - loss: 76.5209 - val_loss: 78.5403 Epoch {31} 417/417 [==============================] - 60s 145ms/step - loss: 76.3899 - val_loss: 79.1579 Epoch {32} 417/417 [==============================] - 60s 145ms/step - loss: 76.1270 - val_loss: 78.5326 Epoch {33} 417/417 [==============================] - 61s 147ms/step - loss: 76.0684 - val_loss: 
78.1595 Epoch {34} 417/417 [==============================] - 61s 146ms/step - loss: 75.7395 - val_loss: 78.4831 Epoch {35} 417/417 [==============================] - 61s 146ms/step - loss: 75.5531 - val_loss: 77.7511 Epoch {36} 417/417 [==============================] - 61s 146ms/step - loss: 75.5603 - val_loss: 78.5110 Epoch {37} 417/417 [==============================] - 61s 145ms/step - loss: 75.0463 - val_loss: 78.0663 Epoch {38} 417/417 [==============================] - 61s 147ms/step - loss: 74.7893 - val_loss: 77.6177 Epoch {39} 417/417 [==============================] - 61s 145ms/step - loss: 74.6087 - val_loss: 77.2369 Epoch {40} 417/417 [==============================] - 61s 147ms/step - loss: 74.4060 - val_loss: 77.1111 Epoch {41} 417/417 [==============================] - 61s 146ms/step - loss: 74.1644 - val_loss: 77.0091 Epoch {42} 417/417 [==============================] - 60s 145ms/step - loss: 74.1009 - val_loss: 77.3582 Epoch {43} 417/417 [==============================] - 61s 146ms/step - loss: 73.8434 - val_loss: 77.2329 Epoch {44} 417/417 [==============================] - 60s 144ms/step - loss: 73.4787 - val_loss: 76.2893 Epoch {45} 417/417 [==============================] - 61s 145ms/step - loss: 73.2203 - val_loss: 76.7871 Epoch {46} 417/417 [==============================] - 61s 145ms/step - loss: 73.0619 - val_loss: 76.7218 Epoch {47} 417/417 [==============================] - 60s 145ms/step - loss: 72.9442 - val_loss: 75.7803 Epoch {48} 417/417 [==============================] - 61s 145ms/step - loss: 72.5032 - val_loss: 75.5949 Epoch {49} 417/417 [==============================] - 61s 147ms/step - loss: 72.3193 - val_loss: 76.1428 Epoch {50} 417/417 [==============================] - 61s 146ms/step - loss: 72.0082 - val_loss: 75.9672 Epoch {51} 417/417 [==============================] - 61s 146ms/step - loss: 71.8284 - val_loss: 75.6700 Epoch {52} 417/417 [==============================] - 61s 146ms/step - loss: 71.5138 - val_loss: 75.5191 Epoch {53} 417/417 [==============================] - 61s 146ms/step - loss: 71.3870 - val_loss: 75.3812 Epoch {54} 417/417 [==============================] - 61s 145ms/step - loss: 71.0655 - val_loss: 74.7498 Epoch {55} 417/417 [==============================] - 60s 145ms/step - loss: 70.5846 - val_loss: 74.7026 Epoch {56} 417/417 [==============================] - 61s 145ms/step - loss: 70.5416 - val_loss: 74.3217 Epoch {57} 417/417 [==============================] - 61s 145ms/step - loss: 70.4242 - val_loss: 74.5481 Epoch {58} 417/417 [==============================] - 60s 145ms/step - loss: 70.0230 - val_loss: 75.3904 Epoch {59} 417/417 [==============================] - 61s 146ms/step - loss: 69.9979 - val_loss: 74.8138 Epoch {60} 417/417 [==============================] - 60s 145ms/step - loss: 69.4330 - val_loss: 73.5628 Epoch {61} 417/417 [==============================] - 61s 146ms/step - loss: 69.2424 - val_loss: 73.5522 Epoch {62} 417/417 [==============================] - 61s 145ms/step - loss: 69.0664 - val_loss: 74.0378 Epoch {63} 417/417 [==============================] - 61s 146ms/step - loss: 68.9708 - val_loss: 73.6028 Epoch {64} 417/417 [==============================] - 61s 146ms/step - loss: 68.4555 - val_loss: 73.4172 Epoch {65} 417/417 [==============================] - 61s 145ms/step - loss: 68.2882 - val_loss: 73.4511 Epoch {66} 417/417 [==============================] - 61s 147ms/step - loss: 68.0406 - val_loss: 72.6864 Epoch {67} 417/417 [==============================] - 61s 145ms/step - loss: 
67.6467 - val_loss: 72.5826 Epoch {68} 417/417 [==============================] - 61s 146ms/step - loss: 67.4436 - val_loss: 71.9456 Epoch {69} 417/417 [==============================] - 61s 146ms/step - loss: 67.3635 - val_loss: 71.7553 Epoch {70} 417/417 [==============================] - 61s 146ms/step - loss: 66.9852 - val_loss: 72.4882 Epoch {71} 417/417 [==============================] - 61s 145ms/step - loss: 66.5985 - val_loss: 72.5721 Epoch {72} 417/417 [==============================] - 61s 147ms/step - loss: 66.6123 - val_loss: 72.1087 Epoch {73} 417/417 [==============================] - 60s 145ms/step - loss: 66.0231 - val_loss: 70.8719 Epoch {74} 417/417 [==============================] - 60s 144ms/step - loss: 65.6880 - val_loss: 71.5633 Epoch {75} 417/417 [==============================] - 60s 144ms/step - loss: 65.3219 - val_loss: 71.7068 Epoch {76} 417/417 [==============================] - 61s 146ms/step - loss: 65.3403 - val_loss: 70.5510 Epoch {77} 417/417 [==============================] - 61s 145ms/step - loss: 65.0687 - val_loss: 70.4706 Epoch {78} 417/417 [==============================] - 61s 146ms/step - loss: 64.4711 - val_loss: 70.3213 Epoch {79} 417/417 [==============================] - 61s 145ms/step - loss: 64.4282 - val_loss: 70.0780 Epoch {80} 417/417 [==============================] - 61s 146ms/step - loss: 64.0798 - val_loss: 70.9438 Epoch {81} 417/417 [==============================] - 61s 146ms/step - loss: 63.9875 - val_loss: 69.1577 Epoch {82} 417/417 [==============================] - 61s 146ms/step - loss: 63.4604 - val_loss: 69.1071 Epoch {83} 417/417 [==============================] - 61s 146ms/step - loss: 63.1592 - val_loss: 69.6169 Epoch {84} 417/417 [==============================] - 61s 146ms/step - loss: 62.8506 - val_loss: 69.2852 Epoch {85} 417/417 [==============================] - 61s 146ms/step - loss: 62.6432 - val_loss: 69.3647 Epoch {86} 417/417 [==============================] - 61s 145ms/step - loss: 62.5509 - val_loss: 69.4552 Epoch {87} 417/417 [==============================] - 61s 146ms/step - loss: 62.1506 - val_loss: 67.3090 Epoch {88} 417/417 [==============================] - 61s 146ms/step - loss: 61.7585 - val_loss: 68.1062 Epoch {89} 417/417 [==============================] - 61s 145ms/step - loss: 61.6289 - val_loss: 68.1266 Epoch {90} 417/417 [==============================] - 61s 146ms/step - loss: 61.1388 - val_loss: 68.1729 Epoch {91} 417/417 [==============================] - 60s 145ms/step - loss: 60.9458 - val_loss: 69.2790 Epoch {92} 417/417 [==============================] - 61s 146ms/step - loss: 60.4420 - val_loss: 67.0683 Epoch {93} 417/417 [==============================] - 61s 146ms/step - loss: 60.4977 - val_loss: 68.9159 Epoch {94} 417/417 [==============================] - 61s 146ms/step - loss: 60.0593 - val_loss: 66.7631 Epoch {95} 417/417 [==============================] - 61s 146ms/step - loss: 59.4461 - val_loss: 66.1072 Epoch {96} 417/417 [==============================] - 61s 146ms/step - loss: 59.5383 - val_loss: 65.5297 Epoch {97} 417/417 [==============================] - 62s 148ms/step - loss: 59.3304 - val_loss: 65.8784 Epoch {98} 417/417 [==============================] - 61s 145ms/step - loss: 58.7632 - val_loss: 66.3943 Epoch {99} 417/417 [==============================] - 61s 145ms/step - loss: 58.6838 - val_loss: 65.6789 Epoch {100} 417/417 [==============================] - 61s 146ms/step - loss: 58.2051 - val_loss: 63.8101 Epoch {101} 417/417 [==============================] - 63s 
150ms/step - loss: 57.9286 - val_loss: 64.1653 Epoch {102} 417/417 [==============================] - 61s 146ms/step - loss: 57.6891 - val_loss: 65.1961 Epoch {103} 417/417 [==============================] - 61s 147ms/step - loss: 57.4829 - val_loss: 65.2296 Epoch {104} 397/417 [===========================>..] - ETA: 2s - loss: 57.6770
IOPub message rate exceeded. The notebook server will temporarily stop sending output to the client in order to avoid crashing it. To change this limit, set the config variable `--NotebookApp.iopub_msg_rate_limit`. Current values: NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec) NotebookApp.rate_limit_window=3.0 (secs)
lossTrain = []
lossval = []
for element in historyvet:
    lossTrain.append(element['loss'][0])
    lossval.append(element['val_loss'][0])
plt.plot(lossTrain, color='g')
plt.plot(lossval, color='r')
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('Loss')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
pred = model.predict(Xtest)
1396/1396 [==============================] - 34s 22ms/step
print(model.evaluate(Xtest,yTest))
1396/1396 [==============================] - 32s 23ms/step - loss: 55.5761
55.5760612487793
y = yTest.copy()
yhat = pred.copy()
# calculate manually
d = y - yhat
mse_f = np.mean(d**2)
mae_f = np.mean(abs(d))
rmse_f = np.sqrt(mse_f)
r2_f = 1-(sum(d**2)/sum((y-np.mean(y))**2))
print("Results by manual calculation:")
print("MAE:",mae_f) #mean absolute error - difference between the original and predicted extracted by avg the abs diff over dataset
print("MSE:", mse_f) #mean squared error - diff btw orig and pred extracted by squared the avg diff over the dataset
print("RMSE:", rmse_f) #root mean squared error - is the error rate by the square root of MSE
print("R-Squared:", r2_f) #coefficient of determination - the higher the better
Results by manual calculation:
MAE: 5.874125252771022
MSE: 55.57604066454788
RMSE: 7.454933981233361
R-Squared: [0.3481493]
# Plot true and predicted RUL values
plt.plot(yTest, label = "True RUL", color = "red")
plt.plot(pred, label = "Pred RUL", color = "blue")
plt.legend()
plt.show()
x = list(range(len(yTest)))
plt.scatter(x, yTest, color="blue", label="original")
plt.plot(x, pred, color="red", label="predicted")
plt.legend()
plt.show()
#another 150 epochs to train
historyvet = []
model = load_model('bilstm_predict_rul_experiment8_extended_full_take1_part2.h5')
epoch = 150
for epoch in range(0, epoch):
    print('Epoch {%d}' % (epoch))
    history = model.fit(Xtrain, ytrain, epochs=1, batch_size=500, validation_data=(Xval, yVal), shuffle=True)
    historyvet.append(history.history)
model.save('bilstm_predict_rul_experiment8_extended_full_take1_part3.h5')
model.summary()
Epoch {0} 417/417 [==============================] - 71s 155ms/step - loss: 45.5250 - val_loss: 53.3802 Epoch {1} 417/417 [==============================] - 61s 146ms/step - loss: 45.6368 - val_loss: 55.3708 Epoch {2} 417/417 [==============================] - 61s 145ms/step - loss: 45.1592 - val_loss: 54.4436 Epoch {3} 417/417 [==============================] - 61s 146ms/step - loss: 44.9672 - val_loss: 56.5786 Epoch {4} 417/417 [==============================] - 61s 146ms/step - loss: 44.2424 - val_loss: 52.7278 Epoch {5} 417/417 [==============================] - 61s 145ms/step - loss: 43.7204 - val_loss: 52.6551 Epoch {6} 417/417 [==============================] - 61s 146ms/step - loss: 44.3015 - val_loss: 53.5841 Epoch {7} 417/417 [==============================] - 61s 146ms/step - loss: 44.0761 - val_loss: 54.4101 Epoch {8} 417/417 [==============================] - 61s 147ms/step - loss: 43.9815 - val_loss: 53.0452 Epoch {9} 417/417 [==============================] - 61s 147ms/step - loss: 43.6852 - val_loss: 54.2320 Epoch {10} 417/417 [==============================] - 61s 146ms/step - loss: 43.3530 - val_loss: 53.7329 Epoch {11} 417/417 [==============================] - 61s 146ms/step - loss: 43.1921 - val_loss: 51.7534 Epoch {12} 417/417 [==============================] - 61s 146ms/step - loss: 43.0560 - val_loss: 52.0054 Epoch {13} 417/417 [==============================] - 61s 147ms/step - loss: 43.0121 - val_loss: 54.0206 Epoch {14} 417/417 [==============================] - 62s 147ms/step - loss: 42.4245 - val_loss: 52.7469 Epoch {15} 417/417 [==============================] - 61s 147ms/step - loss: 42.1506 - val_loss: 52.3071 Epoch {16} 417/417 [==============================] - 61s 146ms/step - loss: 41.5827 - val_loss: 51.9885 Epoch {17} 417/417 [==============================] - 61s 146ms/step - loss: 41.8255 - val_loss: 52.4439 Epoch {18} 417/417 [==============================] - 62s 148ms/step - loss: 41.8204 - val_loss: 50.7387 Epoch {19} 417/417 [==============================] - 61s 146ms/step - loss: 41.2537 - val_loss: 50.5455 Epoch {20} 417/417 [==============================] - 61s 146ms/step - loss: 41.2996 - val_loss: 49.9487 Epoch {21} 417/417 [==============================] - 61s 146ms/step - loss: 41.1346 - val_loss: 52.1963 Epoch {22} 417/417 [==============================] - 61s 146ms/step - loss: 40.9720 - val_loss: 51.8983 Epoch {23} 417/417 [==============================] - 61s 147ms/step - loss: 40.7686 - val_loss: 51.2925 Epoch {24} 417/417 [==============================] - 61s 146ms/step - loss: 40.8481 - val_loss: 50.4244 Epoch {25} 417/417 [==============================] - 61s 146ms/step - loss: 39.8292 - val_loss: 50.0283 Epoch {26} 417/417 [==============================] - 61s 146ms/step - loss: 40.2289 - val_loss: 49.4388 Epoch {27} 417/417 [==============================] - 61s 147ms/step - loss: 40.2658 - val_loss: 49.0737 Epoch {28} 417/417 [==============================] - 62s 148ms/step - loss: 39.7491 - val_loss: 49.0944 Epoch {29} 417/417 [==============================] - 61s 147ms/step - loss: 39.3759 - val_loss: 50.7819 Epoch {30} 417/417 [==============================] - 61s 146ms/step - loss: 39.3474 - val_loss: 50.4122 Epoch {31} 417/417 [==============================] - 61s 146ms/step - loss: 39.4613 - val_loss: 48.2995 Epoch {32} 417/417 [==============================] - 61s 146ms/step - loss: 39.6364 - val_loss: 50.2065 Epoch {33} 417/417 [==============================] - 61s 145ms/step - loss: 38.9421 - val_loss: 
49.0659 Epoch {34} 417/417 [==============================] - 61s 146ms/step - loss: 38.8538 - val_loss: 49.8195 Epoch {35} 417/417 [==============================] - 61s 145ms/step - loss: 38.2287 - val_loss: 49.4068 Epoch {36} 417/417 [==============================] - 61s 147ms/step - loss: 38.5873 - val_loss: 48.4423 Epoch {37} 417/417 [==============================] - 61s 146ms/step - loss: 38.8376 - val_loss: 48.5653 Epoch {38} 417/417 [==============================] - 62s 148ms/step - loss: 37.9459 - val_loss: 48.3308 Epoch {39} 417/417 [==============================] - 62s 149ms/step - loss: 37.5998 - val_loss: 48.6029 Epoch {40} 417/417 [==============================] - 61s 147ms/step - loss: 37.9216 - val_loss: 46.2315 Epoch {41} 417/417 [==============================] - 61s 146ms/step - loss: 37.0706 - val_loss: 46.4404 Epoch {42} 417/417 [==============================] - 61s 146ms/step - loss: 37.0765 - val_loss: 46.6861 Epoch {43} 417/417 [==============================] - 61s 147ms/step - loss: 37.0415 - val_loss: 48.0763 Epoch {44} 417/417 [==============================] - 61s 145ms/step - loss: 37.2636 - val_loss: 50.8576 Epoch {45} 417/417 [==============================] - 61s 147ms/step - loss: 36.4690 - val_loss: 50.9807 Epoch {46} 417/417 [==============================] - 61s 146ms/step - loss: 36.4517 - val_loss: 46.7504 Epoch {47} 417/417 [==============================] - 62s 148ms/step - loss: 36.5174 - val_loss: 45.8858 Epoch {48} 417/417 [==============================] - 61s 147ms/step - loss: 36.5007 - val_loss: 47.8323 Epoch {49} 417/417 [==============================] - 62s 148ms/step - loss: 36.6711 - val_loss: 46.1649 Epoch {50} 417/417 [==============================] - 61s 147ms/step - loss: 36.0533 - val_loss: 47.4047 Epoch {51} 417/417 [==============================] - 62s 147ms/step - loss: 36.3330 - val_loss: 46.2508 Epoch {52} 417/417 [==============================] - 61s 147ms/step - loss: 35.6203 - val_loss: 47.4236 Epoch {53} 417/417 [==============================] - 61s 147ms/step - loss: 35.4039 - val_loss: 47.4886 Epoch {54} 417/417 [==============================] - 61s 147ms/step - loss: 35.0336 - val_loss: 46.4134 Epoch {55} 417/417 [==============================] - 61s 146ms/step - loss: 34.9839 - val_loss: 47.6815 Epoch {56} 417/417 [==============================] - 62s 148ms/step - loss: 35.3112 - val_loss: 45.7324 Epoch {57} 417/417 [==============================] - 61s 147ms/step - loss: 35.1060 - val_loss: 44.1062 Epoch {58} 417/417 [==============================] - 61s 146ms/step - loss: 34.7574 - val_loss: 45.1731 Epoch {59} 417/417 [==============================] - 61s 146ms/step - loss: 34.7237 - val_loss: 48.1134 Epoch {60} 417/417 [==============================] - 61s 146ms/step - loss: 34.9102 - val_loss: 44.0434 Epoch {61} 417/417 [==============================] - 62s 148ms/step - loss: 34.1893 - val_loss: 44.0282 Epoch {62} 417/417 [==============================] - 61s 146ms/step - loss: 34.0553 - val_loss: 46.1338 Epoch {63} 417/417 [==============================] - 61s 147ms/step - loss: 34.3231 - val_loss: 45.2917 Epoch {64} 417/417 [==============================] - 61s 146ms/step - loss: 33.8258 - val_loss: 46.5582 Epoch {65} 417/417 [==============================] - 61s 147ms/step - loss: 33.7300 - val_loss: 45.1520 Epoch {66} 417/417 [==============================] - 61s 147ms/step - loss: 33.4922 - val_loss: 44.2255 Epoch {67} 417/417 [==============================] - 62s 148ms/step - loss: 
33.3909 - val_loss: 43.1825 Epoch {68} 417/417 [==============================] - 61s 146ms/step - loss: 32.8899 - val_loss: 47.6143 Epoch {69} 417/417 [==============================] - 61s 146ms/step - loss: 33.4277 - val_loss: 44.6777 Epoch {70} 417/417 [==============================] - 61s 146ms/step - loss: 33.4755 - val_loss: 46.3491 Epoch {71} 417/417 [==============================] - 61s 146ms/step - loss: 32.4337 - val_loss: 43.6354 Epoch {72} 417/417 [==============================] - 61s 147ms/step - loss: 32.6218 - val_loss: 42.8437 Epoch {73} 417/417 [==============================] - 61s 146ms/step - loss: 32.5901 - val_loss: 42.8820 Epoch {74} 417/417 [==============================] - 61s 146ms/step - loss: 32.0884 - val_loss: 43.3284 Epoch {75} 417/417 [==============================] - 61s 146ms/step - loss: 32.3688 - val_loss: 42.8652 Epoch {76} 417/417 [==============================] - 61s 146ms/step - loss: 32.0142 - val_loss: 43.4955 Epoch {77} 417/417 [==============================] - 61s 147ms/step - loss: 31.8559 - val_loss: 43.5572 Epoch {78} 417/417 [==============================] - 61s 146ms/step - loss: 31.9166 - val_loss: 43.9381 Epoch {79} 417/417 [==============================] - 61s 146ms/step - loss: 31.7775 - val_loss: 43.5294 Epoch {80} 417/417 [==============================] - 61s 146ms/step - loss: 32.1087 - val_loss: 43.6702 Epoch {81} 351/417 [========================>.....] - ETA: 8s - loss: 32.1710
[IOPub message rate exceeded: the notebook server suppressed the per-epoch output for roughly epochs 82–90.]
417/417 [==============================] - 61s 146ms/step - loss: 30.5227 - val_loss: 43.1171 Epoch {91} 417/417 [==============================] - 61s 146ms/step - loss: 30.6037 - val_loss: 42.4693 Epoch {92} 417/417 [==============================] - 62s 148ms/step - loss: 29.8825 - val_loss: 40.8402 Epoch {93} 417/417 [==============================] - 61s 147ms/step - loss: 30.2688 - val_loss: 41.3155 Epoch {94} 417/417 [==============================] - 61s 146ms/step - loss: 30.0060 - val_loss: 41.6294 Epoch {95} 417/417 [==============================] - 62s 148ms/step - loss: 29.8832 - val_loss: 40.0331 Epoch {96} 417/417 [==============================] - 61s 147ms/step - loss: 30.0769 - val_loss: 41.0643 Epoch {97} 417/417 [==============================] - 61s 146ms/step - loss: 29.3467 - val_loss: 42.3000 Epoch {98} 417/417 [==============================] - 61s 145ms/step - loss: 29.8377 - val_loss: 40.9519 Epoch {99} 417/417 [==============================] - 61s 147ms/step - loss: 29.3055 - val_loss: 40.1252 Epoch {100} 417/417 [==============================] - 61s 146ms/step - loss: 29.3843 - val_loss: 40.4204 Epoch {101} 417/417 [==============================] - 61s 146ms/step - loss: 29.0416 - val_loss: 41.2022 Epoch {102} 417/417 [==============================] - 61s 147ms/step - loss: 29.0256 - val_loss: 40.6255 Epoch {103} 417/417 [==============================] - 61s 146ms/step - loss: 29.3905 - val_loss: 43.4271 Epoch {104} 417/417 [==============================] - 62s 147ms/step - loss: 29.1191 - val_loss: 41.1378 Epoch {105} 417/417 [==============================] - 61s 147ms/step - loss: 29.0808 - val_loss: 41.0207 Epoch {106} 417/417 [==============================] - 61s 147ms/step - loss: 29.0921 - val_loss: 41.3110 Epoch {107} 417/417 [==============================] - 61s 146ms/step - loss: 28.6196 - val_loss: 42.1662 Epoch {108} 417/417 [==============================] - 61s 146ms/step - loss: 28.5513 - val_loss: 41.1686 Epoch {109} 417/417 [==============================] - 61s 146ms/step - loss: 28.5181 - val_loss: 39.6974 Epoch {110} 417/417 [==============================] - 61s 146ms/step - loss: 28.2563 - val_loss: 40.0436 Epoch {111} 417/417 [==============================] - 61s 146ms/step - loss: 27.8426 - val_loss: 40.1664 Epoch {112} 417/417 [==============================] - 61s 147ms/step - loss: 28.4504 - val_loss: 40.1721 Epoch {113} 417/417 [==============================] - 61s 147ms/step - loss: 28.0390 - val_loss: 40.7384 Epoch {114} 417/417 [==============================] - 61s 145ms/step - loss: 27.7399 - val_loss: 39.6192 Epoch {115} 417/417 [==============================] - 61s 146ms/step - loss: 27.8444 - val_loss: 41.9367 Epoch {116} 417/417 [==============================] - 62s 148ms/step - loss: 27.9179 - val_loss: 40.0600 Epoch {117} 417/417 [==============================] - 62s 149ms/step - loss: 27.7171 - val_loss: 40.5370 Epoch {118} 417/417 [==============================] - 61s 147ms/step - loss: 27.5094 - val_loss: 40.6384 Epoch {119} 417/417 [==============================] - 61s 147ms/step - loss: 27.2844 - val_loss: 40.5253 Epoch {120} 417/417 [==============================] - 61s 146ms/step - loss: 26.7987 - val_loss: 39.5466 Epoch {121} 417/417 [==============================] - 61s 147ms/step - loss: 26.9844 - val_loss: 39.1831 Epoch {122} 417/417 [==============================] - 61s 146ms/step - loss: 26.7891 - val_loss: 39.4603 Epoch {123} 417/417 [==============================] - 61s 146ms/step - loss: 
27.5965 - val_loss: 39.7798 Epoch {124} 417/417 [==============================] - 61s 146ms/step - loss: 27.0529 - val_loss: 41.9647 Epoch {125} 417/417 [==============================] - 61s 147ms/step - loss: 27.4248 - val_loss: 40.5191 Epoch {126} 417/417 [==============================] - 61s 146ms/step - loss: 26.5790 - val_loss: 39.9434 Epoch {127} 417/417 [==============================] - 61s 146ms/step - loss: 27.0167 - val_loss: 39.3032 Epoch {128} 417/417 [==============================] - 61s 146ms/step - loss: 26.8346 - val_loss: 39.5987 Epoch {129} 417/417 [==============================] - 61s 147ms/step - loss: 27.3096 - val_loss: 39.2208 Epoch {130} 417/417 [==============================] - 62s 148ms/step - loss: 26.5937 - val_loss: 39.0004 Epoch {131} 417/417 [==============================] - 61s 146ms/step - loss: 26.1301 - val_loss: 38.9951 Epoch {132} 417/417 [==============================] - 62s 148ms/step - loss: 25.8666 - val_loss: 38.8354 Epoch {133} 417/417 [==============================] - 61s 147ms/step - loss: 25.9765 - val_loss: 37.6574 Epoch {134} 417/417 [==============================] - 61s 147ms/step - loss: 25.9306 - val_loss: 40.0184 Epoch {135} 417/417 [==============================] - 61s 147ms/step - loss: 26.2593 - val_loss: 39.9682 Epoch {136} 417/417 [==============================] - 61s 147ms/step - loss: 26.0259 - val_loss: 37.9807 Epoch {137} 417/417 [==============================] - 61s 146ms/step - loss: 26.0186 - val_loss: 36.7190 Epoch {138} 417/417 [==============================] - 61s 147ms/step - loss: 25.3824 - val_loss: 38.0696 Epoch {139} 417/417 [==============================] - 61s 145ms/step - loss: 25.8383 - val_loss: 36.5017 Epoch {140} 417/417 [==============================] - 61s 147ms/step - loss: 25.5499 - val_loss: 36.8519 Epoch {141} 417/417 [==============================] - 61s 147ms/step - loss: 25.3177 - val_loss: 37.7443 Epoch {142} 417/417 [==============================] - 62s 148ms/step - loss: 25.9615 - val_loss: 37.5349 Epoch {143} 417/417 [==============================] - 61s 146ms/step - loss: 25.1721 - val_loss: 38.0082 Epoch {144} 417/417 [==============================] - 61s 147ms/step - loss: 25.3101 - val_loss: 36.8890 Epoch {145} 417/417 [==============================] - 61s 146ms/step - loss: 24.8942 - val_loss: 39.2903 Epoch {146} 417/417 [==============================] - 61s 146ms/step - loss: 25.2470 - val_loss: 36.0209 Epoch {147} 417/417 [==============================] - 61s 146ms/step - loss: 24.7789 - val_loss: 36.8653 Epoch {148} 417/417 [==============================] - 61s 146ms/step - loss: 25.0389 - val_loss: 38.1532 Epoch {149} 417/417 [==============================] - 61s 146ms/step - loss: 25.2372 - val_loss: 38.3573 Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= bidirectional (Bidirection (None, 15, 256) 141312 al) bidirectional_1 (Bidirecti (None, 15, 128) 164352 onal) bidirectional_2 (Bidirecti (None, 64) 41216 onal) dense (Dense) (None, 96) 6240 dense_1 (Dense) (None, 128) 12416 dense_2 (Dense) (None, 1) 129 ================================================================= Total params: 365665 (1.39 MB) Trainable params: 365665 (1.39 MB) Non-trainable params: 0 (0.00 Byte) _________________________________________________________________
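For readability, the layer output shapes and parameter counts in the summary just above are consistent with three stacked Bidirectional LSTMs (128, 64 and 32 units) reading windows of 15 time steps by 9 SMART features, followed by Dense layers of 96, 128 and 1 units. A minimal sketch of an equivalent build is below; the activations and the exact input shape are assumptions inferred from the parameter counts, not copied from the original model-definition cell.
# Sketch (assumption): a Sequential build whose shapes and parameter counts match the summary above.
from keras.layers import Bidirectional
sketch = Sequential()
sketch.add(Bidirectional(LSTM(128, return_sequences=True), input_shape=(15, 9)))  # -> (None, 15, 256)
sketch.add(Bidirectional(LSTM(64, return_sequences=True)))                        # -> (None, 15, 128)
sketch.add(Bidirectional(LSTM(32)))                                               # -> (None, 64)
sketch.add(Dense(96, activation='relu'))   # activation assumed
sketch.add(Dense(128, activation='relu'))  # activation assumed
sketch.add(Dense(1))
sketch.compile(loss='mse', optimizer='adam')
sketch.summary()  # should report the same 365,665 trainable parameters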
# collect per-epoch training and validation loss from the saved history dicts
lossTrain = []
lossval = []
for element in historyvet:
    lossTrain.append(element['loss'][0])
    lossval.append(element['val_loss'][0])
plt.plot(lossTrain, color='g')
plt.plot(lossval, color='r')
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('Loss')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
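Since parts of the per-epoch output above were dropped by the notebook's rate limiter, it may also be worth writing the collected loss history to disk rather than only plotting it. A minimal sketch, reusing the lossTrain/lossval lists built above (the CSV filename is only illustrative):
# Persist the per-epoch train/validation loss so it survives lost notebook output.
hist_df = pd.DataFrame({'loss': lossTrain, 'val_loss': lossval})
hist_df.to_csv('bilstm_experiment8_part3_history.csv', index=False)  # illustrative filename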
pred = model.predict(Xtest)
1396/1396 [==============================] - 34s 22ms/step
print(model.evaluate(Xtest,yTest))
1396/1396 [==============================] - 31s 23ms/step - loss: 38.4965 38.49654769897461
y = yTest.copy()
yhat = pred.copy()
# calculate manually
d = y - yhat
mse_f = np.mean(d**2)
mae_f = np.mean(abs(d))
rmse_f = np.sqrt(mse_f)
r2_f = 1-(sum(d**2)/sum((y-np.mean(y))**2))
print("Results by manual calculation:")
print("MAE:", mae_f)       # mean absolute error: average absolute difference between actual and predicted RUL
print("MSE:", mse_f)       # mean squared error: average squared difference between actual and predicted RUL
print("RMSE:", rmse_f)     # root mean squared error: square root of the MSE, in the same units as the RUL target
print("R-Squared:", r2_f)  # coefficient of determination: closer to 1 is better
Results by manual calculation:
MAE: 4.6139056233222275
MSE: 38.49655581822394
RMSE: 6.204559276711275
R-Squared: [0.54847437]
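As a cross-check on the manual arithmetic, the same metrics can be computed with the scikit-learn helpers imported at the top of the notebook; a minimal sketch, flattening both arrays so R-squared comes back as a plain scalar rather than the one-element array printed above:
# Cross-check the manual metrics with scikit-learn on 1-D arrays.
y_true = np.ravel(yTest)
y_pred = np.ravel(pred)
print("MAE:", mean_absolute_error(y_true, y_pred))
print("MSE:", mean_squared_error(y_true, y_pred))
print("RMSE:", np.sqrt(mean_squared_error(y_true, y_pred)))
print("R-Squared:", r2_score(y_true, y_pred))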
# Plot true and predicted RUL values
plt.plot(yTest, label = "True RUL", color = "red")
plt.plot(pred, label = "Pred RUL", color = "blue")
plt.legend()
plt.show()
x = list(range(len(yTest)))
plt.scatter(x, yTest, color="blue", label="original")
plt.plot(x, pred, color="red", label="predicted")
plt.legend()
plt.show()
# train for another 150 epochs, then checkpoint the model
historyvet = []
model = load_model('bilstm_predict_rul_experiment8_extended_full_take1_part3.h5')
epochs = 150
for epoch in range(epochs):
    print('Epoch {%d}' % (epoch))
    history = model.fit(Xtrain, ytrain, epochs=1, batch_size=500,
                        validation_data=(Xval, yVal), shuffle=True)
    historyvet.append(history.history)
model.save('bilstm_predict_rul_experiment8_extended_full_take1_part4.h5')
model.summary()
Epoch {0} 417/417 [==============================] - 72s 156ms/step - loss: 25.2065 - val_loss: 36.6388 Epoch {1} 417/417 [==============================] - 62s 148ms/step - loss: 24.8557 - val_loss: 35.4476 Epoch {2} 417/417 [==============================] - 62s 148ms/step - loss: 24.3274 - val_loss: 37.2842 Epoch {3} 417/417 [==============================] - 62s 147ms/step - loss: 24.4278 - val_loss: 37.4299 Epoch {4} 417/417 [==============================] - 62s 149ms/step - loss: 24.3842 - val_loss: 37.5519 Epoch {5} 417/417 [==============================] - 62s 147ms/step - loss: 24.4239 - val_loss: 37.9402 Epoch {6} 417/417 [==============================] - 62s 149ms/step - loss: 24.9670 - val_loss: 37.0719 Epoch {7} 417/417 [==============================] - 62s 149ms/step - loss: 23.8002 - val_loss: 36.0193 Epoch {8} 417/417 [==============================] - 62s 148ms/step - loss: 23.5360 - val_loss: 35.7242 Epoch {9} 417/417 [==============================] - 61s 147ms/step - loss: 23.8291 - val_loss: 36.4133 Epoch {10} 417/417 [==============================] - 61s 147ms/step - loss: 23.9111 - val_loss: 36.7556 Epoch {11} 417/417 [==============================] - 62s 148ms/step - loss: 24.3146 - val_loss: 37.7184 Epoch {12} 417/417 [==============================] - 61s 147ms/step - loss: 23.4790 - val_loss: 35.7914 Epoch {13} 417/417 [==============================] - 61s 147ms/step - loss: 23.9065 - val_loss: 35.7152 Epoch {14} 417/417 [==============================] - 62s 148ms/step - loss: 23.4426 - val_loss: 38.0939 Epoch {15} 417/417 [==============================] - 66s 157ms/step - loss: 23.3824 - val_loss: 38.8431 Epoch {16} 417/417 [==============================] - 62s 149ms/step - loss: 23.6668 - val_loss: 37.0771 Epoch {17} 283/417 [===================>..........] - ETA: 17s - loss: 23.7568
[IOPub message rate exceeded: the notebook server suppressed the per-epoch output for roughly epochs 18–20.]
417/417 [==============================] - 61s 147ms/step - loss: 23.0798 - val_loss: 36.1934 Epoch {21} 417/417 [==============================] - 61s 146ms/step - loss: 22.9668 - val_loss: 35.9325 Epoch {22} 417/417 [==============================] - 61s 147ms/step - loss: 22.9929 - val_loss: 35.0717 Epoch {23} 417/417 [==============================] - 61s 147ms/step - loss: 22.8793 - val_loss: 34.3491 Epoch {24} 417/417 [==============================] - 62s 147ms/step - loss: 22.7364 - val_loss: 36.1029 Epoch {25} 417/417 [==============================] - 62s 150ms/step - loss: 23.2038 - val_loss: 34.3044 Epoch {26} 417/417 [==============================] - 62s 148ms/step - loss: 22.9945 - val_loss: 37.2682 Epoch {27} 175/417 [===========>..................] - ETA: 32s - loss: 21.9081
[IOPub message rate exceeded: the notebook server suppressed the per-epoch output for roughly epochs 28–30.]
417/417 [==============================] - 61s 147ms/step - loss: 22.3582 - val_loss: 36.2649 Epoch {31} 417/417 [==============================] - 61s 147ms/step - loss: 22.7639 - val_loss: 33.4122 Epoch {32} 417/417 [==============================] - 62s 148ms/step - loss: 22.8160 - val_loss: 36.9609 Epoch {33} 417/417 [==============================] - 62s 148ms/step - loss: 22.3251 - val_loss: 35.1993 Epoch {34} 417/417 [==============================] - 62s 148ms/step - loss: 22.3566 - val_loss: 34.6793 Epoch {35} 417/417 [==============================] - 62s 149ms/step - loss: 22.0315 - val_loss: 34.7027 Epoch {36} 417/417 [==============================] - 61s 147ms/step - loss: 21.9698 - val_loss: 34.1477 Epoch {37} 417/417 [==============================] - 62s 148ms/step - loss: 22.5656 - val_loss: 34.4885 Epoch {38} 235/417 [===============>..............] - ETA: 24s - loss: 21.2834
[IOPub message rate exceeded: the notebook server suppressed the per-epoch output for roughly epochs 39–42.]
417/417 [==============================] - 61s 147ms/step - loss: 21.0600 - val_loss: 35.1760 Epoch {43} 417/417 [==============================] - 61s 146ms/step - loss: 22.1203 - val_loss: 36.4063 Epoch {44} 417/417 [==============================] - 61s 147ms/step - loss: 22.2604 - val_loss: 34.6257 Epoch {45} 417/417 [==============================] - 61s 147ms/step - loss: 21.3753 - val_loss: 32.8848 Epoch {46} 417/417 [==============================] - 62s 149ms/step - loss: 21.2788 - val_loss: 33.9894 Epoch {47} 417/417 [==============================] - 61s 147ms/step - loss: 21.6419 - val_loss: 33.2308 Epoch {48} 417/417 [==============================] - 62s 148ms/step - loss: 21.5391 - val_loss: 36.6366 Epoch {49} 417/417 [==============================] - 61s 146ms/step - loss: 21.4260 - val_loss: 34.0745 Epoch {50} 417/417 [==============================] - 61s 147ms/step - loss: 21.3438 - val_loss: 33.1494 Epoch {51} 417/417 [==============================] - 61s 147ms/step - loss: 20.9282 - val_loss: 34.3457 Epoch {52} 257/417 [=================>............] - ETA: 21s - loss: 21.8262
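The 150-epoch blocks here are driven by a manual one-epoch loop with a single save at the end. An alternative, not used in this notebook, is a single fit() call with ModelCheckpoint and EarlyStopping callbacks, which keeps the best weights on disk even when the notebook output is rate-limited; a minimal sketch with an illustrative checkpoint filename:
# Alternative sketch: one fit() call with callbacks instead of a manual epoch loop.
checkpoint = tf.keras.callbacks.ModelCheckpoint('bilstm_predict_rul_best.h5',  # illustrative filename
                                                monitor='val_loss', save_best_only=True)
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20,
                                              restore_best_weights=True)
history = model.fit(Xtrain, ytrain, epochs=150, batch_size=500,
                    validation_data=(Xval, yVal), shuffle=True,
                    callbacks=[checkpoint, early_stop])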
lossTrain = []
lossval = []
for element in historyvet:
    lossTrain.append(element['loss'][0])
    lossval.append(element['val_loss'][0])
plt.plot(lossTrain, color='g')
plt.plot(lossval, color='r')
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('Loss')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
pred = model.predict(Xtest)
1396/1396 [==============================] - 34s 22ms/step
print(model.evaluate(Xtest,yTest))
1396/1396 [==============================] - 31s 22ms/step - loss: 28.4646 28.464616775512695
y = yTest.copy()
yhat = pred.copy()
# calculate manually
d = y - yhat
mse_f = np.mean(d**2)
mae_f = np.mean(abs(d))
rmse_f = np.sqrt(mse_f)
r2_f = 1-(sum(d**2)/sum((y-np.mean(y))**2))
print("Results by manual calculation:")
print("MAE:", mae_f)       # mean absolute error: average absolute difference between actual and predicted RUL
print("MSE:", mse_f)       # mean squared error: average squared difference between actual and predicted RUL
print("RMSE:", rmse_f)     # root mean squared error: square root of the MSE, in the same units as the RUL target
print("R-Squared:", r2_f)  # coefficient of determination: closer to 1 is better
Results by manual calculation:
MAE: 3.8619044785419687
MSE: 28.464647783454012
RMSE: 5.335227060159109
R-Squared: [0.66613849]
# Plot true and predicted RUL values
plt.plot(yTest, label = "True RUL", color = "red")
plt.plot(pred, label = "Pred RUL", color = "blue")
plt.legend()
plt.show()
x = list(range(len(yTest)))
plt.scatter(x, yTest, color="blue", label="original")
plt.plot(x, pred, color="red", label="predicted")
plt.legend()
plt.show()
# train for another 150 epochs, then checkpoint the model
historyvet = []
model = load_model('bilstm_predict_rul_experiment8_extended_full_take1_part4.h5')
epochs = 150
for epoch in range(epochs):
    print('Epoch {%d}' % (epoch))
    history = model.fit(Xtrain, ytrain, epochs=1, batch_size=500,
                        validation_data=(Xval, yVal), shuffle=True)
    historyvet.append(history.history)
model.save('bilstm_predict_rul_experiment8_extended_full_take1_part5.h5')
model.summary()
Epoch {0} 417/417 [==============================] - 74s 156ms/step - loss: 17.0762 - val_loss: 29.1492 Epoch {1} 417/417 [==============================] - 63s 150ms/step - loss: 16.6404 - val_loss: 31.5541 Epoch {2} 417/417 [==============================] - 62s 148ms/step - loss: 17.2493 - val_loss: 27.6492 Epoch {3} 417/417 [==============================] - 62s 149ms/step - loss: 16.9610 - val_loss: 30.7429 Epoch {4} 68/417 [===>..........................] - ETA: 47s - loss: 16.7998
lossTrain = []
lossval = []
for element in historyvet:
    lossTrain.append(element['loss'][0])
    lossval.append(element['val_loss'][0])
plt.plot(lossTrain, color='g')
plt.plot(lossval, color='r')
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('Loss')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
pred = model.predict(Xtest)
1396/1396 [==============================] - 33s 22ms/step
print(model.evaluate(Xtest,yTest))
1396/1396 [==============================] - 30s 21ms/step - loss: 26.0985 26.098464965820312
y = yTest.copy()
yhat = pred.copy()
# calculate manually
d = y - yhat
mse_f = np.mean(d**2)
mae_f = np.mean(abs(d))
rmse_f = np.sqrt(mse_f)
r2_f = 1-(sum(d**2)/sum((y-np.mean(y))**2))
print("Results by manual calculation:")
print("MAE:", mae_f)       # mean absolute error: average absolute difference between actual and predicted RUL
print("MSE:", mse_f)       # mean squared error: average squared difference between actual and predicted RUL
print("RMSE:", rmse_f)     # root mean squared error: square root of the MSE, in the same units as the RUL target
print("R-Squared:", r2_f)  # coefficient of determination: closer to 1 is better
Results by manual calculation:
MAE: 3.6417208252572455
MSE: 26.098465565188167
RMSE: 5.10866573237946
R-Squared: [0.69389142]
# Plot true and predicted RUL values
plt.plot(yTest, label = "True RUL", color = "red")
plt.plot(pred, label = "Pred RUL", color = "blue")
plt.legend()
plt.show()
x = list(range(len(yTest)))
plt.scatter(x, yTest, color="blue", label="original")
plt.plot(x, pred, color="red", label="predicted")
plt.legend()
plt.show()
Fine-tune the trained model on the validation split
newModel = load_model('bilstm_predict_rul_experiment8_extended_full_take1_part5.h5')
#adam = optimizers.Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
adam = tf.keras.optimizers.Adam(learning_rate=0.001)
newModel.compile(loss='mse', optimizer=adam)
newModel.summary()
# fit for 25 more epochs, this time on the validation split
epochs = 25
newModel.fit(Xval, yVal, epochs=epochs, batch_size=500)
newModel.save('bilstm_predict_rul_experiment8_extended_full_take1_part5_best.h5')
newModel.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= bidirectional (Bidirection (None, 15, 256) 141312 al) bidirectional_1 (Bidirecti (None, 15, 128) 164352 onal) bidirectional_2 (Bidirecti (None, 64) 41216 onal) dense (Dense) (None, 96) 6240 dense_1 (Dense) (None, 128) 12416 dense_2 (Dense) (None, 1) 129 ================================================================= Total params: 365665 (1.39 MB) Trainable params: 365665 (1.39 MB) Non-trainable params: 0 (0.00 Byte) _________________________________________________________________ Epoch 1/25 90/90 [==============================] - 25s 136ms/step - loss: 30.0208 Epoch 2/25 90/90 [==============================] - 12s 137ms/step - loss: 25.0686 Epoch 3/25 90/90 [==============================] - 12s 134ms/step - loss: 22.9721 Epoch 4/25 90/90 [==============================] - 12s 135ms/step - loss: 22.2551 Epoch 5/25 90/90 [==============================] - 12s 135ms/step - loss: 20.0813 Epoch 6/25 90/90 [==============================] - 12s 133ms/step - loss: 20.3510 Epoch 7/25 90/90 [==============================] - 12s 133ms/step - loss: 18.9501 Epoch 8/25 90/90 [==============================] - 12s 133ms/step - loss: 17.6036 Epoch 9/25 90/90 [==============================] - 12s 133ms/step - loss: 17.7045 Epoch 10/25 90/90 [==============================] - 12s 134ms/step - loss: 17.3402 Epoch 11/25 90/90 [==============================] - 12s 135ms/step - loss: 17.1489 Epoch 12/25 90/90 [==============================] - 12s 133ms/step - loss: 16.4222 Epoch 13/25 90/90 [==============================] - 12s 133ms/step - loss: 16.9329 Epoch 14/25 90/90 [==============================] - 12s 133ms/step - loss: 16.0967 Epoch 15/25 90/90 [==============================] - 12s 134ms/step - loss: 15.1926 Epoch 16/25 90/90 [==============================] - 12s 133ms/step - loss: 14.8439 Epoch 17/25 90/90 [==============================] - 12s 133ms/step - loss: 14.8258 Epoch 18/25 90/90 [==============================] - 12s 132ms/step - loss: 13.3039 Epoch 19/25 90/90 [==============================] - 12s 133ms/step - loss: 16.0017 Epoch 20/25 90/90 [==============================] - 12s 133ms/step - loss: 14.8606 Epoch 21/25 90/90 [==============================] - 12s 133ms/step - loss: 14.4499 Epoch 22/25 90/90 [==============================] - 12s 135ms/step - loss: 13.5199 Epoch 23/25 90/90 [==============================] - 12s 133ms/step - loss: 13.1400 Epoch 24/25 90/90 [==============================] - 12s 134ms/step - loss: 12.8299 Epoch 25/25 90/90 [==============================] - 12s 135ms/step - loss: 12.4812 Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= bidirectional (Bidirection (None, 15, 256) 141312 al) bidirectional_1 (Bidirecti (None, 15, 128) 164352 onal) bidirectional_2 (Bidirecti (None, 64) 41216 onal) dense (Dense) (None, 96) 6240 dense_1 (Dense) (None, 128) 12416 dense_2 (Dense) (None, 1) 129 ================================================================= Total params: 365665 (1.39 MB) Trainable params: 365665 (1.39 MB) Non-trainable params: 0 (0.00 Byte) _________________________________________________________________
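Because this pass trains on the validation split itself, it can be useful to keep the pre-fine-tune checkpoint around and compare both models on the untouched test set; a minimal sketch using the two files saved above:
# Compare test loss (MSE) before and after the validation-set fine-tune.
base = load_model('bilstm_predict_rul_experiment8_extended_full_take1_part5.h5')
tuned = load_model('bilstm_predict_rul_experiment8_extended_full_take1_part5_best.h5')
print('base test MSE:', base.evaluate(Xtest, yTest, verbose=0))
print('tuned test MSE:', tuned.evaluate(Xtest, yTest, verbose=0))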
print(newModel.evaluate(Xtest,yTest))
1396/1396 [==============================] - 34s 23ms/step - loss: 29.3947 29.394678115844727
pred = newModel.predict(Xtest)
1396/1396 [==============================] - 33s 22ms/step
# Plot true and predicted RUL values
plt.plot(yTest, label = "True RUL", color = "red")
plt.plot(pred, label = "Pred RUL", color = "blue")
plt.legend()
plt.show()
x = list(range(len(yTest)))
plt.scatter(x, yTest, color="blue", label="original")
plt.plot(x, pred, color="red", label="predicted")
plt.legend()
plt.show()
y = yTest.copy()
yhat = pred.copy()
# calculate manually
d = y - yhat
mse_f = np.mean(d**2)
mae_f = np.mean(abs(d))
rmse_f = np.sqrt(mse_f)
r2_f = 1-(sum(d**2)/sum((y-np.mean(y))**2))
print("Results by manual calculation:")
print("MAE:", mae_f)       # mean absolute error: average absolute difference between actual and predicted RUL
print("MSE:", mse_f)       # mean squared error: average squared difference between actual and predicted RUL
print("RMSE:", rmse_f)     # root mean squared error: square root of the MSE, in the same units as the RUL target
print("R-Squared:", r2_f)  # coefficient of determination: closer to 1 is better; >0.85 in this case after training on the validation dataset
Results by manual calculation:
MAE: 3.9411885885822477
MSE: 29.394694086310825
RMSE: 5.421687383675937
R-Squared: [0.65522999]
results = pd.DataFrame({'Predicted':pred.flatten()})
results['Actual'] = yTest.flatten()
results
      | Predicted | Actual
------|-----------|-------
0     | 6.908440  | 18
1     | 24.287235 | 30
2     | 19.718840 | 18
3     | 23.154980 | 21
4     | 13.894153 | 17
...   | ...       | ...
44643 | 18.152962 | 28
44644 | 26.346445 | 30
44645 | 15.683774 | 30
44646 | 6.748013  | 9
44647 | 14.507665 | 26
44648 rows × 2 columns
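The side-by-side frame also makes it easy to summarise the per-sample error; a minimal sketch (the AbsError column name is illustrative):
# Per-sample absolute error and a quick distribution summary.
results['AbsError'] = (results['Predicted'] - results['Actual']).abs()
print(results['AbsError'].describe())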