SoFunction
Updated on 2024-11-16

Example of plotting acc and loss curves with Keras

I'm going to cut to the chase, so let's just go straight to the code!

# Load the Keras modules
from __future__ import print_function
import numpy as np
np.random.seed(1337)  # for reproducibility

import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
import matplotlib.pyplot as plt
%matplotlib inline

# Write a LossHistory class that stores the loss and acc
class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self.losses = {'batch': [], 'epoch': []}
        self.accuracy = {'batch': [], 'epoch': []}
        self.val_loss = {'batch': [], 'epoch': []}
        self.val_acc = {'batch': [], 'epoch': []}

    def on_batch_end(self, batch, logs={}):
        self.losses['batch'].append(logs.get('loss'))
        self.accuracy['batch'].append(logs.get('acc'))
        self.val_loss['batch'].append(logs.get('val_loss'))
        self.val_acc['batch'].append(logs.get('val_acc'))

    def on_epoch_end(self, batch, logs={}):
        self.losses['epoch'].append(logs.get('loss'))
        self.accuracy['epoch'].append(logs.get('acc'))
        self.val_loss['epoch'].append(logs.get('val_loss'))
        self.val_acc['epoch'].append(logs.get('val_acc'))

    def loss_plot(self, loss_type):
        iters = range(len(self.losses[loss_type]))
        plt.figure()
        # acc
        plt.plot(iters, self.accuracy[loss_type], 'r', label='train acc')
        # loss
        plt.plot(iters, self.losses[loss_type], 'g', label='train loss')
        if loss_type == 'epoch':
            # val_acc
            plt.plot(iters, self.val_acc[loss_type], 'b', label='val acc')
            # val_loss
            plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss')
        plt.grid(True)
        plt.xlabel(loss_type)
        plt.ylabel('acc-loss')
        plt.legend(loc="upper right")
        plt.show()

# Variable initialization
batch_size = 128 
nb_classes = 10
nb_epoch = 20

# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()

X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

# Build the model with Sequential()
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))

# Print a summary of the model
model.summary()

# Training and evaluation
# Compile the model
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])
# Create an instance of LossHistory
history = LossHistory()

# Iterative training (note that the callback is added here)
model.fit(X_train, Y_train,
          batch_size=batch_size, nb_epoch=nb_epoch,
          verbose=1,
          validation_data=(X_test, Y_test),
          callbacks=[history])

# Model evaluation
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])

# Plot the acc-loss curves
history.loss_plot('epoch')
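
Since on_batch_end also records the training loss and acc, the same history instance can be used to inspect the per-batch curves (validation metrics are only available once per epoch, so only the training curves are drawn in that case):

# Per-batch training curves from the same history instance
history.loss_plot('batch')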

Additional knowledge: custom validation-set performance evaluation in Keras (ROC, AUC)

Keras's built-in performance metrics only cover accuracy and loss. When you want AUC as the measure of how well the model does on the validation set, you have to write the evaluation function yourself:

import numpy as np
import tensorflow as tf
from sklearn.metrics import roc_auc_score
from keras import backend as K

# AUC for a binary classifier
def auc(y_true, y_pred):
    ptas = tf.stack([binary_PTA(y_true, y_pred, k) for k in np.linspace(0, 1, 1000)], axis=0)
    pfas = tf.stack([binary_PFA(y_true, y_pred, k) for k in np.linspace(0, 1, 1000)], axis=0)
    pfas = tf.concat([tf.ones((1,)), pfas], axis=0)
    binSizes = -(pfas[1:] - pfas[:-1])
    s = ptas * binSizes
    return K.sum(s, axis=0)
# ------------------------------------------------------------------------------------
# PFA, prob false alert for binary classifier
def binary_PFA(y_true, y_pred, threshold=K.variable(value=0.5)):
    y_pred = K.cast(y_pred >= threshold, 'float32')
    # N = total number of negative labels
    N = K.sum(1 - y_true)
    # FP = total number of false alerts, alerts from the negative class labels
    FP = K.sum(y_pred - y_pred * y_true)
    return FP / N
# -----------------------------------------------------------------------------------
# P_TA, prob true alerts for binary classifier
def binary_PTA(y_true, y_pred, threshold=K.variable(value=0.5)):
    y_pred = K.cast(y_pred >= threshold, 'float32')
    # P = total number of positive labels
    P = K.sum(y_true)
    # TP = total number of correct alerts, alerts from the positive class labels
    TP = K.sum(y_pred * y_true)
    return TP / P
 
# Then pass the metric in the model's compile()
# In the following example I use an RNN for classification
from keras.models import Sequential
from keras.layers import Dense, Dropout, Masking
from keras.layers import GRU
from keras import regularizers
import keras

model = Sequential()
model.add(Masking(mask_value=0., input_shape=(max_lenth, max_features)))  # masking for variable-length sequence inputs
model.add(GRU(units=n_hidden_units, activation='selu', kernel_initializer='orthogonal', recurrent_initializer='orthogonal',
              bias_initializer='zeros', kernel_regularizer=regularizers.l2(0.01), recurrent_regularizer=regularizers.l2(0.01),
              bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None,
              bias_constraint=None, dropout=0.5, recurrent_dropout=0.0, implementation=1, return_sequences=False,
              return_state=False, go_backwards=False, stateful=False, unroll=False))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=[auc])  # pass the custom evaluation function here

Next up, make your own predictions...
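
For example, a minimal sketch of that step (x_test below is a placeholder for your own preprocessed, padded test sequences; it is not defined in the code above):

# Minimal prediction sketch; x_test is a hypothetical placeholder for padded test sequences
y_prob = model.predict(x_test, batch_size=32)   # sigmoid probabilities in [0, 1]
y_pred = (y_prob > 0.5).astype('int32')         # 0/1 class labels at the 0.5 threshold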

Method II:

from sklearn.metrics import roc_auc_score
import keras

class RocAucMetricCallback(keras.callbacks.Callback):
    def __init__(self, predict_batch_size=1024, include_on_batch=False):
        super(RocAucMetricCallback, self).__init__()
        self.predict_batch_size = predict_batch_size
        self.include_on_batch = include_on_batch

    def on_batch_begin(self, batch, logs={}):
        pass

    def on_batch_end(self, batch, logs={}):
        if self.include_on_batch:
            logs['roc_auc_val'] = float('-inf')
            if self.validation_data:
                logs['roc_auc_val'] = roc_auc_score(self.validation_data[1],
                                                    self.model.predict(self.validation_data[0],
                                                                       batch_size=self.predict_batch_size))

    def on_train_begin(self, logs={}):
        if not ('roc_auc_val' in self.params['metrics']):
            self.params['metrics'].append('roc_auc_val')

    def on_train_end(self, logs={}):
        pass

    def on_epoch_begin(self, epoch, logs={}):
        pass

    def on_epoch_end(self, epoch, logs={}):
        logs['roc_auc_val'] = float('-inf')
        if self.validation_data:
            logs['roc_auc_val'] = roc_auc_score(self.validation_data[1],
                                                self.model.predict(self.validation_data[0],
                                                                   batch_size=self.predict_batch_size))
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout, Masking
from keras.layers import GRU
from keras import regularizers
from keras.callbacks import EarlyStopping
from sklearn.metrics import roc_auc_score
from keras import metrics
import keras

cb = [
    my_callbacks.RocAucMetricCallback(),  # include it before EarlyStopping!
    EarlyStopping(monitor='roc_auc_val', patience=300, verbose=2, mode='max')
]
model = Sequential()
model.add(Masking(mask_value=0., input_shape=(max_lenth, max_features)))
# model.add(Embedding(input_dim=max_features+1, output_dim=64, mask_zero=True))
model.add(GRU(units=n_hidden_units, activation='selu', kernel_initializer='orthogonal', recurrent_initializer='orthogonal',
              bias_initializer='zeros', kernel_regularizer=regularizers.l2(0.01), recurrent_regularizer=regularizers.l2(0.01),
              bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None,
              bias_constraint=None, dropout=0.5, recurrent_dropout=0.0, implementation=1, return_sequences=False,
              return_state=False, go_backwards=False, stateful=False, unroll=False))  # input_shape=(max_lenth, max_features),
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=[auc])  # other evaluation criteria can be added here as well
model.fit(x_train, y_train, batch_size=train_batch_size, epochs=training_iters, verbose=2,
          callbacks=cb, validation_split=0.2,
          shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0)
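
As a sanity check on the value the callback reports, the validation AUC can also be computed offline with sklearn after training. A small sketch, assuming x_val and y_val are a held-out validation set (they are not defined in the code above):

# Offline check of the validation AUC; x_val / y_val are assumed held-out data
val_prob = model.predict(x_val, batch_size=1024)
print('val roc_auc:', roc_auc_score(y_val, val_prob))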

Personally tested and it works!

This example of plotting acc and loss curves in Keras is all I have to share. I hope it gives you a useful reference, and I hope you will continue to support us.