We introduce the "exponential linear unit" (ELU), which speeds up learning in deep neural networks and leads to higher classification accuracies. Like rectified linear units (ReLUs), leaky ReLUs (LReLUs) and parametrized ReLUs (PReLUs), ELUs alleviate the vanishing gradient problem via the identity for positive values. However, ELUs have improved learning characteristics compared to units with other activation functions. In contrast to ReLUs, ELUs have negative values, which allows them to push mean unit activations closer to zero, like batch normalization but with lower computational complexity. Mean shifts toward zero speed up learning by bringing the normal gradient closer to the unit natural gradient because of a reduced bias shift effect. While LReLUs and PReLUs have negative values too, they do not ensure a noise-robust deactivation state. ELUs saturate to a negative value with smaller inputs and thereby decrease the forward-propagated variation and information. Therefore, ELUs code the degree of presence of particular phenomena in the input, while they do not quantitatively model the degree of their absence. In experiments, ELUs lead not only to faster learning, but also to significantly better generalization performance than ReLUs and LReLUs on networks with more than 5 layers. On CIFAR-100, ELU networks significantly outperform ReLU networks with batch normalization, while batch normalization does not improve ELU networks. ELU networks are among the top 10 reported CIFAR-10 results and yield the best published result on CIFAR-100, without resorting to multi-view evaluation or model averaging. On ImageNet, ELU networks considerably speed up learning compared to a ReLU network with the same architecture, obtaining less than 10% classification error for a single-crop, single-model network.
Source: Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) (2015-11-23).
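For reference, the activation the abstract describes is, for a hyperparameter $\alpha > 0$ (the paper and the code below use $\alpha = 1$):

$$
f(x) = \begin{cases} x & \text{if } x > 0 \\ \alpha \left( e^{x} - 1 \right) & \text{if } x \le 0 \end{cases}
\qquad
f'(x) = \begin{cases} 1 & \text{if } x > 0 \\ f(x) + \alpha & \text{if } x \le 0 \end{cases}
$$

The identity on the positive side keeps gradients from vanishing, while the negative side saturates at $-\alpha$, which is what lets ELUs push mean activations toward zero. The script below reproduces this comparison on MNIST with a deep fully connected network.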
import torch
import numpy as np
import seaborn as sns
from tqdm import tqdm
import torch.nn as nn
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from keras.datasets.mnist import load_data
sns.set_theme()
# load MNIST and rescale pixel intensities from [0, 255] to [-1, 1]
(trainX, trainy), (testX, testy) = load_data()
trainX = np.float32(trainX) / 127.5 - 1
testX = np.float32(testX) / 127.5 - 1
def train(model, optimizer, loss_fct=torch.nn.NLLLoss(), nb_epochs=25):
    # `dataset`, `device`, `testX` and `testy` are module-level globals defined below
    training_loss = []
    validation_loss = []
    for _ in tqdm(range(nb_epochs)):
        batch_loss = []
        for x, y in dataset:
            log_prob = model(x.to(device).reshape(-1, 28 * 28))
            loss = loss_fct(log_prob, y.long().to(device))  # NLLLoss expects integer class targets
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            batch_loss.append(loss.item())
        training_loss.append(np.mean(batch_loss))
        # Validation on the full test set, with dropout disabled and no gradient tracking
        model.train(False)
        with torch.no_grad():
            log_prob = model(torch.from_numpy(testX).reshape(-1, 28 * 28).to(device))
            t_loss = loss_fct(log_prob, torch.from_numpy(testy).long().to(device))
        validation_loss.append(t_loss.item())
        model.train(True)
    return training_loss, validation_loss
class ELU(nn.Module):
    # Exponential Linear Unit: identity for x > 0, alpha * (exp(x) - 1) otherwise
    def __init__(self, alpha=1.):
        super(ELU, self).__init__()
        self.alpha = alpha

    def forward(self, x):
        cond = x > 0
        y = x.clone()
        # Negative part saturates towards -alpha, pushing mean activations closer to zero
        y[~cond] = self.alpha * (torch.exp(x[~cond]) - 1.)
        return y
if __name__ == "__main__":
dataset = DataLoader([[trainX[i], trainy[i]] for i in range(trainX.shape[0])], batch_size=64, shuffle=True)
testing_dataset = DataLoader([[testX[i], testy[i]] for i in range(testX.shape[0])], batch_size=64, shuffle=True)
device = 'cuda'
model = torch.nn.Sequential(nn.Linear(28 * 28, 128), nn.ReLU(), nn.Dropout(p=0.5),
nn.Linear(128, 128), nn.ReLU(), nn.Dropout(p=0.5),
nn.Linear(128, 128), nn.ReLU(), nn.Dropout(p=0.5),
nn.Linear(128, 128), nn.ReLU(), nn.Dropout(p=0.5),
nn.Linear(128, 128), nn.ReLU(), nn.Dropout(p=0.5),
nn.Linear(128, 128), nn.ReLU(), nn.Dropout(p=0.5),
nn.Linear(128, 128), nn.ReLU(), nn.Dropout(p=0.5),
nn.Linear(128, 128), nn.ReLU(), nn.Dropout(p=0.5),
nn.Linear(128, 10), nn.LogSoftmax(dim=-1)).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
training_loss, validation_loss = train(model, optimizer, nb_epochs=100)
plt.plot(training_loss, label='ReLU')
plt.plot(validation_loss, label='ReLU', linestyle='--')
    # Same architecture with the custom ELU activation in place of ReLU
    model = torch.nn.Sequential(nn.Linear(28 * 28, 128), ELU(), nn.Dropout(p=0.5),
                                nn.Linear(128, 128), ELU(), nn.Dropout(p=0.5),
                                nn.Linear(128, 128), ELU(), nn.Dropout(p=0.5),
                                nn.Linear(128, 128), ELU(), nn.Dropout(p=0.5),
                                nn.Linear(128, 128), ELU(), nn.Dropout(p=0.5),
                                nn.Linear(128, 128), ELU(), nn.Dropout(p=0.5),
                                nn.Linear(128, 128), ELU(), nn.Dropout(p=0.5),
                                nn.Linear(128, 128), ELU(), nn.Dropout(p=0.5),
                                nn.Linear(128, 10), nn.LogSoftmax(dim=-1)).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    training_loss, validation_loss = train(model, optimizer, nb_epochs=100)
    plt.plot(training_loss, label='ELU (train)')
    plt.plot(validation_loss, label='ELU (validation)', linestyle='--')
    plt.legend(fontsize=12)
    plt.xlabel('Epochs', fontsize=14)
    plt.ylabel('Cross Entropy Loss', fontsize=14)
    plt.savefig('Imgs/elu.png')
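As a quick sanity check (a minimal sketch, separate from the script above and assuming the ELU class defined there is in scope), the hand-written module can be compared against PyTorch's built-in nn.ELU, which implements the same formula; swapping nn.ELU() into the Sequential models should therefore behave identically.

import torch
import torch.nn as nn

x = torch.randn(1000)                # random inputs covering both signs
custom = ELU(alpha=1.)               # the module defined above
builtin = nn.ELU(alpha=1.)           # PyTorch's built-in ELU with the same alpha
assert torch.allclose(custom(x), builtin(x), atol=1e-6)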