Using this code I see a speedup with amp, but not a huge one:
import torch
import torch.nn as nn
import time


class CharacterLevelCNN(nn.Module):
    def __init__(self, number_of_classes):
        super(CharacterLevelCNN, self).__init__()
        # define conv layers
        self.dropout_input = nn.Dropout2d(0.5)
        self.conv1 = nn.Sequential(
            nn.Conv1d(
                128,
                256,
                kernel_size=7,
                padding=0,
            ),
            nn.ReLU(),
            nn.MaxPool1d(3),
        )
        self.conv2 = nn.Sequential(
            nn.Conv1d(256, 256, kernel_size=7, padding=0), nn.ReLU(), nn.MaxPool1d(3)
        )
        self.conv3 = nn.Sequential(
            nn.Conv1d(256, 256, kernel_size=3, padding=0), nn.ReLU()
        )
        self.conv4 = nn.Sequential(
            nn.Conv1d(256, 256, kernel_size=3, padding=0), nn.ReLU()
        )
        self.conv5 = nn.Sequential(
            nn.Conv1d(256, 256, kernel_size=3, padding=0), nn.ReLU()
        )
        self.conv6 = nn.Sequential(
            nn.Conv1d(256, 256, kernel_size=3, padding=0), nn.ReLU(), nn.MaxPool1d(3)
        )

        # compute the output shape after forwarding an input to the conv layers
        input_shape = (
            128,
            128,
            128,
        )
        self.output_dimension = self._get_conv_output(input_shape)

        # define linear layers
        self.fc1 = nn.Sequential(
            nn.Linear(self.output_dimension, 1024), nn.ReLU(), nn.Dropout(0.5)
        )
        self.fc2 = nn.Sequential(nn.Linear(1024, 1024), nn.ReLU(), nn.Dropout(0.5))
        self.fc3 = nn.Linear(1024, number_of_classes)

        # initialize weights
        self._create_weights()

    # utility private functions
    def _create_weights(self, mean=0.0, std=0.05):
        for module in self.modules():
            if isinstance(module, nn.Conv1d) or isinstance(module, nn.Linear):
                module.weight.data.normal_(mean, std)

    def _get_conv_output(self, shape):
        x = torch.rand(shape)
        x = x.transpose(1, 2)
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.conv6(x)
        x = x.view(x.size(0), -1)
        output_dimension = x.size(1)
        return output_dimension

    # forward
    def forward(self, x):
        x = self.dropout_input(x)
        x = x.transpose(1, 2)
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.conv6(x)
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x


def profile(model, x, amp, benchmark):
    torch.backends.cudnn.benchmark = benchmark
    scaler = torch.cuda.amp.GradScaler(enabled=amp)

    # warmup
    for _ in range(10):
        with torch.cuda.amp.autocast(enabled=amp):
            out = model(x)
        scaler.scale(out.mean()).backward()

    nb_iters = 100
    torch.cuda.synchronize()
    t0 = time.perf_counter()
    for _ in range(nb_iters):
        with torch.cuda.amp.autocast(enabled=amp):
            out = model(x)
        scaler.scale(out.mean()).backward()
    torch.cuda.synchronize()
    t1 = time.perf_counter()
    print("amp enabled: {}, benchmark: {}, {}iter/s".format(
        amp, benchmark, nb_iters/(t1-t0)))


device = "cuda"
model = CharacterLevelCNN(10).to(device)
x = torch.randn(128, 128, 128, device=device)

profile(model, x, amp=False, benchmark=False)
profile(model, x, amp=False, benchmark=True)
profile(model, x, amp=True, benchmark=False)
profile(model, x, amp=True, benchmark=True)

# amp enabled: False, benchmark: False, 355.158750122739iter/s
# amp enabled: False, benchmark: True, 370.2653699860738iter/s
# amp enabled: True, benchmark: False, 404.79240530359124iter/s
# amp enabled: True, benchmark: True, 419.78821713531175iter/s
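As a side note, the profiling loop above only times the forward and backward passes and leaves out the optimizer on purpose. In a real training loop the GradScaler also drives the optimizer step. A minimal sketch of a complete amp training step, reusing the model and input from above; the SGD optimizer and cross-entropy loss are arbitrary choices for illustration, not part of the benchmark:

optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()
scaler = torch.cuda.amp.GradScaler()
target = torch.randint(0, 10, (128,), device=device)

optimizer.zero_grad()
with torch.cuda.amp.autocast():
    out = model(x)
    loss = criterion(out, target)
scaler.scale(loss).backward()  # scale the loss to avoid fp16 gradient underflow
scaler.step(optimizer)         # unscales grads and skips the step on inf/NaN grads
scaler.update()                # adjusts the scale factor for the next iteration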
You should also note that cuDNN is allowed to use TF32, which uses Tensor Cores for the convolutions where possible and is thus already speeding up your model in float32. Disabling it shows the true FP32 performance:
torch.backends.cudnn.allow_tf32 = False
profile(model, x, amp=False, benchmark=False)
profile(model, x, amp=False, benchmark=True)
# amp enabled: False, benchmark: False, 191.57821257885942iter/s
# amp enabled: False, benchmark: True, 193.78960927342436iter/s
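Note that this flag only controls the cuDNN convolutions; matmuls (e.g. the linear layers) have a separate TF32 switch, and the default values differ between PyTorch releases. A quick sketch of both knobs:

torch.backends.cudnn.allow_tf32 = False        # TF32 for cuDNN convolutions
torch.backends.cuda.matmul.allow_tf32 = False  # TF32 for matmuls / linear layers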
Could you execute my code and check how the model performs on your system?