Over the past two days, I have been rewriting my previous TensorFlow code in PyTorch.
However, I ran into what looks like unusually high memory demand from PyTorch. To verify this, I implemented the same simple network in both PyTorch and TensorFlow.
The PyTorch version uses 5~14 GB of memory (varying from run to run).
The TensorFlow version, however, needs only 400~700 MB (less than a tenth of the cost?).
I have checked my code carefully, but I still wonder whether something is wrong; I am not sure what is going on.
By the way, both versions were tested on CPU only.
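In case anyone wants to reproduce the comparison, here is a minimal sketch for reading the process's resident memory from inside each script (this assumes the third-party psutil package, which neither script below actually uses):

# Minimal helper for reading the Python process's resident memory (RSS).
# Assumes the third-party `psutil` package; not part of either script below.
import os
import psutil

def rss_mb():
    # Resident set size of the current process, in megabytes.
    return psutil.Process(os.getpid()).memory_info().rss / (1024 ** 2)

# e.g. print('RSS: %.1f MB' % rss_mb()) before and after each training step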
Does anyone have any idea?
Thanks in advance.
A very simple network implemented in PyTorch (only conv layers and one fully-connected layer):
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Seven 5x5 conv layers; padding=2 keeps the 32x32 spatial size.
        self.conv1 = nn.Conv2d(3, 64, 5, padding=2)
        self.conv2 = nn.Conv2d(64, 64, 5, padding=2)
        self.conv3 = nn.Conv2d(64, 64, 5, padding=2)
        self.conv4 = nn.Conv2d(64, 64, 5, padding=2)
        self.conv5 = nn.Conv2d(64, 64, 5, padding=2)
        self.conv6 = nn.Conv2d(64, 64, 5, padding=2)
        self.conv7 = nn.Conv2d(64, 64, 5, padding=2)
        self.fc1 = nn.Linear(64 * 32 * 32, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.conv6(x)
        x = self.conv7(x)
        x = x.view(-1, 64 * 32 * 32)  # flatten before the classifier
        x = self.fc1(x)
        return x
net = Net()
criterion = nn.CrossEntropyLoss()  # classification cross-entropy loss
optimizer = optim.Adam(net.parameters(), lr=0.001)

for epoch in range(10):
    # One dummy batch per epoch: random inputs, all-zero class labels.
    inputs = torch.randn(128, 3, 32, 32)  # random data rather than uninitialized storage
    labels = torch.from_numpy(np.zeros(128).astype(np.int64))
    inputs, labels = Variable(inputs), Variable(labels)

    optimizer.zero_grad()
    outputs = net(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    print('[%d] loss: %.3f' % (epoch + 1, loss.item()))
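One guess about where the memory might go (my own rough estimate, not a profile): every conv output has to be kept for the backward pass, and CPU convolutions typically unfold the input into an im2col buffer. A quick sketch of the sizes involved for the network above:

# Back-of-envelope buffer sizes for one 64-channel 5x5 conv at batch 128
# (an assumption about the CPU conv implementation, not a measurement).
batch, channels, h, w, k = 128, 64, 32, 32, 5
bytes_per_float = 4  # float32

activation = batch * channels * h * w * bytes_per_float        # output kept for backward
im2col = batch * h * w * (channels * k * k) * bytes_per_float  # unfolded input

print('one conv output:   %.0f MB' % (activation / 2 ** 20))  # 32 MB
print('one im2col buffer: %.0f MB' % (im2col / 2 ** 20))      # 800 MB

If several im2col buffers like that stay alive at once (the six 64-channel convs alone would come to ~4.7 GB), that would be in the ballpark of what I see. Could it be that TensorFlow frees these intermediates more eagerly?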
The same network implemented in TensorFlow (via tflearn):
import tflearn
from tflearn.layers.conv import conv_2d
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.estimator import regression
import numpy as np
def Net(inputs):
    # Seven 5x5 conv layers with 'SAME' padding, mirroring the PyTorch model.
    net = conv_2d(inputs, 64, 5, padding='SAME')
    for i in range(6):
        net = conv_2d(net, 64, 5, padding='SAME')
    net = fully_connected(net, 10)  # flattens the 4-D tensor automatically
    return net

inputs = input_data(shape=[None, 32, 32, 3])
net = Net(inputs)
net = regression(net, optimizer='adam',
                 loss='categorical_crossentropy', metric=None,
                 learning_rate=0.001)
model = tflearn.DNN(net)

# Dummy data: random inputs, all-zero targets.
X = np.random.randn(128, 32, 32, 3)
Y = np.zeros((128, 10))
model.fit(X, Y, n_epoch=100, batch_size=128, show_metric=True)
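To convince myself the two scripts really define the same model, I also compared trainable parameter counts (my own sanity check, not part of the scripts above):

# PyTorch side: sum over all trainable parameters.
print(sum(p.numel() for p in net.parameters()))  # expect 1,275,018

# TensorFlow/tflearn side: sum over all trainable variables.
import tensorflow as tf
print(sum(int(np.prod(v.get_shape().as_list()))
          for v in tf.trainable_variables()))    # should print the same count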