I try to import torchvision in order to use a federated dataset from the tutorial with a VGG (or any other existing model) -> https://github.com/OpenMined/PySyft/blob/dev/examples/tutorials/advanced/Federated%20Dataset.ipynb
and I get the following RuntimeError:
RuntimeError Traceback (most recent call last)
<ipython-input-15-bd04a42c539f> in <module>
----> 1 import torchvision
2 model = torchvision.models.vgg11(pretrained=False)
3 #model = torchvision.models.resnet101(pretrained=False)
~/miniconda3/envs/pysyft/lib/python3.7/site-packages/torchvision/__init__.py in <module>
----> 1 from torchvision import models
2 from torchvision import datasets
3 from torchvision import ops
4 from torchvision import transforms
5 from torchvision import utils
~/miniconda3/envs/pysyft/lib/python3.7/site-packages/torchvision/models/__init__.py in <module>
9 from .shufflenetv2 import *
10 from . import segmentation
---> 11 from . import detection
~/miniconda3/envs/pysyft/lib/python3.7/site-packages/torchvision/models/detection/__init__.py in <module>
----> 1 from .faster_rcnn import *
2 from .mask_rcnn import *
3 from .keypoint_rcnn import *
~/miniconda3/envs/pysyft/lib/python3.7/site-packages/torchvision/models/detection/faster_rcnn.py in <module>
11
12 from .generalized_rcnn import GeneralizedRCNN
---> 13 from .rpn import AnchorGenerator, RPNHead, RegionProposalNetwork
14 from .roi_heads import RoIHeads
15 from .transform import GeneralizedRCNNTransform
~/miniconda3/envs/pysyft/lib/python3.7/site-packages/torchvision/models/detection/rpn.py in <module>
6 from torchvision.ops import boxes as box_ops
7
----> 8 from . import _utils as det_utils
9
10
~/miniconda3/envs/pysyft/lib/python3.7/site-packages/torchvision/models/detection/_utils.py in <module>
72
73
---> 74 @torch.jit.script
75 def encode_boxes(reference_boxes, proposals, weights):
76 # type: (torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
~/miniconda3/envs/pysyft/lib/python3.7/site-packages/torch/jit/__init__.py in script(obj, optimize, _frames_up, _rcb)
822 else:
823 ast = get_jit_def(obj)
--> 824 fn = torch._C._jit_script_compile(ast, _rcb, get_default_args(obj))
825 # Forward docstrings
826 fn.__doc__ = obj.__doc__
RuntimeError:
for operator (Tensor 0) -> Tensor:
expected a value of type Tensor for argument '0' but found (Tensor, Tensor, Tensor, Tensor)
gt_heights = reference_boxes_y2 - reference_boxes_y1
gt_ctr_x = reference_boxes_x1 + 0.5 * gt_widths
gt_ctr_y = reference_boxes_y1 + 0.5 * gt_heights
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
targets = torch.cat((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
return targets
:
gt_heights = reference_boxes_y2 - reference_boxes_y1
gt_ctr_x = reference_boxes_x1 + 0.5 * gt_widths
gt_ctr_y = reference_boxes_y1 + 0.5 * gt_heights
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
targets = torch.cat((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
~~~~~~~~~ <--- HERE
return targets
My code (basically the code from the tutorial, plus the imports at the end):
# --- PySyft setup: hook PyTorch and create two simulated ("virtual") workers ---
from torch.utils.data import Dataset
import syft as sy
import torch
hook = sy.TorchHook(torch) # <-- NEW: hook PyTorch ie add extra functionalities to support Federated Learning
bob = sy.VirtualWorker(hook, id="bob") # <-- NEW: define remote worker bob
alice = sy.VirtualWorker(hook, id="alice") # <-- NEW: and alice
class Arguments():
    """Container for the training hyper-parameters.

    Defaults mirror the PySyft federated-learning tutorial.  Any of them
    can be overridden by keyword, e.g. ``Arguments(lr=0.1, epochs=5)``;
    ``Arguments()`` keeps the original behavior exactly.

    Raises:
        TypeError: if an override names an unknown hyper-parameter.
    """

    def __init__(self, **overrides):
        self.batch_size = 64            # training batch size
        self.test_batch_size = 1000     # batch size used for evaluation
        self.epochs = 10                # number of training epochs
        self.lr = 0.01                  # SGD learning rate
        self.momentum = 0.5             # NOTE: unused below (momentum unsupported by the optimizer setup)
        self.no_cuda = False            # set True to force CPU even if CUDA is available
        self.seed = 1                   # RNG seed for reproducibility
        self.log_interval = 10          # batches between progress prints
        self.save_model = False         # whether to persist the trained model
        # Apply keyword overrides, rejecting typos rather than silently
        # creating new attributes.
        for name, value in overrides.items():
            if not hasattr(self, name):
                raise TypeError("unknown hyper-parameter: " + name)
            setattr(self, name, value)
# Instantiate the hyper-parameters and configure the torch device.
args = Arguments()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)  # seed torch's RNG for reproducibility
device = torch.device("cuda" if use_cuda else "cpu")
# Extra DataLoader options that only make sense when batches go to the GPU.
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
from scipy.io import loadmat
import matplotlib.pyplot as plt
def load_data(path):
    """Read a MAT file and return its image and label arrays.

    Args:
        path: filesystem path to the ``.mat`` file.

    Returns:
        The file's ``X`` (images) and ``y`` (labels) entries, as loaded
        by :func:`scipy.io.loadmat`.
    """
    mat = loadmat(path)
    return mat["X"], mat["y"]
# Load the SVHN train/test splits.  loadmat returns images as HxWxCxN
# (sample index last) and labels as an Nx1 column — TODO confirm against
# the actual files.
train_data , train_labels = load_data("advanced/data/train_32x32.mat")
test_data , test_labels = load_data("advanced/data/test_32x32.mat")
# Transpose the image arrays so the first axis indexes samples (NxHxWxC),
# and flatten the label column into a 1-D vector.
X_train, y_train = train_data.transpose((3,0,1,2)), train_labels[:,0]
X_test, y_test = test_data.transpose((3,0,1,2)), test_labels[:,0]
# Sanity-check the data by previewing a 4x5 grid of training images.
fig=plt.figure(figsize=(8, 8))
columns = 4
rows = 5
for i in range(1,columns*rows+1):
    fig.add_subplot(rows, columns, i)
    plt.imshow(X_train[i])
plt.show()
# Wrap the arrays in a syft BaseDataset and split ("federate") it across
# the two virtual workers; each worker holds a shard of the data.
base=sy.BaseDataset(torch.from_numpy(X_train),torch.from_numpy(y_train))
base_federated=base.federate((bob, alice))
federated_train_loader = sy.FederatedDataLoader( # <-- this is now a FederatedDataLoader
    base_federated,batch_size=args.batch_size)
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one epoch of federated training.

    Iterates over ``train_loader`` (a FederatedDataLoader whose batches
    live on remote workers), ships the model to the worker holding each
    batch, performs the optimization step there, and pulls the model back.

    Args:
        args: hyper-parameter container (uses ``batch_size`` and ``log_interval``).
        model: the torch model to train (will be moved between workers).
        device: torch device batches are moved to.
        train_loader: the federated data loader to iterate.
        optimizer: optimizer over ``model``'s parameters.
        epoch: current epoch number (for logging only).
    """
    # FIX: torch.nn.functional was never imported at file level, so the
    # original F.nll_loss raised NameError; import it locally.
    import torch.nn.functional as F

    model.train()
    # FIX: the original iterated the global federated_train_loader and
    # ignored the train_loader parameter entirely.
    for batch_idx, (data, target) in enumerate(train_loader):
        model.send(data.location)  # <-- send the model to the worker holding this batch
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        # NOTE(review): nll_loss expects log-probabilities; a plain VGG
        # head outputs raw logits — confirm a log_softmax is applied,
        # or switch to cross_entropy.
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        model.get()  # <-- pull the updated model back from the remote worker
        if batch_idx % args.log_interval == 0:
            loss = loss.get()  # <-- the loss tensor lives remotely; fetch it before printing
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, len(train_loader) * args.batch_size, #batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
# Build the model and optimizer, then run the federated training loop.
import torchvision
import torch.optim as optim
# NOTE(review): VGG expects NCHW float input normalized to ImageNet stats;
# X_train above is NHWC uint8 — confirm preprocessing before training.
model = torchvision.models.vgg11(pretrained=False)
model = model.to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr) # TODO momentum is not supported at the moment
print(model)
print(optimizer)
for epoch in range(1, args.epochs + 1):
    train(args, model, device, federated_train_loader, optimizer, epoch)