PyTorch XLA Exception: SIGSEGV

Hi everyone,

I am currently using GCP to try out TPUs with PyTorch.
My code comes from a Google Colab notebook. I changed or removed a few parts, such as the TPU_ADDRESS.
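For context, here is a minimal sketch of the kind of change I mean, with a placeholder IP instead of my real TPU address (torch_xla reads XRT_TPU_CONFIG, and the update_server_xrt helper below reads TPU_IP_ADDRESS):

import os

# Placeholder TPU address -- on GCP this is the internal IP of the TPU node.
os.environ['TPU_IP_ADDRESS'] = '10.0.0.2'
# torch_xla connects to the TPU through this XRT configuration string.
os.environ['XRT_TPU_CONFIG'] = 'tpu_worker;0;{}:8470'.format(os.environ['TPU_IP_ADDRESS'])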

During training, I get this exception:

2019-11-21 18:22:14.125827: I 2364 tensorflow/compiler/xla/xla_client/mesh_service.cc:168] Waiting to connect to client mesh master (300 seconds) localhost:40359
2019-11-21 18:22:14.141768: I 2369 tensorflow/compiler/xla/xla_client/mesh_service.cc:168] Waiting to connect to client mesh master (300 seconds) localhost:40359
2019-11-21 18:22:14.137212: I 2365 tensorflow/compiler/xla/xla_client/mesh_service.cc:168] Waiting to connect to client mesh master (300 seconds) localhost:40359
2019-11-21 18:22:14.159541: I 2376 tensorflow/compiler/xla/xla_client/mesh_service.cc:168] Waiting to connect to client mesh master (300 seconds) localhost:40359
2019-11-21 18:22:14.170106: I 2380 tensorflow/compiler/xla/xla_client/mesh_service.cc:168] Waiting to connect to client mesh master (300 seconds) localhost:40359
2019-11-21 18:22:14.182444: I 2384 tensorflow/compiler/xla/xla_client/mesh_service.cc:168] Waiting to connect to client mesh master (300 seconds) localhost:40359
2019-11-21 18:22:14.191538: I 2392 tensorflow/compiler/xla/xla_client/mesh_service.cc:168] Waiting to connect to client mesh master (300 seconds) localhost:40359
Device : xla:1
2019-11-21 18:22:22.612051: I 2364 tensorflow/compiler/xla/xla_client/computation_client.cc:195] Fetching mesh configuration for worker tpu_worker:0 from mesh service at localhost:40359
Device : xla:0
2019-11-21 18:22:23.098120: I 2380 tensorflow/compiler/xla/xla_client/computation_client.cc:195] Fetching mesh configuration for worker tpu_worker:0 from mesh service at localhost:40359
Device : xla:0
2019-11-21 18:22:23.538515: I 2384 tensorflow/compiler/xla/xla_client/computation_client.cc:195] Fetching mesh configuration for worker tpu_worker:0 from mesh service at localhost:40359
2019-11-21 18:22:23.689589: I 2376 tensorflow/compiler/xla/xla_client/computation_client.cc:195] Fetching mesh configuration for worker tpu_worker:0 from mesh service at localhost:40359
2019-11-21 18:22:23.776546: I 2392 tensorflow/compiler/xla/xla_client/computation_client.cc:195] Fetching mesh configuration for worker tpu_worker:0 from mesh service at localhost:40359
Device : xla:0
Device : xla:0
Device : xla:0
2019-11-21 18:22:24.179791: I 2369 tensorflow/compiler/xla/xla_client/computation_client.cc:195] Fetching mesh configuration for worker tpu_worker:0 from mesh service at localhost:40359
2019-11-21 18:22:24.274278: I 2365 tensorflow/compiler/xla/xla_client/computation_client.cc:195] Fetching mesh configuration for worker tpu_worker:0 from mesh service at localhost:40359
Device : xla:0
Device : xla:0
Traceback (most recent call last):
  File "test.py", line 217, in <module>
    xmp.spawn(_mp_fn, args=(FLAGS,), nprocs=FLAGS['num_cores'], start_method='fork')
  File "/anaconda3/envs/torch-xla-nightly/lib/python3.6/site-packages/torch_xla/distributed/xla_multiprocessing.py", line 173, in spawn
    start_method=start_method)
  File "/anaconda3/envs/torch-xla-nightly/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 149, in start_processes
    while not context.join():
  File "/anaconda3/envs/torch-xla-nightly/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 107, in join
    (error_index, name)
Exception: process 5 terminated with signal SIGSEGV

My code is the following:

import collections
from datetime import datetime, timedelta
import os
import requests
import threading

_VersionConfig = collections.namedtuple('_VersionConfig', 'wheels,server')
VERSION = "xrt==1.15.0"  #@param ["xrt==1.15.0", "torch_xla==nightly"]
CONFIG = {
    'xrt==1.15.0': _VersionConfig('1.15', '1.15.0'),
    'torch_xla==nightly': _VersionConfig('nightly', 'XRT-dev{}'.format(
        (datetime.today() - timedelta(1)).strftime('%Y%m%d'))),
}[VERSION]
DIST_BUCKET = 'gs://tpu-pytorch/wheels'
TORCH_WHEEL = 'torch-{}-cp36-cp36m-linux_x86_64.whl'.format(CONFIG.wheels)
TORCH_XLA_WHEEL = 'torch_xla-{}-cp36-cp36m-linux_x86_64.whl'.format(CONFIG.wheels)
TORCHVISION_WHEEL = 'torchvision-{}-cp36-cp36m-linux_x86_64.whl'.format(CONFIG.wheels)

# Update TPU XRT version
def update_server_xrt():
  print('Updating server-side XRT to {} ...'.format(CONFIG.server))
  url = 'http://{TPU_ADDRESS}:8475/requestversion/{XRT_VERSION}'.format(
      TPU_ADDRESS=os.environ['TPU_IP_ADDRESS'].split(':')[0],
      XRT_VERSION=CONFIG.server,
  )
  print('Done updating server-side XRT: {}'.format(requests.post(url)))

#update = threading.Thread(target=update_server_xrt)
#update.start()
#update.join()

# Result Visualization Helper
import math
from matplotlib import pyplot as plt

M, N = 4, 6
RESULT_IMG_PATH = '/tmp/test_result.png'

def plot_results(images, labels, preds):
  images, labels, preds = images[:M*N], labels[:M*N], preds[:M*N]
  inv_norm = transforms.Normalize((-0.1307/0.3081,), (1/0.3081,))

  num_images = images.shape[0]
  fig, axes = plt.subplots(M, N, figsize=(11, 9))
  fig.suptitle('Correct / Predicted Labels (Red text for incorrect ones)')

  for i, ax in enumerate(fig.axes):
    ax.axis('off')
    if i >= num_images:
      continue
    img, label, prediction = images[i], labels[i], preds[i]
    img = inv_norm(img)
    img = img.squeeze() # [1,Y,X] -> [Y,X]
    label, prediction = label.item(), prediction.item()
    if label == prediction:
      ax.set_title(u'\u2713', color='blue', fontsize=22)
    else:
      ax.set_title(
          'X {}/{}'.format(label, prediction), color='red')
    ax.imshow(img)
  plt.savefig(RESULT_IMG_PATH, transparent=True)


# Define Parameters
FLAGS = {}
FLAGS['datadir'] = "/tmp/mnist"
FLAGS['batch_size'] = 128
FLAGS['num_workers'] = 4
FLAGS['learning_rate'] = 0.01
FLAGS['momentum'] = 0.5
FLAGS['num_epochs'] = 10
FLAGS['num_cores'] = 8
FLAGS['log_steps'] = 20
FLAGS['metrics_debug'] = False

import numpy as np
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch_xla
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
import torch_xla.distributed.xla_multiprocessing as xmp
import torch_xla.utils.utils as xu
from torchvision import datasets, transforms


class MNIST(nn.Module):

  def __init__(self):
    super(MNIST, self).__init__()
    self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
    self.bn1 = nn.BatchNorm2d(10)
    self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
    self.bn2 = nn.BatchNorm2d(20)
    self.fc1 = nn.Linear(320, 50)
    self.fc2 = nn.Linear(50, 10)

  def forward(self, x):
    x = F.relu(F.max_pool2d(self.conv1(x), 2))
    x = self.bn1(x)
    x = F.relu(F.max_pool2d(self.conv2(x), 2))
    x = self.bn2(x)
    x = torch.flatten(x, 1)
    x = F.relu(self.fc1(x))
    x = self.fc2(x)
    return F.log_softmax(x, dim=1)


def train_mnist():
  torch.manual_seed(1)
  
  # Get and shard dataset into dataloaders
  norm = transforms.Normalize((0.1307,), (0.3081,))
  train_dataset = datasets.MNIST(
      os.path.join(FLAGS['datadir'], str(xm.get_ordinal())),
      train=True,
      download=True,
      transform=transforms.Compose(
          [transforms.ToTensor(), norm]))
  test_dataset = datasets.MNIST(
      os.path.join(FLAGS['datadir'], str(xm.get_ordinal())),
      train=False,
      download=True,
      transform=transforms.Compose(
          [transforms.ToTensor(), norm]))
  train_sampler = torch.utils.data.distributed.DistributedSampler(
    train_dataset,
    num_replicas=xm.xrt_world_size(),
    rank=xm.get_ordinal(),
    shuffle=True)
  train_loader = torch.utils.data.DataLoader(
      train_dataset,
      batch_size=FLAGS['batch_size'],
      sampler=train_sampler,
      num_workers=FLAGS['num_workers'],
      drop_last=True)
  test_loader = torch.utils.data.DataLoader(
      test_dataset,
      batch_size=FLAGS['batch_size'],
      shuffle=False,
      num_workers=FLAGS['num_workers'],
      drop_last=True)

  # Scale learning rate to world size
  lr = FLAGS['learning_rate'] * xm.xrt_world_size()

  # Get loss function, optimizer, and model
  device = xm.xla_device()
  print("Device : ", device)
  model = MNIST().to(device)
  optimizer = optim.SGD(model.parameters(), lr=lr, momentum=FLAGS['momentum'])
  loss_fn = nn.NLLLoss()

  def train_loop_fn(loader):
    tracker = xm.RateTracker()
    model.train()
    for x, (data, target) in enumerate(loader):
      optimizer.zero_grad()
      output = model(data)
      loss = loss_fn(output, target)
      loss.backward()
      xm.optimizer_step(optimizer)
      tracker.add(FLAGS['batch_size'])
      #if x % FLAGS['log_steps'] == 0:
       # print('[xla:{}]({}) Loss={:.5f} Rate={:.2f} GlobalRate={:.2f} Time={}'.format(
       #     xm.get_ordinal(), x, loss.item(), tracker.rate(),
      #      tracker.global_rate(), time.asctime()), flush=True)

  def test_loop_fn(loader):
    total_samples = 0
    correct = 0
    model.eval()
    data, pred, target = None, None, None
    for data, target in loader:
      output = model(data)
      pred = output.max(1, keepdim=True)[1]
      correct += pred.eq(target.view_as(pred)).sum().item()
      total_samples += data.size()[0]

    accuracy = 100.0 * correct / total_samples
    #print('[xla:{}] Accuracy={:.2f}%'.format(
       # xm.get_ordinal(), accuracy), flush=True)
    return accuracy, data, pred, target

  # Train and eval loops
  accuracy = 0.0
  data, pred, target = None, None, None
  for epoch in range(1, FLAGS['num_epochs'] + 1):
    begin = time.time()
    para_loader = pl.ParallelLoader(train_loader, [device])
    train_loop_fn(para_loader.per_device_loader(device))
    xm.master_print("Finished training epoch {0} in {1} sec".format(epoch, time.time()-begin))

    #para_loader = pl.ParallelLoader(test_loader, [device])
    #accuracy, data, pred, target  = test_loop_fn(para_loader.per_device_loader(device))
    #if FLAGS['metrics_debug']:
     # xm.master_print(met.metrics_report(), flush=True)

  return accuracy, data, pred, target


# Start training processes
def _mp_fn(rank, flags):
  global FLAGS
  FLAGS = flags
  torch.set_default_tensor_type('torch.FloatTensor')
  accuracy, data, pred, target = train_mnist()
  #if rank == 0:
    # Retrieve tensors that are on TPU core 0 and plot.
    #plot_results(data.cpu(), pred.cpu(), target.cpu())

xmp.spawn(_mp_fn, args=(FLAGS,), nprocs=FLAGS['num_cores'], start_method='fork')

#train_mnist()

Moreover, if I call the train_mnist function directly instead of using xmp.spawn, training works, but it takes around 13 seconds, which is slower than in Colab (around 3 seconds), so I suspect something is wrong.
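Concretely, these are the two launch paths I am comparing (both essentially as they appear at the end of the script above):

# Multi-core launch via fork -- this is the one that dies with SIGSEGV:
xmp.spawn(_mp_fn, args=(FLAGS,), nprocs=FLAGS['num_cores'], start_method='fork')

# Single-device launch -- this one runs, but takes around 13 s instead of around 3 s in Colab:
accuracy, data, pred, target = train_mnist()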
The only differences from the Colab version are:

  • update = threading.Thread(target=update_server_xrt) is commented out
  • I use my own TPU_ADDRESS instead of the Colab TPU IP

I used the custom GCP image that comes with PyTorch/XLA preinstalled. Should I install something else? I think the code itself is fine, since it works in Colab.
I am using the torch_xla nightly build.
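As a quick sanity check of the environment, this is what I can run inside the torch-xla-nightly env from the traceback (just torch.__version__ and xm.xla_device(), nothing specific to my script):

import torch
import torch_xla.core.xla_model as xm

print(torch.__version__)   # wheel version installed in this environment
print(xm.xla_device())     # prints an xla:* device when the TPU is reachable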

Thank you for your advice :slight_smile:

Hi, sorry for the late reply! Just wanted to let you know that for all PyTorch/XLA (or PyTorch on TPU) questions, please open an issue at https://github.com/pytorch/xla.
Thanks!