How do I understand PyTorch anomaly detection?

I just started using PyTorch’s anomaly detection (see the Automatic differentiation package — torch.autograd — in the PyTorch master documentation) and was hoping to get some help in reading the output.

Does the error message indicate that the derivative of the line below results in x being NaN or Inf?

return self.mu(x), torch.log(torch.exp(self.sigma(x))+1)

Error messages

Warning: NaN or Inf found in input tensor.

sys:1: RuntimeWarning: Traceback of forward call that caused the error:
File “/home/kong/anaconda3/envs/ffp/lib/python3.5/runpy.py”, line 193, in _run_module_as_main
"__main__", mod_spec)
File “/home/kong/anaconda3/envs/ffp/lib/python3.5/runpy.py”, line 85, in _run_code
exec(code, run_globals)
File “/home/kong/.local/lib/python3.5/site-packages/ipykernel_launcher.py”, line 16, in
app.launch_new_instance()
File “/home/kong/.local/lib/python3.5/site-packages/traitlets/config/application.py”, line 658, in launch_instance
app.start()
File “/home/kong/.local/lib/python3.5/site-packages/ipykernel/kernelapp.py”, line 563, in start
self.io_loop.start()
File “/home/kong/.local/lib/python3.5/site-packages/tornado/platform/asyncio.py”, line 148, in start
self.asyncio_loop.run_forever()
File “/home/kong/anaconda3/envs/ffp/lib/python3.5/asyncio/base_events.py”, line 421, in run_forever
self._run_once()
File “/home/kong/anaconda3/envs/ffp/lib/python3.5/asyncio/base_events.py”, line 1425, in _run_once
handle._run()
File “/home/kong/anaconda3/envs/ffp/lib/python3.5/asyncio/events.py”, line 127, in _run
self._callback(*self._args)
File “/home/kong/.local/lib/python3.5/site-packages/tornado/ioloop.py”, line 690, in
lambda f: self._run_callback(functools.partial(callback, future))
File “/home/kong/.local/lib/python3.5/site-packages/tornado/ioloop.py”, line 743, in _run_callback
ret = callback()
File “/home/kong/.local/lib/python3.5/site-packages/tornado/gen.py”, line 787, in inner
self.run()
File “/home/kong/.local/lib/python3.5/site-packages/tornado/gen.py”, line 748, in run
yielded = self.gen.send(value)
File “/home/kong/.local/lib/python3.5/site-packages/ipykernel/kernelbase.py”, line 365, in process_one
yield gen.maybe_future(dispatch(*args))
File “/home/kong/.local/lib/python3.5/site-packages/tornado/gen.py”, line 209, in wrapper
yielded = next(result)
File “/home/kong/.local/lib/python3.5/site-packages/ipykernel/kernelbase.py”, line 272, in dispatch_shell
yield gen.maybe_future(handler(stream, idents, msg))
File “/home/kong/.local/lib/python3.5/site-packages/tornado/gen.py”, line 209, in wrapper
yielded = next(result)
File “/home/kong/.local/lib/python3.5/site-packages/ipykernel/kernelbase.py”, line 542, in execute_request
user_expressions, allow_stdin,
File “/home/kong/.local/lib/python3.5/site-packages/tornado/gen.py”, line 209, in wrapper
yielded = next(result)
File “/home/kong/.local/lib/python3.5/site-packages/ipykernel/ipkernel.py”, line 294, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File “/home/kong/.local/lib/python3.5/site-packages/ipykernel/zmqshell.py”, line 536, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File “/home/kong/.local/lib/python3.5/site-packages/IPython/core/interactiveshell.py”, line 2855, in run_cell
raw_cell, store_history, silent, shell_futures)
File “/home/kong/.local/lib/python3.5/site-packages/IPython/core/interactiveshell.py”, line 2881, in _run_cell
return runner(coro)
File “/home/kong/.local/lib/python3.5/site-packages/IPython/core/async_helpers.py”, line 68, in pseudo_sync_runner
coro.send(None)
File “/home/kong/.local/lib/python3.5/site-packages/IPython/core/interactiveshell.py”, line 3058, in run_cell_async
interactivity=interactivity, compiler=compiler, result=result)
File “/home/kong/.local/lib/python3.5/site-packages/IPython/core/interactiveshell.py”, line 3249, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File “/home/kong/.local/lib/python3.5/site-packages/IPython/core/interactiveshell.py”, line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File “”, line 114, in
prior_mu, prior_sigma, posterior_mu, posterior_sigma, x_pred = net(x_true)
File “/home/kong/anaconda3/envs/ffp/lib/python3.5/site-packages/torch/nn/modules/module.py”, line 489, in __call__
result = self.forward(*input, **kwargs)
File “/home/kong/Desktop/AST/scripts/experiments_block/vrnn_gru/model.py”, line 363, in forward
f_posterior_mu, f_posterior_sigma = self.f_posterior(torch.cat((state_convlstm,self.CNN(x)),1))
File “/home/kong/anaconda3/envs/ffp/lib/python3.5/site-packages/torch/nn/modules/module.py”, line 489, in __call__
result = self.forward(*input, **kwargs)
File “/home/kong/Desktop/AST/scripts/experiments_block/vrnn_gru/model.py”, line 179, in forward
return self.mu(x), torch.log(torch.exp(self.sigma(x))+1)
File “/home/kong/anaconda3/envs/ffp/lib/python3.5/site-packages/torch/nn/modules/module.py”, line 489, in __call__
result = self.forward(*input, **kwargs)
File “/home/kong/anaconda3/envs/ffp/lib/python3.5/site-packages/torch/nn/modules/conv.py”, line 320, in forward
self.padding, self.dilation, self.groups)

-
RuntimeErrorTraceback (most recent call last)
<ipython-input-1-a59af95c119f> in <module>
    126 
    127             optimizer.zero_grad()
--> 128             loss.backward()
    129             optimizer.step()
    130 

~/anaconda3/envs/ffp/lib/python3.5/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
    100                 products. Defaults to ``False``.
    101         """
--> 102         torch.autograd.backward(self, gradient, retain_graph, create_graph)
    103 
    104     def register_hook(self, hook):

~/anaconda3/envs/ffp/lib/python3.5/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
     88     Variable._execution_engine.run_backward(
     89         tensors, grad_tensors, retain_graph, create_graph,
---> 90         allow_unreachable=True)  # allow_unreachable flag
     91 
     92 

RuntimeError: Function 'CudnnConvolutionBackward' returned nan values in its 0th output.

Hi,

This means that the gradients computed by the convolution at this line (self.mu, I guess?) contain NaN values in the gradient for its 0th input (x in this case).
It’s not that x itself is NaN or Inf, but that the gradient flowing back into x contains NaN.