Replacement of np.nditer for torch

Given a tensor, an element is interior if it has both an upper and a lower neighbor in each direction. For instance, for the tensor

t = np.arange(12).reshape(3,4).astype(float)
t
> array([[ 0.,  1.,  2.,  3.],
       [ 4.,  5.,  6.,  7.],
       [ 8.,  9., 10., 11.]])

5 and 6 are the interior elements; 5 has the neighbors [1, 4, 9, 6].

By averaging a tensor, we mean that each interior element is replaced by the average of its squared neighbors. In the example above, 5 and 6 get new values; for instance, 5 <--- np.mean([1**2, 4**2, 9**2, 6**2]).
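
As a quick sanity check of the definition (plain NumPy, nothing beyond the example above):

import numpy as np

# interior element 5 has the axis-neighbors 1, 4, 9, 6
new_5 = np.mean([1**2, 4**2, 9**2, 6**2])  # (1 + 16 + 81 + 36) / 4
print(new_5)  # 33.5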

For testing purposes, here is another 3-d tensor, whose interior elements are 16 and 19:

t1 = np.arange(36).reshape(3,4,3).astype(float)
t1
> array([[[ 0.,  1.,  2.],
        [ 3.,  4.,  5.],
        [ 6.,  7.,  8.],
        [ 9., 10., 11.]],

       [[12., 13., 14.],
        [15., 16., 17.],
        [18., 19., 20.],
        [21., 22., 23.]],

       [[24., 25., 26.],
        [27., 28., 29.],
        [30., 31., 32.],
        [33., 34., 35.]]])

Here is a NumPy version of such a function that averages a tensor of any dimension.

def averaging(t):
    t_shape = np.array(t.shape)
    t_dim = len(t.shape)
    t_new = t.astype(float).copy()

    it = np.nditer(t, flags=['multi_index'])
    while not it.finished:
        ind = it.multi_index
        ind_np = np.array(ind)
        if np.min(ind_np) > 0 and np.min(t_shape - ind_np) > 1:  # ind is not on the boundary
            tmp = 0
            for i in range(t_dim):  # i-th direction
                ind_np_up = ind_np.copy()
                ind_np_up[i] += 1  # upper neighbor index
                ind_np_dn = ind_np.copy()
                ind_np_dn[i] -= 1  # lower neighbor index
                tmp += t[tuple(ind_np_up)]**2 + t[tuple(ind_np_dn)]**2  # add sum of squares
            t_new[ind] = tmp / t_dim / 2.  # average and fill in the new value
        it.iternext()

    return t_new

Here are two test results:

> averaging(t)
> array([[ 0. ,  1. ,  2. ,  3. ],
       [ 4. , 33.5, 44.5,  7. ],
       [ 8. ,  9. , 10. , 11. ]])
> averaging(t1)
> array([[[  0.        ,   1.        ,   2.        ],
        [  3.        ,   4.        ,   5.        ],
        [  6.        ,   7.        ,   8.        ],
        [  9.        ,  10.        ,  11.        ]],

       [[ 12.        ,  13.        ,  14.        ],
        [ 15.        , 307.33333333,  17.        ],
        [ 18.        , 412.33333333,  20.        ],
        [ 21.        ,  22.        ,  23.        ]],

       [[ 24.        ,  25.        ,  26.        ],
        [ 27.        ,  28.        ,  29.        ],
        [ 30.        ,  31.        ,  32.        ],
        [ 33.        ,  34.        ,  35.        ]]])

How can I use torch to get the same function on tensors? Essentially, I want to know whether there is a replacement for np.nditer in torch.

If I understand correctly, I can do it as follows:

import numpy as np
import torch
from torch.nn.functional import conv2d

def averaging(t):
    assert len(t.shape) == 2

    # sum of squared axis-neighbors via a cross-shaped convolution kernel
    t_tensor = torch.from_numpy(t)[None, None, ...].float()
    kernel = torch.tensor([[0, 1, 0],
                           [1, 0, 1],
                           [0, 1, 0]])[None, None, ...].float()
    t_tensor = (conv2d(t_tensor * t_tensor, kernel, padding=1) / 4).squeeze().numpy()

    # keep boundary values, overwrite only the interior
    new_t = t.copy()
    new_t[1:-1, 1:-1] = t_tensor[1:-1, 1:-1]
    return new_t

> t = np.arange(12).reshape(3,4).astype(float)
> print(averaging(t))

About a replacement for np.nditer, I don't know.

I guess it only does the job for 2-d tensors? For clarification, I've added an example of 3-d averaging. But thanks a lot; I had not thought of convolution.
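
For the 3-d case, the same convolution trick should carry over with conv3d and a cross-shaped 3x3x3 kernel. Here is a minimal sketch, not from the thread; the name averaging_3d and the kernel construction are my own:

import torch
from torch.nn.functional import conv3d

def averaging_3d(t):
    assert t.dim() == 3

    # cross-shaped kernel: a 1 at each axis-neighbor of the center, 0 elsewhere
    kernel = torch.zeros(3, 3, 3)
    kernel[0, 1, 1] = kernel[2, 1, 1] = 1.  # neighbors along dim 0
    kernel[1, 0, 1] = kernel[1, 2, 1] = 1.  # neighbors along dim 1
    kernel[1, 1, 0] = kernel[1, 1, 2] = 1.  # neighbors along dim 2

    squared = (t * t)[None, None, ...]  # shape (N, C, D, H, W)
    avg = (conv3d(squared, kernel[None, None, ...], padding=1) / 6).squeeze()

    out = t.clone()
    out[1:-1, 1:-1, 1:-1] = avg[1:-1, 1:-1, 1:-1]  # overwrite the interior only
    return out

Applied to the 3-d test tensor above, this reproduces 307.3333 and 412.3333 at the two interior positions.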

Here is the code without nditer, adapted from Stack Overflow.

# alternative to nditer that works for both tensors and ndarrays
def deep_iter(data, ix=tuple()):
    try:
        for i, element in enumerate(data):
            yield from deep_iter(element, ix + (i,))
    except TypeError:  # data is a scalar (0-d), stop recursing
        yield ix, data

# compute the average for a specific element
def calc_average(t, *ix):
    t_dim = len(t.shape)
    out = 0.
    for i in range(t_dim):
        ix_ = list(ix)
        ix_[i] += 1
        out += t[tuple(ix_)]**2  # upper neighbor
        ix_[i] -= 2
        out += t[tuple(ix_)]**2  # lower neighbor
    return out / (2 * t_dim)

# loop over the entire tensor
def averaging(t):
    new_values = {}
    t_dim = len(t.shape)
    for ix, value in deep_iter(t):
        if all(0 < ix[j] < t.shape[j] - 1 for j in range(t_dim)):
            new_values[ix] = calc_average(t, *ix)

    for ix, new_value in new_values.items():
        t[ix] = new_value

    return t
#test
t1 = torch.arange(36, dtype= torch.float32).reshape(3,4,3)
t1
> tensor([[[ 0.,  1.,  2.],
         [ 3.,  4.,  5.],
         [ 6.,  7.,  8.],
         [ 9., 10., 11.]],

        [[12., 13., 14.],
         [15., 16., 17.],
         [18., 19., 20.],
         [21., 22., 23.]],

        [[24., 25., 26.],
         [27., 28., 29.],
         [30., 31., 32.],
         [33., 34., 35.]]])
> averaging(t1)
>  tensor([[[  0.0000,   1.0000,   2.0000],
         [  3.0000,   4.0000,   5.0000],
         [  6.0000,   7.0000,   8.0000],
         [  9.0000,  10.0000,  11.0000]],

        [[ 12.0000,  13.0000,  14.0000],
         [ 15.0000, 307.3333,  17.0000],
         [ 18.0000, 412.3333,  20.0000],
         [ 21.0000,  22.0000,  23.0000]],

        [[ 24.0000,  25.0000,  26.0000],
         [ 27.0000,  28.0000,  29.0000],
         [ 30.0000,  31.0000,  32.0000],
         [ 33.0000,  34.0000,  35.0000]]])
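
As an aside, for this particular operation neither nditer nor a Python-level loop is strictly needed: summing the squared shifted slices gives a fully vectorized version that handles both ndarrays and tensors of any dimension. A sketch under that assumption (averaging_vec is my own name, and it expects floating-point input like the examples above):

def averaging_vec(t):
    # for each axis, add the squares of the two shifted "neighbor" slices,
    # then divide by 2 * ndim; boundary values are left untouched
    d = t.dim() if hasattr(t, 'dim') else t.ndim
    interior = tuple(slice(1, -1) for _ in range(d))
    acc = 0.
    for i in range(d):
        up = tuple(slice(2, None) if j == i else slice(1, -1) for j in range(d))
        dn = tuple(slice(None, -2) if j == i else slice(1, -1) for j in range(d))
        acc = acc + t[up]**2 + t[dn]**2
    out = t.clone() if hasattr(t, 'clone') else t.copy()
    out[interior] = acc / (2 * d)
    return out

# averaging_vec(t1) matches the nditer-free result above
# (307.3333 and 412.3333 in the interior).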
