Check this out. In summary: use torch.stack if you want to respect the original nesting of the lists (i.e. end up with a tensor with additional indices and dimensions) and create it incrementally. There might be better ways, but this works for me.
# %%
import torch
# stack vs cat
# cat "extends" a list in the given dimension e.g. adds more rows or columns
x = torch.randn(2, 3)
print(f'{x.size()}')
# add more rows (thus increasing the dimensionality of the column space from 2 to 6)
xnew_from_cat = torch.cat((x, x, x), 0)
print(f'{xnew_from_cat.size()}')
# add more columns (thus increasing the dimensionality of the row space from 3 to 9)
xnew_from_cat = torch.cat((x, x, x), 1)
print(f'{xnew_from_cat.size()}')
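# note that cat keeps the number of dimensions the same (still a 2-d tensor);
# it only grows the chosen dim, and all other dims must match across inputs
print(xnew_from_cat.dim())  # 2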
print()
# stack serves the same role as append does for lists, i.e. it doesn't change the
# original vector space but instead adds a new index to the new tensor, so you
# retain the ability to get back the original tensor by indexing along the new dimension
xnew_from_stack = torch.stack((x, x, x, x), 0)
print(f'{xnew_from_stack.size()}')
xnew_from_stack = torch.stack((x, x, x, x), 1)
print(f'{xnew_from_stack.size()}')
xnew_from_stack = torch.stack((x, x, x, x), 2)
print(f'{xnew_from_stack.size()}')
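# in general: stacking n tensors of size [2, 3] along dim d inserts a new axis of
# size n at position d: dim 0 -> [4, 2, 3], dim 1 -> [2, 4, 3], dim 2 -> [2, 3, 4]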
# by default it stacks at the front (dim 0)
xnew_from_stack = torch.stack((x, x, x, x))
print(f'{xnew_from_stack.size()}')
print('I like to think of xnew_from_stack as a "tensor list" that you can pop from the front')
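# sanity check: indexing along the new front dimension recovers the original tensor
print(torch.equal(xnew_from_stack[0], x))  # True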
print()
lst = []
print(f'{x.size()}')
for i in range(10):
    # use an out-of-place add here: with x += i every entry of lst would alias
    # the same tensor object, and the stack would be 10 copies of the final x
    x = x + i  # say we do something with x at iteration i
    lst.append(x)
lstt = torch.stack(lst)
print(lstt.size())
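# equivalently, stack is just cat after unsqueezing each tensor along the new dim
lstt_via_cat = torch.cat([t.unsqueeze(0) for t in lst], dim=0)
print(torch.equal(lstt, lstt_via_cat))  # True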
print()
# nested version: respect the original nesting of the lists by stacking each
# inner list first, then stacking the results, adding one new index per level
lst = []
print(f'{x.size()}')
for i in range(10):
    inner = []
    for j in range(11):
        y = x + i + j  # say we do something with x at iteration (i, j)
        inner.append(y)
    lst.append(torch.stack(inner))  # each entry has size [11, 2, 3]
lstt = torch.stack(lst)  # size [10, 11, 2, 3]
print(lstt.size())
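# indexing with one index per nesting level recovers the tensor from iteration (i, j)
print(torch.equal(lstt[2, 5], x + 2 + 5))  # True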