Tensor


Features

  • Similar to NumPy’s ndarrays, except that tensors can run on GPUs or other hardware accelerators.
  • Tensors and NumPy arrays can often share the same underlying memory, eliminating the need to copy data.
  • Optimized for automatic differentiation. (All three properties are illustrated in the sketch below.)
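
A quick illustration of the three points above (a minimal sketch; a CUDA device may or may not be available):

import torch

# NumPy-like creation; the tensor can be moved to a GPU or other accelerator
x = torch.ones(2, 2)
if torch.cuda.is_available():
    x = x.to('cuda')

# A CPU tensor and its NumPy view share the same memory (see "Bridge with NumPy" below)
n = torch.ones(3).numpy()

# Tensors can record operations for automatic differentiation (see "Autograd" below)
w = torch.ones(2, 2, requires_grad=True)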

Tensor Initialization

  • Directly from data. Use torch.tensor()
  • From a NumPy array. Use torch.from_numpy()
  • From another tensor. Use torch.randn_like() or torch.ones_like()
  • With random or constant values. Use torch.rand(), torch.ones(), or torch.zeros() with a shape tuple, as in the sketch below.
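
A minimal sketch covering all four routes (data and shapes chosen arbitrarily):

import torch
import numpy as np

# Directly from data
x_data = torch.tensor([[1, 2], [3, 4]])

# From a NumPy array
np_array = np.array([[1, 2], [3, 4]])
x_np = torch.from_numpy(np_array)

# From another tensor: retains shape and, unless overridden, dtype
x_ones = torch.ones_like(x_data)
x_rand = torch.randn_like(x_data, dtype=torch.float)  # randn_like needs a floating dtype here

# With random or constant values, from a shape tuple
shape = (2, 3)
rand_tensor = torch.rand(shape)
ones_tensor = torch.ones(shape)
zeros_tensor = torch.zeros(shape)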

Tensor Attributes

import torch

tensor = torch.rand(3, 4)

print(f"Shape of tensor: {tensor.shape}")
print(f"Datatype of tensor: {tensor.dtype}")
print(f"Device tensor is stored on: {tensor.device}")

Operations

# We move our tensor to the GPU if available
if torch.cuda.is_available():
    tensor = tensor.to('cuda')

# Standard numpy-like indexing and slicing
tensor = torch.ones(4, 4)
print(f"First row: {tensor[0]}")
print(f"First column: {tensor[:, 0]}")
print(f"Last column: {tensor[..., -1]}")
tensor[:, 1] = 0  # zero out the second column

# Joining tensors
t1 = torch.cat([tensor, tensor, tensor], dim=1)
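
torch.stack is a closely related joining op: it concatenates along a new dimension instead of an existing one. A minimal sketch of the contrast:

t_stack = torch.stack([tensor, tensor, tensor], dim=0)
print(t1.shape)       # torch.Size([4, 12]): cat grows an existing dimension
print(t_stack.shape)  # torch.Size([3, 4, 4]): stack adds a new dimension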

# Arithmetic operations
# This computes the matrix multiplication between two tensors. y1, y2, y3 will have the same value
y1 = tensor @ tensor.T
y2 = tensor.matmul(tensor.T)

y3 = torch.rand_like(y1)  # same shape, dtype, and device as y1
torch.matmul(tensor, tensor.T, out=y3)  # `out=` writes the result into an existing tensor

# This computes the element-wise product. z1, z2, z3 will have the same value
z1 = tensor * tensor
z2 = tensor.mul(tensor)

z3 = torch.rand_like(tensor)
torch.mul(tensor, tensor, out=z3)

# Single-element tensors: .item() converts a one-element tensor to a Python number
agg = tensor.sum()
agg_item = agg.item()
print(agg_item, type(agg_item))

# In-place operations (suffixed with _) save memory, but they discard
# history immediately and are therefore discouraged when computing derivatives
print(tensor, "\n")
tensor.add_(5)
print(tensor)

Bridge with NumPy

t = torch.ones(5)
print(f"t: {t}")
n = t.numpy()
print(f"n: {n}")

Autograd

# Create a tensor that tracks gradient computation
x = torch.ones(5, requires_grad=True)
print(x)

# Do an operation on the tensor:
y = x + 3
print(y)

# y was created as a result of an operation, so it has a grad_fn.
print(y.grad_fn)

# Do more operations on y
z = y * y * 2
out = z.mean()

print(z, out)
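
Since out is a scalar, out.backward() needs no argument; a minimal sketch of backpropagating and reading the gradient:

out.backward()
# d(out)/dx_i = (1/5) * 4 * y_i = 4 * (x_i + 3) / 5 = 3.2 at x_i = 1
print(x.grad)  # tensor([3.2000, 3.2000, 3.2000, 3.2000, 3.2000])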

# .requires_grad_(...) changes an existing Tensor's requires_grad flag in-place.
# The input flag defaults to True if not given.
a = torch.randn(2, 2)
a = ((a * 3) / (a - 1))
print(a.requires_grad)
a.requires_grad_(True)
print(a.requires_grad)
b = (a * a).sum()
print(b.grad_fn)
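
To stop autograd from tracking history, wrap the code in a torch.no_grad() block or use .detach(); a minimal sketch continuing the example above:

print((a ** 2).requires_grad)  # True: a requires grad

with torch.no_grad():
    print((a ** 2).requires_grad)  # False: ops inside no_grad are not tracked

c = a.detach()  # same values, detached from the computation graph
print(c.requires_grad)  # False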