Learning about tensors
parent 872947e867
commit f9fbfae1c1
@@ -3,4 +3,5 @@ torch
 --index-url https://download.pytorch.org/whl/cu117
 torchvision
 --index-url https://download.pytorch.org/whl/cu117
 torchaudio
+numpy

tensors.ipynb (Normal file, 312 lines)
@@ -0,0 +1,312 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import numpy as np\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Tensors are specialized data structures that are very similar to arrays and matrices. In PyTorch,\n",
    "we use tensors to encode the inputs and outputs of a model, as well as the model’s parameters.\n",
    "Tensors are similar to NumPy’s ndarrays, except that tensors can run on GPUs or other hardware accelerators. In fact, tensors and NumPy arrays can often share the same underlying memory, eliminating the need to copy data."
   ]
  },
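  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A minimal sketch of that memory-sharing claim: a tensor created with `torch.from_numpy` views the same buffer as the source array, so an in-place change to the array is visible through the tensor."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: torch.from_numpy shares memory with the source ndarray.\n",
    "a = np.ones(3)\n",
    "t = torch.from_numpy(a)\n",
    "a += 1  # modify the NumPy array in place\n",
    "print(t)  # the tensor reflects the change: tensor([2., 2., 2.], dtype=torch.float64)\n"
   ]
  },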
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Ones Tensor: \n",
      " tensor([[1., 1.],\n",
      "        [1., 1.]]) \n",
      "\n",
      "Random Tensor: \n",
      " tensor([[0.3247, 0.4553],\n",
      "        [0.8209, 0.3013]]) \n",
      "\n",
      "Random Tensor: \n",
      " tensor([[0.5109, 0.3653, 0.7545],\n",
      "        [0.7229, 0.7191, 0.2993]])\n",
      "\n",
      "Ones Tensor: \n",
      " tensor([[1., 1., 1.],\n",
      "        [1., 1., 1.]])\n",
      "\n",
      "Zeros Tensor: \n",
      " tensor([[0., 0., 0.],\n",
      "        [0., 0., 0.]])\n"
     ]
    }
   ],
   "source": [
    "# Initializing a tensor\n",
    "\n",
    "# Directly from data\n",
    "data = [[1, 2], [3, 4]]\n",
    "x_data = torch.tensor(data, dtype=torch.float)\n",
    "\n",
    "# From a NumPy array\n",
    "np_array = np.array(data)\n",
    "x_np = torch.from_numpy(np_array)\n",
    "\n",
    "# From another tensor\n",
    "x_ones = torch.ones_like(x_data)  # retains the properties (shape, dtype) of x_data\n",
    "print(f\"Ones Tensor: \\n {x_ones} \\n\")\n",
    "x_rand = torch.rand_like(x_data, dtype=torch.float)\n",
    "print(f\"Random Tensor: \\n {x_rand} \\n\")\n",
    "\n",
    "# With random or constant values, from a shape tuple\n",
    "shape = (2, 3)\n",
    "rand_tensor = torch.rand(shape)\n",
    "ones_tensor = torch.ones(shape)\n",
    "zeros_tensor = torch.zeros(shape)\n",
    "\n",
    "print(f\"Random Tensor: \\n {rand_tensor}\\n\")\n",
    "print(f\"Ones Tensor: \\n {ones_tensor}\\n\")\n",
    "print(f\"Zeros Tensor: \\n {zeros_tensor}\")\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Tensor attributes describe their shape, datatype, and the device on which they are stored."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Shape of tensor: torch.Size([3, 4])\n",
      "Datatype of tensor: torch.float32\n",
      "Device tensor is stored on: cpu\n"
     ]
    }
   ],
   "source": [
    "tensor = torch.rand(3, 4)\n",
    "\n",
    "print(f\"Shape of tensor: {tensor.shape}\")\n",
    "print(f\"Datatype of tensor: {tensor.dtype}\")\n",
    "print(f\"Device tensor is stored on: {tensor.device}\")\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "PyTorch supports over 100 tensor operations, including arithmetic, linear algebra, matrix manipulation (transposing, indexing, slicing), sampling, and more.\n",
    "Each of these operations can be run on the GPU (typically at higher speeds than on a CPU). If you’re using Colab, allocate a GPU by going to Runtime > Change runtime type > GPU.\n",
    "By default, tensors are created on the CPU. We need to move tensors to the GPU explicitly using the `.to` method (after checking for GPU availability). Keep in mind that copying large tensors across devices can be expensive in terms of time and memory!\n",
    "\n",
    "Example: we move our tensor to the GPU if one is available.\n",
    "\n",
    "```python\n",
    "if torch.cuda.is_available():\n",
    "    tensor = tensor.to(\"cuda\")\n",
    "```\n"
   ]
  },
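  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A common device-agnostic variant of the snippet above, as a minimal sketch: resolve the device once, then create tensors on it or move them to it."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: pick the device once and reuse it everywhere.\n",
    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
    "t = torch.ones(2, 2, device=device)  # created directly on the device\n",
    "t = t.to(device)                     # no-op if already on that device\n",
    "print(t.device)\n"
   ]
  },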
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "First row: tensor([1., 1., 1., 1.])\n",
      "First column: tensor([1., 1., 1., 1.])\n",
      "Last column: tensor([1., 1., 1., 1.])\n",
      "tensor([[1., 0., 1., 1.],\n",
      "        [1., 0., 1., 1.],\n",
      "        [1., 0., 1., 1.],\n",
      "        [1., 0., 1., 1.]])\n"
     ]
    }
   ],
   "source": [
    "# Indexing and slicing tensors\n",
    "tensor = torch.ones(4, 4)\n",
    "print(f\"First row: {tensor[0]}\")\n",
    "print(f\"First column: {tensor[:, 0]}\")\n",
    "print(f\"Last column: {tensor[..., -1]}\")\n",
    "tensor[:, 1] = 0  # zero out the second column\n",
    "print(tensor)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.],\n",
      "        [1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.],\n",
      "        [1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.],\n",
      "        [1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.]])\n"
     ]
    }
   ],
   "source": [
    "# Joining tensors: dim=1 concatenates along columns\n",
    "t1 = torch.cat([tensor, tensor, tensor], dim=1)\n",
    "print(t1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1., 0., 1., 1.],\n",
       "        [1., 0., 1., 1.],\n",
       "        [1., 0., 1., 1.],\n",
       "        [1., 0., 1., 1.]])"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Arithmetic operations\n",
    "\n",
    "# Matrix multiplication between two tensors; y1, y2, and y3 have the same value.\n",
    "# tensor.T returns the transpose of a tensor.\n",
    "y1 = tensor @ tensor.T\n",
    "y2 = tensor.matmul(tensor.T)\n",
    "\n",
    "y3 = torch.rand_like(y1)\n",
    "torch.matmul(tensor, tensor.T, out=y3)\n",
    "\n",
    "# Element-wise product; z1, z2, and z3 have the same value.\n",
    "z1 = tensor * tensor\n",
    "z2 = tensor.mul(tensor)\n",
    "\n",
    "z3 = torch.rand_like(tensor)\n",
    "torch.mul(tensor, tensor, out=z3)\n"
   ]
  },
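  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick sanity check, as a minimal sketch: the three spellings of each operation above compute the same result."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Both lines should print: True True\n",
    "print(torch.equal(y1, y2), torch.equal(y1, y3))\n",
    "print(torch.equal(z1, z2), torch.equal(z1, z3))\n"
   ]
  },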
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "12.0 <class 'float'>\n"
     ]
    }
   ],
   "source": [
    "# Get the Python value from a single-element tensor\n",
    "agg = tensor.sum()\n",
    "agg_item = agg.item()\n",
    "print(agg_item, type(agg_item))"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Operations that store the result in the operand are called in-place. They are denoted by a `_` suffix, e.g. `tensor.add_(5)` modifies `tensor` itself."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[1., 0., 1., 1.],\n",
      "        [1., 0., 1., 1.],\n",
      "        [1., 0., 1., 1.],\n",
      "        [1., 0., 1., 1.]])\n",
      "\n",
      "tensor([[6., 5., 6., 6.],\n",
      "        [6., 5., 6., 6.],\n",
      "        [6., 5., 6., 6.],\n",
      "        [6., 5., 6., 6.]])\n"
     ]
    }
   ],
   "source": [
    "# In-place operations\n",
    "print(f\"{tensor}\\n\")\n",
    "tensor.add_(5)\n",
    "print(tensor)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.10"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "f4b5116e1c1eac4da82e4f519e811a9a213a412fad4fdb2c86d0bd3e5d22b3b4"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}