# Regression: Torch as a Tensor library

$H(x) = Wx+b$

$\begin{gathered} cost=\frac{1}{m} \sum_{i=1}^{m}\left(H\left(x^{(i)}\right)-y^{(i)}\right)^{2} \\ H(x)=W x+b \end{gathered}$

$\operatorname{cost}(W, b)=\frac{1}{m} \sum_{i=1}^{m}\left(H\left(x^{(i)}\right)-y^{(i)}\right)^{2}$

$\min_{W, b} \operatorname{cost}(W, b)$

| x | y |
|---|---|
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |

import torch
import numpy as np

def cost(W: torch.Tensor, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """Mean-squared-error cost of the linear hypothesis H(x) = W * x.

    x and y are 1-D tensors of m samples. W is either a per-sample vector
    of shape (m,) or an (m, n) matrix holding n candidate weights (one
    column per candidate); the unsqueezes turn x and y into (m, 1) columns
    so they broadcast against either form.

    BUG FIX: the original body stopped after the unsqueezes and implicitly
    returned None — the actual cost (1/m) * sum_i (W*x_i - y_i)^2 was
    missing. The reduction is over the sample dimension (dim 0), yielding
    one cost per candidate column, which is the shape the plotting cell
    below relies on (800 candidates -> 800 costs).
    """
    x = x.unsqueeze(-1)
    y = y.unsqueeze(-1)
    return ((W * x - y) ** 2).mean(dim=0)

# Toy data: y = x exactly, so the optimal weight is W = 1.
x = torch.tensor([1., 2., 3.])
y = torch.tensor([1., 2., 3.])

# BUG FIX: the original printed cost(W, x, y) before W was ever assigned
# (NameError). Starting from W = 0 reproduces the annotated 4.6667 cost:
# mean of (0 - y_i)^2 = (1 + 4 + 9) / 3.
W = torch.zeros(3)
print(cost(W, x, y))  # cost 4.6667 at W = 0
W = torch.ones(3)
print(cost(W, x, y))  # cost 0 at the optimum W = 1
W = torch.full((3,), 2.0)  # float fill keeps the tensor float32 for mean()
print(cost(W, x, y))  # cost 4.6667 at W = 2
# What does cost(W) look like as a function of the weight W?
# NOTE(review): the next line is an IPython magic — valid only inside a
# notebook/IPython session; it is a syntax error in a plain .py file.
%matplotlib inline
import matplotlib.pyplot as plt
# 800 candidate weights in [-3, 5), stacked into a (3, 800) matrix — one
# row per data point — so it broadcasts against x/y inside cost().
rng = torch.arange(-3., 5., 0.01)
inp = torch.stack([rng]*3)

# Presumably cost() reduces over the sample dimension and returns one
# cost per candidate weight (shape (800,)) — TODO confirm; the plot below
# requires output to have the same length as rng.
output = cost(inp, x, y)
plt.plot(rng.numpy(), output.numpy())

$\operatorname{cost}(W)=\frac{1}{2 m} \sum_{i=1}^{m}\left(W x^{(i)}-y^{(i)}\right)^{2}$

$\frac{\partial }{\partial W}\mathrm{cost}(W) = \frac{1}{m} \sum_{i=1}^{m} x^{(i)} \left(W x^{(i)}-y^{(i)}\right)$

$\begin{gathered} W:=W-\alpha \frac{\partial}{\partial W} \operatorname{cost}(W) \\ W:=W-\alpha \frac{1}{m} \sum_{i=1}^{m}\left(W x^{(i)}-y^{(i)}\right) x^{(i)} \end{gathered}$
import torch
from torch.utils.data import Dataset
from torch.utils.tensorboard import SummaryWriter
import numpy as np
np.random.seed(42)
import matplotlib.pyplot as plt

class FakeDataset(Dataset):
    """Toy dataset of three (x, k*x + b) pairs with a random slope k.

    NOTE(review): b is a per-sample array (1, 2, 3), not a single scalar
    intercept — confirm this is intended; a model y = k*x + b with one
    scalar b cannot fit these targets exactly.
    """

    def __init__(self):
        super().__init__()  # modern zero-arg super()
        k = np.random.random()        # random slope in [0, 1)
        self.x = x = np.arange(1, 4)  # inputs 1, 2, 3
        b = np.arange(1, 4)           # per-sample offsets 1, 2, 3
        self.val = k * x + b
        # print("Guess: ", k, b)
        self.data = list(zip(x, k * x + b))

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)  # stray trailing semicolon removed

class Net(torch.nn.Module):
    """Linear model y = k * x + b with scalar learnable parameters."""

    def __init__(self):
        super().__init__()
        # BUG FIX: forward() reads self.k and self.b, but the original
        # __init__ never defined them, so the first call raised
        # AttributeError. Register them as trainable Parameters so
        # autograd (and the manual SGD step in __main__) can update them.
        self.k = torch.nn.Parameter(torch.randn(()))
        self.b = torch.nn.Parameter(torch.randn(()))

    def forward(self, input):
        """Return input * k + b."""
        return input * self.k + self.b

def loss_fn(out, target):
    """Squared-error loss between one prediction and its target."""
    diff = out - target
    return diff * diff

if __name__ == '__main__':
    dataset = FakeDataset()
    model = Net()
    tb = SummaryWriter(log_dir="runs")

    step = 1
    lr = 0.01
    EPOCHS = 1000
    # BUG FIX: the original loop computed the loss but never ran
    # backward(), never updated the parameters, and never logged anything,
    # so no training happened. Apply the manual SGD rule from the notes:
    # W := W - lr * d(cost)/dW.
    for epoch in range(1, EPOCHS + 1):  # was range(1, EPOCHS): one epoch short
        for x, val in dataset:
            out = model(x)
            loss = loss_fn(out, val)
            model.zero_grad()
            loss.backward()
            with torch.no_grad():
                for p in model.parameters():
                    p -= lr * p.grad
            tb.add_scalar("loss", loss.item(), step)  # make the writer useful
            step += 1
    tb.close()  # flush TensorBoard event files
    plt.show()  # NOTE(review): nothing is plotted in this cell — confirm intent