import torch
from torch import nn, optim
from d2l import *  # assumed to provide the helpers used below: synthetic_data, data_loader, train
# Few training examples and many inputs: a setup that overfits without regularization
num_train, num_test, num_inputs, batch_size = 20, 100, 200, 5
true_w, true_b = torch.ones(num_inputs) * 0.01, 0.05

train_data = synthetic_data(true_w, true_b, num_train)
train_iter = data_loader(train_data, batch_size)
test_data = synthetic_data(true_w, true_b, num_test)
test_iter = data_loader(test_data, batch_size)

w = torch.normal(0, 1, size=true_w.shape, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
def L2_penalty(lambd, w):
    return lambd * torch.sum(w ** 2) / 2

def linear_reg(w, b, X):
    return torch.matmul(X, w) + b

def net(X):
    return linear_reg(w, b, X)

def squared_loss_L2(y_hat, y):
    loss = (y_hat.view(y.shape) - y) ** 2 / 2 / len(y)
    # add the penalty once, after summing the per-example losses
    return loss.sum() + L2_penalty(0.5, w)
opt = optim.SGD([w, b], lr=0.01)
train(100, squared_loss_L2, opt, net, train_iter, test_iter)
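The listing assumes that `from d2l import *` supplies three helpers, `synthetic_data`, `data_loader`, and `train`; the published d2l package does not export them under exactly these names and signatures, so the following is a minimal, hypothetical sketch of what they could look like, matched to the calls above.

# Hypothetical stand-ins for the d2l-style helpers used above (assumption:
# the real utilities may differ); signatures match the calls in the listing.
import torch
from torch.utils import data

def synthetic_data(w, b, num_examples):
    # Generate y = Xw + b + Gaussian noise (noise std 0.01 is an assumption)
    X = torch.normal(0, 1, (num_examples, len(w)))
    y = torch.matmul(X, w) + b
    y += torch.normal(0, 0.01, y.shape)
    return X, y

def data_loader(dataset, batch_size, is_train=True):
    # Wrap the (features, labels) pair in a shuffled PyTorch DataLoader
    return data.DataLoader(data.TensorDataset(*dataset),
                           batch_size, shuffle=is_train)

def train(num_epochs, loss_fn, opt, net, train_iter, test_iter):
    # Plain minibatch SGD loop that reports train/test loss each epoch
    for epoch in range(num_epochs):
        for X, y in train_iter:
            opt.zero_grad()
            loss_fn(net(X), y).backward()
            opt.step()
        with torch.no_grad():
            train_l = sum(float(loss_fn(net(X), y)) for X, y in train_iter)
            test_l = sum(float(loss_fn(net(X), y)) for X, y in test_iter)
        print(f'epoch {epoch + 1}, train loss {train_l:.4f}, test loss {test_l:.4f}')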
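For comparison, the same penalty can be applied through PyTorch's built-in weight decay: `optim.SGD` accepts a per-parameter-group `weight_decay` option that adds lambd * w to the gradient of w, which is exactly the gradient of L2_penalty(lambd, w). A brief sketch reusing `w`, `b`, `net`, and the training utilities from above; `squared_loss` and `opt_wd` are new names introduced here for illustration.

# Same regularization via the optimizer: weight_decay adds lambd * w to the
# gradient, matching the gradient of L2_penalty(lambd, w) = lambd * sum(w**2) / 2,
# so the penalty term can be dropped from the loss itself.
def squared_loss(y_hat, y):
    return ((y_hat.view(y.shape) - y) ** 2 / 2 / len(y)).sum()

opt_wd = optim.SGD([
    {'params': [w], 'weight_decay': 0.5},  # penalize the weights only
    {'params': [b]},                       # leave the bias unregularized
], lr=0.01)

train(100, squared_loss, opt_wd, net, train_iter, test_iter)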