# PyTorch neural network basics (PyTorch 神经网络基础)
import torch
from torch import nn
from torch .nn import functional as F
# Defining a custom Module.
# net = nn.Sequential(nn.Linear(20,256),nn.ReLU(),nn.Linear(256,10))
x = torch.rand(2, 20)  # random batch: 2 samples, 20 features each
# print(net(x))
# Multilayer perceptron
class MLP(nn.Module):
    """A simple multilayer perceptron: 20 -> 256 (ReLU) -> 5.

    Subclasses nn.Module; the two essential pieces are __init__
    (parameter/layer registration) and forward (the computation).
    """

    def __init__(self):
        # Let the nn.Module machinery set itself up first.
        super().__init__()
        self.hidden = nn.Linear(20, 256)
        self.out = nn.Linear(256, 5)

    def forward(self, X):
        # Hidden layer with ReLU activation, then the output layer.
        activated = F.relu(self.hidden(X))
        return self.out(activated)
# Instantiate the custom MLP and run a forward pass on the random batch.
net = MLP()
print(net(x))
# Sequential block
class Mysequential(nn.Module):
    """Minimal re-implementation of nn.Sequential.

    Bug fix: children must be registered in ``self._modules`` under
    *string* keys. The original used the module object itself as the
    key, which breaks ``state_dict()``/``repr()`` (they build dotted
    names from the keys) and silently collapses duplicate layer
    objects, since dict keys are unique. ``str(idx)`` mirrors what
    nn.Sequential does internally.
    """

    def __init__(self, *args):
        super().__init__()
        for idx, module in enumerate(args):
            # String key: required by the nn.Module naming machinery.
            self._modules[str(idx)] = module

    def forward(self, X):
        # _modules is an ordered dict, so children run in insertion order.
        for module in self._modules.values():
            X = module(X)
        return X
# Build the hand-rolled sequential container and push the batch through it.
net = Mysequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
print(net(x))
class FixehiddenMLP(nn.Module):
    """MLP with a fixed (non-trainable) random hidden weight matrix.

    Demonstrates three things: constant (requires_grad=False) tensors,
    parameter sharing (the same Linear layer applied twice), and
    arbitrary Python control flow inside ``forward``.
    """

    def __init__(self):
        super().__init__()
        # Fixed weights: drawn once at construction, never optimized.
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20)

    def forward(self, x):
        print(self.linear.weight)
        x = self.linear(x)
        # Mix with the constant matrix, shift by 1, apply ReLU.
        x = F.relu(torch.mm(x, self.rand_weight) + 1)
        # Same layer again: shared parameters.
        x = self.linear(x)
        # Halve until the L1 norm is at most 1, then reduce to a scalar.
        while x.abs().sum() > 1:
            x = x / 2
        return x.sum()
print('+++++++++++++++')
# Forward pass through the fixed-hidden-weight model (prints its weights too).
net = FixehiddenMLP()
print(net(x))
print('________________________')
# Fresh random batch: 2 samples, 20 features.
x = torch.rand(2, 20)
print(x)
# A standalone linear layer mapping 20 features to 2 outputs.
y = nn.Linear(20, 2)
# Bug fix: the original last line had non-Python scrape residue
# ("更多推荐") fused onto it, which is a SyntaxError.
print(y(x))