Deep Learning Chapter 2: Building AlexNet

This model is stored in the directory:

E:\python文件\deep-learning-for-image-processing-master\tensorflow_classification\Test2_alexnet

I. Model Introduction

  • It was the first network to use GPUs to accelerate training.
  • It uses the ReLU activation function instead of the traditional Sigmoid and Tanh activations.
  • It uses LRN (Local Response Normalization).
  • It applies Dropout to the first two fully connected layers to randomly deactivate neurons and reduce overfitting (see the sketch after this list).
    • (Figure: AlexNet network architecture — pAAq9vn.png)
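As a quick illustration of the Dropout point above, here is a minimal, self-contained sketch (the all-ones tensor is made up) showing that nn.Dropout zeroes activations only in training mode and becomes a no-op in evaluation mode:

import torch
import torch.nn as nn

drop = nn.Dropout(p=0.5)  # each element is zeroed with probability 0.5
x = torch.ones(1, 8)      # dummy activations

drop.train()              # training mode: dropout is active
print(drop(x))            # roughly half the entries are 0; survivors are scaled by 1/(1-p) = 2

drop.eval()               # evaluation mode: dropout is disabled
print(drop(x))            # identical to x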

II. Dataset: Flower Classification Dataset

1. Define the preprocessing transforms

from torchvision import transforms

data_transform = {  # preprocessing pipelines for the training and validation sets
    "train": transforms.Compose([transforms.RandomResizedCrop(224),  # random crop and resize to 224x224
                                 transforms.RandomHorizontalFlip(),  # random horizontal flip
                                 transforms.ToTensor(),
                                 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
    "val": transforms.Compose([transforms.Resize((224, 224)),  # must be (224, 224); a bare 224 would only resize the shorter side
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}
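As a quick sanity check (a minimal sketch; the blank test image is a stand-in for a real photo), applying the validation pipeline to any PIL image should yield a normalized [3, 224, 224] tensor:

from PIL import Image

img = Image.new("RGB", (500, 375))               # dummy image standing in for a real photo
tensor = data_transform["val"](img)
print(tensor.shape)                              # torch.Size([3, 224, 224])
print(tensor.min().item(), tensor.max().item())  # values lie in [-1, 1] after Normalize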

2. Load the dataset from disk

import os
from torchvision import datasets

data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))  # dataset root: go up two levels from the current directory
image_path = os.path.join(data_root, "data_set", "flower_data")  # locate the dataset directory under the root
assert os.path.exists(image_path), "{} path does not exist.".format(image_path)  # raise if the image path is missing
train_dataset = datasets.ImageFolder(root=os.path.join(image_path, "train"),
                                     transform=data_transform["train"])  # build the training dataset with the training transforms
train_num = len(train_dataset)  # total number of training samples
validate_dataset = datasets.ImageFolder(root=os.path.join(image_path, "val"),
                                        transform=data_transform["val"])  # build the validation dataset with the validation transforms
val_num = len(validate_dataset)  # total number of validation samples
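ImageFolder infers class labels from subdirectory names, so the dataset on disk is expected to be laid out roughly like this (the class folder names follow the mapping in the next step; file names are illustrative):

flower_data/
├── train/
│   ├── daisy/        # e.g. 001.jpg, 002.jpg, ...
│   ├── dandelion/
│   ├── roses/
│   ├── sunflower/
│   └── tulips/
└── val/
    ├── daisy/
    └── ...           # same five class folders as train/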

3. Save the class-to-index mapping

import json

# {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
flower_list = train_dataset.class_to_idx  # dict mapping each class name to its index
cla_dict = dict((val, key) for key, val in flower_list.items())  # invert it: index -> class name
json_str = json.dumps(cla_dict, indent=4)  # serialize to a JSON string; indent=4 for readability
with open('class_indices.json', 'w') as json_file:  # write the mapping to a JSON file
    json_file.write(json_str)
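Given the mapping above (JSON keys are always strings, which is why the prediction script later indexes with str(i)), the generated class_indices.json should look like this:

{
    "0": "daisy",
    "1": "dandelion",
    "2": "roses",
    "3": "sunflower",
    "4": "tulips"
}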

4. Create the training and validation DataLoaders

import torch

batch_size = 32
train_loader = torch.utils.data.DataLoader(train_dataset,  # batch and shuffle the training set
                                           batch_size=batch_size, shuffle=True,
                                           num_workers=0)

validate_loader = torch.utils.data.DataLoader(validate_dataset,  # batch the validation set; no shuffling needed
                                              batch_size=4, shuffle=False,
                                              num_workers=0)

print("using {} images for training, {} images for validation.".format(train_num,
                                                                        val_num))
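To confirm the loaders produce what the network expects, a minimal sketch that pulls a single batch and prints its shapes:

images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([32, 3, 224, 224]) -- [N, C, H, W]
print(labels.shape)  # torch.Size([32]) -- one integer class index per image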

III. Building the Network Model

import torch
import torch.nn as nn

class AlexNet(nn.Module):
    def __init__(self, num_classes=1000, init_weights=False):  # number of classes, and whether to initialize weights
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(  # group the convolutional layers into one module, input[3, 224, 224]
            nn.Conv2d(3, 48, kernel_size=11, stride=4, padding=2),  # output[48, 55, 55]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),                  # output[48, 27, 27]
            nn.Conv2d(48, 128, kernel_size=5, padding=2),           # output[128, 27, 27]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),                  # output[128, 13, 13]
            nn.Conv2d(128, 192, kernel_size=3, padding=1),          # output[192, 13, 13]
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=3, padding=1),          # output[192, 13, 13]
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 128, kernel_size=3, padding=1),          # output[128, 13, 13]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),                  # output[128, 6, 6]
        )
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),  # randomly deactivate 50% of the neurons
            nn.Linear(128 * 6 * 6, 2048),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(2048, 2048),
            nn.ReLU(inplace=True),
            nn.Linear(2048, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, start_dim=1)  # flatten all dimensions except batch
        x = self.classifier(x)
        return x

    def _initialize_weights(self):  # weight initialization
        for m in self.modules():  # iterate over all modules and initialize each by type
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
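Note that the channel counts here (48, 128, 192, ...) are half of the original AlexNet's (96, 256, 384, ...), which split its feature maps across two GPUs; this single-branch version keeps one half. A quick shape check with a dummy input (a minimal sketch):

net = AlexNet(num_classes=5)
dummy = torch.randn(1, 3, 224, 224)  # one fake RGB image
out = net(dummy)
print(out.shape)                     # torch.Size([1, 5]) -- one logit per flower class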

IV. Training the Model

import sys
import torch.optim as optim
from tqdm import tqdm

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

net = AlexNet(num_classes=5, init_weights=True)
net.to(device)
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0002)

epochs = 10
save_path = './AlexNet.pth'      # where to save the model weights
best_acc = 0.0                   # best validation accuracy seen so far
train_steps = len(train_loader)  # number of batches per epoch
for epoch in range(epochs):
    # train
    net.train()  # training mode: dropout layers are active
    running_loss = 0.0
    train_bar = tqdm(train_loader, file=sys.stdout)  # wrap the loader in a tqdm progress bar so loading and training progress is visible
    for step, data in enumerate(train_bar):
        images, labels = data
        optimizer.zero_grad()
        outputs = net(images.to(device))
        loss = loss_function(outputs, labels.to(device))
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        # update the progress bar's description to show the current epoch, total epochs, and current loss
        train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                 epochs,
                                                                 loss)

    # validate
    net.eval()  # evaluation mode: dropout layers are disabled
    acc = 0.0   # accumulate the number of correct predictions per epoch
    with torch.no_grad():
        val_bar = tqdm(validate_loader, file=sys.stdout)  # progress bar for validation
        for val_data in val_bar:
            val_images, val_labels = val_data
            outputs = net(val_images.to(device))
            predict_y = torch.max(outputs, dim=1)[1]
            acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

    val_accurate = acc / val_num
    print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %  # report after every epoch
          (epoch + 1, running_loss / train_steps, val_accurate))

    if val_accurate > best_acc:  # track the best validation accuracy and save the weights that achieved it
        best_acc = val_accurate
        torch.save(net.state_dict(), save_path)

print('Finished Training')
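Since torch.save stores only the state_dict here, reloading requires rebuilding the architecture first; a minimal sketch verifying the checkpoint round-trips (net2 is a hypothetical name):

net2 = AlexNet(num_classes=5)  # must match the architecture that was saved
net2.load_state_dict(torch.load(save_path, map_location='cpu'))
net2.eval()                    # remember to disable dropout before inference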

The training results are as follows:

(Figure: training output — pAEsrFK.png)

V. Testing the Model

import os
import json
import torch
import matplotlib.pyplot as plt
from PIL import Image
from torchvision import transforms

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

data_transform = transforms.Compose(
    [transforms.Resize((224, 224)),
     transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# load image
img_path = "../tulip.jpg"
assert os.path.exists(img_path), "file: '{}' does not exist.".format(img_path)
img = Image.open(img_path)

plt.imshow(img)  # display the image
# [N, C, H, W]
img = data_transform(img)  # preprocess the image
# expand batch dimension
img = torch.unsqueeze(img, dim=0)  # add a batch dimension

# read class_indict
json_path = './class_indices.json'
assert os.path.exists(json_path), "file: '{}' does not exist.".format(json_path)

with open(json_path, "r") as f:  # read and decode the class mapping file
    class_indict = json.load(f)

# create model
model = AlexNet(num_classes=5).to(device)

# load model weights
weights_path = "./AlexNet.pth"
assert os.path.exists(weights_path), "file: '{}' does not exist.".format(weights_path)
model.load_state_dict(torch.load(weights_path))  # load the weights saved during training

model.eval()  # evaluation mode: dropout disabled
with torch.no_grad():
    # predict class
    output = torch.squeeze(model(img.to(device))).cpu()  # forward pass; squeeze removes all size-1 dimensions
    predict = torch.softmax(output, dim=0)  # turn the output logits into a probability distribution
    predict_cla = torch.argmax(predict).numpy()  # index of the highest probability

print_res = "class: {}   prob: {:.3}".format(class_indict[str(predict_cla)],  # predicted class name and its probability
                                             predict[predict_cla].numpy())
plt.title(print_res)  # show the image together with the prediction
for i in range(len(predict)):  # print the predicted probability for every class
    print("class: {:10}   prob: {:.3}".format(class_indict[str(i)],
                                              predict[i].numpy()))
plt.show()
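If you need to classify more than one image, the steps above fold naturally into a reusable helper (a sketch; predict_image is a hypothetical name, not part of the original script):

def predict_image(path, model, transform, class_indict, device):
    """Return (class_name, probability) for a single image file."""
    img = transform(Image.open(path)).unsqueeze(0)  # preprocess and add batch dim
    with torch.no_grad():
        output = torch.squeeze(model(img.to(device))).cpu()
        probs = torch.softmax(output, dim=0)
        idx = int(torch.argmax(probs))
    return class_indict[str(idx)], float(probs[idx])

# e.g. print(predict_image("../tulip.jpg", model, data_transform, class_indict, device))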

The results are shown below:

  • (Figure: example prediction output — pAEyAmR.png)

  • (Figure: per-class probabilities printed to the console — pAEySYT.png)

------------- End of Article -------------