The loss seems to be decreasing, so the algorithm appears to be working, but the accuracy is not improving and stays stuck.
import numpy as np
import cv2
from os import listdir
from os.path import isfile, join
from sklearn.utils import shuffle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import torch.utils.data
file_path_0 = [f for f in listdir("data/0") if isfile(join("data/0", f))]
file_path_1 = [f for f in listdir("data/1") if isfile(join("data/1", f))]
data_x = []
data_y = []
for i in range(len(file_path_0)):
    image = cv2.imread("data/0/" + file_path_0[i])
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    data_x.append(image)
    data_y.append(0)
for i in range(len(file_path_1)):
    image = cv2.imread("data/1/" + file_path_1[i])
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    data_x.append(image)
    data_y.append(1)
data_x = np.array(data_x).astype(np.double) / 255
data_y = np.array(data_y).astype(np.double).reshape(-1, 1)
data_x, data_y = shuffle(data_x, data_y)
t_data_x = torch.stack([torch.Tensor(i) for i in data_x]) # transform to torch tensors
t_data_y = torch.stack([torch.tensor(i, dtype=torch.float) for i in data_y])
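# Note: cv2.imread returns H x W x 3 (BGR) arrays, so t_data_x is stacked channels-last.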
##############################
batch_size = 10
t_dataset = torch.utils.data.TensorDataset(t_data_x, t_data_y) # create your dataset
train_size = int(0.8 * len(t_dataset))
test_size = len(t_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(t_dataset, [train_size, test_size])
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size) # create your dataloader
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size) # create your dataloader
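# Note: both DataLoaders use the default shuffle=False; the data was already shuffled
# above, and random_split randomizes which samples land in train vs. test.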
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(720, 10, kernel_size=3)
        self.conv2 = nn.Conv2d(10, 5, kernel_size=3)
        self.mp1 = nn.MaxPool2d(5)
        self.fc = nn.Linear(2550, 1)

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp1(self.conv1(x)))
        x = x.view(in_size, -1)  # flatten before the fully connected layer
        x = self.fc(x)
        x = F.sigmoid(x)
        return x
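# Note: conv2 is defined but never used in forward(), so the network is effectively
# conv1 -> 5x5 max-pool -> flatten -> fc -> sigmoid.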
model = Net()
optimizer = optim.SGD(model.parameters(), lr=0.001)
def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_dataloader):
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.binary_cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_dataloader.dataset),
                100. * batch_idx / len(train_dataloader),
                loss.data.item()))
def test():
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_dataloader:
        data, target = Variable(data, requires_grad=True), Variable(target)
        output = model(data)
        # sum up batch loss
        test_loss += F.binary_cross_entropy(output, target, size_average=False).data.item()
        # get the index of the max log-probability
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.data.view_as(pred).long()).cpu().sum()
    test_loss /= len(test_dataloader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_dataloader.dataset),
        100. * correct / len(test_dataloader.dataset)))
for epoch in range(1, 10):
    train(epoch)
    test()
Train Epoch: 7 [0/249 (0%)]	Loss: 0.537067
Train Epoch: 7 [100/249 (40%)]	Loss: 0.597774
Train Epoch: 7 [200/249 (80%)]	Loss: 0.554897
Test set: Average loss: 0.5094, Accuracy: 37/63 (58%)
Train Epoch: 8 [0/249 (0%)]	Loss: 0.481739
Train Epoch: 8 [100/249 (40%)]	Loss: 0.564388
Train Epoch: 8 [200/249 (80%)]	Loss: 0.517878
Test set: Average loss: 0.4522, Accuracy: 37/63 (58%)
Train Epoch: 9 [0/249 (0%)]	Loss: 0.420650
Train Epoch: 9 [100/249 (40%)]	Loss: 0.521278
Train Epoch: 9 [200/249 (80%)]	Loss: 0.480884
Test set: Average loss: 0.3944, Accuracy: 37/63 (58%)
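For reference (this is not part of the script above, and the example numbers are made up): a single-unit sigmoid output of shape (batch, 1) is usually turned into a 0/1 prediction by thresholding at 0.5, whereas output.data.max(1, keepdim=True)[1] takes an argmax over a dimension that has only one entry. A minimal standalone sketch of the thresholding approach:

import torch

output = torch.tensor([[0.2], [0.7], [0.9], [0.4]])  # hypothetical sigmoid outputs, shape (4, 1)
target = torch.tensor([[0.0], [1.0], [1.0], [1.0]])  # hypothetical binary labels, shape (4, 1)
pred = (output > 0.5).float()                        # threshold at 0.5 -> 0.0 or 1.0
correct = pred.eq(target).sum().item()               # 3 of the 4 examples match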