I'm trying to understand how a simple feed-forward neural network works... Starting from the example here, I've simplified it to build a trainer for an "AND" neuron that usually reaches 100% accuracy:
# import the necessary packages
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Activation
from keras.optimizers import SGD
from keras.layers import Dense
from keras.utils import np_utils
from imutils import paths
import numpy as np
import argparse
import cv2
import os
def image_to_feature_vector(image, size=(32, 32)):
    # resize the image to a fixed size, then flatten the image into
    # a list of raw pixel intensities
    return cv2.resize(image, size).flatten()
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True,
    help="path to output model file")
args = vars(ap.parse_args())
# grab the list of images that we'll be describing
#print("[INFO] describing images...")
#imagePaths = list(paths.list_images(args["dataset"]))
## initialize the data matrix and labels list
#data = []
#labels = []
## loop over the input images
#for (i, imagePath) in enumerate(imagePaths):
#    # load the image and extract the class label (assuming that our
#    # path has the format: /path/to/dataset/{class}.{image_num}.jpg)
#    image = cv2.imread(imagePath)
#    label = imagePath.split(os.path.sep)[-1].split(".")[0]
#    # construct a feature vector of raw pixel intensities, then update
#    # the data matrix and labels list
#    features = image_to_feature_vector(image)
#    data.append(features)
#    labels.append(label)
#    # show an update every 1,000 images
#    if i > 0 and i % 1000 == 0:
#        print("[INFO] processed {}/{}".format(i, len(imagePaths)))
#print(labels)
#exit()
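# hard-code the AND truth table instead: the label is 1 only when both inputs are 1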
data = [[0,0],[0,1],[1,0],[1,1]]
labels = [0,0,0,1]
# encode the labels (a no-op here, since the hard-coded labels are already integers)
le = LabelEncoder()
labels = le.fit_transform(labels)
# scale the input image pixels to the range [0, 1] (skipped here, since
# the inputs are already 0/1), then transform the labels into one-hot
# vectors of length num_classes -- this generates a vector for each
# label where the index of the label is set to `1` and all other
# entries to `0`
data = np.array(data)  # / 255.0 (pixel scaling disabled for the 0/1 inputs)
labels = np_utils.to_categorical(labels, 2)
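# e.g. labels [0, 0, 0, 1] become [[1, 0], [1, 0], [1, 0], [0, 1]]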
# partition the data into training and testing splits -- note that
# test_size=0.0 puts everything into the training set
print("[INFO] constructing training/testing split...")
(trainData, testData, trainLabels, testLabels) = train_test_split(
    data, labels, test_size=0.0, random_state=42)
print('train')
print(trainData)
print(trainLabels)
print('test')
print(testData)  # oops, empty...
print(testLabels)
# fall back to evaluating on the training data, since the test split is empty
testData = trainData
testLabels = trainLabels
# define the architecture of the network
model = Sequential()
model.add(Dense(2, input_dim=2, kernel_initializer="uniform",
    activation="relu"))
model.add(Activation("softmax"))
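# NOTE: the Activation layer applies softmax on top of the Dense layer's
# ReLU output, so the printed predictions are normalized probabilities
# rather than raw dot products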
# train the model using SGD
print("[INFO] compiling model...")
sgd = SGD(lr=0.35)
model.compile(loss="binary_crossentropy", optimizer=sgd,
    metrics=["accuracy"])
print(model.fit.__doc__)
model.fit(trainData, trainLabels, epochs=50, batch_size=128,
    verbose=False)
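# with only 4 training samples and batch_size=128, each epoch performs a
# single full-batch gradient update, so 50 epochs = 50 updates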
print(model.predict(np.array([[0,1]])))
print('Should be [1,0]')  # AND(0,1) is false
print(model.predict(np.array([[1,0]])))
print('Should be [1,0]')  # AND(1,0) is false
print(model.predict(np.array([[0,0]])))
print('Should be [1,0]')  # AND(0,0) is false
print(model.predict(np.array([[1,1]])))
print('Should be [0,1] true.')  # AND(1,1) is the only true case
# show the accuracy on the testing set
print("[INFO] evaluating on testing set...")
(loss, accuracy) = model.evaluate(testData, testLabels,
    batch_size=128, verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,
    accuracy * 100))
# dump the network architecture and weights to file
print("[INFO] dumping architecture and weights to file...")
model.save(args["model"])
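# (the saved .hdf5 file stores the network architecture plus the trained
# kernel and bias arrays, which is what HDFView displays)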
Now when I run it with python3 ./simple_andNN.py --model ./outputfile.hdf5,
I can open the output model in the HDFView application, and this is what I see:
Now I would expect the output for [1, 1] (the only input classified in the positive group; its result is [smaller number, larger number]) to be the dot product of the input with a matrix (the kernel matrix, in this simple case?) plus some constant bias, but when I try that, it doesn't add up to what the output says. Am I misunderstanding what this "neuron" is supposed to be doing, based on this data?
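For reference, here is a minimal sketch (run after model.fit above) of how the prediction could be reproduced by hand, assuming the two-layer model defined earlier: model.get_weights() returns the Dense layer's kernel and bias, the Dense layer computes relu(x @ W + b), and the Activation layer then applies softmax to that result, so the raw dot product plus bias alone would not match the printed probabilities:

# minimal sketch: reproduce model.predict([[1, 1]]) by hand from the
# trained weights (assumes the Sequential model defined above)
W, b = model.get_weights()         # kernel matrix (2x2) and bias vector (2,)
x = np.array([1, 1])

z = x @ W + b                      # affine step: dot product plus bias
h = np.maximum(z, 0)               # ReLU activation of the Dense layer
p = np.exp(h) / np.exp(h).sum()    # softmax from the Activation layer
print(p)                           # should match model.predict(np.array([[1, 1]]))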