0

我是 AI 新手,根据本网站的教程,此代码是关于 mnist 数字识别的:tensorflow 教程。出于某种原因,我不能像教程中那样直接使用 mnist 数据。我的方法是先下载数据,然后解压到一个文件夹中。然后将数据读取为二进制数据,解码成numpy。我不知道为什么,准确率只有 0.098,与假设值 0.92 相差甚远。我的代码在这里:

 import numpy as np
 import struct
 import matplotlib.pyplot as plt


class MnistData():

    def printImg(self,img):
        fig = plt.figure()
        plotwindow = fig.add_subplot(111)
        plt.imshow(img , cmap = 'gray')
        plt.show()
        return

    def getBinFile(self,filename):
        binfile = open(filename , 'rb')  
        buf = binfile.read()
        return buf


    def decodeLabelsFromBin(self,binFile):
        offset = 0
        fmt_header = '>ii'
        magic_number, num_images = struct.unpack_from(fmt_header, binFile, offset)
        offset += struct.calcsize(fmt_header)
        fmt_image = '>B'
        labels = np.empty(num_images)
        for i in range(num_images):
            labels[i] = struct.unpack_from(fmt_image, binFile, offset)[0]
            offset += struct.calcsize(fmt_image)
        return labels


    def decodeImagesFromBin(self,buf):    
        #读取头四个32bit的interger:
        index = 0
        magic, numImages, numRows, numColumns = struct.unpack_from('>IIII' , buf , index)
        index += struct.calcsize('>IIII')
        imageSize = numRows * numColumns
        fmtImage = '>' + str(imageSize) + 'B'
        images = np.empty((numImages, numRows, numColumns))
        for i in range(numImages):   
            images[i] = np.array(struct.unpack_from(fmtImage, buf, index)).reshape((numRows, numColumns))
            index += struct.calcsize(fmtImage)
        return images


    def __init__(self):
        trainImgPath = 'E:/AI projects/mnistPJ/mnist/train-images.idx3-ubyte'
        trainLabelPath = 'E:/AI projects/mnistPJ/mnist/train-labels.idx1-ubyte'
        testImgPath = 'E:/AI projects/mnistPJ/mnist/t10k-images.idx3-ubyte'
        testLabelPath = 'E:/AI projects/mnistPJ/mnist/t10k-labels.idx1-ubyte'

        trainImgBinFile = self.getBinFile(trainImgPath)
        self.trainImgs = self.decodeImagesFromBin(trainImgBinFile)

        trainLabelBinFile = self.getBinFile(trainLabelPath)
        self.trainLabels = self.decodeLabelsFromBin(trainLabelBinFile)

        testImgBinFile = self.getBinFile(testImgPath)
        self.testImgs = self.decodeImagesFromBin(testImgBinFile)

        testLabelBinFile = self.getBinFile(testLabelPath)
        self.testLabels = self.decodeLabelsFromBin(testLabelBinFile)
        return 

此类用于处理 minist 数据。

我的 main.py 如下:

import tensorflow as tf
import mnistDecode
import random;
import numpy as np

sess = tf.InteractiveSession()
mdata = mnistDecode.MnistData()
x = tf.placeholder(tf.float32, shape=[None, 784])  
y_ = tf.placeholder(tf.float32, shape=[None, 10])  
W = tf.Variable(tf.zeros([784,10])) 
b = tf.Variable(tf.zeros(10)) 
y = tf.nn.softmax(tf.matmul(x,W)+b) 
cross_entrophy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y),
                                               reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entrophy)
tf.global_variables_initializer().run()
len = mdata.trainLabels.shape[0]  
COUNT = 100;   
turns = 1000;   

for j in range(turns):
    batch_xs = np.empty((COUNT, 784))
    batch_ys = np.zeros((COUNT, 10)) 
    index = random.sample(range(len),COUNT); 
    imgs = mdata.trainImgs[index]
    lbs = mdata.trainLabels[index]
    for i in range(COUNT):
        batch_xs[i] = imgs[i].reshape(1,784)  
        batch_ys[i,int(lbs[i])] = 1
    train_step.run({x:batch_xs,y_:batch_ys})

correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
accuracy= tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

testLen = mdata.testLabels.shape[0]
timgs = np.empty((testLen, 784))
tlabels = np.zeros((testLen, 10))

for i in range(testLen):
    timgs[i] = mdata.testImgs[i].reshape(1,784)  
    tlabels[i,int(mdata.testLabels[i])] = 1

print(accuracy.eval({x:timgs,y_:tlabels}))
sess.close()
4

0 回答 0