■ Creating a Convolutional Neural Network (MNIST)

----------------------------------------------------------------------------------------------------
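# TensorFlow 1.x example: a three-layer convolutional network for MNIST digit classification.
# Note: tensorflow.examples.tutorials.mnist, tf.placeholder, and tf.Session are TensorFlow 1.x
# APIs, so this listing needs TensorFlow 1.x; it does not run unmodified on TensorFlow 2.x.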

import numpy as np

import tensorflow as tf

import tensorflow.examples.tutorials.mnist as mnist

 

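# Mini-batch size, number of test images sampled per epoch, input image width/height, and number of classes.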
batchSize   = 128

testSize    = 256

imageSize   = 28

outputCount = 10

 

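# Keep probabilities for tf.nn.dropout: in TF 1.x its second argument is keep_prob, so 0.8 means 80% of activations are kept.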
convolutionDropoutRatio    = 0.8

fullyConnectedDropoutRatio = 0.5

 

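# Download MNIST into the "data" directory if necessary and load it with one-hot encoded labels.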
mnistDatasets = mnist.input_data.read_data_sets("data", one_hot = True)

 

trainInputNDArray       = mnistDatasets.train.images

trainRightOutputNDArray = mnistDatasets.train.labels

testInputNDArray        = mnistDatasets.test.images

testRightOutputNDArray  = mnistDatasets.test.labels

 

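# Reshape the flat 784-element pixel vectors into 28 x 28 x 1 images (NHWC layout).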
trainInputNDArray = trainInputNDArray.reshape(-1, imageSize, imageSize, 1)

testInputNDArray  = testInputNDArray.reshape (-1, imageSize, imageSize, 1)

 

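# Placeholders for the input images and the one-hot target labels; the batch dimension is left as None.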
inputTensor       = tf.placeholder("float", [None, imageSize, imageSize, 1])

rightOutputTensor = tf.placeholder("float", [None, outputCount])

 

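# Weights: three 3 x 3 convolution kernels (1 -> 32 -> 64 -> 128 channels), a fully connected
# layer (128 * 4 * 4 -> 625), and the output layer (625 -> 10). After three 2 x 2 max-poolings
# with SAME padding, the 28 x 28 input shrinks to 4 x 4, hence the 128 * 4 * 4 flattened size.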
convolutionLayer1WeightVariable   = tf.Variable(tf.random_normal([3, 3, 1 , 32], stddev = 0.01))

convolutionLayer2WeightVariable   = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev = 0.01))

convolutionLayer3WeightVariable   = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev = 0.01))

fullyConnectedLayerWeightVariable = tf.Variable(tf.random_normal([128 * 4 * 4, 625], stddev = 0.01))

outputLayerWeightVariable         = tf.Variable(tf.random_normal([625, outputCount], stddev = 0.01))

 

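# Keep probabilities are fed through placeholders so dropout can be disabled (1.0) at evaluation time.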
convolutionDropoutRatioTensor    = tf.placeholder("float")

fullyConnectedDropoutRatioTensor = tf.placeholder("float")

 

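# Convolution layer 1: 3 x 3 convolution -> ReLU -> 2 x 2 max-pooling -> dropout (28 x 28 x 1 -> 14 x 14 x 32).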
convolutionLayer1OutputTensor        = tf.nn.conv2d(inputTensor, convolutionLayer1WeightVariable, strides = [1, 1, 1, 1], padding = "SAME")

convolutionLayer1OutputTensorReLU    = tf.nn.relu(convolutionLayer1OutputTensor)

convolutionLayer1OutputTensorMaxPool = tf.nn.max_pool(convolutionLayer1OutputTensorReLU, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME")

convolutionLayer1OutputTensorDropout = tf.nn.dropout(convolutionLayer1OutputTensorMaxPool, convolutionDropoutRatioTensor)

 

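# Convolution layer 2: 14 x 14 x 32 -> 14 x 14 x 64, then max-pooling to 7 x 7 x 64.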
convolutionLayer2OutputTensor        = tf.nn.conv2d(convolutionLayer1OutputTensorDropout, convolutionLayer2WeightVariable, strides = [1, 1, 1, 1], padding = "SAME")

convolutionLayer2OutputTensorReLU    = tf.nn.relu(convolutionLayer2OutputTensor)

convolutionLayer2OutputTensorMaxPool = tf.nn.max_pool(convolutionLayer2OutputTensorReLU, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME")

convolutionLayer2OutputTensorDropout = tf.nn.dropout(convolutionLayer2OutputTensorMaxPool, convolutionDropoutRatioTensor)

 

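# Convolution layer 3: 7 x 7 x 64 -> 7 x 7 x 128, max-pooled to 4 x 4 x 128 and flattened for the fully connected layer.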
convolutionLayer3OutputTensor        = tf.nn.conv2d(convolutionLayer2OutputTensorDropout, convolutionLayer3WeightVariable, strides = [1, 1, 1, 1], padding = "SAME")

convolutionLayer3OutputTensorReLU    = tf.nn.relu(convolutionLayer3OutputTensor)

convolutionLayer3OutputTensorMaxPool = tf.nn.max_pool(convolutionLayer3OutputTensorReLU, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME")

convolutionLayer3OutputTensorReshape = tf.reshape(convolutionLayer3OutputTensorMaxPool, [-1, fullyConnectedLayerWeightVariable.get_shape().as_list()[0]])

convolutionLayer3OutputTensorDropout = tf.nn.dropout(convolutionLayer3OutputTensorReshape, convolutionDropoutRatioTensor)

 

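# Fully connected layer: 2048 -> 625 with ReLU and dropout.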
fullyConnectedLayerOutputTensor        = tf.matmul(convolutionLayer3OutputTensorDropout, fullyConnectedLayerWeightVariable)

fullyConnectedLayerOutputTensorReLU    = tf.nn.relu(fullyConnectedLayerOutputTensor)

fullyConnectedLayerOutputTensorDropout = tf.nn.dropout(fullyConnectedLayerOutputTensorReLU, fullyConnectedDropoutRatioTensor)

 

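# Output layer: 625 -> 10 logits (softmax is applied inside the loss below).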
outputTensor = tf.matmul(fullyConnectedLayerOutputTensorDropout, outputLayerWeightVariable)

 

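# Softmax cross-entropy loss against the one-hot labels, minimized with RMSProp (learning rate 0.001, decay 0.9).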
costTensor = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = outputTensor, labels = rightOutputTensor))

 

optimizerOperation = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(costTensor)

 

predictTensor = tf.argmax(outputTensor, 1)

 

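# Train for 10 epochs; after each epoch report accuracy on a random sample of 256 test images,
# and after training report accuracy on the full test set.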
with tf.Session() as session:

    session.run(tf.global_variables_initializer())

    for i in range(10):

        trainingBatch = zip(range(0, len(trainInputNDArray), batchSize), range(batchSize, len(trainInputNDArray) + 1, batchSize))

        for startIndex, endIndex in trainingBatch:

            session.run(optimizerOperation, feed_dict = {inputTensor : trainInputNDArray[startIndex:endIndex],
                rightOutputTensor : trainRightOutputNDArray[startIndex:endIndex], convolutionDropoutRatioTensor : convolutionDropoutRatio,
                fullyConnectedDropoutRatioTensor : fullyConnectedDropoutRatio})

        testIndexNDArray = np.arange(len(testInputNDArray))

        np.random.shuffle(testIndexNDArray)

        testIndexNDArray = testIndexNDArray[0:testSize]

        print("Epoch : ", i + 1, "정확도 : ", np.mean(np.argmax(testRightOutputNDArray[testIndexNDArray],\

            axis = 1) == session.run(predictTensor, feed_dict = {inputTensor : testInputNDArray[testIndexNDArray],\

            rightOutputTensor : testRightOutputNDArray[testIndexNDArray], convolutionDropoutRatioTensor : 1.0,\

            fullyConnectedDropoutRatioTensor : 1.0})))

    scoreTensor    = tf.equal(tf.argmax(outputTensor, 1), tf.argmax(rightOutputTensor, 1))

    accuracyTensor = tf.reduce_mean(tf.cast(scoreTensor, tf.float32))

    print("정확도 : ", session.run(accuracyTensor, feed_dict = {inputTensor : testInputNDArray, rightOutputTensor : testRightOutputNDArray,\

        convolutionDropoutRatioTensor : 1.0, fullyConnectedDropoutRatioTensor : 1.0}))

----------------------------------------------------------------------------------------------------
