
■ Shows how to build a recurrent neural network.

 

▶ Example Code (PY)

import tensorflow as tf
import tensorflow.contrib.rnn as rnn
import tensorflow.examples.tutorials.mnist as mnist

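# Training hyperparameters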
learningRate  = 0.001
trainingCount = 100000
batchSize     = 128
displayStep   = 10

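# Network dimensions: each 28x28 MNIST image is fed as 28 time steps of 28 pixels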
inputLayerNodeCount     = 28
inputLayerNodeStepCount = 28
hiddenLayerNodeCount    = 128
outputLayerNodeCount    = 10

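# Load the MNIST dataset with one-hot encoded labels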
mnistDatasets = mnist.input_data.read_data_sets("data", one_hot = True)

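# Placeholders for the input sequences and the one-hot target labels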
inputLayerTensor    = tf.placeholder("float", [None, inputLayerNodeStepCount, inputLayerNodeCount])
correctOutputTensor = tf.placeholder("float", [None, outputLayerNodeCount])

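# Weights and bias that project the last LSTM output onto the 10 output classes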
outputLayerWeightVariable = tf.Variable(tf.random_normal([hiddenLayerNodeCount, outputLayerNodeCount]))
outputLayerBiasVariable   = tf.Variable(tf.random_normal([outputLayerNodeCount]))

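# Rearrange the input from (batch, steps, features) into a list of
# inputLayerNodeStepCount tensors of shape (batch, features) for static_rnn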
inputLayerTensorTranspose = tf.transpose(inputLayerTensor, [1, 0, 2])
inputLayerTensorReshape   = tf.reshape(inputLayerTensorTranspose, [-1, inputLayerNodeCount])
inputLayerTensorSplit     = tf.split(axis = 0, num_or_size_splits = inputLayerNodeStepCount, value = inputLayerTensorReshape)

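# LSTM cell used as the recurrent unit of the network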
basicLSTMCell = rnn.BasicLSTMCell(hiddenLayerNodeCount, forget_bias = 1.0)

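# Unroll the LSTM over the time steps; outputList holds the output of every step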
outputList, lstmStateTuple = rnn.static_rnn(basicLSTMCell, inputLayerTensorSplit, dtype = tf.float32)

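# Classify using the output of the last time step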
outputLayerOutputTensor = tf.matmul(outputList[-1], outputLayerWeightVariable) + outputLayerBiasVariable

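# Softmax cross-entropy loss averaged over the mini batch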
costTensor = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = outputLayerOutputTensor, labels = correctOutputTensor))

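# Adam optimizer minimizing the loss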
optimizerOperation = tf.train.AdamOptimizer(learningRate).minimize(costTensor)

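# Accuracy: fraction of samples whose predicted class matches the label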
correctPredictionTensor = tf.equal(tf.argmax(outputLayerOutputTensor, 1), tf.argmax(correctOutputTensor, 1))
accuracyTensor          = tf.reduce_mean(tf.cast(correctPredictionTensor, tf.float32))

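# Train the network and periodically report loss/accuracy on the current mini batch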
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    step = 1
    while step * batchSize < trainingCount:
        batchInputNDArray, batchCorrectOutputNDArray = mnistDatasets.train.next_batch(batchSize)
        batchInputNDArray = batchInputNDArray.reshape((batchSize, inputLayerNodeStepCount, inputLayerNodeCount))
        session.run(optimizerOperation, feed_dict = {inputLayerTensor : batchInputNDArray, correctOutputTensor : batchCorrectOutputNDArray})
        if step % displayStep == 0:
            accuracy = session.run(accuracyTensor, feed_dict = {inputLayerTensor : batchInputNDArray, correctOutputTensor : batchCorrectOutputNDArray})
            loss     = session.run(costTensor, feed_dict = {inputLayerTensor : batchInputNDArray, correctOutputTensor : batchCorrectOutputNDArray})
            print("훈련 카운트 : " + str(step * batchSize) + ", 미니 배치 손실 = " + "{:.6f}".format(loss) + ", 훈련 정확도 = " + "{:.5f}".format(accuracy))
        step += 1
    print("최적화가 완료되었습니다!")

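    # Evaluate accuracy on the first 128 test images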
    testSize  = 128
    testInputNDArray         = mnistDatasets.test.images[:testSize].reshape((-1, inputLayerNodeStepCount, inputLayerNodeCount))
    testCorrectOutputNDArray = mnistDatasets.test.labels[:testSize]
    print("테스트 정확도 : ", session.run(accuracyTensor, feed_dict = {inputLayerTensor : testInputNDArray, correctOutpu