```python
import tensorflow as tf
import random
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset with one-hot encoded labels
mnist = input_data.read_data_sets("./mnist/data/", one_hot=True)

# Training data
input_data_train = mnist.train.images
output_data_train = mnist.train.labels

# Input placeholders for the dataset (784 pixels in, 10 classes out)
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# Fix the random seed before creating variables so initialization is reproducible
tf.set_random_seed(100)

# Weight and bias variables for the three layers (784 -> 256 -> 64 -> 10)
W1 = tf.Variable(tf.random_normal([784, 256], stddev=0.1))
b1 = tf.Variable(tf.random_normal([256], stddev=0.1))
W2 = tf.Variable(tf.random_normal([256, 64], stddev=0.1))
b2 = tf.Variable(tf.random_normal([64], stddev=0.1))
W3 = tf.Variable(tf.random_normal([64, 10], stddev=0.1))
b3 = tf.Variable(tf.random_normal([10], stddev=0.1))

# Forward pass: two hidden layers with ReLU, then the output layer
out_1 = tf.nn.relu(tf.matmul(X, W1) + b1)
out_2 = tf.nn.relu(tf.matmul(out_1, W2) + b2)
out_3 = tf.matmul(out_2, W3) + b3          # logits
result = tf.nn.softmax(out_3)              # class probabilities

# Cross-entropy cost; the logits (out_3) go in here, softmax is applied internally
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=out_3, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.3).minimize(cost)

# Accuracy: fraction of examples whose predicted class matches the label
correct = tf.equal(tf.argmax(result, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    epoch = 10
    batch_size = 1000
    batch_num = int(mnist.train.num_examples / batch_size)

    # Training loop
    for i in range(epoch):
        avg_cost = 0
        for j in range(batch_num):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            cost_rst, _ = sess.run([cost, optimizer],
                                   feed_dict={X: batch_x, Y: batch_y})
            avg_cost += cost_rst
        avg_cost = avg_cost / batch_num
        print("cost>> ", avg_cost)

    # Final accuracy check - evaluate on the test data
    acc = accuracy.eval(feed_dict={X: mnist.test.images, Y: mnist.test.labels})
    print("Final accuracy : ", acc * 100, "%")

    # Predict the label of one random test image
    idx = random.randint(0, mnist.test.num_examples - 1)
    label_n = sess.run(tf.argmax(mnist.test.labels[idx:idx + 1], 1))

    img = mnist.test.images[idx:idx + 1]
    predict = sess.run(result, feed_dict={X: img})
    predict_n = sess.run(tf.argmax(predict, 1))

    print("Real Num : ", label_n)
    print("Predicted Num : ", predict_n)
```
1. Dataset: MNIST
2. Number of layers: 3
3. Cost function: cross entropy (see the short sketch below)
4. Optimizer: Gradient Descent
5. Library: TensorFlow only
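To make item 3 concrete, here is a minimal NumPy sketch of the per-example cross-entropy that `tf.nn.softmax_cross_entropy_with_logits` computes in the code above. The one-hot label and softmax probabilities below are made-up example values, not outputs from the trained network:

```python
import numpy as np

# Hypothetical example: one-hot label and a softmax output for a single image
y_true = np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0])   # the digit is "3"
y_pred = np.array([0.01, 0.02, 0.05, 0.80, 0.02,
                   0.03, 0.02, 0.02, 0.02, 0.01])    # softmax probabilities (sum to 1)

# Cross-entropy: -sum(y_true * log(y_pred)); only the true class contributes
cross_entropy = -np.sum(y_true * np.log(y_pred))
print(cross_entropy)   # -log(0.8) ≈ 0.223
```

The cost is small when the softmax probability of the true class is close to 1 and grows without bound as that probability approaches 0; averaging it over a batch gives the `cost` tensor that Gradient Descent minimizes.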