1. MNISTの手書き数字認識のニューラルネットワークを実装

入力層:784画素の手書き数字画像 -> 中間層:ReLU関数 -> 出力層:softmax関数 -> 損失関数:二乗誤差

1.1 データセット準備及びTensorBoardの変数サマリーを関数化

In [1]:
# __future__ imports are a compiler directive and must be the first
# statements in a module; placing them after `import tensorflow` is a
# SyntaxError when this code is run as a .py file (it only worked here
# because each notebook cell is compiled separately).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def variable_summaries(var):
    """Attach TensorBoard summaries describing the distribution of ``var``.

    Records scalar summaries for the mean, standard deviation, max and
    min of the tensor plus a full histogram, all grouped under a
    'summaries' name scope so they appear together in the TensorBoard UI.
    """
    with tf.name_scope('summaries'):
        mean_op = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean_op)
        # The stddev intermediates get their own sub-scope so the extra
        # square/sqrt ops stay collapsed in the graph visualization.
        with tf.name_scope('stddev'):
            deviation = tf.sqrt(tf.reduce_mean(tf.square(var - mean_op)))
        tf.summary.scalar('stddev', deviation)
        largest = tf.reduce_max(var)
        smallest = tf.reduce_min(var)
        tf.summary.scalar('max', largest)
        tf.summary.scalar('min', smallest)
        tf.summary.histogram('histogram', var)

mnist = input_data.read_data_sets('data/', one_hot=True)
Extracting data/train-images-idx3-ubyte.gz
Extracting data/train-labels-idx1-ubyte.gz
Extracting data/t10k-images-idx3-ubyte.gz
Extracting data/t10k-labels-idx1-ubyte.gz

1.2 モデル構築(入力層~中間層)

In [2]:
with tf.name_scope("input"):
    # Flattened 28x28 grayscale images (784 pixels) and one-hot digit labels.
    x = tf.placeholder(tf.float32, [None, 784], name="x-input")
    y = tf.placeholder(tf.float32, [None, 10], name="y-input")
    
# Hidden layer: 784 -> 64 fully connected, ReLU activation.
with tf.name_scope("layer1"):
    with tf.name_scope("weights"):
        w_1 = tf.Variable(tf.truncated_normal([784,64], stddev=0.1), name="w1") # weights
        variable_summaries(w_1)
    with tf.name_scope("biases"):
        b_1 = tf.Variable(tf.zeros([64]), name="b1") # biases
        variable_summaries(b_1)
    with tf.name_scope("x_matmul_w_plus_b"):
        pre_act_1 = tf.matmul(x, w_1) + b_1
        tf.summary.histogram("pre_activations", pre_act_1)
    h_1 = tf.nn.relu(pre_act_1, name="activation") # ReLU
    tf.summary.histogram("activations", h_1)

1.3 モデル構築(中間層~出力層)

In [3]:
# Output layer: 64 -> 10 fully connected, softmax over the 10 digit classes.
with tf.name_scope("layer2"):
    with tf.name_scope("weights"):
        w_2 = tf.Variable(tf.truncated_normal([64, 10], stddev=0.1), name="w2") # weights
        variable_summaries(w_2)
    with tf.name_scope("biases"):
        b_2 = tf.Variable(tf.zeros([10]), name="b2") # biases
        variable_summaries(b_2)
    with tf.name_scope("x_matmul_w_plus_b"):
        pre_out_2 = tf.matmul(h_1, w_2) + b_2
        tf.summary.histogram("pre_out", pre_out_2)
    out = tf.nn.softmax(pre_out_2, name="out")
    tf.summary.histogram("out", out)

1.4 モデル構築(誤差計算と重み(バイアス)の調整(訓練))

In [4]:
with tf.name_scope("loss"):
    # Mean squared error between one-hot labels and softmax probabilities.
    # NOTE(review): cross-entropy is the usual pairing with softmax; squared
    # error is this section's deliberate choice (see the heading above).
    loss = tf.reduce_mean(tf.square(y - out)) # loss function (squared error)
tf.summary.scalar("loss", loss)

with tf.name_scope("train"):
    train_step = tf.train.AdagradOptimizer(0.5).minimize(loss) # adjust weights (and biases), learning rate = 0.5

1.5 モデル構築(評価)

In [5]:
with tf.name_scope("accuracy"):
    with tf.name_scope("correct_prediction"):
        # A prediction is correct when the argmax class matches the label.
        correct = tf.equal(tf.argmax(out, 1), tf.argmax(y, 1))
    with tf.name_scope("accuracy"):
        # Fraction of correct predictions in the fed batch.
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
tf.summary.scalar("accuracy", accuracy)
Out[5]:
<tf.Tensor 'accuracy_1:0' shape=() dtype=string>

1.6 実行(訓練と評価)

In [6]:
with tf.Session() as sess:
    merged = tf.summary.merge_all()
    # Separate writers so train/test curves can be overlaid in TensorBoard.
    train_writer = tf.summary.FileWriter("C:/tmp/mnist/train", sess.graph)
    test_writer = tf.summary.FileWriter("C:/tmp/mnist/test")
    sess.run(tf.global_variables_initializer())  # initialize variables
    test_images = mnist.test.images
    test_labels = mnist.test.labels  # fixed typo: was 'test_labes'
    for step in range(1000):
        # One optimizer step on a 50-example minibatch, logging train summaries.
        train_images, train_labels = mnist.train.next_batch(50)
        summary, _ = sess.run([merged, train_step], feed_dict={x: train_images, y: train_labels})
        train_writer.add_summary(summary, step)
        if step % 10 == 0:
            # Evaluate on the full test set every 10 steps.
            summary, acc_val = sess.run([merged, accuracy], feed_dict={x: test_images, y: test_labels})
            test_writer.add_summary(summary, step)
            print('Step %d: accuracy = %.2f' % (step, acc_val))
            # Spot-check a single entry of each parameter to watch it move.
            print('w1 = %s, b1 = %s, w2 = %s, b2 = %s' % (sess.run(w_1)[0][0], sess.run(b_1)[0], sess.run(w_2)[0][0], sess.run(b_2)[0]))
Step 0: accuracy = 0.12
w1 = -0.104626, b1 = 0.000176426, w2 = 0.0643459, b2 = -0.00121821
Step 10: accuracy = 0.25
w1 = -0.104626, b1 = -0.00158952, w2 = 0.0630961, b2 = 0.008474
Step 20: accuracy = 0.39
w1 = -0.104626, b1 = -0.00221047, w2 = 0.0621374, b2 = 0.00565392
Step 30: accuracy = 0.46
w1 = -0.104626, b1 = -0.0028385, w2 = 0.0597172, b2 = 0.00260822
Step 40: accuracy = 0.49
w1 = -0.104626, b1 = -0.00315679, w2 = 0.0580006, b2 = 0.00518681
Step 50: accuracy = 0.62
w1 = -0.104626, b1 = -0.00400972, w2 = 0.0555259, b2 = 0.00180138
Step 60: accuracy = 0.69
w1 = -0.104626, b1 = -0.00439423, w2 = 0.0549357, b2 = -0.00331187
Step 70: accuracy = 0.70
w1 = -0.104626, b1 = -0.00430666, w2 = 0.0535186, b2 = -0.00718597
Step 80: accuracy = 0.76
w1 = -0.104626, b1 = -0.00424763, w2 = 0.0524154, b2 = -0.0104198
Step 90: accuracy = 0.77
w1 = -0.104626, b1 = -0.00501861, w2 = 0.0508529, b2 = -0.0159984
Step 100: accuracy = 0.79
w1 = -0.104626, b1 = -0.00435213, w2 = 0.0510546, b2 = -0.0168623
Step 110: accuracy = 0.81
w1 = -0.104626, b1 = -0.00185144, w2 = 0.050715, b2 = -0.0160776
Step 120: accuracy = 0.81
w1 = -0.104626, b1 = 0.000633451, w2 = 0.0511514, b2 = -0.0211656
Step 130: accuracy = 0.83
w1 = -0.104626, b1 = 0.00269456, w2 = 0.0508174, b2 = -0.0204422
Step 140: accuracy = 0.83
w1 = -0.104626, b1 = 0.00458991, w2 = 0.0495512, b2 = -0.0210369
Step 150: accuracy = 0.85
w1 = -0.104626, b1 = 0.00672118, w2 = 0.0497294, b2 = -0.0212922
Step 160: accuracy = 0.85
w1 = -0.104626, b1 = 0.00843459, w2 = 0.0502616, b2 = -0.0214489
Step 170: accuracy = 0.86
w1 = -0.104626, b1 = 0.00829172, w2 = 0.0502679, b2 = -0.0221734
Step 180: accuracy = 0.86
w1 = -0.104626, b1 = 0.00969124, w2 = 0.0488761, b2 = -0.0271892
Step 190: accuracy = 0.86
w1 = -0.104626, b1 = 0.0114646, w2 = 0.0483821, b2 = -0.0238132
Step 200: accuracy = 0.87
w1 = -0.104626, b1 = 0.0118539, w2 = 0.0491366, b2 = -0.0243161
Step 210: accuracy = 0.86
w1 = -0.104626, b1 = 0.0118413, w2 = 0.0484582, b2 = -0.0285211
Step 220: accuracy = 0.87
w1 = -0.104626, b1 = 0.0132644, w2 = 0.0488515, b2 = -0.0325352
Step 230: accuracy = 0.87
w1 = -0.104626, b1 = 0.0125408, w2 = 0.0477489, b2 = -0.0315848
Step 240: accuracy = 0.87
w1 = -0.104626, b1 = 0.0156511, w2 = 0.0491645, b2 = -0.0298207
Step 250: accuracy = 0.88
w1 = -0.104626, b1 = 0.0153061, w2 = 0.0502617, b2 = -0.0280918
Step 260: accuracy = 0.88
w1 = -0.104626, b1 = 0.0157638, w2 = 0.0513601, b2 = -0.0301804
Step 270: accuracy = 0.88
w1 = -0.104626, b1 = 0.0168857, w2 = 0.0496235, b2 = -0.0284943
Step 280: accuracy = 0.88
w1 = -0.104626, b1 = 0.017354, w2 = 0.050668, b2 = -0.033228
Step 290: accuracy = 0.88
w1 = -0.104626, b1 = 0.0167057, w2 = 0.0476422, b2 = -0.0323884
Step 300: accuracy = 0.89
w1 = -0.104626, b1 = 0.018102, w2 = 0.0469485, b2 = -0.0346551
Step 310: accuracy = 0.88
w1 = -0.104626, b1 = 0.0175172, w2 = 0.046957, b2 = -0.0350504
Step 320: accuracy = 0.88
w1 = -0.104626, b1 = 0.0183029, w2 = 0.0477744, b2 = -0.0329131
Step 330: accuracy = 0.89
w1 = -0.104626, b1 = 0.0208276, w2 = 0.0458438, b2 = -0.0346926
Step 340: accuracy = 0.89
w1 = -0.104626, b1 = 0.019926, w2 = 0.0456157, b2 = -0.036132
Step 350: accuracy = 0.88
w1 = -0.104626, b1 = 0.0191752, w2 = 0.0448709, b2 = -0.0363716
Step 360: accuracy = 0.89
w1 = -0.104626, b1 = 0.0191416, w2 = 0.0449009, b2 = -0.0387677
Step 370: accuracy = 0.89
w1 = -0.104626, b1 = 0.0204091, w2 = 0.0452307, b2 = -0.0379114
Step 380: accuracy = 0.89
w1 = -0.104626, b1 = 0.0202292, w2 = 0.0453764, b2 = -0.0354987
Step 390: accuracy = 0.89
w1 = -0.104626, b1 = 0.0199275, w2 = 0.0460904, b2 = -0.0376973
Step 400: accuracy = 0.89
w1 = -0.104626, b1 = 0.0184227, w2 = 0.0460075, b2 = -0.0413506
Step 410: accuracy = 0.90
w1 = -0.104626, b1 = 0.0196339, w2 = 0.0457153, b2 = -0.0402573
Step 420: accuracy = 0.90
w1 = -0.104626, b1 = 0.0199804, w2 = 0.045467, b2 = -0.0367394
Step 430: accuracy = 0.89
w1 = -0.104626, b1 = 0.0193179, w2 = 0.0442481, b2 = -0.0362118
Step 440: accuracy = 0.90
w1 = -0.104626, b1 = 0.0192232, w2 = 0.0446539, b2 = -0.0367133
Step 450: accuracy = 0.90
w1 = -0.104626, b1 = 0.0206981, w2 = 0.0455528, b2 = -0.0371239
Step 460: accuracy = 0.90
w1 = -0.104626, b1 = 0.0230573, w2 = 0.0464065, b2 = -0.0354767
Step 470: accuracy = 0.90
w1 = -0.104626, b1 = 0.0240116, w2 = 0.046833, b2 = -0.0385503
Step 480: accuracy = 0.90
w1 = -0.104626, b1 = 0.0245807, w2 = 0.0453478, b2 = -0.0407147
Step 490: accuracy = 0.91
w1 = -0.104626, b1 = 0.0247226, w2 = 0.0454282, b2 = -0.0429847
Step 500: accuracy = 0.90
w1 = -0.104626, b1 = 0.0231175, w2 = 0.0443457, b2 = -0.0393079
Step 510: accuracy = 0.89
w1 = -0.104626, b1 = 0.023713, w2 = 0.0450048, b2 = -0.044091
Step 520: accuracy = 0.90
w1 = -0.104626, b1 = 0.021765, w2 = 0.0450365, b2 = -0.0439013
Step 530: accuracy = 0.90
w1 = -0.104626, b1 = 0.021658, w2 = 0.0454405, b2 = -0.0400919
Step 540: accuracy = 0.91
w1 = -0.104626, b1 = 0.0206637, w2 = 0.0449016, b2 = -0.0411744
Step 550: accuracy = 0.91
w1 = -0.104626, b1 = 0.0208236, w2 = 0.04699, b2 = -0.0387063
Step 560: accuracy = 0.91
w1 = -0.104626, b1 = 0.0197812, w2 = 0.0484956, b2 = -0.0420222
Step 570: accuracy = 0.90
w1 = -0.104626, b1 = 0.0186318, w2 = 0.0464296, b2 = -0.03958
Step 580: accuracy = 0.91
w1 = -0.104626, b1 = 0.0194262, w2 = 0.0465777, b2 = -0.0413907
Step 590: accuracy = 0.90
w1 = -0.104626, b1 = 0.0196803, w2 = 0.0460711, b2 = -0.0418789
Step 600: accuracy = 0.91
w1 = -0.104626, b1 = 0.0213431, w2 = 0.0470094, b2 = -0.0429891
Step 610: accuracy = 0.91
w1 = -0.104626, b1 = 0.0218252, w2 = 0.0471165, b2 = -0.0448679
Step 620: accuracy = 0.91
w1 = -0.104626, b1 = 0.0216487, w2 = 0.0475807, b2 = -0.042104
Step 630: accuracy = 0.91
w1 = -0.104626, b1 = 0.0223767, w2 = 0.0478063, b2 = -0.0405012
Step 640: accuracy = 0.91
w1 = -0.104626, b1 = 0.0209831, w2 = 0.0475094, b2 = -0.0386126
Step 650: accuracy = 0.91
w1 = -0.104626, b1 = 0.022085, w2 = 0.0467915, b2 = -0.0402629
Step 660: accuracy = 0.91
w1 = -0.104626, b1 = 0.0236986, w2 = 0.0459648, b2 = -0.0412283
Step 670: accuracy = 0.91
w1 = -0.104626, b1 = 0.023384, w2 = 0.0449472, b2 = -0.0390679
Step 680: accuracy = 0.91
w1 = -0.104626, b1 = 0.0245065, w2 = 0.0454489, b2 = -0.043425
Step 690: accuracy = 0.91
w1 = -0.104626, b1 = 0.0236328, w2 = 0.0449737, b2 = -0.0394318
Step 700: accuracy = 0.91
w1 = -0.104626, b1 = 0.0261547, w2 = 0.0457287, b2 = -0.0428488
Step 710: accuracy = 0.91
w1 = -0.104626, b1 = 0.026106, w2 = 0.0457642, b2 = -0.045754
Step 720: accuracy = 0.91
w1 = -0.104626, b1 = 0.0236385, w2 = 0.0462153, b2 = -0.0490188
Step 730: accuracy = 0.91
w1 = -0.104626, b1 = 0.0242546, w2 = 0.0471421, b2 = -0.045656
Step 740: accuracy = 0.91
w1 = -0.104626, b1 = 0.0261029, w2 = 0.0464358, b2 = -0.0444925
Step 750: accuracy = 0.91
w1 = -0.104626, b1 = 0.0247439, w2 = 0.0446578, b2 = -0.0457674
Step 760: accuracy = 0.91
w1 = -0.104626, b1 = 0.0256695, w2 = 0.0419529, b2 = -0.0491441
Step 770: accuracy = 0.91
w1 = -0.104626, b1 = 0.0277884, w2 = 0.0433234, b2 = -0.0468172
Step 780: accuracy = 0.91
w1 = -0.104626, b1 = 0.0246422, w2 = 0.0444741, b2 = -0.0485368
Step 790: accuracy = 0.91
w1 = -0.104626, b1 = 0.0247197, w2 = 0.0436097, b2 = -0.0468266
Step 800: accuracy = 0.92
w1 = -0.104626, b1 = 0.0248429, w2 = 0.0437799, b2 = -0.0464189
Step 810: accuracy = 0.92
w1 = -0.104626, b1 = 0.0261918, w2 = 0.0448315, b2 = -0.0478121
Step 820: accuracy = 0.92
w1 = -0.104626, b1 = 0.025253, w2 = 0.0469031, b2 = -0.0455748
Step 830: accuracy = 0.92
w1 = -0.104626, b1 = 0.0234742, w2 = 0.0457896, b2 = -0.0483174
Step 840: accuracy = 0.92
w1 = -0.104626, b1 = 0.0243218, w2 = 0.0461475, b2 = -0.0460308
Step 850: accuracy = 0.92
w1 = -0.104626, b1 = 0.0248464, w2 = 0.0464465, b2 = -0.0455017
Step 860: accuracy = 0.92
w1 = -0.104626, b1 = 0.0252715, w2 = 0.0460436, b2 = -0.0475662
Step 870: accuracy = 0.91
w1 = -0.104626, b1 = 0.0243311, w2 = 0.045874, b2 = -0.0440692
Step 880: accuracy = 0.92
w1 = -0.104626, b1 = 0.0243988, w2 = 0.0440209, b2 = -0.0451299
Step 890: accuracy = 0.92
w1 = -0.104626, b1 = 0.0263941, w2 = 0.0465355, b2 = -0.0479704
Step 900: accuracy = 0.92
w1 = -0.104626, b1 = 0.0263075, w2 = 0.0451031, b2 = -0.0481218
Step 910: accuracy = 0.92
w1 = -0.104626, b1 = 0.0270958, w2 = 0.0446266, b2 = -0.049472
Step 920: accuracy = 0.92
w1 = -0.104626, b1 = 0.0272803, w2 = 0.0421502, b2 = -0.046941
Step 930: accuracy = 0.92
w1 = -0.104626, b1 = 0.0246055, w2 = 0.0399888, b2 = -0.050353
Step 940: accuracy = 0.92
w1 = -0.104626, b1 = 0.0280319, w2 = 0.0416358, b2 = -0.0503216
Step 950: accuracy = 0.91
w1 = -0.104626, b1 = 0.0290783, w2 = 0.0431141, b2 = -0.0506599
Step 960: accuracy = 0.92
w1 = -0.104626, b1 = 0.0270668, w2 = 0.0426826, b2 = -0.0509133
Step 970: accuracy = 0.92
w1 = -0.104626, b1 = 0.0250352, w2 = 0.0418771, b2 = -0.0522786
Step 980: accuracy = 0.92
w1 = -0.104626, b1 = 0.024698, w2 = 0.0414285, b2 = -0.0501539
Step 990: accuracy = 0.92
w1 = -0.104626, b1 = 0.0244779, w2 = 0.0418368, b2 = -0.0499951

2. MNISTの手書き数字認識の畳み込みニューラルネットワークを実装

2.1 データセット準備

1.1の定義をそのまま使用。

2.2 モデル構築(入力層~畳み込み層1)

In [2]:
with tf.name_scope("input"):
    # Flattened 28x28 images and one-hot labels for the CNN.
    # NOTE(review): this reuses the scope name "input" from section 1 in the
    # same default graph, so TF will rename it (e.g. "input_1") — confirm
    # this is intended, or reset the graph between sections.
    x = tf.placeholder(tf.float32, [None,784], name="x-input")
    y = tf.placeholder(tf.float32, [None, 10], name="y-input")
    
# Reshape to NHWC (batch, 28, 28, 1 channel) for conv2d.
img = tf.reshape(x, [-1,28,28,1])
# 32 filters of 5x5 over 1 input channel; SAME padding keeps 28x28 output.
# NOTE(review): f1/conv1 sit outside any layer scope, unlike f2 in layer2.
f1 = tf.Variable(tf.truncated_normal([5,5,1,32], stddev=0.1))
conv1 = tf.nn.conv2d(img, f1, strides=[1,1,1,1], padding='SAME')

2.3 モデル構築(バイアス + プーリング層1)

In [3]:
with tf.name_scope("layer1"):
    with tf.name_scope("biases"):
        # Small positive bias init, commonly used with ReLU units.
        b1 = tf.Variable(tf.constant(0.1, shape=[32]), name="b1")
        variable_summaries(b1)
    with tf.name_scope("convolutions"):
        h_conv1 = tf.nn.relu(conv1 + b1)
        tf.summary.histogram("conv1", h_conv1)
    with tf.name_scope("maxpool"):
        # 2x2 max pooling with stride 2: 28x28 -> 14x14.
        h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
        tf.summary.histogram("maxpool1", h_pool1)

2.4 モデル構築(畳み込み層2~プーリング層2)

In [4]:
with tf.name_scope("layer2"):
    with tf.name_scope("filters"):
        # 64 filters of 5x5 over the 32 channels from layer1.
        f2 = tf.Variable(tf.truncated_normal([5,5,32,64], stddev=0.1), name="f2")
        variable_summaries(f2)
    conv2 = tf.nn.conv2d(h_pool1, f2, strides=[1,1,1,1], padding='SAME')
    with tf.name_scope("biases"):
        b2 = tf.Variable(tf.constant(0.1, shape=[64]), name="b2")
        variable_summaries(b2)
    with tf.name_scope("convolutions"):
        h_conv2 = tf.nn.relu(conv2 + b2)
        tf.summary.histogram("conv2", h_conv2)
    with tf.name_scope("maxpool"):
        # 2x2 max pooling with stride 2: 14x14 -> 7x7.
        h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
        tf.summary.histogram("maxpool2", h_pool2)

2.5 モデル構築(全結合層)

In [5]:
with tf.name_scope("layer3"):
    # Flatten the 7x7x64 pooled feature maps for the fully connected layer.
    h_pool2_flat = tf.reshape(h_pool2, [-1,7*7*64])
    with tf.name_scope("weight"):
        w_fc1 = tf.Variable(tf.truncated_normal([7*7*64,1024], stddev=0.1), name="w1")
        variable_summaries(w_fc1)
    with tf.name_scope("biases"):
        b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]), name="b1")
        variable_summaries(b_fc1)
    with tf.name_scope("x_matmul_w_plus_b"):
        pre_act_1 = tf.matmul(h_pool2_flat, w_fc1) + b_fc1
        tf.summary.histogram("pre_activations", pre_act_1)
    h_fc1 = tf.nn.relu(pre_act_1)
    tf.summary.histogram("activations", h_fc1)

2.6 モデル構築(出力層~誤差関数)

In [6]:
# Output layer: 1024 -> 10 fully connected, softmax over the digit classes.
with tf.name_scope("layer4"):
    with tf.name_scope("weight"):
        w_fc2 = tf.Variable(tf.truncated_normal([1024,10], stddev=0.1), name="w2")
        variable_summaries(w_fc2)
    with tf.name_scope("biases"):
        b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]), name="b2")
        variable_summaries(b_fc2)
    with tf.name_scope("x_matmul_w_plus_b"):
        pre_out_2 = tf.matmul(h_fc1, w_fc2) + b_fc2  # logits
        tf.summary.histogram("pre_out", pre_out_2)
    # Probabilities are kept for the accuracy computation below.
    out = tf.nn.softmax(pre_out_2, name="out")
    tf.summary.histogram("out", out)
with tf.name_scope("loss"):
    # Cross-entropy computed directly from the logits. The original
    # tf.log(softmax(...)) form produces -inf/NaN gradients once the
    # softmax saturates to exactly 0 for the true class; the fused op
    # is numerically stable and mathematically equivalent.
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pre_out_2))
    tf.summary.scalar("loss", loss)

2.7 モデル構築(重み(バイアス)の調整(訓練)~評価)

In [7]:
with tf.name_scope("train"):
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss) # adjust weights (and biases), learning rate = 0.01
with tf.name_scope("accuracy"):
    with tf.name_scope("correct_prediction"):
        # A prediction is correct when the argmax class matches the label.
        correct = tf.equal(tf.argmax(out, 1), tf.argmax(y, 1))
    with tf.name_scope("accuracy"):
        # Fraction of correct predictions in the fed batch.
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
tf.summary.scalar("accuracy", accuracy)
Out[7]:
<tf.Tensor 'accuracy_1:0' shape=() dtype=string>

2.8 実行

In [8]:
with tf.Session() as sess:
    merged = tf.summary.merge_all()
    # Separate writers so train/test curves can be overlaid in TensorBoard.
    train_writer = tf.summary.FileWriter("C:/tmp/mnist2/train", sess.graph)
    test_writer = tf.summary.FileWriter("C:/tmp/mnist2/test")
    sess.run(tf.global_variables_initializer())  # initialize variables
    test_images = mnist.test.images
    test_labels = mnist.test.labels  # fixed typo: was 'test_labes'
    for step in range(1000):
        # One optimizer step on a 50-example minibatch, logging train summaries.
        train_images, train_labels = mnist.train.next_batch(50)
        summary, _ = sess.run([merged, train_step], feed_dict={x: train_images, y: train_labels})
        train_writer.add_summary(summary, step)
        if step % 10 == 0:
            # Evaluate on the full test set every 10 steps.
            summary, acc_val = sess.run([merged, accuracy], feed_dict={x: test_images, y: test_labels})
            test_writer.add_summary(summary, step)
            print('Step %d: accuracy = %.2f' % (step, acc_val))
            # Spot-check a single entry of each FC parameter to watch it move.
            print('w1 = %s, b1 = %s, w2 = %s, b2 = %s' % (sess.run(w_fc1)[0][0], sess.run(b_fc1)[0], sess.run(w_fc2)[0][0], sess.run(b_fc2)[0]))
Step 0: accuracy = 0.12
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.1012
Step 10: accuracy = 0.39
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.100617
Step 20: accuracy = 0.56
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.100077
Step 30: accuracy = 0.74
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0997958
Step 40: accuracy = 0.77
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0994173
Step 50: accuracy = 0.80
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0995468
Step 60: accuracy = 0.84
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0996446
Step 70: accuracy = 0.81
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0997403
Step 80: accuracy = 0.87
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0996411
Step 90: accuracy = 0.86
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0998492
Step 100: accuracy = 0.84
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0993493
Step 110: accuracy = 0.89
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0994174
Step 120: accuracy = 0.89
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0995857
Step 130: accuracy = 0.90
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0993562
Step 140: accuracy = 0.89
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.099673
Step 150: accuracy = 0.91
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0994489
Step 160: accuracy = 0.91
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0994779
Step 170: accuracy = 0.91
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0992059
Step 180: accuracy = 0.88
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0997927
Step 190: accuracy = 0.90
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0991495
Step 200: accuracy = 0.92
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0989284
Step 210: accuracy = 0.93
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0990124
Step 220: accuracy = 0.93
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0990616
Step 230: accuracy = 0.91
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.099141
Step 240: accuracy = 0.92
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0992356
Step 250: accuracy = 0.91
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0993443
Step 260: accuracy = 0.93
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0992217
Step 270: accuracy = 0.93
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0990556
Step 280: accuracy = 0.94
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0990549
Step 290: accuracy = 0.93
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0990904
Step 300: accuracy = 0.94
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0992588
Step 310: accuracy = 0.94
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0991969
Step 320: accuracy = 0.94
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0990332
Step 330: accuracy = 0.93
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0991642
Step 340: accuracy = 0.94
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0989428
Step 350: accuracy = 0.93
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0991554
Step 360: accuracy = 0.94
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0990892
Step 370: accuracy = 0.94
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0992612
Step 380: accuracy = 0.94
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0991943
Step 390: accuracy = 0.94
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0996204
Step 400: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0992348
Step 410: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0993061
Step 420: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.099345
Step 430: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0995629
Step 440: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0995332
Step 450: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0995248
Step 460: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.099318
Step 470: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0996505
Step 480: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0995612
Step 490: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0996599
Step 500: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.099684
Step 510: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0992291
Step 520: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0994983
Step 530: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0996275
Step 540: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0996716
Step 550: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0995077
Step 560: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0995989
Step 570: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0996041
Step 580: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0995638
Step 590: accuracy = 0.94
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0997764
Step 600: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0995816
Step 610: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0994916
Step 620: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0995616
Step 630: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0995665
Step 640: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.099617
Step 650: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0996731
Step 660: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0998254
Step 670: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0997402
Step 680: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0999841
Step 690: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0996284
Step 700: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0997247
Step 710: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0999395
Step 720: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.099692
Step 730: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0997841
Step 740: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0999418
Step 750: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0998379
Step 760: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0997422
Step 770: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0998392
Step 780: accuracy = 0.95
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0998626
Step 790: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0998729
Step 800: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.099946
Step 810: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.100109
Step 820: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0997875
Step 830: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.100068
Step 840: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0997182
Step 850: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.100016
Step 860: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0998823
Step 870: accuracy = 0.97
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0997325
Step 880: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0999875
Step 890: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0998336
Step 900: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.100033
Step 910: accuracy = 0.97
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.100113
Step 920: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0998771
Step 930: accuracy = 0.97
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.100202
Step 940: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0999624
Step 950: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.100096
Step 960: accuracy = 0.97
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.0999958
Step 970: accuracy = 0.96
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.10005
Step 980: accuracy = 0.97
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.100121
Step 990: accuracy = 0.97
w1 = -0.123757, b1 = 0.1, w2 = -0.0361557, b2 = 0.100132