Implementing an LSTM Text Classification Model


Note: the cnews dataset can be downloaded from the internet.

# Module architecture
'''
@author: huangshaobo
1. Build the computation graph - LSTM model: embedding layer, LSTM layer, FC layer, train_op + loss
2. Training loop code
3. Dataset wrapper, API: next_batch(batch_size)
4. Vocabulary wrapper, API: sentence_to_id(text_sentence): sentence -> ids
5. Category wrapper, API: category_to_id(category)
'''
import tensorflow as tf
import numpy as np
import math
import os
import sys

# Print logs in the notebook
tf.logging.set_verbosity(tf.logging.INFO)

def get_default_params():
    return tf.contrib.training.HParams(
        num_embedding_size = 16,    # 32; length of each word vector
        num_timesteps = 50,         # 600; number of LSTM time steps
        num_lstm_nodes = [32, 32],  # [64, 64]; units per LSTM layer, 2 layers
        num_lstm_layers = 2,        # number of LSTM layers
        num_fc_nodes = 32,          # 64; units in the fully connected layer
        batch_size = 100,
        clip_lstm_grads = 1.0,      # clip the LSTM gradient norm
        learning_rate = 0.001,
        num_word_threshold = 10)

hps = get_default_params()

train_file = 'cnews.train.seg.txt'
val_file = 'cnews.val.seg.txt'
test_file = 'cnews.test.seg.txt'
vocab_file = 'cnews.vocab.txt'
category_file = 'cnews.category.txt'
output_folder = 'run_text_rnn'

if not os.path.exists(output_folder):
    os.mkdir(output_folder)

# test
print(hps.num_word_threshold)

Output: 10
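tf.contrib.training.HParams exposes every field above as an attribute, so the configuration can be sanity-checked directly. A minimal inspection sketch (assuming TensorFlow 1.x with tf.contrib available):

print(hps.num_embedding_size)   # 16
print(hps.num_lstm_nodes)       # [32, 32]
print(hps.values())             # all hyperparameters as a plain dict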

# Vocabulary wrapper module
class Vocab:
    def __init__(self, filename, num_word_threshold):
        self._word_to_id = {}
        self._unk = -1
        self._num_word_threshold = num_word_threshold
        self._read_dict(filename)

    # Read the file line by line and assign an id to each word
    def _read_dict(self, filename):
        with open(filename, 'r', encoding='UTF-8') as f:
            lines = f.readlines()
        for line in lines:
            word, frequency = line.strip('\r\n').split('\t')
            frequency = int(frequency)
            if frequency < self._num_word_threshold:
                continue
            idx = len(self._word_to_id)
            if word == '<UNK>':
                self._unk = idx
            self._word_to_id[word] = idx

    def word_to_id(self, word):
        return self._word_to_id.get(word, self._unk)

    @property
    def unk(self):
        return self._unk

    def size(self):
        return len(self._word_to_id)

    def sentence_to_id(self, sentence):
        word_ids = [self.word_to_id(cur_word) for cur_word in sentence.split()]
        return word_ids
#test
vocab = Vocab(vocab_file, hps.num_word_threshold)
vocab_size = vocab.size()
tf.logging.info('vocab_size: %d' % vocab_size)

test_str = '的 在 了 是'
print(vocab.sentence_to_id(test_str))

INFO:tensorflow:vocab_size: 77323
[2, 4, 6, 7]
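_read_dict expects cnews.vocab.txt to contain one word<TAB>frequency pair per line. A hedged sketch of how such a file could be produced from the segmented training file (build_vocab is a hypothetical helper, assuming each line of the seg file is "label<TAB>segmented content"):

from collections import Counter

def build_vocab(seg_file, vocab_file):
    counter = Counter()
    with open(seg_file, 'r', encoding='UTF-8') as f:
        for line in f:
            _, content = line.strip('\r\n').split('\t', 1)
            counter.update(content.split())
    with open(vocab_file, 'w', encoding='UTF-8') as f:
        # Reserve a high-frequency <UNK> entry so it survives the frequency threshold.
        f.write('<UNK>\t1000000\n')
        for word, freq in counter.most_common():
            f.write('%s\t%d\n' % (word, freq))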

# Category wrapper module
class CategeoryDict:
    def __init__(self, filename):
        self._categeory_to_id = {}
        with open(filename, 'r', encoding='UTF-8') as f:
            lines = f.readlines()
        for line in lines:
            categeory = line.strip('\r\n')
            idx = len(self._categeory_to_id)
            self._categeory_to_id[categeory] = idx

    def categeory_to_id(self, categeory):
        if not categeory in self._categeory_to_id:
            raise Exception("%s is not in our categeory list" % categeory)
        return self._categeory_to_id[categeory]

    def size(self):
        return len(self._categeory_to_id)
#test
categeory_vocab = CategeoryDict(category_file)
test_str= '娱乐'
num_classes = categeory_vocab.size()
tf.logging.info('label: %s,id: %d' % (test_str, categeory_vocab.categeory_to_id(test_str)))
tf.logging.info('num_classes: %d' % num_classes)

INFO:tensorflow:label: 娱乐,id: 1
INFO:tensorflow:num_classes: 10
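cnews.category.txt simply lists one category name per line, and CategeoryDict assigns ids by line order. A small inspection sketch to print the full mapping:

with open(category_file, 'r', encoding='UTF-8') as f:
    for line in f:
        name = line.strip('\r\n')
        print(name, categeory_vocab.categeory_to_id(name))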

# Dataset wrapper module
class TextDataSet:
    def __init__(self, filename, vocab, categeory_vocab, num_timesteps):
        self._vocab = vocab
        self._categeory_vocab = categeory_vocab
        self._num_timesteps = num_timesteps
        self._inputs = []    # matrix of word ids
        self._outputs = []   # vector of label ids
        self._indicator = 0
        self._parse_file(filename)

    def _parse_file(self, filename):
        tf.logging.info('Loading data from %s', filename)
        with open(filename, 'r', encoding='UTF-8') as f:
            lines = f.readlines()
        for line in lines:
            label, content = line.strip('\r\n').split('\t')
            id_label = self._categeory_vocab.categeory_to_id(label)
            id_words = self._vocab.sentence_to_id(content)
            id_words = id_words[0:self._num_timesteps]
            padding_num = self._num_timesteps - len(id_words)
            id_words = id_words + [self._vocab.unk for i in range(padding_num)]
            self._inputs.append(id_words)
            self._outputs.append(id_label)
        # Convert to numpy arrays
        self._inputs = np.asarray(self._inputs, dtype=np.int32)
        self._outputs = np.asarray(self._outputs, dtype=np.int32)
        self._random_shuffle()

    def _random_shuffle(self):
        p = np.random.permutation(len(self._inputs))
        self._inputs = self._inputs[p]
        self._outputs = self._outputs[p]

    def next_batch(self, batch_size):
        end_indicator = self._indicator + batch_size
        if end_indicator > len(self._inputs):
            self._random_shuffle()
            self._indicator = 0
            end_indicator = batch_size
        if end_indicator > len(self._inputs):
            raise Exception("batch_size: %d is too large" % batch_size)
        batch_inputs = self._inputs[self._indicator:end_indicator]
        batch_outputs = self._outputs[self._indicator:end_indicator]
        self._indicator = end_indicator
        return batch_inputs, batch_outputs

# test
train_dataset = TextDataSet(train_file, vocab, categeory_vocab, hps.num_timesteps)
val_dataset = TextDataSet(val_file, vocab, categeory_vocab, hps.num_timesteps)
test_dataset = TextDataSet(test_file, vocab, categeory_vocab, hps.num_timesteps)

print(train_dataset.next_batch(2))
print(val_dataset.next_batch(2))
print(test_dataset.next_batch(2))

INFO:tensorflow:Loading data from cnews.train.seg.txt
INFO:tensorflow:Loading data from cnews.val.seg.txt
INFO:tensorflow:Loading data from cnews.test.seg.txt
(array([[ 128, 14848, 11, 37, 1575, 1755, 470, 75585, 14123,
1, 37, 2, 1755, 1575, 1071, 7958, 13470, 3,
261, 27, 1, 348, 1762, 255, 863, 293, 1575,
381, 6, 7160, 1, 32, 310, 11870, 1, 226,
27, 667, 539, 122, 6787, 2, 3332, 22879, 3,
268, 225, 5961, 9578, 1170],
[45395, 37, 8057, 5227, 87, 11, 13, 7105, 2,
4060, 17, 467, 16, 4, 5533, 4478, 7105, 2,
4060, 11, 25120, 2349, 11, 0, 5, 40814, 114,
0, 1079, 11, 191, 1382, 29, 8208, 2, 4060,
1, 25120, 2, 1505, 4, 37, 2451, 16530, 1,
7810, 4194, 40, 6958, 40]]), array([4, 4]))
(array([[ 1243, 623, 9001, 70, 27738, 74690, 11411, 1141, 23344,
61, 177, 4, 1243, 623, 90, 8, 7509, 9001,
9, 1, 2889, 9098, 8, 74690, 9, 3, 11564,
153, 11499, 1, 11207, 59713, 0, 2, 4957, 772,
29521, 282, 1, 55, 9098, 2, 9001, 70, 1760,
7, 17886, 2, 1, 457],
[ 467, 11, 12987, 104, 9659, 50830, 1039, 0, 24,
1378, 2320, 11, 22177, 3753, 1221, 235, 571, 483,
321, 50, 4, 324, 20038, 25, 254, 106, 638,
1047, 13465, 254, 53153, 2228, 1, 1140, 7, 26,
2786, 2228, 14316, 2, 12481, 1, 53, 25, 54,
3817, 87, 1042, 2, 638]]), array([2, 5]))
(array([[ 0, 4590, 1307, 1534, 172, 4780, 25640, 16712, 59,
4, 1005, 11305, 2, 37, 155, 1307, 217, 335,
396, 32, 1789, 9567, 29, 1, 0, 4590, 191,
1307, 1534, 17, 1607, 282, 8, 0, 9, 16,
512, 494, 1055, 4007, 3, 795, 44, 1, 0,
4590, 1613, 19384, 0, 173],
[ 261, 27, 130, 23, 1114, 10742, 1995, 8987, 2170,
2182, 17, 36349, 0, 978, 16, 0, 4438, 0,
0, 0, 0, 18606, 0, 10782, 12732, 17260, 0,
0, 1455, 0, 0, 0, 14071, 0, 0, 0,
61689, 16564, 65709, 0, 25097, 4129, 23181, 0, 1455,
1981, 0, 200, 2125, 0]]), array([2, 4]))
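The core preprocessing step in _parse_file is truncate-then-pad to a fixed length of num_timesteps ids. A small sketch isolating that step (encode_sentence is a hypothetical helper name, not part of the original code):

def encode_sentence(sentence, vocab, num_timesteps):
    ids = vocab.sentence_to_id(sentence)
    ids = ids[0:num_timesteps]                       # truncate long sentences
    ids += [vocab.unk] * (num_timesteps - len(ids))  # pad short ones with <UNK>
    return ids

print(encode_sentence('的 在 了 是', vocab, hps.num_timesteps))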

# Computation graph construction module
def create_model(hps, vocab_size, num_classes):
    num_timesteps = hps.num_timesteps
    batch_size = hps.batch_size

    inputs = tf.placeholder(tf.int32, (batch_size, num_timesteps))
    outputs = tf.placeholder(tf.int32, (batch_size,))
    # Fraction of units kept after dropout: keep_prob = 1 - dropout_rate
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    # Track the training step
    global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step', trainable=False)

    '''Embedding layer'''
    # Initializer over [-1, 1]
    embedding_initializer = tf.random_uniform_initializer(-1.0, 1.0)
    with tf.variable_scope('embedding', initializer=embedding_initializer):
        embeddings = tf.get_variable('embedding',
                                     [vocab_size, hps.num_embedding_size],
                                     tf.float32)
        # Map word ids to embedding vectors
        embed_inputs = tf.nn.embedding_lookup(embeddings, inputs)

    '''LSTM layer'''
    scale = (1.0 / math.sqrt(hps.num_embedding_size + hps.num_lstm_nodes[-1])) * 3.0
    lstm_init = tf.random_uniform_initializer(-scale, scale)

    def _generate_params_for_lstm_cell(x_size, h_size, bias_size):
        x_w = tf.get_variable('x_weights', x_size)
        h_w = tf.get_variable('h_weights', h_size)
        b = tf.get_variable('biases', bias_size,
                            initializer=tf.constant_initializer(0.0))
        return x_w, h_w, b

    with tf.variable_scope('lstm_nn', initializer=lstm_init):
        with tf.variable_scope('inputs'):
            ix, ih, ib = _generate_params_for_lstm_cell(
                x_size=[hps.num_embedding_size, hps.num_lstm_nodes[0]],
                h_size=[hps.num_lstm_nodes[0], hps.num_lstm_nodes[0]],
                bias_size=[1, hps.num_lstm_nodes[0]])
        with tf.variable_scope('outputs'):
            ox, oh, ob = _generate_params_for_lstm_cell(
                x_size=[hps.num_embedding_size, hps.num_lstm_nodes[0]],
                h_size=[hps.num_lstm_nodes[0], hps.num_lstm_nodes[0]],
                bias_size=[1, hps.num_lstm_nodes[0]])
        with tf.variable_scope('forget'):
            fx, fh, fb = _generate_params_for_lstm_cell(
                x_size=[hps.num_embedding_size, hps.num_lstm_nodes[0]],
                h_size=[hps.num_lstm_nodes[0], hps.num_lstm_nodes[0]],
                bias_size=[1, hps.num_lstm_nodes[0]])
        with tf.variable_scope('memory'):
            cx, ch, cb = _generate_params_for_lstm_cell(
                x_size=[hps.num_embedding_size, hps.num_lstm_nodes[0]],
                h_size=[hps.num_lstm_nodes[0], hps.num_lstm_nodes[0]],
                bias_size=[1, hps.num_lstm_nodes[0]])

        state = tf.Variable(tf.zeros([batch_size, hps.num_lstm_nodes[0]]), trainable=False)
        h = tf.Variable(tf.zeros([batch_size, hps.num_lstm_nodes[0]]), trainable=False)

        # Hand-written single-layer LSTM, unrolled over the time steps
        for i in range(num_timesteps):
            embed_input = embed_inputs[:, i, :]
            embed_input = tf.reshape(embed_input, [batch_size, hps.num_embedding_size])
            forget_gate = tf.sigmoid(tf.matmul(embed_input, fx) + tf.matmul(h, fh) + fb)
            input_gate = tf.sigmoid(tf.matmul(embed_input, ix) + tf.matmul(h, ih) + ib)
            output_gate = tf.sigmoid(tf.matmul(embed_input, ox) + tf.matmul(h, oh) + ob)
            mid_state = tf.tanh(tf.matmul(embed_input, cx) + tf.matmul(h, ch) + cb)
            state = mid_state * input_gate + state * forget_gate
            h = output_gate * tf.tanh(state)
        last = h

        # Equivalent multi-layer LSTM built from tf.contrib.rnn cells;
        # as written, its output replaces `last` from the unrolled loop above.
        cells = []
        for i in range(hps.num_lstm_layers):
            cell = tf.contrib.rnn.BasicLSTMCell(hps.num_lstm_nodes[i], state_is_tuple=True)
            cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
            cells.append(cell)
        cell = tf.contrib.rnn.MultiRNNCell(cells)  # stacked multi-layer LSTM

        initial_state = cell.zero_state(batch_size, tf.float32)  # zero-initialized hidden state
        # rnn_outputs shape: [batch_size, num_timesteps, num_lstm_nodes[-1]]
        rnn_outputs, _ = tf.nn.dynamic_rnn(cell, embed_inputs, initial_state=initial_state)
        last = rnn_outputs[:, -1, :]

    '''Fully connected layer'''
    fc_init = tf.uniform_unit_scaling_initializer(factor=1.0)
    with tf.variable_scope('fc', initializer=fc_init):
        fc1 = tf.layers.dense(last, hps.num_fc_nodes, activation=tf.nn.relu, name='fc1')
        fc1_dropout = tf.contrib.layers.dropout(fc1, keep_prob)
        logits = tf.layers.dense(fc1_dropout, num_classes, name='fc2')

    '''Loss and accuracy'''
    with tf.name_scope('metrics'):
        softmax_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=outputs)
        loss = tf.reduce_mean(softmax_loss)
        y_pred = tf.argmax(tf.nn.softmax(logits), 1, output_type=tf.int32)
        correct_pred = tf.equal(outputs, y_pred)
        accuary = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    '''Train op'''
    with tf.name_scope('train_op'):
        tvars = tf.trainable_variables()  # all trainable variables
        for var in tvars:
            tf.logging.info('variable name: %s' % (var.name))
        # Gradient clipping for the LSTM
        grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), hps.clip_lstm_grads)
        optimizer = tf.train.AdamOptimizer(hps.learning_rate)
        train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=global_step)

    return ((inputs, outputs, keep_prob),
            (loss, accuary),
            (train_op, global_step))

placeholders, metrics, others = create_model(hps, vocab_size, num_classes)
inputs, outputs, keep_prob = placeholders
loss, accuary = metrics
train_op, global_step = others
WARNING:tensorflow:From <ipython-input-6-d07947ed78f3>:88: UniformUnitScaling.__init__ (from tensorflow.python.ops.init_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.initializers.variance_scaling instead with distribution=uniform to get equivalent behavior.
INFO:tensorflow:variable name: embedding/embedding:0
INFO:tensorflow:variable name: lstm_nn/inputs/x_weights:0
INFO:tensorflow:variable name: lstm_nn/inputs/h_weights:0
INFO:tensorflow:variable name: lstm_nn/inputs/biases:0
INFO:tensorflow:variable name: lstm_nn/outputs/x_weights:0
INFO:tensorflow:variable name: lstm_nn/outputs/h_weights:0
INFO:tensorflow:variable name: lstm_nn/outputs/biases:0
INFO:tensorflow:variable name: lstm_nn/forget/x_weights:0
INFO:tensorflow:variable name: lstm_nn/forget/h_weights:0
INFO:tensorflow:variable name: lstm_nn/forget/biases:0
INFO:tensorflow:variable name: lstm_nn/memory/x_weights:0
INFO:tensorflow:variable name: lstm_nn/memory/h_weights:0
INFO:tensorflow:variable name: lstm_nn/memory/biases:0
INFO:tensorflow:variable name: fc/fc1/kernel:0
INFO:tensorflow:variable name: fc/fc1/bias:0
INFO:tensorflow:variable name: fc/fc2/kernel:0
INFO:tensorflow:variable name: fc/fc2/bias:0
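For reference, the hand-written gate computations in create_model implement the standard LSTM update. A numpy sketch of a single time step (illustrative only; parameter names mirror the graph code above, and @ denotes matrix multiplication):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_step(x, h, c, params):
    (ix, ih, ib), (ox, oh, ob), (fx, fh, fb), (cx, ch, cb) = params
    f = sigmoid(x @ fx + h @ fh + fb)   # forget gate
    i = sigmoid(x @ ix + h @ ih + ib)   # input gate
    o = sigmoid(x @ ox + h @ oh + ob)   # output gate
    c_tilde = np.tanh(x @ cx + h @ ch + cb)
    c_new = i * c_tilde + f * c         # new cell state
    h_new = o * np.tanh(c_new)          # new hidden state
    return h_new, c_new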
# Training loop module
init_op = tf.global_variables_initializer()
train_keep_prob_value = 0.8
test_keep_prob_value = 1.0

num_train_steps = 10000

with tf.Session() as sess:
    sess.run(init_op)
    for i in range(num_train_steps):
        batch_inputs, batch_labels = train_dataset.next_batch(hps.batch_size)
        outputs_val = sess.run([loss, accuary, train_op, global_step],
                               feed_dict={
                                   inputs: batch_inputs,
                                   outputs: batch_labels,
                                   keep_prob: train_keep_prob_value})
        loss_val, accuary_val, _, global_step_val = outputs_val
        if global_step_val % 20 == 0:
            tf.logging.info("Step: %5d, loss: %3.3f, accuary: %3.3f"
                            % (global_step_val, loss_val, accuary_val))
INFO:tensorflow:Step:    20, loss: 2.276, accuary: 0.140
INFO:tensorflow:Step:    40, loss: 2.312, accuary: 0.070
INFO:tensorflow:Step:    60, loss: 2.325, accuary: 0.120
INFO:tensorflow:Step:    80, loss: 2.244, accuary: 0.100
INFO:tensorflow:Step:   100, loss: 2.272, accuary: 0.160
INFO:tensorflow:Step:   120, loss: 2.219, accuary: 0.170
INFO:tensorflow:Step:   140, loss: 2.302, accuary: 0.100
INFO:tensorflow:Step:   160, loss: 2.237, accuary: 0.130
INFO:tensorflow:Step:   180, loss: 2.241, accuary: 0.170
INFO:tensorflow:Step:   200, loss: 2.300, accuary: 0.150
INFO:tensorflow:Step:   220, loss: 2.209, accuary: 0.210
INFO:tensorflow:Step:   240, loss: 2.141, accuary: 0.180
INFO:tensorflow:Step:   260, loss: 2.132, accuary: 0.230
INFO:tensorflow:Step:   280, loss: 2.153, accuary: 0.130
INFO:tensorflow:Step:   300, loss: 1.977, accuary: 0.290
INFO:tensorflow:Step:   320, loss: 1.952, accuary: 0.250
INFO:tensorflow:Step:   340, loss: 2.034, accuary: 0.250
INFO:tensorflow:Step:   360, loss: 1.948, accuary: 0.290
INFO:tensorflow:Step:   380, loss: 2.037, accuary: 0.210
INFO:tensorflow:Step:   400, loss: 1.909, accuary: 0.250
INFO:tensorflow:Step:   420, loss: 1.809, accuary: 0.320
INFO:tensorflow:Step:   440, loss: 1.919, accuary: 0.300
INFO:tensorflow:Step:   460, loss: 1.882, accuary: 0.280
INFO:tensorflow:Step:   480, loss: 1.773, accuary: 0.410
INFO:tensorflow:Step:   500, loss: 1.802, accuary: 0.380
INFO:tensorflow:Step:   520, loss: 1.919, accuary: 0.260
INFO:tensorflow:Step:   540, loss: 1.824, accuary: 0.270
INFO:tensorflow:Step:   560, loss: 1.583, accuary: 0.390
INFO:tensorflow:Step:   580, loss: 1.715, accuary: 0.290
INFO:tensorflow:Step:   600, loss: 1.600, accuary: 0.380
INFO:tensorflow:Step:   620, loss: 1.517, accuary: 0.410
INFO:tensorflow:Step:   640, loss: 1.569, accuary: 0.350
INFO:tensorflow:Step:   660, loss: 1.589, accuary: 0.390
INFO:tensorflow:Step:   680, loss: 1.557, accuary: 0.400
INFO:tensorflow:Step:   700, loss: 1.673, accuary: 0.300
INFO:tensorflow:Step:   720, loss: 1.300, accuary: 0.490
INFO:tensorflow:Step:   740, loss: 1.345, accuary: 0.470
INFO:tensorflow:Step:   760, loss: 1.535, accuary: 0.430
INFO:tensorflow:Step:   780, loss: 1.232, accuary: 0.500
INFO:tensorflow:Step:   800, loss: 1.469, accuary: 0.410
INFO:tensorflow:Step:   820, loss: 1.383, accuary: 0.450
INFO:tensorflow:Step:   840, loss: 1.274, accuary: 0.550
INFO:tensorflow:Step:   860, loss: 1.250, accuary: 0.540
INFO:tensorflow:Step:   880, loss: 1.278, accuary: 0.530
INFO:tensorflow:Step:   900, loss: 1.152, accuary: 0.570
INFO:tensorflow:Step:   920, loss: 1.135, accuary: 0.550
INFO:tensorflow:Step:   940, loss: 1.288, accuary: 0.500
INFO:tensorflow:Step:   960, loss: 1.273, accuary: 0.510
INFO:tensorflow:Step:   980, loss: 1.185, accuary: 0.540
INFO:tensorflow:Step:  1000, loss: 1.297, accuary: 0.580
INFO:tensorflow:Step:  1020, loss: 1.129, accuary: 0.590
INFO:tensorflow:Step:  1040, loss: 0.938, accuary: 0.710
INFO:tensorflow:Step:  1060, loss: 0.975, accuary: 0.640
INFO:tensorflow:Step:  1080, loss: 1.121, accuary: 0.540
INFO:tensorflow:Step:  1100, loss: 0.997, accuary: 0.650
INFO:tensorflow:Step:  1120, loss: 1.076, accuary: 0.570
INFO:tensorflow:Step:  1140, loss: 1.173, accuary: 0.560
INFO:tensorflow:Step:  1160, loss: 0.974, accuary: 0.670
INFO:tensorflow:Step:  1180, loss: 1.051, accuary: 0.630
INFO:tensorflow:Step:  1200, loss: 0.961, accuary: 0.710
INFO:tensorflow:Step:  1220, loss: 1.055, accuary: 0.630
INFO:tensorflow:Step:  1240, loss: 0.930, accuary: 0.650
INFO:tensorflow:Step:  1260, loss: 1.244, accuary: 0.540
INFO:tensorflow:Step:  1280, loss: 1.019, accuary: 0.610
INFO:tensorflow:Step:  1300, loss: 0.796, accuary: 0.750
INFO:tensorflow:Step:  1320, loss: 1.065, accuary: 0.580
INFO:tensorflow:Step:  1340, loss: 0.926, accuary: 0.700
INFO:tensorflow:Step:  1360, loss: 0.931, accuary: 0.670
INFO:tensorflow:Step:  1380, loss: 0.883, accuary: 0.710
INFO:tensorflow:Step:  1400, loss: 0.747, accuary: 0.720
INFO:tensorflow:Step:  1420, loss: 0.811, accuary: 0.670
INFO:tensorflow:Step:  1440, loss: 0.859, accuary: 0.680
INFO:tensorflow:Step:  1460, loss: 0.738, accuary: 0.740
INFO:tensorflow:Step:  1480, loss: 0.781, accuary: 0.730
INFO:tensorflow:Step:  1500, loss: 0.827, accuary: 0.720
INFO:tensorflow:Step:  1520, loss: 0.688, accuary: 0.810
INFO:tensorflow:Step:  1540, loss: 0.795, accuary: 0.670
INFO:tensorflow:Step:  1560, loss: 0.747, accuary: 0.770
INFO:tensorflow:Step:  1580, loss: 0.690, accuary: 0.710
INFO:tensorflow:Step:  1600, loss: 0.922, accuary: 0.740
INFO:tensorflow:Step:  1620, loss: 0.843, accuary: 0.700
INFO:tensorflow:Step:  1640, loss: 0.618, accuary: 0.790
INFO:tensorflow:Step:  1660, loss: 0.777, accuary: 0.760
INFO:tensorflow:Step:  1680, loss: 0.737, accuary: 0.740
INFO:tensorflow:Step:  1700, loss: 0.750, accuary: 0.750
INFO:tensorflow:Step:  1720, loss: 0.602, accuary: 0.780
INFO:tensorflow:Step:  1740, loss: 0.834, accuary: 0.720
INFO:tensorflow:Step:  1760, loss: 0.757, accuary: 0.750
INFO:tensorflow:Step:  1780, loss: 0.713, accuary: 0.750
INFO:tensorflow:Step:  1800, loss: 0.849, accuary: 0.730
INFO:tensorflow:Step:  1820, loss: 0.929, accuary: 0.680
INFO:tensorflow:Step:  1840, loss: 0.625, accuary: 0.770
INFO:tensorflow:Step:  1860, loss: 0.627, accuary: 0.800
INFO:tensorflow:Step:  1880, loss: 0.753, accuary: 0.740
INFO:tensorflow:Step:  1900, loss: 0.747, accuary: 0.770
INFO:tensorflow:Step:  1920, loss: 0.767, accuary: 0.730
INFO:tensorflow:Step:  1940, loss: 0.811, accuary: 0.720
INFO:tensorflow:Step:  1960, loss: 0.593, accuary: 0.840
INFO:tensorflow:Step:  1980, loss: 0.661, accuary: 0.730
INFO:tensorflow:Step:  2000, loss: 0.663, accuary: 0.830
INFO:tensorflow:Step:  2020, loss: 0.803, accuary: 0.770
INFO:tensorflow:Step:  2040, loss: 0.530, accuary: 0.820
INFO:tensorflow:Step:  2060, loss: 0.602, accuary: 0.790
INFO:tensorflow:Step:  2080, loss: 0.555, accuary: 0.800
INFO:tensorflow:Step:  2100, loss: 0.539, accuary: 0.840
INFO:tensorflow:Step:  2120, loss: 0.682, accuary: 0.790
INFO:tensorflow:Step:  2140, loss: 0.396, accuary: 0.860
INFO:tensorflow:Step:  2160, loss: 0.554, accuary: 0.860
INFO:tensorflow:Step:  2180, loss: 0.702, accuary: 0.770
INFO:tensorflow:Step:  2200, loss: 0.630, accuary: 0.760
INFO:tensorflow:Step:  2220, loss: 0.452, accuary: 0.860
INFO:tensorflow:Step:  2240, loss: 0.497, accuary: 0.840
INFO:tensorflow:Step:  2260, loss: 0.381, accuary: 0.870
INFO:tensorflow:Step:  2280, loss: 0.406, accuary: 0.870
INFO:tensorflow:Step:  2300, loss: 0.629, accuary: 0.820
INFO:tensorflow:Step:  2320, loss: 0.681, accuary: 0.840
INFO:tensorflow:Step:  2340, loss: 0.656, accuary: 0.810
INFO:tensorflow:Step:  2360, loss: 0.448, accuary: 0.820
INFO:tensorflow:Step:  2380, loss: 0.591, accuary: 0.820
INFO:tensorflow:Step:  2400, loss: 0.655, accuary: 0.800
INFO:tensorflow:Step:  2420, loss: 0.553, accuary: 0.840
INFO:tensorflow:Step:  2440, loss: 0.575, accuary: 0.800
INFO:tensorflow:Step:  2460, loss: 0.495, accuary: 0.800
INFO:tensorflow:Step:  2480, loss: 0.491, accuary: 0.850
INFO:tensorflow:Step:  2500, loss: 0.386, accuary: 0.860
INFO:tensorflow:Step:  2520, loss: 0.451, accuary: 0.840
INFO:tensorflow:Step:  2540, loss: 0.438, accuary: 0.850
INFO:tensorflow:Step:  2560, loss: 0.618, accuary: 0.800
INFO:tensorflow:Step:  2580, loss: 0.556, accuary: 0.820
INFO:tensorflow:Step:  2600, loss: 0.462, accuary: 0.860
INFO:tensorflow:Step:  2620, loss: 0.509, accuary: 0.800
INFO:tensorflow:Step:  2640, loss: 0.411, accuary: 0.850
INFO:tensorflow:Step:  2660, loss: 0.415, accuary: 0.880
INFO:tensorflow:Step:  2680, loss: 0.445, accuary: 0.880
INFO:tensorflow:Step:  2700, loss: 0.442, accuary: 0.820
INFO:tensorflow:Step:  2720, loss: 0.435, accuary: 0.860
INFO:tensorflow:Step:  2740, loss: 0.372, accuary: 0.870
INFO:tensorflow:Step:  2760, loss: 0.391, accuary: 0.850
INFO:tensorflow:Step:  2780, loss: 0.417, accuary: 0.830
INFO:tensorflow:Step:  2800, loss: 0.485, accuary: 0.850
INFO:tensorflow:Step:  2820, loss: 0.569, accuary: 0.840
INFO:tensorflow:Step:  2840, loss: 0.620, accuary: 0.830
INFO:tensorflow:Step:  2860, loss: 0.428, accuary: 0.860
INFO:tensorflow:Step:  2880, loss: 0.395, accuary: 0.880
INFO:tensorflow:Step:  2900, loss: 0.410, accuary: 0.890
INFO:tensorflow:Step:  2920, loss: 0.427, accuary: 0.860
INFO:tensorflow:Step:  2940, loss: 0.421, accuary: 0.810
INFO:tensorflow:Step:  2960, loss: 0.472, accuary: 0.850
INFO:tensorflow:Step:  2980, loss: 0.369, accuary: 0.880
INFO:tensorflow:Step:  3000, loss: 0.480, accuary: 0.830
INFO:tensorflow:Step:  3020, loss: 0.385, accuary: 0.880
INFO:tensorflow:Step:  3040, loss: 0.322, accuary: 0.880
INFO:tensorflow:Step:  3060, loss: 0.281, accuary: 0.900
INFO:tensorflow:Step:  3080, loss: 0.409, accuary: 0.870
INFO:tensorflow:Step:  3100, loss: 0.278, accuary: 0.940
INFO:tensorflow:Step:  3120, loss: 0.429, accuary: 0.910
INFO:tensorflow:Step:  3140, loss: 0.340, accuary: 0.900
INFO:tensorflow:Step:  3160, loss: 0.434, accuary: 0.920
INFO:tensorflow:Step:  3180, loss: 0.264, accuary: 0.890
INFO:tensorflow:Step:  3200, loss: 0.278, accuary: 0.930
INFO:tensorflow:Step:  3220, loss: 0.415, accuary: 0.860
INFO:tensorflow:Step:  3240, loss: 0.655, accuary: 0.810
INFO:tensorflow:Step:  3260, loss: 0.285, accuary: 0.920
INFO:tensorflow:Step:  3280, loss: 0.322, accuary: 0.920
INFO:tensorflow:Step:  3300, loss: 0.489, accuary: 0.890
INFO:tensorflow:Step:  3320, loss: 0.409, accuary: 0.890
INFO:tensorflow:Step:  3340, loss: 0.263, accuary: 0.900
INFO:tensorflow:Step:  3360, loss: 0.285, accuary: 0.930
INFO:tensorflow:Step:  3380, loss: 0.227, accuary: 0.960
INFO:tensorflow:Step:  3400, loss: 0.471, accuary: 0.850
INFO:tensorflow:Step:  3420, loss: 0.162, accuary: 0.960
INFO:tensorflow:Step:  3440, loss: 0.261, accuary: 0.920
INFO:tensorflow:Step:  3460, loss: 0.241, accuary: 0.920
INFO:tensorflow:Step:  3480, loss: 0.714, accuary: 0.830
INFO:tensorflow:Step:  3500, loss: 0.369, accuary: 0.860
INFO:tensorflow:Step:  3520, loss: 0.280, accuary: 0.930
INFO:tensorflow:Step:  3540, loss: 0.245, accuary: 0.960
INFO:tensorflow:Step:  3560, loss: 0.286, accuary: 0.920
INFO:tensorflow:Step:  3580, loss: 0.273, accuary: 0.940
INFO:tensorflow:Step:  3600, loss: 0.150, accuary: 0.970
INFO:tensorflow:Step:  3620, loss: 0.295, accuary: 0.910
INFO:tensorflow:Step:  3640, loss: 0.249, accuary: 0.900
INFO:tensorflow:Step:  3660, loss: 0.188, accuary: 0.980
INFO:tensorflow:Step:  3680, loss: 0.141, accuary: 0.960
INFO:tensorflow:Step:  3700, loss: 0.185, accuary: 0.950
INFO:tensorflow:Step:  3720, loss: 0.274, accuary: 0.910
INFO:tensorflow:Step:  3740, loss: 0.328, accuary: 0.910
INFO:tensorflow:Step:  3760, loss: 0.218, accuary: 0.940
INFO:tensorflow:Step:  3780, loss: 0.233, accuary: 0.930
INFO:tensorflow:Step:  3800, loss: 0.154, accuary: 0.960
INFO:tensorflow:Step:  3820, loss: 0.250, accuary: 0.920
INFO:tensorflow:Step:  3840, loss: 0.226, accuary: 0.920
INFO:tensorflow:Step:  3860, loss: 0.285, accuary: 0.940
INFO:tensorflow:Step:  3880, loss: 0.250, accuary: 0.950
INFO:tensorflow:Step:  3900, loss: 0.349, accuary: 0.880
INFO:tensorflow:Step:  3920, loss: 0.272, accuary: 0.930
INFO:tensorflow:Step:  3940, loss: 0.342, accuary: 0.890
INFO:tensorflow:Step:  3960, loss: 0.231, accuary: 0.940
INFO:tensorflow:Step:  3980, loss: 0.387, accuary: 0.930
INFO:tensorflow:Step:  4000, loss: 0.158, accuary: 0.980
INFO:tensorflow:Step:  4020, loss: 0.147, accuary: 0.960
INFO:tensorflow:Step:  4040, loss: 0.158, accuary: 0.950
INFO:tensorflow:Step:  4060, loss: 0.286, accuary: 0.920
INFO:tensorflow:Step:  4080, loss: 0.201, accuary: 0.960
INFO:tensorflow:Step:  4100, loss: 0.199, accuary: 0.950
INFO:tensorflow:Step:  4120, loss: 0.254, accuary: 0.940
INFO:tensorflow:Step:  4140, loss: 0.243, accuary: 0.920
INFO:tensorflow:Step:  4160, loss: 0.181, accuary: 0.930
INFO:tensorflow:Step:  4180, loss: 0.208, accuary: 0.930
INFO:tensorflow:Step:  4200, loss: 0.269, accuary: 0.940
INFO:tensorflow:Step:  4220, loss: 0.132, accuary: 0.960
INFO:tensorflow:Step:  4240, loss: 0.230, accuary: 0.930
INFO:tensorflow:Step:  4260, loss: 0.364, accuary: 0.930
INFO:tensorflow:Step:  4280, loss: 0.124, accuary: 0.970
INFO:tensorflow:Step:  4300, loss: 0.186, accuary: 0.960
INFO:tensorflow:Step:  4320, loss: 0.222, accuary: 0.950
INFO:tensorflow:Step:  4340, loss: 0.140, accuary: 0.960
INFO:tensorflow:Step:  4360, loss: 0.209, accuary: 0.940
INFO:tensorflow:Step:  4380, loss: 0.306, accuary: 0.960
INFO:tensorflow:Step:  4400, loss: 0.183, accuary: 0.940
INFO:tensorflow:Step:  4420, loss: 0.263, accuary: 0.950
INFO:tensorflow:Step:  4440, loss: 0.170, accuary: 0.940
INFO:tensorflow:Step:  4460, loss: 0.263, accuary: 0.920
INFO:tensorflow:Step:  4480, loss: 0.124, accuary: 0.980
INFO:tensorflow:Step:  4500, loss: 0.146, accuary: 0.940
INFO:tensorflow:Step:  4520, loss: 0.139, accuary: 0.970
INFO:tensorflow:Step:  4540, loss: 0.243, accuary: 0.950
INFO:tensorflow:Step:  4560, loss: 0.139, accuary: 0.950
INFO:tensorflow:Step:  4580, loss: 0.163, accuary: 0.970
INFO:tensorflow:Step:  4600, loss: 0.188, accuary: 0.960
INFO:tensorflow:Step:  4620, loss: 0.072, accuary: 0.980
INFO:tensorflow:Step:  4640, loss: 0.117, accuary: 0.950
INFO:tensorflow:Step:  4660, loss: 0.217, accuary: 0.950
INFO:tensorflow:Step:  4680, loss: 0.273, accuary: 0.940
INFO:tensorflow:Step:  4700, loss: 0.103, accuary: 0.970
INFO:tensorflow:Step:  4720, loss: 0.226, accuary: 0.940
INFO:tensorflow:Step:  4740, loss: 0.108, accuary: 0.980
INFO:tensorflow:Step:  4760, loss: 0.237, accuary: 0.910
INFO:tensorflow:Step:  4780, loss: 0.170, accuary: 0.960
INFO:tensorflow:Step:  4800, loss: 0.138, accuary: 0.950
INFO:tensorflow:Step:  4820, loss: 0.149, accuary: 0.970
INFO:tensorflow:Step:  4840, loss: 0.145, accuary: 0.970
INFO:tensorflow:Step:  4860, loss: 0.127, accuary: 0.960
INFO:tensorflow:Step:  4880, loss: 0.154, accuary: 0.920
INFO:tensorflow:Step:  4900, loss: 0.077, accuary: 0.970
INFO:tensorflow:Step:  4920, loss: 0.086, accuary: 0.980
INFO:tensorflow:Step:  4940, loss: 0.178, accuary: 0.960
INFO:tensorflow:Step:  4960, loss: 0.123, accuary: 0.960
INFO:tensorflow:Step:  4980, loss: 0.155, accuary: 0.970
INFO:tensorflow:Step:  5000, loss: 0.126, accuary: 0.950
INFO:tensorflow:Step:  5020, loss: 0.163, accuary: 0.960
INFO:tensorflow:Step:  5040, loss: 0.170, accuary: 0.950
INFO:tensorflow:Step:  5060, loss: 0.157, accuary: 0.960
INFO:tensorflow:Step:  5080, loss: 0.142, accuary: 0.970
INFO:tensorflow:Step:  5100, loss: 0.194, accuary: 0.970
INFO:tensorflow:Step:  5120, loss: 0.140, accuary: 0.960
INFO:tensorflow:Step:  5140, loss: 0.079, accuary: 0.990
INFO:tensorflow:Step:  5160, loss: 0.175, accuary: 0.960
INFO:tensorflow:Step:  5180, loss: 0.073, accuary: 0.990
INFO:tensorflow:Step:  5200, loss: 0.232, accuary: 0.950
INFO:tensorflow:Step:  5220, loss: 0.151, accuary: 0.960
INFO:tensorflow:Step:  5240, loss: 0.131, accuary: 0.960
INFO:tensorflow:Step:  5260, loss: 0.164, accuary: 0.970
INFO:tensorflow:Step:  5280, loss: 0.079, accuary: 0.980
INFO:tensorflow:Step:  5300, loss: 0.198, accuary: 0.960
INFO:tensorflow:Step:  5320, loss: 0.219, accuary: 0.980
INFO:tensorflow:Step:  5340, loss: 0.150, accuary: 0.970
INFO:tensorflow:Step:  5360, loss: 0.085, accuary: 0.970
INFO:tensorflow:Step:  5380, loss: 0.334, accuary: 0.960
INFO:tensorflow:Step:  5400, loss: 0.155, accuary: 0.980
INFO:tensorflow:Step:  5420, loss: 0.173, accuary: 0.970
INFO:tensorflow:Step:  5440, loss: 0.276, accuary: 0.890
INFO:tensorflow:Step:  5460, loss: 0.212, accuary: 0.950
INFO:tensorflow:Step:  5480, loss: 0.060, accuary: 0.970
INFO:tensorflow:Step:  5500, loss: 0.036, accuary: 0.990
INFO:tensorflow:Step:  5520, loss: 0.151, accuary: 0.970
INFO:tensorflow:Step:  5540, loss: 0.073, accuary: 0.990
INFO:tensorflow:Step:  5560, loss: 0.201, accuary: 0.950
INFO:tensorflow:Step:  5580, loss: 0.115, accuary: 0.960
INFO:tensorflow:Step:  5600, loss: 0.156, accuary: 0.930
INFO:tensorflow:Step:  5620, loss: 0.115, accuary: 0.950
INFO:tensorflow:Step:  5640, loss: 0.181, accuary: 0.950
INFO:tensorflow:Step:  5660, loss: 0.055, accuary: 0.990
INFO:tensorflow:Step:  5680, loss: 0.163, accuary: 0.950
INFO:tensorflow:Step:  5700, loss: 0.055, accuary: 0.990
INFO:tensorflow:Step:  5720, loss: 0.051, accuary: 0.980
INFO:tensorflow:Step:  5740, loss: 0.116, accuary: 0.990
INFO:tensorflow:Step:  5760, loss: 0.058, accuary: 0.990
INFO:tensorflow:Step:  5780, loss: 0.147, accuary: 0.970
INFO:tensorflow:Step:  5800, loss: 0.205, accuary: 0.940
INFO:tensorflow:Step:  5820, loss: 0.096, accuary: 0.980
INFO:tensorflow:Step:  5840, loss: 0.109, accuary: 0.960
INFO:tensorflow:Step:  5860, loss: 0.181, accuary: 0.940
INFO:tensorflow:Step:  5880, loss: 0.245, accuary: 0.960
INFO:tensorflow:Step:  5900, loss: 0.045, accuary: 0.990
INFO:tensorflow:Step:  5920, loss: 0.087, accuary: 0.970
INFO:tensorflow:Step:  5940, loss: 0.234, accuary: 0.950
INFO:tensorflow:Step:  5960, loss: 0.208, accuary: 0.950
INFO:tensorflow:Step:  5980, loss: 0.205, accuary: 0.930
INFO:tensorflow:Step:  6000, loss: 0.031, accuary: 0.990
INFO:tensorflow:Step:  6020, loss: 0.043, accuary: 0.980
INFO:tensorflow:Step:  6040, loss: 0.174, accuary: 0.970
INFO:tensorflow:Step:  6060, loss: 0.037, accuary: 0.990
INFO:tensorflow:Step:  6080, loss: 0.194, accuary: 0.970
INFO:tensorflow:Step:  6100, loss: 0.216, accuary: 0.960
INFO:tensorflow:Step:  6120, loss: 0.063, accuary: 0.990
INFO:tensorflow:Step:  6140, loss: 0.061, accuary: 0.980
INFO:tensorflow:Step:  6160, loss: 0.136, accuary: 0.960
INFO:tensorflow:Step:  6180, loss: 0.029, accuary: 1.000
INFO:tensorflow:Step:  6200, loss: 0.025, accuary: 1.000
INFO:tensorflow:Step:  6220, loss: 0.031, accuary: 1.000
INFO:tensorflow:Step:  6240, loss: 0.109, accuary: 0.980
INFO:tensorflow:Step:  6260, loss: 0.155, accuary: 0.960
INFO:tensorflow:Step:  6280, loss: 0.066, accuary: 0.980
INFO:tensorflow:Step:  6300, loss: 0.061, accuary: 0.980
INFO:tensorflow:Step:  6320, loss: 0.087, accuary: 0.980
INFO:tensorflow:Step:  6340, loss: 0.073, accuary: 0.990
INFO:tensorflow:Step:  6360, loss: 0.038, accuary: 1.000
INFO:tensorflow:Step:  6380, loss: 0.259, accuary: 0.970
INFO:tensorflow:Step:  6400, loss: 0.057, accuary: 0.980
INFO:tensorflow:Step:  6420, loss: 0.152, accuary: 0.950
INFO:tensorflow:Step:  6440, loss: 0.145, accuary: 0.960
INFO:tensorflow:Step:  6460, loss: 0.161, accuary: 0.960
INFO:tensorflow:Step:  6480, loss: 0.051, accuary: 0.990
INFO:tensorflow:Step:  6500, loss: 0.078, accuary: 0.980
INFO:tensorflow:Step:  6520, loss: 0.082, accuary: 0.980
INFO:tensorflow:Step:  6540, loss: 0.045, accuary: 0.990
INFO:tensorflow:Step:  6560, loss: 0.042, accuary: 0.990
INFO:tensorflow:Step:  6580, loss: 0.066, accuary: 0.980
INFO:tensorflow:Step:  6600, loss: 0.051, accuary: 0.990
INFO:tensorflow:Step:  6620, loss: 0.121, accuary: 0.980
INFO:tensorflow:Step:  6640, loss: 0.069, accuary: 0.970
INFO:tensorflow:Step:  6660, loss: 0.077, accuary: 0.980
INFO:tensorflow:Step:  6680, loss: 0.135, accuary: 0.970
INFO:tensorflow:Step:  6700, loss: 0.072, accuary: 0.970
INFO:tensorflow:Step:  6720, loss: 0.049, accuary: 0.980
INFO:tensorflow:Step:  6740, loss: 0.134, accuary: 0.980
INFO:tensorflow:Step:  6760, loss: 0.035, accuary: 0.990
INFO:tensorflow:Step:  6780, loss: 0.040, accuary: 0.980
INFO:tensorflow:Step:  6800, loss: 0.138, accuary: 0.960
INFO:tensorflow:Step:  6820, loss: 0.059, accuary: 0.980
INFO:tensorflow:Step:  6840, loss: 0.064, accuary: 0.990
INFO:tensorflow:Step:  6860, loss: 0.196, accuary: 0.960
INFO:tensorflow:Step:  6880, loss: 0.178, accuary: 0.980
INFO:tensorflow:Step:  6900, loss: 0.137, accuary: 0.980
INFO:tensorflow:Step:  6920, loss: 0.051, accuary: 0.980
INFO:tensorflow:Step:  6940, loss: 0.274, accuary: 0.950
INFO:tensorflow:Step:  6960, loss: 0.031, accuary: 1.000
INFO:tensorflow:Step:  6980, loss: 0.066, accuary: 0.980
INFO:tensorflow:Step:  7000, loss: 0.013, accuary: 1.000
INFO:tensorflow:Step:  7020, loss: 0.035, accuary: 0.990
INFO:tensorflow:Step:  7040, loss: 0.043, accuary: 0.980
INFO:tensorflow:Step:  7060, loss: 0.063, accuary: 0.980
INFO:tensorflow:Step:  7080, loss: 0.050, accuary: 0.980
INFO:tensorflow:Step:  7100, loss: 0.136, accuary: 0.980
INFO:tensorflow:Step:  7120, loss: 0.265, accuary: 0.950
INFO:tensorflow:Step:  7140, loss: 0.107, accuary: 0.980
INFO:tensorflow:Step:  7160, loss: 0.023, accuary: 1.000
INFO:tensorflow:Step:  7180, loss: 0.019, accuary: 1.000
INFO:tensorflow:Step:  7200, loss: 0.091, accuary: 0.970
INFO:tensorflow:Step:  7220, loss: 0.096, accuary: 0.970
INFO:tensorflow:Step:  7240, loss: 0.018, accuary: 1.000
INFO:tensorflow:Step:  7260, loss: 0.032, accuary: 0.980
INFO:tensorflow:Step:  7280, loss: 0.071, accuary: 0.990
INFO:tensorflow:Step:  7300, loss: 0.128, accuary: 0.970
INFO:tensorflow:Step:  7320, loss: 0.122, accuary: 0.960
INFO:tensorflow:Step:  7340, loss: 0.041, accuary: 0.990
INFO:tensorflow:Step:  7360, loss: 0.045, accuary: 0.980
INFO:tensorflow:Step:  7380, loss: 0.143, accuary: 0.940
INFO:tensorflow:Step:  7400, loss: 0.167, accuary: 0.950
INFO:tensorflow:Step:  7420, loss: 0.104, accuary: 0.970
INFO:tensorflow:Step:  7440, loss: 0.031, accuary: 0.990
INFO:tensorflow:Step:  7460, loss: 0.027, accuary: 1.000
INFO:tensorflow:Step:  7480, loss: 0.181, accuary: 0.940
INFO:tensorflow:Step:  7500, loss: 0.048, accuary: 0.980
INFO:tensorflow:Step:  7520, loss: 0.102, accuary: 0.990
INFO:tensorflow:Step:  7540, loss: 0.094, accuary: 0.970
INFO:tensorflow:Step:  7560, loss: 0.070, accuary: 0.980
INFO:tensorflow:Step:  7580, loss: 0.016, accuary: 0.990
INFO:tensorflow:Step:  7600, loss: 0.167, accuary: 0.990
INFO:tensorflow:Step:  7620, loss: 0.035, accuary: 0.990
INFO:tensorflow:Step:  7640, loss: 0.025, accuary: 0.990
INFO:tensorflow:Step:  7660, loss: 0.140, accuary: 0.970
INFO:tensorflow:Step:  7680, loss: 0.008, accuary: 1.000
INFO:tensorflow:Step:  7700, loss: 0.063, accuary: 0.970
INFO:tensorflow:Step:  7720, loss: 0.058, accuary: 0.980
INFO:tensorflow:Step:  7740, loss: 0.125, accuary: 0.950
INFO:tensorflow:Step:  7760, loss: 0.097, accuary: 0.970
INFO:tensorflow:Step:  7780, loss: 0.050, accuary: 0.980
INFO:tensorflow:Step:  7800, loss: 0.030, accuary: 0.990
INFO:tensorflow:Step:  7820, loss: 0.019, accuary: 1.000
INFO:tensorflow:Step:  7840, loss: 0.093, accuary: 0.980
INFO:tensorflow:Step:  7860, loss: 0.117, accuary: 0.980
INFO:tensorflow:Step:  7880, loss: 0.077, accuary: 0.970
INFO:tensorflow:Step:  7900, loss: 0.174, accuary: 0.970
INFO:tensorflow:Step:  7920, loss: 0.116, accuary: 0.980
INFO:tensorflow:Step:  7940, loss: 0.020, accuary: 1.000
INFO:tensorflow:Step:  7960, loss: 0.113, accuary: 0.960
INFO:tensorflow:Step:  7980, loss: 0.027, accuary: 0.990
INFO:tensorflow:Step:  8000, loss: 0.014, accuary: 0.990
INFO:tensorflow:Step:  8020, loss: 0.025, accuary: 1.000
INFO:tensorflow:Step:  8040, loss: 0.035, accuary: 0.990
INFO:tensorflow:Step:  8060, loss: 0.046, accuary: 0.980
INFO:tensorflow:Step:  8080, loss: 0.083, accuary: 0.970
INFO:tensorflow:Step:  8100, loss: 0.221, accuary: 0.960
INFO:tensorflow:Step:  8120, loss: 0.125, accuary: 0.970
INFO:tensorflow:Step:  8140, loss: 0.021, accuary: 0.990
INFO:tensorflow:Step:  8160, loss: 0.051, accuary: 0.980
INFO:tensorflow:Step:  8180, loss: 0.110, accuary: 0.990
INFO:tensorflow:Step:  8200, loss: 0.095, accuary: 0.970
INFO:tensorflow:Step:  8220, loss: 0.097, accuary: 0.990
INFO:tensorflow:Step:  8240, loss: 0.090, accuary: 0.980
INFO:tensorflow:Step:  8260, loss: 0.072, accuary: 0.990
INFO:tensorflow:Step:  8280, loss: 0.087, accuary: 0.960
INFO:tensorflow:Step:  8300, loss: 0.088, accuary: 0.990
INFO:tensorflow:Step:  8320, loss: 0.057, accuary: 0.970
INFO:tensorflow:Step:  8340, loss: 0.073, accuary: 0.990
INFO:tensorflow:Step:  8360, loss: 0.126, accuary: 0.970
INFO:tensorflow:Step:  8380, loss: 0.017, accuary: 0.990
INFO:tensorflow:Step:  8400, loss: 0.074, accuary: 0.980
INFO:tensorflow:Step:  8420, loss: 0.009, accuary: 1.000
INFO:tensorflow:Step:  8440, loss: 0.041, accuary: 0.980
INFO:tensorflow:Step:  8460, loss: 0.074, accuary: 0.980
INFO:tensorflow:Step:  8480, loss: 0.005, accuary: 1.000
INFO:tensorflow:Step:  8500, loss: 0.050, accuary: 0.990
INFO:tensorflow:Step:  8520, loss: 0.025, accuary: 0.990
INFO:tensorflow:Step:  8540, loss: 0.043, accuary: 0.980
INFO:tensorflow:Step:  8560, loss: 0.011, accuary: 1.000
INFO:tensorflow:Step:  8580, loss: 0.060, accuary: 0.980
INFO:tensorflow:Step:  8600, loss: 0.136, accuary: 0.970
INFO:tensorflow:Step:  8620, loss: 0.167, accuary: 0.950
INFO:tensorflow:Step:  8640, loss: 0.153, accuary: 0.960
INFO:tensorflow:Step:  8660, loss: 0.006, accuary: 1.000
INFO:tensorflow:Step:  8680, loss: 0.069, accuary: 0.980
INFO:tensorflow:Step:  8700, loss: 0.040, accuary: 0.980
INFO:tensorflow:Step:  8720, loss: 0.038, accuary: 0.990
INFO:tensorflow:Step:  8740, loss: 0.018, accuary: 1.000
INFO:tensorflow:Step:  8760, loss: 0.025, accuary: 0.990
INFO:tensorflow:Step:  8780, loss: 0.039, accuary: 0.990
INFO:tensorflow:Step:  8800, loss: 0.005, accuary: 1.000
INFO:tensorflow:Step:  8820, loss: 0.050, accuary: 0.990
INFO:tensorflow:Step:  8840, loss: 0.057, accuary: 0.990
INFO:tensorflow:Step:  8860, loss: 0.145, accuary: 0.960
INFO:tensorflow:Step:  8880, loss: 0.018, accuary: 1.000
INFO:tensorflow:Step:  8900, loss: 0.013, accuary: 1.000
INFO:tensorflow:Step:  8920, loss: 0.011, accuary: 0.990
INFO:tensorflow:Step:  8940, loss: 0.011, accuary: 1.000
INFO:tensorflow:Step:  8960, loss: 0.007, accuary: 1.000
INFO:tensorflow:Step:  8980, loss: 0.057, accuary: 0.990
INFO:tensorflow:Step:  9000, loss: 0.028, accuary: 0.990
INFO:tensorflow:Step:  9020, loss: 0.009, accuary: 1.000
INFO:tensorflow:Step:  9040, loss: 0.038, accuary: 0.990
INFO:tensorflow:Step:  9060, loss: 0.054, accuary: 0.990
INFO:tensorflow:Step:  9080, loss: 0.008, accuary: 1.000
INFO:tensorflow:Step:  9100, loss: 0.006, accuary: 1.000
INFO:tensorflow:Step:  9120, loss: 0.007, accuary: 1.000
INFO:tensorflow:Step:  9140, loss: 0.066, accuary: 0.980
INFO:tensorflow:Step:  9160, loss: 0.019, accuary: 0.990
INFO:tensorflow:Step:  9180, loss: 0.017, accuary: 1.000
INFO:tensorflow:Step:  9200, loss: 0.036, accuary: 0.990
INFO:tensorflow:Step:  9220, loss: 0.136, accuary: 0.980
INFO:tensorflow:Step:  9240, loss: 0.085, accuary: 0.970
INFO:tensorflow:Step:  9260, loss: 0.029, accuary: 0.990
INFO:tensorflow:Step:  9280, loss: 0.016, accuary: 0.990
INFO:tensorflow:Step:  9300, loss: 0.024, accuary: 0.990
INFO:tensorflow:Step:  9320, loss: 0.025, accuary: 0.990
INFO:tensorflow:Step:  9340, loss: 0.013, accuary: 1.000
INFO:tensorflow:Step:  9360, loss: 0.011, accuary: 1.000
INFO:tensorflow:Step:  9380, loss: 0.029, accuary: 0.990
INFO:tensorflow:Step:  9400, loss: 0.017, accuary: 1.000
INFO:tensorflow:Step:  9420, loss: 0.009, accuary: 1.000
INFO:tensorflow:Step:  9440, loss: 0.012, accuary: 1.000
INFO:tensorflow:Step:  9460, loss: 0.024, accuary: 0.990
INFO:tensorflow:Step:  9480, loss: 0.018, accuary: 0.990
INFO:tensorflow:Step:  9500, loss: 0.011, accuary: 1.000
INFO:tensorflow:Step:  9520, loss: 0.005, accuary: 1.000
INFO:tensorflow:Step:  9540, loss: 0.015, accuary: 1.000
INFO:tensorflow:Step:  9560, loss: 0.038, accuary: 0.990
INFO:tensorflow:Step:  9580, loss: 0.125, accuary: 0.980
INFO:tensorflow:Step:  9600, loss: 0.033, accuary: 0.990
INFO:tensorflow:Step:  9620, loss: 0.005, accuary: 1.000
INFO:tensorflow:Step:  9640, loss: 0.026, accuary: 0.990
INFO:tensorflow:Step:  9660, loss: 0.084, accuary: 0.980
INFO:tensorflow:Step:  9680, loss: 0.061, accuary: 0.980
INFO:tensorflow:Step:  9700, loss: 0.072, accuary: 0.990
INFO:tensorflow:Step:  9720, loss: 0.038, accuary: 0.990
INFO:tensorflow:Step:  9740, loss: 0.014, accuary: 1.000
INFO:tensorflow:Step:  9760, loss: 0.020, accuary: 1.000
INFO:tensorflow:Step:  9780, loss: 0.006, accuary: 1.000
INFO:tensorflow:Step:  9800, loss: 0.020, accuary: 0.990
INFO:tensorflow:Step:  9820, loss: 0.103, accuary: 0.970
INFO:tensorflow:Step:  9840, loss: 0.065, accuary: 0.980
INFO:tensorflow:Step:  9860, loss: 0.018, accuary: 1.000
INFO:tensorflow:Step:  9880, loss: 0.009, accuary: 1.000
INFO:tensorflow:Step:  9900, loss: 0.031, accuary: 0.980
INFO:tensorflow:Step:  9920, loss: 0.023, accuary: 0.990
INFO:tensorflow:Step:  9940, loss: 0.003, accuary: 1.000
INFO:tensorflow:Step:  9960, loss: 0.058, accuary: 0.990
INFO:tensorflow:Step:  9980, loss: 0.110, accuary: 0.980
INFO:tensorflow:Step: 10000, loss: 0.009, accuary: 1.000

After 10,000 training steps, the training accuracy has essentially reached the required level.
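The loop above only reports training-batch accuracy. A hedged sketch of how validation/test accuracy could be measured with the same graph (eval_on_dataset is a hypothetical helper; it must run inside the same tf.Session, with keep_prob set to test_keep_prob_value so dropout is disabled at evaluation time):

def eval_on_dataset(sess, dataset, num_batches=100):
    total_acc = 0.0
    for _ in range(num_batches):
        batch_inputs, batch_labels = dataset.next_batch(hps.batch_size)
        acc_val = sess.run(accuary, feed_dict={
            inputs: batch_inputs,
            outputs: batch_labels,
            keep_prob: test_keep_prob_value})
        total_acc += acc_val
    return total_acc / num_batches

# Example (inside the training session, e.g. every 500 steps):
# tf.logging.info('val accuracy: %3.3f' % eval_on_dataset(sess, val_dataset))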

