# compute

This part predicts the choice among the five candidates, computes the loss, trains the model with `AdamOptimizer`, and then saves the model state after training.

Fig. 1 Schematic of the compute layer.

        # Total feature size after the second convolution stage:
        # (number of filters per window size) * (number of window sizes).
        num_filters_total = self.filter_num2 * len(self.filter_size2)

        # Concatenate each candidate's pooled feature maps along the channel
        # axis; each result has shape [batch_size, 1, 1, num_filters_total].
        h_pool_PA_2 = tf.concat(pooled_outputs_PA_2, 3)
        h_pool_PB_2 = tf.concat(pooled_outputs_PB_2, 3)
        h_pool_PC_2 = tf.concat(pooled_outputs_PC_2, 3)
        h_pool_PD_2 = tf.concat(pooled_outputs_PD_2, 3)
        h_pool_PE_2 = tf.concat(pooled_outputs_PE_2, 3)


        # Flatten to [batch_size, num_filters_total]. Squeezing only axes 1
        # and 2 keeps the batch axis intact even when batch_size == 1.
        h_pool_flat_PA = tf.squeeze(h_pool_PA_2, [1, 2])
        h_pool_flat_PB = tf.squeeze(h_pool_PB_2, [1, 2])
        h_pool_flat_PC = tf.squeeze(h_pool_PC_2, [1, 2])
        h_pool_flat_PD = tf.squeeze(h_pool_PD_2, [1, 2])
        h_pool_flat_PE = tf.squeeze(h_pool_PE_2, [1, 2])

        # Stack the five candidates along axis 0: [5 * batch_size, num_filters_total].
        result = tf.concat([h_pool_flat_PA, h_pool_flat_PB, h_pool_flat_PC,
                            h_pool_flat_PD, h_pool_flat_PE], 0)

        with tf.variable_scope('last_DNN_PARAMETER'):
            # Two-layer scoring network: a tanh hidden layer of width
            # dnn_width followed by a single output unit per candidate.
            wtrans = tf.get_variable("wtrans", [num_filters_total, dnn_width],
                                     initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
            btrans = tf.get_variable("btrans", [dnn_width],
                                     initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
            wout = tf.get_variable("wout", [dnn_width, 1],
                                   initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
            bout = tf.get_variable("bout", [1],
                                   initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        y = tf.tanh(tf.matmul(result, wtrans) + btrans)  # [5 * batch_size, dnn_width]
        y = tf.matmul(y, wout) + bout                    # [5 * batch_size, 1]
        # The scores are stacked candidate-major (all of A, then all of B, ...),
        # so reshaping to [5, batch_size] puts candidates on axis 0; the
        # transpose then gives one row of five candidate scores per example.
        y = tf.transpose(tf.reshape(y, [5, batch_size]), [1, 0])  # [batch_size, 5]

        # y holds the raw logits; softmax normalizes them into a probability
        # distribution over the five candidates, and argmax picks the choice.
        self.output_logit = tf.nn.softmax(y)
        self.predict_result = tf.argmax(y, axis=1)


        # Cross-entropy loss against the one-hot gold choice self.y_hat.
        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=self.y_hat))

        # Minimize the loss with Adam, and expose a Saver and a Session so the
        # trained model can be checkpointed after training.
        global_step = tf.Variable(0, trainable=False)
        self.train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost, global_step=global_step)
        self.saver = tf.train.Saver()
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
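
For reference, below is a minimal sketch of how this graph could be driven end to end: run training steps, read off predictions, and save a checkpoint afterwards. The `ComputeModel` constructor, the input placeholder names (`input_PA` through `input_PE`), and the `next_batch` helper are illustrative assumptions; only `y_hat`, `train_op`, `cost`, `predict_result`, `saver`, and `sess` come from the code above.

    import tensorflow as tf

    model = ComputeModel()  # hypothetical constructor that builds the graph above
    model.sess.run(tf.global_variables_initializer())

    for step in range(1000):
        batch = next_batch()  # hypothetical helper yielding numpy arrays
        feed = {
            model.input_PA: batch['PA'],  # assumed placeholder names for the
            model.input_PB: batch['PB'],  # five candidate inputs
            model.input_PC: batch['PC'],
            model.input_PD: batch['PD'],
            model.input_PE: batch['PE'],
            model.y_hat: batch['labels'],  # one-hot labels, shape [batch_size, 5]
        }
        _, loss = model.sess.run([model.train_op, model.cost], feed_dict=feed)

    # Predicted choice (index 0-4) for the last batch.
    choices = model.sess.run(model.predict_result, feed_dict=feed)

    # Save the model state after training.
    model.saver.save(model.sess, './checkpoints/model.ckpt')

Because all five candidates are scored by the same `wtrans`/`wout` weights, the network learns a single shared scoring function rather than five separate classifiers.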
