
A Flower Recognition System Demo Based on a CNN Built with TensorFlow

Contents: Demo Showcase (Login & Registration, Main Page, Model Training, Recognition), Neural Network, Training, Demo Download

Demo Showcase

Login & Registration

Main Page

Model Training

Recognition

Neural Network

Define the CNN Network Structure
A convolutional neural network: two convolution + pooling blocks, two fully connected layers, and a softmax classifier.
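As a sanity check on the fully connected layer sizes, here is a quick shape trace. It assumes the 64×64×3 inputs that the training script later in the post feeds the network (the code itself computes the flattened dimension dynamically):

input 64×64×3
→ conv1 (3×3, 64 kernels, stride 1, SAME): 64×64×64
→ pool1 (3×3 max pool, stride 2): 32×32×64
→ conv2 (3×3, 16 kernels, stride 1, SAME): 32×32×16
→ pool2 (3×3 max pool, stride 1): 32×32×16
→ flatten: 32 × 32 × 16 = 16384
→ local3: 128 → local4: 128 → softmax_linear: n_classes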
Key code:

# model.py -- imported as `model` by the training script below.
# Defines inference(), the CNN network structure:
# convolution + pooling x2, fully connected x2, softmax classifier.
import tensorflow as tf


def inference(images, batch_size, n_classes):
    # Convolution layer 1: 64 kernels of size 3x3 over 3 input channels, ReLU activation.
    with tf.variable_scope('conv1') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[3, 3, 3, 64], stddev=1.0, dtype=tf.float32),
                              name='weights', dtype=tf.float32)
        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[64]),
                             name='biases', dtype=tf.float32)
        conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    # Pooling layer 1: 3x3 max pooling with stride 2, followed by lrn(),
    # local response normalization, which helps training.
    with tf.variable_scope('pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='pooling1')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm1')

    # Convolution layer 2: 16 kernels of size 3x3 (16 output channels);
    # padding='SAME' keeps the output the same size as the input; ReLU activation.
    with tf.variable_scope('conv2') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 16], stddev=0.1, dtype=tf.float32),
                              name='weights', dtype=tf.float32)
        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[16]),
                             name='biases', dtype=tf.float32)
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name='conv2')

    # Pooling layer 2: lrn() first, then 3x3 max pooling (stride 1 this time).
    with tf.variable_scope('pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1],
                               padding='SAME', name='pooling2')

    # Fully connected layer 3: 128 neurons; reshape the pooling output into
    # one row per image, ReLU activation.
    with tf.variable_scope('local3') as scope:
        reshape = tf.reshape(pool2, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.Variable(tf.truncated_normal(shape=[dim, 128], stddev=0.005, dtype=tf.float32),
                              name='weights', dtype=tf.float32)
        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[128]),
                             name='biases', dtype=tf.float32)
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    # Fully connected layer 4: 128 neurons, ReLU activation.
    with tf.variable_scope('local4') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[128, 128], stddev=0.005, dtype=tf.float32),
                              name='weights', dtype=tf.float32)
        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[128]),
                             name='biases', dtype=tf.float32)
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')

    # Dropout layer (disabled in the original):
    # with tf.variable_scope('dropout') as scope:
    #     drop_out = tf.nn.dropout(local4, 0.8)

    # Softmax regression layer: a linear map of the FC output that
    # produces a score for each class.
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[128, n_classes], stddev=0.005, dtype=tf.float32),
                              name='softmax_linear', dtype=tf.float32)
        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[n_classes]),
                             name='biases', dtype=tf.float32)
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')
    return softmax_linear


# -----------------------------------------------------------------------------
# Loss computation.
# Inputs: logits, the network output; labels, the ground-truth class indices
# (integers in the range 0 .. n_classes - 1).
# Returns: loss, the loss value.
def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss


# -----------------------------------------------------------------------------
# Loss optimization.
# Inputs: loss; learning_rate, the learning rate.
# Returns: train_op, the training op to pass into sess.run() to train the model.
def trainning(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


# -----------------------------------------------------------------------------
# Evaluation / accuracy computation.
# Inputs: logits, the network output; labels, the ground-truth class indices.
# Returns: accuracy, the average accuracy for the current step, i.e. the
# fraction of images in this batch that were classified correctly.
def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy

Training
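The training script below imports a helper module input_data that the post does not include. The function names, argument lists, and return values here are taken from how the script calls them; everything inside the bodies is an assumption based on the standard TF1 queue-runner input pipeline (which matches the tf.train.start_queue_runners call in the script). A hypothetical sketch, not the author's actual input_data.py:

# input_data.py -- hypothetical sketch; only the signatures come from the post.
import math
import os

import numpy as np
import tensorflow as tf


def get_files(file_dir, ratio):
    # Assumption: one subfolder per flower class under file_dir.
    # Collect image paths and integer labels, shuffle them together,
    # and split off `ratio` of the data for validation.
    images, labels = [], []
    for label, flower in enumerate(sorted(os.listdir(file_dir))):
        class_dir = os.path.join(file_dir, flower)
        if not os.path.isdir(class_dir):
            continue
        for name in os.listdir(class_dir):
            images.append(os.path.join(class_dir, name))
            labels.append(label)
    temp = np.array([images, labels]).transpose()
    np.random.shuffle(temp)
    image_list = temp[:, 0].tolist()
    label_list = [int(l) for l in temp[:, 1]]
    n_val = int(math.ceil(len(image_list) * ratio))
    return (image_list[n_val:], label_list[n_val:],
            image_list[:n_val], label_list[:n_val])


def get_batch(image_list, label_list, image_W, image_H, batch_size, capacity):
    # Build a TF1 input queue that decodes, resizes, standardizes, and
    # batches the images; the queue is driven by start_queue_runners().
    images = tf.cast(image_list, tf.string)
    labels = tf.cast(label_list, tf.int32)
    input_queue = tf.train.slice_input_producer([images, labels])
    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])
    image = tf.image.decode_jpeg(image_contents, channels=3)
    image = tf.image.resize_image_with_crop_or_pad(image, image_H, image_W)
    image = tf.image.per_image_standardization(image)
    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=16,
                                              capacity=capacity)
    return image_batch, tf.reshape(label_batch, [batch_size])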

Key code:

import os

import numpy as np
import tensorflow as tf

import input_data  # helper module that reads and batches the training images
import model       # the network, loss, and evaluation functions defined above


class Train:
    path, train_dir, logs_train_dir = None, None, None

    def __init__(self):
        self.path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        self.train_dir = self.path + '/input_data'  # where the training samples are read from
        self.logs_train_dir = self.path + '/save'   # where the logs are stored

    def train(self, BATCH_SIZE=20, MAX_STEP=1000, learning_rate=0.0001):
        # Variable declarations
        N_CLASSES = 4  # four flower types
        IMG_W = 64     # resize the images; larger sizes make training slow
        IMG_H = 64
        CAPACITY = 200

        # Get the batches
        # train, train_label = input_data.get_files(train_dir)
        train, train_label, val, val_label = input_data.get_files(self.train_dir, 0.3)
        # Training data and labels
        train_batch, train_label_batch = input_data.get_batch(train, train_label, IMG_W, IMG_H,
                                                              BATCH_SIZE, CAPACITY)
        # Test data and labels
        val_batch, val_label_batch = input_data.get_batch(val, val_label, IMG_W, IMG_H,
                                                          BATCH_SIZE, CAPACITY)

        # Training ops
        train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
        train_loss = model.losses(train_logits, train_label_batch)
        train_op = model.trainning(train_loss, learning_rate)
        train_acc = model.evaluation(train_logits, train_label_batch)

        # Test ops
        test_logits = model.inference(val_batch, BATCH_SIZE, N_CLASSES)
        test_loss = model.losses(test_logits, val_label_batch)
        test_acc = model.evaluation(test_logits, val_label_batch)

        # Merge all the log summaries
        summary_op = tf.summary.merge_all()
        # Create a session
        sess = tf.Session()
        # Create a writer for the log files
        train_writer = tf.summary.FileWriter(self.logs_train_dir, sess.graph)
        # Create a saver to store the trained model
        saver = tf.train.Saver()
        # Initialize all nodes
        sess.run(tf.initialize_all_variables())
        # Queue monitoring
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # Train batch by batch
        try:
            print('Batch size: {}, steps: {}, learning rate: {}'.format(
                BATCH_SIZE, MAX_STEP, learning_rate))
            # Run MAX_STEP training steps, one batch per step
            for step in np.arange(MAX_STEP + 1):
                if coord.should_stop():
                    break
                _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])
                # Every 10 steps, print the current loss and accuracy,
                # and record a log entry through the writer
                if step % 10 == 0:
                    print('Step: %d, loss: %.2f, training accuracy: %.2f%%'
                          % (step, tra_loss, tra_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)
                # At the last step, save the trained model
                if step == MAX_STEP:
                    checkpoint_path = os.path.join(self.logs_train_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
        except tf.errors.OutOfRangeError:
            print('Training limit reached; training complete')
        finally:
            coord.request_stop()


if __name__ == '__main__':
    Train().train()
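The demo shows a recognition screen, but the post does not include its code. Purely as a minimal sketch of how the checkpoint saved under save/ could be used to classify a single image (the classify helper, the PIL-based loading, and the standardization step are assumptions; model.inference and the checkpoint directory come from the code above, and the preprocessing must match whatever get_batch did at training time):

# predict.py -- hypothetical sketch, not part of the original post.
import numpy as np
import tensorflow as tf
from PIL import Image

import model  # the inference() function defined above


def classify(image_path, logs_train_dir, n_classes=4):
    # Load one image and resize it to the 64x64 input size used in training.
    image = Image.open(image_path).convert('RGB').resize((64, 64))
    image_array = np.array(image, dtype=np.float32)

    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, shape=[64, 64, 3])
        # Assumption: per-image standardization, mirroring the input pipeline.
        image_std = tf.image.per_image_standardization(x)
        images = tf.reshape(image_std, [1, 64, 64, 3])
        logits = model.inference(images, batch_size=1, n_classes=n_classes)
        probabilities = tf.nn.softmax(logits)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            # Restore the newest checkpoint written by the training script.
            ckpt = tf.train.latest_checkpoint(logs_train_dir)
            saver.restore(sess, ckpt)
            prediction = sess.run(probabilities, feed_dict={x: image_array})
    # Return the index of the most probable flower class.
    return int(np.argmax(prediction))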

Demo Download

If you like the project, I hope you can give it a star. Thanks.
https://github.com/JJJiangYH/Flower-Distinguish
