This article walks through the TF-Slim walkthrough (slim_walkthrough) notebook. We hope it serves as a useful reference for developers; follow along with the examples below.
Contents

Environment check
Module imports
Building a multi-layer perceptron with TF-Slim
Printing variable names
Generating batches of training data
Defining a tensor-conversion helper
Building and training the model
Adding multiple loss functions
Loading the model and making predictions
Evaluating the model
Training a classifier on the flowers dataset with TF-Slim
Downloading the dataset
Displaying random images from the dataset
Defining the CNN
Applying the model to randomly selected images
Training with the CNN defined above
Model training
Model evaluation
Using a pre-trained model
Downloading the model
Making predictions with the model
Image classification with VGG
Downloading the VGG model
Classifying images with the pre-trained VGG model
Fine-tuning the model on the flowers dataset
Classifying images with the fine-tuned model
Environment check
Run the following command to confirm that the tf.contrib.slim module is installed correctly:
python -c "import tensorflow.contrib.slim as slim; eval = slim.evaluation.evaluate_once"
Module imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
import math
import numpy as np
import tensorflow as tf
import time

from datasets import dataset_utils

# Main slim library
from tensorflow.contrib import slim
Building a multi-layer perceptron with TF-Slim
def regression_model(inputs, is_training=True, scope="deep_regression"):
    """Creates the regression model.

    Args:
        inputs: A node that yields a `Tensor` of size [batch_size, dimensions].
        is_training: Whether or not we're currently training the model.
        scope: An optional variable_op scope for the model.

    Returns:
        predictions: 1-D `Tensor` of shape [batch_size] of responses.
        end_points: A dict of end points representing the hidden layers.
    """
    with tf.variable_scope(scope, 'deep_regression', [inputs]):
        end_points = {}
        # Use slim.arg_scope to set default weights_regularizer and
        # activation_fn arguments for slim.fully_connected.
        with slim.arg_scope([slim.fully_connected],
                            activation_fn=tf.nn.relu,
                            weights_regularizer=slim.l2_regularizer(0.01)):
            # Create a fully connected layer with 32 hidden units; the
            # end_points dict is used to return the hidden layers.
            net = slim.fully_connected(inputs, 32, scope='fc1')
            end_points['fc1'] = net

            # Add a dropout layer to prevent overfitting.
            net = slim.dropout(net, 0.8, is_training=is_training)

            # Create a fully connected layer with 16 hidden units.
            net = slim.fully_connected(net, 16, scope='fc2')
            end_points['fc2'] = net

            # Create a final layer with a single unit. Setting the
            # activation function to None makes it linear.
            predictions = slim.fully_connected(net, 1, activation_fn=None, scope='prediction')
            end_points['out'] = predictions

            return predictions, end_points
Printing variable names
with tf.Graph().as_default():
    # Dummy placeholders for arbitrary number of 1d inputs and outputs
    inputs = tf.placeholder(tf.float32, shape=(None, 1))
    outputs = tf.placeholder(tf.float32, shape=(None, 1))

    # Build model
    predictions, end_points = regression_model(inputs)

    # Print name and shape of each tensor.
    print("Layers")
    for k, v in end_points.items():
        print('name = {}, shape = {}'.format(v.name, v.get_shape()))

    # Print name and shape of parameter nodes (values not yet initialized)
    print("\n")
    print("Parameters")
    for v in slim.get_model_variables():
        print('name = {}, shape = {}'.format(v.name, v.get_shape()))
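Besides slim.get_model_variables(), TF-Slim also provides slim.get_variables(), which returns every variable in the graph, including bookkeeping variables such as a global step. A minimal sketch of the distinction, added here for illustration:

# Model variables: weights/biases created by slim layers (restored from checkpoints).
model_vars = slim.get_model_variables()
# All variables: additionally includes non-model variables, e.g. a global step.
all_vars = slim.get_variables()
print('model variables: %d, all variables: %d' % (len(model_vars), len(all_vars)))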
Generating batches of training data
def produce_batch(batch_size, noise=0.3):
    xs = np.random.random(size=[batch_size, 1]) * 10
    ys = np.sin(xs) + 5 + np.random.normal(size=[batch_size, 1], scale=noise)
    return [xs.astype(np.float32), ys.astype(np.float32)]

x_train, y_train = produce_batch(200)
x_test, y_test = produce_batch(200)
plt.scatter(x_train, y_train)
Defining a tensor-conversion helper
def convert_data_to_tensors(x, y):
    inputs = tf.constant(x)
    inputs.set_shape([None, 1])
    outputs = tf.constant(y)
    outputs.set_shape([None, 1])
    return inputs, outputs
Building and training the model
# The following snippet trains the regression model using a mean_squared_error loss.
ckpt_dir = '/tmp/regression_model/'

with tf.Graph().as_default():
    tf.logging.set_verbosity(tf.logging.INFO)

    inputs, targets = convert_data_to_tensors(x_train, y_train)

    # Make the model.
    predictions, nodes = regression_model(inputs, is_training=True)

    # Add the loss function to the graph.
    loss = tf.losses.mean_squared_error(labels=targets, predictions=predictions)

    # The total loss is the user's loss plus any regularization losses.
    total_loss = slim.losses.get_total_loss()

    # Specify the optimizer and create the train op:
    optimizer = tf.train.AdamOptimizer(learning_rate=0.005)
    train_op = slim.learning.create_train_op(total_loss, optimizer)

    # Run the training inside a session.
    final_loss = slim.learning.train(
        train_op,
        logdir=ckpt_dir,
        number_of_steps=5000,
        save_summaries_secs=5,
        log_every_n_steps=500)

print("Finished training. Last batch loss:", final_loss)
print("Checkpoint saved in %s" % ckpt_dir)
Adding multiple loss functions
with tf.Graph().as_default():
    inputs, targets = convert_data_to_tensors(x_train, y_train)
    predictions, end_points = regression_model(inputs, is_training=True)

    # Add multiple loss nodes.
    mean_squared_error_loss = tf.losses.mean_squared_error(labels=targets, predictions=predictions)
    absolute_difference_loss = slim.losses.absolute_difference(predictions, targets)

    # The following two ways to compute the total loss are equivalent:
    regularization_loss = tf.add_n(slim.losses.get_regularization_losses())
    total_loss1 = mean_squared_error_loss + absolute_difference_loss + regularization_loss

    # Regularization loss is included in the total loss by default.
    # This is good for training, but not for testing.
    total_loss2 = slim.losses.get_total_loss(add_regularization_losses=True)

    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_op)  # Will initialize the parameters with random weights.

        total_loss1, total_loss2 = sess.run([total_loss1, total_loss2])

        print('Total Loss1: %f' % total_loss1)
        print('Total Loss2: %f' % total_loss2)

        print('Regularization Losses:')
        for loss in slim.losses.get_regularization_losses():
            print(loss)

        print('Loss Functions:')
        for loss in slim.losses.get_losses():
            print(loss)
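If you compute a loss with plain TensorFlow ops rather than the slim.losses/tf.losses functions, slim will not track it automatically; registering it with slim.losses.add_loss makes get_total_loss() pick it up. A minimal sketch reusing the predictions and targets tensors from the snippet above (the custom loss itself is purely illustrative):

# An extra loss built from raw TF ops; slim does not know about it yet.
custom_loss = tf.reduce_mean(tf.abs(predictions - targets))
# Register it so slim.losses.get_total_loss() includes it.
slim.losses.add_loss(custom_loss)
total_loss = slim.losses.get_total_loss()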
Loading the model and making predictions
with tf.Graph().as_default():
    inputs, targets = convert_data_to_tensors(x_test, y_test)

    # Create the model structure. (Parameters will be loaded below.)
    predictions, end_points = regression_model(inputs, is_training=False)

    # Make a session which restores the old parameters from a checkpoint.
    sv = tf.train.Supervisor(logdir=ckpt_dir)
    with sv.managed_session() as sess:
        inputs, predictions, targets = sess.run([inputs, predictions, targets])

plt.scatter(inputs, targets, c='r')
plt.scatter(inputs, predictions, c='b')
plt.title('red=true, blue=predicted')
Evaluating the model
with tf.Graph().as_default():
    inputs, targets = convert_data_to_tensors(x_test, y_test)
    predictions, end_points = regression_model(inputs, is_training=False)

    # Specify metrics to evaluate:
    names_to_value_nodes, names_to_update_nodes = slim.metrics.aggregate_metric_map({
        'Mean Squared Error': slim.metrics.streaming_mean_squared_error(predictions, targets),
        'Mean Absolute Error': slim.metrics.streaming_mean_absolute_error(predictions, targets)
    })

    # Make a session which restores the old graph parameters, and then run eval.
    sv = tf.train.Supervisor(logdir=ckpt_dir)
    with sv.managed_session() as sess:
        metric_values = slim.evaluation.evaluation(
            sess,
            num_evals=1,  # Single pass over data
            eval_op=names_to_update_nodes.values(),
            final_op=names_to_value_nodes.values())

    names_to_values = dict(zip(names_to_value_nodes.keys(), metric_values))
    for key, value in names_to_values.items():
        print('%s: %f' % (key, value))
Training a classifier on the flowers dataset with TF-Slim
Downloading the dataset
import tensorflow as tf
from datasets import dataset_utils

url = "http://download.tensorflow.org/data/flowers.tar.gz"
flowers_data_dir = '/tmp/flowers'

if not tf.gfile.Exists(flowers_data_dir):
    tf.gfile.MakeDirs(flowers_data_dir)

dataset_utils.download_and_uncompress_tarball(url, flowers_data_dir)
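To make sure the archive was extracted, it can help to list the target directory (a quick sanity check; tf.gfile.ListDirectory is part of the same TF 1.x file API already used above):

for f in sorted(tf.gfile.ListDirectory(flowers_data_dir)):
    print(f)  # e.g. flowers_* TFRecord shards and a labels file, per the slim flowers tarball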
Displaying random images from the dataset
# from datasets import flowers
from datasets import cifar10
import tensorflow as tf
from tensorflow.contrib import slim

cifar10_data_dir = "./datasets/cifar10/tfrecords"

with tf.Graph().as_default():
    # dataset = flowers.get_split('train', flowers_data_dir)
    dataset = cifar10.get_split('train', cifar10_data_dir)
    data_provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset, common_queue_capacity=32, common_queue_min=1)
    image, label = data_provider.get(['image', 'label'])

    with tf.Session() as sess:
        with slim.queues.QueueRunners(sess):
            for i in range(4):
                np_image, np_label = sess.run([image, label])
                height, width, _ = np_image.shape
                class_name = name = dataset.labels_to_names[np_label]

                plt.figure()
                plt.imshow(np_image)
                plt.title('%s, %d x %d' % (name, height, width))
                plt.axis('off')
                plt.show()
Defining the CNN
def my_cnn(images, num_classes, is_training):  # is_training is not used...
    with slim.arg_scope([slim.max_pool2d], kernel_size=[3, 3], stride=2):
        net = slim.conv2d(images, 64, [5, 5])
        net = slim.max_pool2d(net)
        net = slim.conv2d(net, 64, [5, 5])
        net = slim.max_pool2d(net)
        net = slim.flatten(net)
        net = slim.fully_connected(net, 192)
        net = slim.fully_connected(net, num_classes, activation_fn=None)
    return net
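As the comment notes, is_training is currently ignored. One way to make the flag meaningful (a sketch, not part of the original walkthrough) is to add a dropout layer that only fires during training:

def my_cnn_with_dropout(images, num_classes, is_training):
    with slim.arg_scope([slim.max_pool2d], kernel_size=[3, 3], stride=2):
        net = slim.conv2d(images, 64, [5, 5])
        net = slim.max_pool2d(net)
        net = slim.conv2d(net, 64, [5, 5])
        net = slim.max_pool2d(net)
        net = slim.flatten(net)
        net = slim.fully_connected(net, 192)
        # Dropout is applied only when is_training=True; at eval time it is a no-op.
        net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout')
        net = slim.fully_connected(net, num_classes, activation_fn=None)
    return net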
Applying the model to randomly selected images
import tensorflow as tf

with tf.Graph().as_default():
    # The model can handle any input size because the first layer is convolutional.
    # The size of the model is determined when image_node is first passed into the my_cnn function.
    # Once the variables are initialized, the size of all the weight matrices is fixed.
    # Because of the fully connected layers, this means that all subsequent images must have the same
    # input size as the first image.
    batch_size, height, width, channels = 3, 28, 28, 3
    images = tf.random_uniform([batch_size, height, width, channels], maxval=1)

    # Create the model.
    num_classes = 10
    logits = my_cnn(images, num_classes, is_training=True)
    probabilities = tf.nn.softmax(logits)

    # Initialize all the variables (including parameters) randomly.
    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        # Run the init_op, evaluate the model outputs and print the results:
        sess.run(init_op)
        probabilities = sess.run(probabilities)

print('Probabilities Shape:')
print(probabilities.shape)  # batch_size x num_classes

print('\nProbabilities:')
print(probabilities)

print('\nSumming across all classes (Should equal 1):')
print(np.sum(probabilities, 1))  # Each row sums to 1
Training with the CNN defined above
from preprocessing import inception_preprocessing
import tensorflow as tf
from tensorflow.contrib import slim

def load_batch(dataset, batch_size=32, height=32, width=32, is_training=False):
    """Loads a single batch of data.

    Args:
        dataset: The dataset to load.
        batch_size: The number of images in the batch.
        height: The size of each image after preprocessing.
        width: The size of each image after preprocessing.
        is_training: Whether or not we're currently training or evaluating.

    Returns:
        images: A Tensor of size [batch_size, height, width, 3], image samples that have been preprocessed.
        images_raw: A Tensor of size [batch_size, height, width, 3], image samples that can be used for visualization.
        labels: A Tensor of size [batch_size], whose values range between 0 and dataset.num_classes.
    """
    data_provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset, common_queue_capacity=32, common_queue_min=8)
    image_raw, label = data_provider.get(['image', 'label'])

    # Preprocess image for usage by Inception.
    image = inception_preprocessing.preprocess_image(
        image_raw, height, width, is_training=is_training)

    # Preprocess the image for display purposes.
    image_raw = tf.expand_dims(image_raw, 0)
    image_raw = tf.image.resize_images(image_raw, [height, width])
    image_raw = tf.squeeze(image_raw)

    # Batch it up.
    images, images_raw, labels = tf.train.batch(
        [image, image_raw, label],
        batch_size=batch_size,
        num_threads=1,
        capacity=2 * batch_size)

    return images, images_raw, labels
Model training
from datasets import cifar10

# This might take a few minutes.
train_dir = './tfslim_model/'
cifar10_data_dir = "./datasets/cifar10/tfrecords"
print('Will save model to %s' % train_dir)

with tf.Graph().as_default():
    tf.logging.set_verbosity(tf.logging.INFO)

    dataset = cifar10.get_split('train', cifar10_data_dir)
    images, _, labels = load_batch(dataset)

    # Create the model:
    logits = my_cnn(images, num_classes=dataset.num_classes, is_training=True)

    # Specify the loss function:
    one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
    slim.losses.softmax_cross_entropy(logits, one_hot_labels)
    total_loss = slim.losses.get_total_loss()

    # Create some summaries to visualize the training process:
    tf.summary.scalar('losses/Total Loss', total_loss)

    # Specify the optimizer and create the train op:
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_op = slim.learning.create_train_op(total_loss, optimizer)

    # Run the training:
    final_loss = slim.learning.train(
        train_op,
        logdir=train_dir,
        number_of_steps=100,  # For speed, we only run 100 steps
        save_summaries_secs=1)

    print('Finished training. Final batch loss %f' % final_loss)
Model evaluation
from datasets import cifar10

# This might take a few minutes.
with tf.Graph().as_default():
    tf.logging.set_verbosity(tf.logging.DEBUG)

    dataset = cifar10.get_split('train', cifar10_data_dir)
    images, _, labels = load_batch(dataset)

    logits = my_cnn(images, num_classes=dataset.num_classes, is_training=False)
    predictions = tf.argmax(logits, 1)

    # Define the metrics:
    names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
        'eval/Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
        'eval/Recall@5': slim.metrics.streaming_recall_at_k(logits, labels, 5),
    })
    print(names_to_values.values(), names_to_updates.values())

    print('Running evaluation Loop...')
    checkpoint_path = tf.train.latest_checkpoint(train_dir)
    # metric_values = slim.evaluation.evaluate_once(
    #     master='',
    #     checkpoint_path=checkpoint_path,
    #     logdir=train_dir,
    #     eval_op=names_to_updates.values(),
    #     final_op=names_to_values.values())
    # names_to_values = dict(zip(names_to_values.keys(), metric_values))
    # for name in names_to_values:
    #     print('%s: %f' % (name, names_to_values[name]))
Using a pre-trained model
Downloading the model
from datasets import dataset_utils

url = "http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz"
checkpoints_dir = '/tmp/checkpoints'

if not tf.gfile.Exists(checkpoints_dir):
    tf.gfile.MakeDirs(checkpoints_dir)

dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
Making predictions with the model
import numpy as np
import os
import tensorflow as tf

try:
    import urllib2 as urllib
except ImportError:
    import urllib.request as urllib

from datasets import imagenet
from nets import inception
from preprocessing import inception_preprocessing
from tensorflow.contrib import slim

image_size = inception.inception_v1.default_image_size

with tf.Graph().as_default():
    url = 'https://upload.wikimedia.org/wikipedia/commons/7/70/EnglishCockerSpaniel_simon.jpg'
    image_string = urllib.urlopen(url).read()
    image = tf.image.decode_jpeg(image_string, channels=3)
    processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
    processed_images = tf.expand_dims(processed_image, 0)

    # Create the model, use the default arg scope to configure the batch norm parameters.
    with slim.arg_scope(inception.inception_v1_arg_scope()):
        logits, _ = inception.inception_v1(processed_images, num_classes=1001, is_training=False)
    probabilities = tf.nn.softmax(logits)

    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'inception_v1.ckpt'),
        slim.get_model_variables('InceptionV1'))

    with tf.Session() as sess:
        init_fn(sess)
        np_image, probabilities = sess.run([image, probabilities])
        probabilities = probabilities[0, 0:]
        sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x: x[1])]

    plt.figure()
    plt.imshow(np_image.astype(np.uint8))
    plt.axis('off')
    plt.show()

    names = imagenet.create_readable_names_for_imagenet_labels()
    for i in range(5):
        index = sorted_inds[i]
        print('Probability %0.2f%% => [%s]' % (probabilities[index] * 100, names[index]))
Image classification with VGG
Downloading the VGG model
from datasets import dataset_utils
import tensorflow as tf

url = "http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz"
checkpoints_dir = '/tmp/checkpoints'

if not tf.gfile.Exists(checkpoints_dir):
    tf.gfile.MakeDirs(checkpoints_dir)

dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
Classifying images with the pre-trained VGG model
import numpy as np
import os
import tensorflow as tf

try:
    import urllib2 as urllib
except ImportError:
    import urllib.request as urllib

from datasets import imagenet
from nets import vgg
from preprocessing import vgg_preprocessing
from tensorflow.contrib import slim

image_size = vgg.vgg_16.default_image_size

with tf.Graph().as_default():
    url = 'https://upload.wikimedia.org/wikipedia/commons/d/d9/First_Student_IC_school_bus_202076.jpg'
    image_string = urllib.urlopen(url).read()
    image = tf.image.decode_jpeg(image_string, channels=3)
    processed_image = vgg_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
    processed_images = tf.expand_dims(processed_image, 0)

    # Create the model, use the default arg scope to configure the batch norm parameters.
    with slim.arg_scope(vgg.vgg_arg_scope()):
        # 1000 classes instead of 1001.
        logits, _ = vgg.vgg_16(processed_images, num_classes=1000, is_training=False)
    probabilities = tf.nn.softmax(logits)

    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'vgg_16.ckpt'),
        slim.get_model_variables('vgg_16'))

    with tf.Session() as sess:
        init_fn(sess)
        np_image, probabilities = sess.run([image, probabilities])
        probabilities = probabilities[0, 0:]
        sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x: x[1])]

    plt.figure()
    plt.imshow(np_image.astype(np.uint8))
    plt.axis('off')
    plt.show()

    names = imagenet.create_readable_names_for_imagenet_labels()
    for i in range(5):
        index = sorted_inds[i]
        # Shift the index of a class name by one, since VGG predicts 1000
        # classes while the ImageNet name table has a background class at index 0.
        print('Probability %0.2f%% => [%s]' % (probabilities[index] * 100, names[index + 1]))
Fine-tuning the model on the flowers dataset
# Note that this may take several minutes.
import os

from datasets import flowers
from nets import inception
from preprocessing import inception_preprocessing
from tensorflow.contrib import slim

image_size = inception.inception_v1.default_image_size

def get_init_fn():
    """Returns a function run by the chief worker to warm-start the training."""
    checkpoint_exclude_scopes = ["InceptionV1/Logits", "InceptionV1/AuxLogits"]

    exclusions = [scope.strip() for scope in checkpoint_exclude_scopes]

    variables_to_restore = []
    for var in slim.get_model_variables():
        for exclusion in exclusions:
            if var.op.name.startswith(exclusion):
                break
        else:
            variables_to_restore.append(var)

    return slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'inception_v1.ckpt'),
        variables_to_restore)

train_dir = '/tmp/inception_finetuned/'

with tf.Graph().as_default():
    tf.logging.set_verbosity(tf.logging.INFO)

    dataset = flowers.get_split('train', flowers_data_dir)
    images, _, labels = load_batch(dataset, height=image_size, width=image_size)

    # Create the model, use the default arg scope to configure the batch norm parameters.
    with slim.arg_scope(inception.inception_v1_arg_scope()):
        logits, _ = inception.inception_v1(images, num_classes=dataset.num_classes, is_training=True)

    # Specify the loss function:
    one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
    slim.losses.softmax_cross_entropy(logits, one_hot_labels)
    total_loss = slim.losses.get_total_loss()

    # Create some summaries to visualize the training process:
    tf.summary.scalar('losses/Total Loss', total_loss)

    # Specify the optimizer and create the train op:
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_op = slim.learning.create_train_op(total_loss, optimizer)

    # Run the training:
    final_loss = slim.learning.train(
        train_op,
        logdir=train_dir,
        init_fn=get_init_fn(),
        number_of_steps=2)

print('Finished training. Last batch loss %f' % final_loss)
Classifying images with the fine-tuned model
import numpy as np
import tensorflow as tf

from datasets import flowers
from nets import inception
from tensorflow.contrib import slim

image_size = inception.inception_v1.default_image_size
batch_size = 3

with tf.Graph().as_default():
    tf.logging.set_verbosity(tf.logging.INFO)

    dataset = flowers.get_split('train', flowers_data_dir)
    images, images_raw, labels = load_batch(dataset, height=image_size, width=image_size)

    # Create the model, use the default arg scope to configure the batch norm parameters.
    with slim.arg_scope(inception.inception_v1_arg_scope()):
        logits, _ = inception.inception_v1(images, num_classes=dataset.num_classes, is_training=True)

    probabilities = tf.nn.softmax(logits)

    checkpoint_path = tf.train.latest_checkpoint(train_dir)
    init_fn = slim.assign_from_checkpoint_fn(
        checkpoint_path,
        slim.get_variables_to_restore())

    with tf.Session() as sess:
        with slim.queues.QueueRunners(sess):
            sess.run(tf.initialize_local_variables())
            init_fn(sess)
            np_probabilities, np_images_raw, np_labels = sess.run([probabilities, images_raw, labels])

            for i in range(batch_size):
                image = np_images_raw[i, :, :, :]
                true_label = np_labels[i]
                predicted_label = np.argmax(np_probabilities[i, :])
                predicted_name = dataset.labels_to_names[predicted_label]
                true_name = dataset.labels_to_names[true_label]

                plt.figure()
                plt.imshow(image.astype(np.uint8))
                plt.title('Ground Truth: [%s], Prediction [%s]' % (true_name, predicted_name))
                plt.axis('off')
                plt.show()
That concludes this walkthrough of slim_walkthrough. We hope it proves helpful to fellow developers!