This article is part 6 of the TensorFlow 1.x series and covers how to accelerate TensorFlow computation, first with GPUs and then with distributed TensorFlow.
1. GPU implementation
# Use tf.device to pin operations to a specific device.
import tensorflow as tf

with tf.device('/cpu:0'):
    a = tf.constant([1.0, 2.0, 3.0], shape=[3], name='a')
    b = tf.constant([1.0, 2.0, 3.0], shape=[3], name='b')

with tf.device('/gpu:0'):
    c = a + b

# log_device_placement=True prints which device each operation is placed on.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
print(sess.run(c))
If /gpu:0 is changed to /gpu:1, the program fails. Why? Does that mean GPUs can only be referenced in the order they appear?
InvalidArgumentError (see above for traceback): Cannot assign a device for operation 'add':
Operation was explicitly assigned to /device:GPU:1 but available devices are
[ /job:localhost/replica:0/task:0/device:CPU:0, /job:localhost/replica:0/task:0/device:GPU:0 ].
Make sure the device specification refers to a valid device.
[[Node: add = Add[T=DT_FLOAT, _device="/device:GPU:1"](a, b)]]
The error message itself answers the question: the list of available devices contains only /device:CPU:0 and /device:GPU:0, so this machine exposes a single GPU and /gpu:1 simply does not exist. A related question: how can you check which devices TensorFlow actually sees during device mapping?
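A minimal sketch of one way to check, assuming TensorFlow 1.x: device_lib.list_local_devices() returns every device the local process can see (the helper function name below is just for illustration).

from tensorflow.python.client import device_lib

def available_devices():
    # Each entry is a DeviceAttributes proto; .name is e.g. '/device:GPU:0'.
    return [d.name for d in device_lib.list_local_devices()]

print(available_devices())
# On a single-GPU machine this prints something like:
# ['/device:CPU:0', '/device:GPU:0']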
1.2 Operations allowed on the GPU
GPU kernels mainly support real-valued parameters (float16, float32 and double); operations on other types may have no GPU implementation at all.
TensorFlow provides allow_soft_placement, which automatically moves operations that cannot run on the GPU back to the CPU, as sketched after this paragraph.
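A minimal sketch of allow_soft_placement, assuming TensorFlow 1.x on a machine with a single GPU: the int32 variable pinned to /gpu:0 has no GPU kernel in typical 1.x builds, so without soft placement the session would raise an InvalidArgumentError, and with it the op quietly falls back to the CPU.

import tensorflow as tf

a_cpu = tf.Variable(0, name="a_cpu")        # int32 variable on the default device
with tf.device('/gpu:0'):
    a_gpu = tf.Variable(0, name="a_gpu")    # int32 variable has no GPU kernel here

# allow_soft_placement lets TensorFlow move unsupported ops back to the CPU
# instead of failing; log_device_placement shows where each op finally runs.
config = tf.ConfigProto(allow_soft_placement=True,
                        log_device_placement=True)
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())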
2. Distributed TensorFlow
Distributed TensorFlow can run in two modes: synchronous and asynchronous.
The difference is whether, after each iteration, the parameter updates from the different workers are applied together.
As a result, synchronous mode has to wait for the slowest worker on every step, while asynchronous mode may end up with a somewhat worse training result because workers update the parameters using stale values. The sketch after this paragraph shows the difference in code.
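A minimal, self-contained sketch of the two modes (the tiny one-variable model and the two-entry worker_hosts list are placeholders, not part of the original script; the exact SyncReplicasOptimizer argument set also varies a little across TensorFlow 1.x releases):

import tensorflow as tf

# Placeholder setup so the sketch is self-contained; in the real script
# these come from the model definition in distributed.py below.
worker_hosts = ["host1:2222", "host2:2222"]              # hypothetical workers
global_step = tf.Variable(0, name="global_step", trainable=False)
w = tf.get_variable("w", [1], tf.float32)
loss_value = tf.square(2.0 - w)
optimizer = tf.train.GradientDescentOptimizer(0.01)

grads_and_vars = optimizer.compute_gradients(loss_value)

# Asynchronous mode: each worker applies its own gradients as soon as they
# are computed, independently of the other workers.
async_train_op = optimizer.apply_gradients(grads_and_vars,
                                           global_step=global_step)

# Synchronous mode: gradients from all workers are aggregated before one
# shared update is applied, so every step waits for the slowest worker.
sync_optimizer = tf.train.SyncReplicasOptimizer(
    optimizer,
    replicas_to_aggregate=len(worker_hosts),
    total_num_replicas=len(worker_hosts))
sync_train_op = sync_optimizer.apply_gradients(grads_and_vars,
                                               global_step=global_step)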
2.1 How distributed TensorFlow works
Creating a single-machine cluster
import tensorflow as tf

# Create a local, single-process cluster.
c = tf.constant("Hello, distributed TensorFlow!")
server = tf.train.Server.create_local_server()
sess = tf.Session(server.target)
print(sess.run(c))
Running this prints Hello, distributed TensorFlow! (under Python 3 it appears as the bytes literal b'Hello, distributed TensorFlow!').
Creating a cluster with two machines. First, a small local script (local_reload_test.py) that reloads the model trained by the distributed job from its checkpoint:
#coding=utf-8
# local_reload_test.py
import numpy as np
import tensorflow as tf

# Define parameters
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_float('learning_rate', 0.00003, 'Initial learning rate.')

# Hyperparameters
learning_rate = FLAGS.learning_rate


def main(_):
    global_step = tf.Variable(0, name='global_step', trainable=False)

    input = tf.placeholder("float")
    label = tf.placeholder("float")

    weight = tf.get_variable("weight", [1], tf.float32,
                             initializer=tf.random_normal_initializer())
    biase = tf.get_variable("biase", [1], tf.float32,
                            initializer=tf.random_normal_initializer())
    pred = tf.multiply(input, weight) + biase

    loss_value = loss(label, pred)
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss_value, global_step=global_step)

    init_op = tf.global_variables_initializer()
    saver = tf.train.Saver()
    session = tf.Session()

    ckpt = tf.train.get_checkpoint_state("./checkpoint/")
    if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
        saver.restore(session, ckpt.model_checkpoint_path)
        global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
        print("%s, global_step = %d" % (ckpt.model_checkpoint_path, global_step))
    else:
        return

    w, b = session.run([weight, biase])
    print("weight: %f, biase: %f" % (w, b))


def loss(label, pred):
    return tf.square(label - pred)


if __name__ == "__main__":
    tf.app.run()
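This reload script is run on its own (for example, python local_reload_test.py) after the distributed job below has written at least one checkpoint into ./checkpoint/; it rebuilds the same one-variable linear model and restores weight and biase from the latest checkpoint.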
The distributed training script itself (distributed.py):
#coding=utf-8
# distributed.py
import numpy as np
import tensorflow as tf

# Define parameters
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_float('learning_rate', 0.00003, 'Initial learning rate.')
tf.app.flags.DEFINE_integer('steps_to_validate', 1000,
                            'Steps to validate and print loss')

# For distributed
tf.app.flags.DEFINE_string("ps_hosts", "",
                           "Comma-separated list of hostname:port pairs")
tf.app.flags.DEFINE_string("worker_hosts", "",
                           "Comma-separated list of hostname:port pairs")
tf.app.flags.DEFINE_string("job_name", "", "One of 'ps', 'worker'")
tf.app.flags.DEFINE_integer("task_index", 0, "Index of task within the job")
tf.app.flags.DEFINE_integer("issync", 0,
                            "Distributed update mode: 1 for synchronous, 0 for asynchronous")

# Hyperparameters
learning_rate = FLAGS.learning_rate
steps_to_validate = FLAGS.steps_to_validate


def main(_):
    ps_hosts = FLAGS.ps_hosts.split(",")
    worker_hosts = FLAGS.worker_hosts.split(",")
    cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
    server = tf.train.Server(cluster,
                             job_name=FLAGS.job_name,
                             task_index=FLAGS.task_index)
    issync = FLAGS.issync

    if FLAGS.job_name == "ps":
        # Parameter servers simply wait and serve the shared variables.
        server.join()
    elif FLAGS.job_name == "worker":
        with tf.device(tf.train.replica_device_setter(
                worker_device="/job:worker/task:%d" % FLAGS.task_index,
                cluster=cluster)):
            global_step = tf.Variable(0, name='global_step', trainable=False)

            input = tf.placeholder("float")
            label = tf.placeholder("float")

            weight = tf.get_variable("weight", [1], tf.float32,
                                     initializer=tf.random_normal_initializer())
            biase = tf.get_variable("biase", [1], tf.float32,
                                    initializer=tf.random_normal_initializer())
            pred = tf.multiply(input, weight) + biase

            loss_value = loss(label, pred)

            optimizer = tf.train.GradientDescentOptimizer(learning_rate)
            grads_and_vars = optimizer.compute_gradients(loss_value)
            if issync == 1:
                # Synchronous mode: aggregate the gradients from all workers
                # before applying a single update.
                rep_op = tf.train.SyncReplicasOptimizer(
                    optimizer,
                    replicas_to_aggregate=len(worker_hosts),
                    replica_id=FLAGS.task_index,
                    total_num_replicas=len(worker_hosts),
                    use_locking=True)
                train_op = rep_op.apply_gradients(grads_and_vars,
                                                  global_step=global_step)
                init_token_op = rep_op.get_init_tokens_op()
                chief_queue_runner = rep_op.get_chief_queue_runner()
            else:
                # Asynchronous mode: each worker applies its gradients independently.
                train_op = optimizer.apply_gradients(grads_and_vars,
                                                     global_step=global_step)

            init_op = tf.initialize_all_variables()

            saver = tf.train.Saver()
            tf.summary.scalar('cost', loss_value)
            summary_op = tf.summary.merge_all()

            sv = tf.train.Supervisor(is_chief=(FLAGS.task_index == 0),
                                     logdir="./checkpoint/",
                                     init_op=init_op,
                                     summary_op=None,
                                     saver=saver,
                                     global_step=global_step,
                                     save_model_secs=60)

            with sv.prepare_or_wait_for_session(server.target) as sess:
                # In synchronous mode the chief worker starts the queue runners
                # and initializes the sync tokens.
                if FLAGS.task_index == 0 and issync == 1:
                    sv.start_queue_runners(sess, [chief_queue_runner])
                    sess.run(init_token_op)
                step = 0
                while step < 1000000:
                    train_x = np.random.randn(1)
                    train_y = 2 * train_x + np.random.randn(1) * 0.33 + 10
                    _, loss_v, step = sess.run(
                        [train_op, loss_value, global_step],
                        feed_dict={input: train_x, label: train_y})
                    if step % steps_to_validate == 0:
                        w, b = sess.run([weight, biase])
                        print("step: %d, weight: %f, biase: %f, loss: %f"
                              % (step, w, b, loss_v))

                sv.stop()


def loss(label, pred):
    return tf.square(label - pred)


if __name__ == "__main__":
    tf.app.run()
Setting which GPU to use
Run in Git Bash:
RoFun@RoseFun MINGW64 ~
$ export CUDA_VISIBLE_DEVICES=0
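For reference, CUDA_VISIBLE_DEVICES controls which physical GPUs the process is allowed to see, for example:

$ export CUDA_VISIBLE_DEVICES=0      # expose only the first GPU
$ export CUDA_VISIBLE_DEVICES=0,1    # expose the first two GPUs
$ export CUDA_VISIBLE_DEVICES=""     # hide all GPUs and fall back to the CPU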
Then run distributed.py
(make sure the script is under c:\username\user, or first change to the directory that contains it on the command line):
$ CUDA_VISIBLE_DEVICES=0 python distributed.py --ps_hosts=122.225.220.136:2222
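The command above only passes ps_hosts, so it is not a complete launch. A sketch of a full run, assuming one parameter server at 122.225.220.136 and two workers at the hypothetical addresses 192.168.1.101 and 192.168.1.102 (every process needs ps_hosts, worker_hosts, job_name and task_index; add --issync=1 on the workers to switch to synchronous mode):

# On the parameter server (no GPU needed):
$ CUDA_VISIBLE_DEVICES="" python distributed.py --ps_hosts=122.225.220.136:2222 --worker_hosts=192.168.1.101:2222,192.168.1.102:2222 --job_name=ps --task_index=0

# On worker 0:
$ CUDA_VISIBLE_DEVICES=0 python distributed.py --ps_hosts=122.225.220.136:2222 --worker_hosts=192.168.1.101:2222,192.168.1.102:2222 --job_name=worker --task_index=0

# On worker 1:
$ CUDA_VISIBLE_DEVICES=0 python distributed.py --ps_hosts=122.225.220.136:2222 --worker_hosts=192.168.1.101:2222,192.168.1.102:2222 --job_name=worker --task_index=1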