A First Look at TensorFlow 2.1

1. Imperative Programming

import tensorflow as tf
# 1. Create the input tensors
a = tf.constant(2.)
b = tf.constant(4.)
# 2. Compute and print directly
print('a+b=', a + b)
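
Because TensorFlow 2.x executes eagerly by default, the sum above is a concrete tensor whose value is available immediately. A minimal check (a sketch, assuming TensorFlow 2.x):

import tensorflow as tf

a = tf.constant(2.)
b = tf.constant(4.)
c = a + b
print(c.numpy())               # 6.0 -- read the eager tensor back as a NumPy value
print(tf.executing_eagerly())  # True by default in TF 2.x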

2. Accelerating Computation with the GPU

Note: you can specify whether an operation runs on the CPU or on a GPU. TensorFlow distinguishes multiple GPUs as /gpu:0, /gpu:1, and so on, while CPUs are not numbered per device and are referred to collectively as /cpu:0.
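
Before pinning work to /gpu:0, it can help to check which devices TensorFlow actually sees. A minimal sketch (assuming TF 2.1+, where tf.config.list_physical_devices is available):

import tensorflow as tf

# List the physical devices visible to TensorFlow
print(tf.config.list_physical_devices('CPU'))
print(tf.config.list_physical_devices('GPU'))

# Every tensor records the device it was placed on
x = tf.constant([1., 2.])
print(x.device)  # e.g. '/job:localhost/replica:0/task:0/device:CPU:0'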

Using the tf.matmul() function (covers: creating arrays and multiplying them):

import tensorflow as tf
import numpy as np

a = [1, 2, 3]
b = np.array([a])      # shape (1, 3)
c = np.transpose(b)    # shape (3, 1)
d = tf.matmul(b, c)    # (1, 3) @ (3, 1) -> shape (1, 1)
print(a, b, c, d)
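
The same computation can also be done with TensorFlow ops throughout, without NumPy; a sketch:

import tensorflow as tf

b = tf.constant([[1, 2, 3]])   # shape (1, 3)
c = tf.transpose(b)            # shape (3, 1)
d = tf.matmul(b, c)            # inner product: 1*1 + 2*2 + 3*3 = 14
print(d)                       # tf.Tensor([[14]], shape=(1, 1), dtype=int32)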


Multiplying arrays on /gpu:0:

import tensorflow as tf
from matplotlib import pyplot as plt
import os
import numpy as np

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress info/warning logs (underscores, not dots)
n = 100
with tf.device('/gpu:0'):
    cpu_a = tf.random.normal([1, n])
    cpu_b = tf.random.normal([n, 1])
    cpu_c = tf.matmul(cpu_a, cpu_b)
x = [i for i in range(9)]  # build a list with a for loop
print(x, cpu_a, cpu_b, cpu_c)
cpu_a = np.transpose(cpu_a)  # reshape to (n, 1) so it plots as a single curve
plt.plot(cpu_a, label='cpu_a')
plt.plot(cpu_b, label='cpu_b')
plt.legend()
plt.show()

(Figure: plot of cpu_b.)
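
On a machine without a visible GPU, pinning ops to /gpu:0 as above raises an error. Enabling soft device placement lets TensorFlow fall back to an available device; a sketch (assuming TF 2.x):

import tensorflow as tf

tf.config.set_soft_device_placement(True)  # fall back when the requested device is missing

with tf.device('/gpu:0'):
    a = tf.random.normal([1, 100])
    b = tf.random.normal([100, 1])
    c = tf.matmul(a, b)
print(c.device)  # shows where the op actually ran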

Test 1:

import tensorflow as tf
from matplotlib import pyplot as plt
import timeit

cpu_data = []
gpu_data = []
for n in range(9):
    n = 9**n
    # Create the two matrices computed on the CPU
    with tf.device('/cpu:0'):
        cpu_a = tf.random.normal([1, n])  # random samples from a normal distribution
        cpu_b = tf.random.normal([n, 1])
        print(cpu_a.device, cpu_b.device)
    # Create the two matrices computed on the GPU
    with tf.device('/gpu:0'):
        gpu_a = tf.random.normal([1, n])
        gpu_b = tf.random.normal([n, 1])
        print(gpu_a.device, gpu_b.device)
    def cpu_run():
        with tf.device('/cpu:0'):
            c = tf.matmul(cpu_a, cpu_b)  # matrix multiplication
        return c
    def gpu_run():
        with tf.device('/gpu:0'):
            c = tf.matmul(gpu_a, gpu_b)
        return c
    # The first measurement is a warm-up, so one-time initialization is not counted
    cpu_time = timeit.timeit(cpu_run, number=10)  # measure run time; number is the repetition count
    gpu_time = timeit.timeit(gpu_run, number=10)
    print('warmup:', cpu_time, gpu_time)
    # Formal measurement: run 10 times and take the average
    cpu_time = timeit.timeit(cpu_run, number=10)
    gpu_time = timeit.timeit(gpu_run, number=10)
    print('run time:', cpu_time, gpu_time)
    cpu_data.append(cpu_time / 10)  # append the per-run average
    gpu_data.append(gpu_time / 10)
    del cpu_a, cpu_b, gpu_a, gpu_b  # delete the variable bindings, not the underlying values
    print(cpu_data)
    print(gpu_data)
x = [9**i for i in range(9)]
cpu_data = [1000 * i for i in cpu_data]  # seconds -> milliseconds
gpu_data = [1000 * i for i in gpu_data]
plt.plot(x, cpu_data, 'C1')
plt.plot(x, cpu_data, color='C1', marker='s', label='CPU')
plt.plot(x, gpu_data, 'C0')
plt.plot(x, gpu_data, color='C0', marker='^', label='GPU')
plt.gca().set_xscale('log')
plt.gca().set_yscale('log')
# plt.ylim([0, 100])
plt.xlabel('matrix size n: (1xn)@(nx1)')
plt.ylabel('run time (ms)')
plt.legend()
plt.show()
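
The divide-by-number and seconds-to-milliseconds bookkeeping above can be wrapped in a small helper. A sketch (the helper name avg_ms is hypothetical, not part of TensorFlow):

import timeit

def avg_ms(fn, number=10):
    # Return the average per-call run time of fn in milliseconds
    total = timeit.timeit(fn, number=number)  # total seconds for `number` calls
    return total / number * 1000.0

# Usage: call once to warm up, then measure
# avg_ms(cpu_run)        # first batch includes one-time initialization cost
# t_ms = avg_ms(cpu_run)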


Test 1, modified so that the tensors are created inside the timed functions (allocation and random-number generation are then included in the measurement):

import tensorflow as tf
from matplotlib import pyplot as plt
import timeit

cpu_data = []
gpu_data = []
for n in range(9):
    n = 9**n

    def cpu_run():
        with tf.device('/cpu:0'):
            cpu_a = tf.random.normal([1, n])  # random samples from a normal distribution
            cpu_b = tf.random.normal([n, 1])
            c = tf.matmul(cpu_a, cpu_b)  # matrix multiplication
        return c
    def gpu_run():
        with tf.device('/gpu:0'):
            gpu_a = tf.random.normal([1, n])
            gpu_b = tf.random.normal([n, 1])
            c = tf.matmul(gpu_a, gpu_b)
        return c
    # The first measurement is a warm-up, so one-time initialization is not counted
    cpu_time = timeit.timeit(cpu_run, number=10)  # measure run time; number is the repetition count
    gpu_time = timeit.timeit(gpu_run, number=10)
    print('warmup:', cpu_time, gpu_time)
    # Formal measurement: run 10 times and take the average
    cpu_time = timeit.timeit(cpu_run, number=10)
    gpu_time = timeit.timeit(gpu_run, number=10)
    print('run time:', cpu_time, gpu_time)
    cpu_data.append(cpu_time / 10)  # append the per-run average
    gpu_data.append(gpu_time / 10)
    # del cpu_a, cpu_b, gpu_a, gpu_b  # no longer needed: the tensors are local to the functions
    print(cpu_data)
    print(gpu_data)
x = [9**i for i in range(9)]
cpu_data = [1000 * i for i in cpu_data]  # seconds -> milliseconds
gpu_data = [1000 * i for i in gpu_data]
plt.plot(x, cpu_data, 'C1')
plt.plot(x, cpu_data, color='C1', marker='s', label='CPU')
plt.plot(x, gpu_data, 'C0')
plt.plot(x, gpu_data, color='C0', marker='^', label='GPU')
plt.gca().set_xscale('log')
plt.gca().set_yscale('log')
# plt.ylim([0, 100])
plt.xlabel('matrix size n: (1xn)@(nx1)')
plt.ylabel('run time (ms)')
plt.legend()
plt.show()


Automatic differentiation with tf.GradientTape: record the forward computation on a tape, then query the derivative.

import tensorflow as tf

# Create 4 scalar tensors
a = tf.constant(1.)
b = tf.constant(2.)
c = tf.constant(3.)
w = tf.constant(4.)
with tf.GradientTape() as tape:  # open the gradient-recording context
    tape.watch([w])  # add w to the list of watched tensors
    # Build the computation y = a*w**2 + b*w + c
    y = a * w**2 + b * w + c
# Differentiate y with respect to w
[dy_dw] = tape.gradient(y, [w])
print(dy_dw)  # print the derivative: dy/dw = 2*a*w + b
print(a)

Output:

tf.Tensor(10.0, shape=(), dtype=float32)
tf.Tensor(1.0, shape=(), dtype=float32)
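
For trainable parameters, tf.GradientTape watches tf.Variable objects automatically, so the explicit tape.watch call is only needed for constant tensors. An equivalent sketch using a variable:

import tensorflow as tf

w = tf.Variable(4.)  # variables are watched by the tape automatically
with tf.GradientTape() as tape:
    y = 1. * w**2 + 2. * w + 3.  # same polynomial with a=1, b=2, c=3
dy_dw = tape.gradient(y, w)
print(dy_dw)  # tf.Tensor(10.0, shape=(), dtype=float32), since dy/dw = 2*a*w + b = 10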
