# main.py — train a 3-layer MLP on MNIST with TensorFlow 2.x (MSE loss, SGD).
  1. import tensorflow as tf
  2. from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics
  3. # 设置GPU使用方式
  4. # 获取GPU列表
  5. gpus = tf.config.experimental.list_physical_devices('GPU')
  6. if gpus:
  7. try:
  8. # 设置GPU为增长式占用
  9. for gpu in gpus:
  10. tf.config.experimental.set_memory_growth(gpu, True)
  11. except RuntimeError as e:
  12. # 打印异常
  13. print(e)
  14. (xs, ys),_ = datasets.mnist.load_data()
  15. print('datasets:', xs.shape, ys.shape, xs.min(), xs.max())
  16. batch_size = 32
  17. xs = tf.convert_to_tensor(xs, dtype=tf.float32) / 255.
  18. db = tf.data.Dataset.from_tensor_slices((xs,ys))
  19. db = db.batch(batch_size).repeat(30)
  20. model = Sequential([layers.Dense(256, activation='relu'),
  21. layers.Dense(128, activation='relu'),
  22. layers.Dense(10)])
  23. model.build(input_shape=(4, 28*28))
  24. model.summary()
  25. optimizer = optimizers.SGD(lr=0.01)
  26. acc_meter = metrics.Accuracy()
  27. for step, (x,y) in enumerate(db):
  28. with tf.GradientTape() as tape:
  29. # 打平操作,[b, 28, 28] => [b, 784]
  30. x = tf.reshape(x, (-1, 28*28))
  31. # Step1. 得到模型输出output [b, 784] => [b, 10]
  32. out = model(x)
  33. # [b] => [b, 10]
  34. y_onehot = tf.one_hot(y, depth=10)
  35. # 计算差的平方和,[b, 10]
  36. loss = tf.square(out-y_onehot)
  37. # 计算每个样本的平均误差,[b]
  38. loss = tf.reduce_sum(loss) / x.shape[0]
  39. acc_meter.update_state(tf.argmax(out, axis=1), y)
  40. grads = tape.gradient(loss, model.trainable_variables)
  41. optimizer.apply_gradients(zip(grads, model.trainable_variables))
  42. if step % 200==0:
  43. print(step, 'loss:', float(loss), 'acc:', acc_meter.result().numpy())
  44. acc_meter.reset_states()