forward_layer.py
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow INFO/WARNING logs

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers, datasets

# load MNIST: x [60000, 28, 28] uint8 images, y [60000] integer labels
(x, y), (x_val, y_val) = datasets.mnist.load_data()
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.  # scale pixels to [0, 1]
y = tf.convert_to_tensor(y, dtype=tf.int32)
y = tf.one_hot(y, depth=10)  # [60000] => [60000, 10]
print(x.shape, y.shape)

train_dataset = tf.data.Dataset.from_tensor_slices((x, y))
train_dataset = train_dataset.batch(200)
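# Assumption, not in the original file: shuffling each epoch usually helps
# SGD converge; it would replace the plain .batch(200) above, e.g.
# train_dataset = train_dataset.shuffle(60000).batch(200)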

# 3-layer MLP: [b, 784] => [b, 512] => [b, 256] => [b, 10] (raw logits)
model = keras.Sequential([
    layers.Dense(512, activation='relu'),
    layers.Dense(256, activation='relu'),
    layers.Dense(10)])
optimizer = optimizers.SGD(learning_rate=0.001)
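# Assumption, not in the original file: the Sequential model is built lazily
# on its first call; building it up front makes model.summary() usable here:
# model.build(input_shape=(None, 28 * 28))
# model.summary()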


def train_epoch(epoch):
    # Step 4: loop over the dataset, one batch at a time
    for step, (x, y) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            # flatten images: [b, 28, 28] => [b, 784]
            x = tf.reshape(x, (-1, 28 * 28))
            # Step 1: compute output, [b, 784] => [b, 10]
            out = model(x)
            # Step 2: compute loss (squared error, averaged over the batch)
            loss = tf.reduce_sum(tf.square(out - y)) / x.shape[0]
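            # Assumption, not in the original file: for classification,
            # cross-entropy on the raw logits is the more common choice, e.g.
            # loss = tf.reduce_mean(tf.losses.categorical_crossentropy(
            #     y, out, from_logits=True))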
        # Step 3: compute gradients and update w1, w2, w3, b1, b2, b3
        grads = tape.gradient(loss, model.trainable_variables)
        # w' = w - lr * grad
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        if step % 100 == 0:
            print(epoch, step, 'loss:', loss.numpy())
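

# Sketch, not part of the original file: x_val / y_val are loaded above but
# never used; a minimal held-out accuracy check under that assumption could
# look like this:
def evaluate():
    x_v = tf.convert_to_tensor(x_val, dtype=tf.float32) / 255.
    x_v = tf.reshape(x_v, (-1, 28 * 28))  # [10000, 28, 28] => [10000, 784]
    logits = model(x_v)                   # [10000, 10]
    preds = tf.argmax(logits, axis=1)     # predicted digit per image
    labels = tf.convert_to_tensor(y_val, dtype=tf.int64)
    acc = tf.reduce_mean(tf.cast(tf.equal(preds, labels), tf.float32))
    print('val acc:', acc.numpy())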


def train():
    for epoch in range(30):
        train_epoch(epoch)


if __name__ == '__main__':
    train()