Junior Year Winter Break Study Progress (2): Learning TensorFlow

  • Code implementation of a linear regression problem: fitting y = 3x + 2 from noisy samples

import matplotlib.pyplot as plt
import tensorflow as tf


TRUE_W = 3.0
TRUE_b = 2.0
NUM_SAMPLES = 100

# Generate random training data around the line y = 3x + 2
X = tf.random.normal(shape=[NUM_SAMPLES, 1]).numpy()
noise = tf.random.normal(shape=[NUM_SAMPLES, 1]).numpy()
y = X * TRUE_W + TRUE_b + noise  # add Gaussian noise so the fit is non-trivial

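As a sanity check (not part of the original tutorial code), this one-variable regression also has a closed-form least-squares solution; a minimal NumPy sketch of what gradient descent should converge to:

import numpy as np

# Append a column of ones so the intercept b is solved for alongside W
A = np.hstack([X, np.ones_like(X)])
sol = np.linalg.lstsq(A, y, rcond=None)[0]
print('closed-form W = {:.3f}, b = {:.3f}'.format(sol[0, 0], sol[1, 0]))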

# Define the model: a single Dense unit computes y = Wx + b
model = tf.keras.layers.Dense(units=1)

# Plot the data against the untrained model's predictions
plt.scatter(X, y)
plt.plot(X, model(X), c='r')
plt.show()
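Calling model(X) above builds the layer, which is when its weight variables are actually created. A quick peek at the randomly initialized parameters (kernel and bias are the standard attribute names on a built Dense layer):

# Untrained parameters start far from TRUE_W / TRUE_b
w, b = model.kernel.numpy(), model.bias.numpy()
print('initial W = {:.3f}, b = {:.3f}'.format(w[0, 0], b[0]))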


EPOCHS = 16  # number of training iterations over the data
LEARNING_RATE = 0.002  # learning rate: SGD step size
optimizer = tf.keras.optimizers.SGD(LEARNING_RATE)  # stochastic gradient descent; create once, outside the loop
for epoch in range(EPOCHS):
    with tf.GradientTape() as tape:  # record operations for automatic differentiation
        y_ = model(X)
        loss = tf.reduce_sum(tf.keras.losses.mean_squared_error(y, y_))  # summed MSE over all samples

    grads = tape.gradient(loss, model.variables)  # gradients of the loss w.r.t. W and b
    optimizer.apply_gradients(zip(grads, model.variables))  # apply one SGD update step

    print('Epoch [{}/{}], loss [{:.3f}]'.format(epoch + 1, EPOCHS, float(loss)))  # float() so the scalar tensor formats cleanly
    plt.scatter(X, y)
    plt.plot(X, model(X), c='r')  # overlay the current fit each epoch

plt.show()  # the red lines converge toward y = 3x + 2
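After the loop finishes, the learned parameters should land close to the generating values; a minimal check reusing the same kernel/bias attributes as above:

w, b = model.kernel.numpy(), model.bias.numpy()
print('learned W = {:.3f} (true {}), b = {:.3f} (true {})'.format(w[0, 0], TRUE_W, b[0], TRUE_b))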