def test(model, params, x, v_batch, id_batch):
batch_size = x.shape[0]
sample_mu = torch.zeros(batch_size, params.predict_steps, device=params.device)
sample_q90 = torch.zeros(batch_size, params.predict_steps, device=params.device)
src_mask, memory = model.encode(x[:, :params.predict_start,:], id_batch)
for t in range(params.predict_steps):
ys = x[:, params.predict_start:params.predict_start+t+1,:]
out = model.decode(memory, ys, id_batch, src_mask)
q50, q90 = model.generator(out)
if t!=0:
q50 = q50[:, -1]
q90 = q90[:, -1]
sample_mu[:, t] = q50 * v_batch[:, 0] + v_batch[:, 1]
sample_q90[:, t] = q90* v_batch[:, 0]
if t < (params.predict_steps - 1):
x[:, params.predict_steps+t+1, 0] = q50
return sample_mu, sample_q90
- In the loop `for t in range(params.predict_steps):`, does the loop really execute only once before the function returns?
- When forecasting, the values in the prediction range are unknown. Why, then, is `ys = x[:, params.predict_start:params.predict_start+t+1,:]` used as the input to the decoder?
- What does the step `if t < (params.predict_steps - 1): x[:, params.predict_steps+t+1, 0] = q50` mean?