I am trying to reuse the input image in a conv2d layer in the LeNet example. The reuse_at
primitive works fine with placeholder inputs (i.e. input_image
in the first conv2d). However, when I pass the max-pooled result to the second conv2d layer, no reuse pattern is found for it.
import heterocl as hcl
import hlib
import numpy as np
# Number of images processed per inference batch.
batch_size = 1000
# 16-bit fixed-point type with 14 fractional bits, used for all weight placeholders.
qtype1 = hcl.Fixed(16, 14)
# NOTE(review): qtype2 is identical to qtype1 and is never referenced below —
# presumably intended for activations; confirm or remove.
qtype2 = hcl.Fixed(16, 14)
def build_lenet(input_image, weight_conv1, weight_conv2,
                weight_fc1, weight_fc2, lenet):
    """Build the LeNet-5 inference dataflow graph with HeteroCL's hlib layers.

    Parameters
    ----------
    input_image : hcl placeholder, (batch, 1, 28, 28) NCHW input images.
    weight_conv1 : hcl placeholder, (20, 1, 5, 5) first conv kernel.
    weight_conv2 : hcl placeholder, (50, 20, 5, 5) second conv kernel.
    weight_fc1 : hcl placeholder, (500, 800) first fully-connected weights.
    weight_fc2 : hcl placeholder, (10, 500) second fully-connected weights.
    lenet : hcl placeholder, (batch, 10) output buffer for the softmax result.

    Returns
    -------
    The softmax output tensor.

    Every stage is given an explicit name= so it is addressable on the
    build function (e.g. build_lenet.pool2) for scheduling; the original
    left the second max_pool unnamed, which made it unreachable by name.
    """
    # first conv block: conv -> tanh -> 2x2/2 max pool
    conv1 = hlib.nn.conv2d_nchw(input_image, weight_conv1, name="conv1")
    tanh1 = hlib.nn.tanh(conv1, "tanh1")
    pool1 = hlib.nn.max_pool(tanh1, kernel=(2, 2), stride=(2, 2), name="pool1")
    # second conv block
    conv2 = hlib.nn.conv2d_nchw(pool1, weight_conv2, name="conv2")
    tanh2 = hlib.nn.tanh(conv2, "tanh2")
    pool2 = hlib.nn.max_pool(tanh2, kernel=(2, 2), stride=(2, 2), name="pool2")
    # first fully-connected layer on the flattened (batch, 800) features
    flat = hlib.nn.flatten(pool2)
    fc1 = hlib.nn.dense(flat, weight_fc1)
    tanh3 = hlib.nn.tanh(fc1, "tanh3")
    # second fully-connected layer produces the (batch, 10) class scores
    fc2 = hlib.nn.dense(tanh3, weight_fc2)
    # softmax writes into the caller-provided `lenet` output placeholder
    return hlib.nn.softmax(lenet, fc2)
# Declare the graph inputs. Weights are quantized to the fixed-point qtype1;
# the input image and output buffer use the default (floating-point) type.
input_image = hcl.placeholder((batch_size, 1, 28, 28), "input_image")
weight_conv1 = hcl.placeholder((20, 1, 5, 5), "weight_conv1", qtype1)
weight_conv2 = hcl.placeholder((50, 20, 5, 5), "weight_conv2", qtype1)
weight_fc1 = hcl.placeholder((500, 800), "weight_fc1", qtype1)
weight_fc2 = hcl.placeholder((10, 500), "weight_fc2", qtype1)
# Output buffer that build_lenet's softmax writes into.
lenet = hcl.placeholder((batch_size, 10), "lenet")
# Build the default schedule over the LeNet dataflow graph.
s = hcl.create_schedule([input_image, weight_conv1, weight_conv2,
                         weight_fc1, weight_fc2, lenet], build_lenet)
# Fuse conv1's computation into tanh1 at tanh1's innermost (4th) axis.
s[build_lenet.conv1].compute_at(s[build_lenet.tanh1], build_lenet.tanh1.axis[3])
# Reuse buffer for the placeholder input feeding conv1. NOTE(review): axis[0]
# is the batch axis — reuse is normally applied on a spatial axis; confirm
# this is the intended reuse dimension.
s.reuse_at(input_image, s[build_lenet.conv1], build_lenet.conv1.axis[0])
# Attempt to reuse the intermediate pool1 tensor at conv2. NOTE(review): this
# is the call the question reports as failing ("no reuse pattern found").
# Passing `pool1._op` rather than the tensor itself, and choosing axis[1]
# (the channel axis), both look suspect — reuse_at detects sliding-window
# patterns on spatial axes of the consumed tensor; verify against the
# HeteroCL reuse_at documentation.
s.reuse_at(build_lenet.pool1._op, s[build_lenet.conv2], build_lenet.conv2.axis[1])
# Print the lowered IR to inspect the generated reuse buffers.
print(hcl.lower(s))