Hi there!
I was trying to use the RT-1 saved model but encountered a 'Value Error' as shown below. Did I do something wrong with the arguments (i.e., time_step and policy_state)? I am new to TensorFlow and any suggestion will be appreciated!
Many thanks in advance.
ValueError Traceback (most recent call last)
in <cell line: 3>()
1 time_step = create_time_step()
2 init_state = policy.get_initial_state(1)
----> 3 policy_state = policy.action(time_step, init_state)
1 frames
/usr/local/lib/python3.10/dist-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs)
151 except Exception as e:
152 filtered_tb = _process_traceback_frames(e.traceback)
--> 153 raise e.with_traceback(filtered_tb) from None
154 finally:
155 del filtered_tb
/usr/local/lib/python3.10/dist-packages/tensorflow/python/saved_model/function_deserialization.py in restored_function_body(*args, **kwargs)
299 "Option {}:\n {}\n Keyword arguments: {}".format(
300 index + 1, _pretty_format_positional(positional), keyword))
--> 301 raise ValueError(
302 "Could not find matching concrete function to call loaded from the "
303 f"SavedModel. Got:\n {_pretty_format_positional(args)}\n Keyword "
ValueError: Could not find matching concrete function to call loaded from the SavedModel. Got:
Positional arguments (3 total):
* TimeStep(
{'discount': <tf.Tensor 'time_step_2:0' shape=(1,) dtype=float32>,
'observation': {'base_pose_tool_reached': <tf.Tensor 'time_step_12:0' shape=(1, 7) dtype=float32>,
'gripper_closed': <tf.Tensor 'time_step_11:0' shape=(1, 1) dtype=float32>,
'gripper_closedness_commanded': <tf.Tensor 'time_step_16:0' shape=(1, 1) dtype=float32>,
'height_to_bottom': <tf.Tensor 'time_step_8:0' shape=(1, 1) dtype=float32>,
'image': <tf.Tensor 'time_step_10:0' shape=(1, 256, 320, 3) dtype=uint8>,
'natural_language_embedding': <tf.Tensor 'time_step_6:0' shape=(1, 512) dtype=float32>,
'natural_language_instruction': <tf.Tensor 'time_step_4:0' shape=(1,) dtype=string>,
'orientation_box': <tf.Tensor 'time_step_13:0' shape=(1, 2, 3) dtype=float32>,
'orientation_start': <tf.Tensor 'time_step_3:0' shape=(1, 4) dtype=float32>,
'robot_orientation_position_box': <tf.Tensor 'time_step_14:0' shape=(1, 3, 3) dtype=float32>,
'rotation_delta_to_go': <tf.Tensor 'time_step_5:0' shape=(1, 3) dtype=float32>,
'src_rotation': <tf.Tensor 'time_step_9:0' shape=(1, 4) dtype=float32>,
'vector_to_go': <tf.Tensor 'time_step_7:0' shape=(1, 3) dtype=float32>,
'workspace_bounds': <tf.Tensor 'time_step_15:0' shape=(1, 3, 3) dtype=float32>},
'reward': <tf.Tensor 'time_step_1:0' shape=(1,) dtype=float32>,
'step_type': <tf.Tensor 'time_step:0' shape=(1,) dtype=int32>})
* {'action_tokens': <tf.Tensor 'policy_state_3:0' shape=(1, 6, 11, 1, 1) dtype=int32>,
'image': <tf.Tensor 'policy_state:0' shape=(1, 6, 256, 320, 3) dtype=uint8>,
'step_num': <tf.Tensor 'policy_state_2:0' shape=(1, 1, 1, 1, 1) dtype=int32>,
't': <tf.Tensor 'policy_state_1:0' shape=(1, 1, 1, 1, 1) dtype=int32>}
* None
Keyword arguments: {}
Expected these arguments to match one of the following 2 option(s):
Option 1:
Positional arguments (3 total):
* TimeStep(step_type=TensorSpec(shape=(None,), dtype=tf.int32, name='step_type'), reward=TensorSpec(shape=(None,), dtype=tf.float32, name='reward'), discount=TensorSpec(shape=(None,), dtype=tf.float32, name='discount'), observation={'orientation_start': TensorSpec(shape=(None, 4), dtype=tf.float32, name='observation/orientation_start'), 'vector_to_go': TensorSpec(shape=(None, 3), dtype=tf.float32, name='observation/vector_to_go'), 'orientation_box': TensorSpec(shape=(None, 2, 3), dtype=tf.float32, name='observation/orientation_box'), 'image': TensorSpec(shape=(None, 256, 320, 3), dtype=tf.uint8, name='observation/image'), 'robot_orientation_positions_box': TensorSpec(shape=(None, 3, 3), dtype=tf.float32, name='observation/robot_orientation_positions_box'), 'natural_language_embedding': TensorSpec(shape=(None, 512), dtype=tf.float32, name='observation/natural_language_embedding'), 'rotation_delta_to_go': TensorSpec(shape=(None, 3), dtype=tf.float32, name='observation/rotation_delta_to_go'), 'natural_language_instruction': TensorSpec(shape=(None,), dtype=tf.string, name='observation/natural_language_instruction'), 'gripper_closedness_commanded': TensorSpec(shape=(None, 1), dtype=tf.float32, name='observation/gripper_closedness_commanded'), 'base_pose_tool_reached': TensorSpec(shape=(None, 7), dtype=tf.float32, name='observation/base_pose_tool_reached'), 'workspace_bounds': TensorSpec(shape=(None, 3, 3), dtype=tf.float32, name='observation/workspace_bounds'), 'src_rotatio...
* {'action_tokens': TensorSpec(shape=(None, 6, 11, 1, 1), dtype=tf.int32, name='action_tokens'),
'image': TensorSpec(shape=(None, 6, 256, 320, 3), dtype=tf.uint8, name='image'),
'step_num': TensorSpec(shape=(None, 1, 1, 1, 1), dtype=tf.int32, name='step_num'),
't': TensorSpec(shape=(None, 1, 1, 1, 1), dtype=tf.int32, name='t')}
* None
Keyword arguments: {}
Option 2:
Positional arguments (3 total):
* TimeStep(step_type=TensorSpec(shape=(None,), dtype=tf.int32, name='time_step_step_type'), reward=TensorSpec(shape=(None,), dtype=tf.float32, name='time_step_reward'), discount=TensorSpec(shape=(None,), dtype=tf.float32, name='time_step_discount'), observation={'base_pose_tool_reached': TensorSpec(shape=(None, 7), dtype=tf.float32, name='time_step_observation_base_pose_tool_reached'), 'workspace_bounds': TensorSpec(shape=(None, 3, 3), dtype=tf.float32, name='time_step_observation_workspace_bounds'), 'image': TensorSpec(shape=(None, 256, 320, 3), dtype=tf.uint8, name='time_step_observation_image'), 'gripper_closedness_commanded': TensorSpec(shape=(None, 1), dtype=tf.float32, name='time_step_observation_gripper_closedness_commanded'), 'orientation_start': TensorSpec(shape=(None, 4), dtype=tf.float32, name='time_step_observation_orientation_start'), 'src_rotation': TensorSpec(shape=(None, 4), dtype=tf.float32, name='time_step_observation_src_rotation'), 'orientation_box': TensorSpec(shape=(None, 2, 3), dtype=tf.float32, name='time_step_observation_orientation_box'), 'height_to_bottom': TensorSpec(shape=(None, 1), dtype=tf.float32, name='time_step_observation_height_to_bottom'), 'rotation_delta_to_go': TensorSpec(shape=(None, 3), dtype=tf.float32, name='time_step_observation_rotation_delta_to_go'), 'gripper_closed': TensorSpec(shape=(None, 1), dtype=tf.float32, name='time_step_observation_gripper_closed'), 'robot_orientation_positions_box': TensorSpec(shape=(None, 3, 3), ...
* {'action_tokens': TensorSpec(shape=(None, 6, 11, 1, 1), dtype=tf.int32, name='policy_state_action_tokens'),
'image': TensorSpec(shape=(None, 6, 256, 320, 3), dtype=tf.uint8, name='policy_state_image'),
'step_num': TensorSpec(shape=(None, 1, 1, 1, 1), dtype=tf.int32, name='policy_state_step_num'),
't': TensorSpec(shape=(None, 1, 1, 1, 1), dtype=tf.int32, name='policy_state_t')}
* None
Keyword arguments: {}
the code snippets are shown as follows:
from tf_agents.policies import py_tf_eager_policy
model_path = os.path.join(os.getcwd(), '../trained_checkpoints/rt1main')
print('model path', model_path)
policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy(
model_path=model_path,
load_specs_from_pbtxt=True,
use_tf_function=True)
def create_time_step(seed=123,t=0):
import numpy as np
from tf_agents.trajectories.time_step import StepType, TimeStep
HEIGHT, WIDTH = 256, 320
np.random.seed(seed)
observations = {
'orientation_start': tf.constant(0.0, shape=(1, 4), dtype=tf.dtypes.float32),
'natural_language_instruction': tf.constant('', shape=(1,), dtype=tf.dtypes.string),
'rotation_delta_to_go': tf.constant(0.0, shape=(1, 3), dtype=tf.dtypes.float32),
'natural_language_embedding': tf.constant(0.0, shape=(1, 512), dtype=tf.dtypes.float32),
'vector_to_go': tf.constant(0.0, shape=(1, 3), dtype=tf.dtypes.float32),
'height_to_bottom': tf.constant(0.0, shape=(1,1), dtype=tf.dtypes.float32),
'src_rotation': tf.constant(0.0, shape=(1, 4), dtype=tf.dtypes.float32),
'image': tf.constant(0, shape=(1, 256, 320, 3), dtype=tf.dtypes.uint8),
'gripper_closed': tf.constant(0.0, shape=(1,1), dtype=tf.dtypes.float32),
'base_pose_tool_reached': tf.constant(0.0, shape=(1, 7), dtype=tf.dtypes.float32),
'orientation_box': tf.constant(0.0, shape=(1, 2, 3), dtype=tf.dtypes.float32),
'robot_orientation_position_box': tf.constant(0.0, shape=(1,3,3), dtype=tf.dtypes.float32),
'workspace_bounds':tf.constant(0.0, shape=(1, 3, 3), dtype=tf.dtypes.float32),
'gripper_closedness_commanded': tf.constant(0.0, shape=(1, 1), dtype=tf.dtypes.float32),
}
time_step = TimeStep(
observation=observations,
reward=tf.constant(
0.0, shape=(1, ), dtype=tf.dtypes.float32,
),
discount=tf.constant(
0.0, shape=(1, ), dtype=tf.dtypes.float32,
),
step_type=tf.constant(
t, shape=(1, ), dtype=tf.dtypes.int32,
),
)
return time_step
time_step = create_time_step()
init_state = policy.get_initial_state(1)
policy_state = policy.action(time_step, init_state)