diff --git a/droidlet/dialog/post_process_logical_form.py b/droidlet/dialog/post_process_logical_form.py
index e4aff37ae8..6674fa80b3 100644
--- a/droidlet/dialog/post_process_logical_form.py
+++ b/droidlet/dialog/post_process_logical_form.py
@@ -3,6 +3,7 @@
 """
 import copy
 
+
 # move location inside reference_object for Fill and Destroy actions
 def fix_fill_and_destroy_location(action_dict):
     action_name = action_dict["action_type"]
diff --git a/droidlet/interpreter/craftassist/block_handler.py b/droidlet/interpreter/craftassist/block_handler.py
index 485b8e2469..9920176495 100644
--- a/droidlet/interpreter/craftassist/block_handler.py
+++ b/droidlet/interpreter/craftassist/block_handler.py
@@ -5,6 +5,7 @@
 import random
 import Levenshtein
 
+
 # TODO FILTERS!
 def get_block_type(s, block_data_info, color_bid_map) -> Tuple:
     """string -> (id, meta)
diff --git a/droidlet/interpreter/craftassist/tasks.py b/droidlet/interpreter/craftassist/tasks.py
index 62a8f07aef..d4d82752be 100644
--- a/droidlet/interpreter/craftassist/tasks.py
+++ b/droidlet/interpreter/craftassist/tasks.py
@@ -214,7 +214,7 @@ def step(self):
         # replace blocks if possible
         R = self.replace.copy()
         self.replace.clear()
-        for (pos, idm) in R:
+        for pos, idm in R:
             agent.set_held_item(idm)
             if agent.place_block(*pos):
                 logging.debug("Move: replaced {}".format((pos, idm)))
@@ -254,7 +254,7 @@ def handle_no_path(self, agent):
         x, y, z = newpos
         newpos_blocks = agent.get_blocks(x, x, y, y + 1, z, z)
         # dig if necessary
-        for (bp, idm) in npy_to_blocks_list(newpos_blocks, newpos):
+        for bp, idm in npy_to_blocks_list(newpos_blocks, newpos):
             self.replace.add((bp, idm))
             agent.dig(*bp)
         # move
diff --git a/droidlet/interpreter/interpret_comparators.py b/droidlet/interpreter/interpret_comparators.py
index b5a03caf69..cf770b14a3 100644
--- a/droidlet/interpreter/interpret_comparators.py
+++ b/droidlet/interpreter/interpret_comparators.py
@@ -7,6 +7,7 @@
 from droidlet.memory.memory_values import FilterValue
 from droidlet.memory.memory_attributes import ComparatorAttribute
 
+
 # TODO distance between
 # TODO make this more modular. what if we want to redefine just distance_between in a new agent?
 def interpret_comparator(interpreter, speaker, d, is_condition=True):
diff --git a/droidlet/interpreter/interpret_reference_objects.py b/droidlet/interpreter/interpret_reference_objects.py
index a4dc0bd419..ce33415c79 100644
--- a/droidlet/interpreter/interpret_reference_objects.py
+++ b/droidlet/interpreter/interpret_reference_objects.py
@@ -187,7 +187,6 @@ def interpret_reference_object(
         _, clarification_task_mems = interpreter.memory.basic_search(clarification_query)
         # does a clarification task referencing this interpreter exist?
         if not clarification_task_mems:
-
             mems = maybe_get_text_span_mems(interpreter, speaker, d)
             if mems:
                 update_attended_and_link_lf(interpreter, mems)
diff --git a/droidlet/interpreter/robot/default_behaviors.py b/droidlet/interpreter/robot/default_behaviors.py
index 4309d58c13..72942827be 100644
--- a/droidlet/interpreter/robot/default_behaviors.py
+++ b/droidlet/interpreter/robot/default_behaviors.py
@@ -42,6 +42,7 @@ def init_logger():
 
 init_logger()
 
+
 # TODO: Move these utils to a suitable place - as a class method in TripleNode
 def add_or_replace(agent, pred_text, obj_text):
     memids, _ = agent.memory.basic_search(f"SELECT uuid FROM Triple WHERE pred_text={pred_text}")
diff --git a/droidlet/interpreter/robot/task.py b/droidlet/interpreter/robot/task.py
index c13694983a..032f0aa549 100644
--- a/droidlet/interpreter/robot/task.py
+++ b/droidlet/interpreter/robot/task.py
@@ -1,6 +1,8 @@
 """
 Copyright (c) Facebook, Inc. and its affiliates.
 """
+
+
 # put a counter and a max_count so can't get stuck?
 class Task(object):
     def __init__(self, featurizer=None):
diff --git a/droidlet/interpreter/robot/tasks.py b/droidlet/interpreter/robot/tasks.py
index 4d2553ce7d..d1437ca1af 100644
--- a/droidlet/interpreter/robot/tasks.py
+++ b/droidlet/interpreter/robot/tasks.py
@@ -468,6 +468,7 @@ def step(self):
         # Get a list of current detections
         objects = DetectedObjectNode.get_all(self.agent.memory)
         pos = self.agent.mover.get_base_pos_in_canonical_coords()
+
         # pick all from unexamined, in-sight object
         def pick_random_in_sight(objects, base_pos):
             for x in objects:
diff --git a/droidlet/lowlevel/hello_robot/remote/record_aggregate_metrics_and_videos.py b/droidlet/lowlevel/hello_robot/remote/record_aggregate_metrics_and_videos.py
index 8004e8ceeb..02ee65a380 100644
--- a/droidlet/lowlevel/hello_robot/remote/record_aggregate_metrics_and_videos.py
+++ b/droidlet/lowlevel/hello_robot/remote/record_aggregate_metrics_and_videos.py
@@ -94,7 +94,7 @@ def record_video(image_filenames, image_timestamps, video_filename, fps=30, real
     out = cv2.VideoWriter(video_filename, cv2.VideoWriter_fourcc(*"mp4v"), fps, size)
     if realtime:
         prev_timestamp = 0
-        for (timestamp, image) in zip(image_timestamps, images):
+        for timestamp, image in zip(image_timestamps, images):
             frame_repeats = round((timestamp - prev_timestamp) * fps)
             for _ in range(frame_repeats):
                 out.write(image)
diff --git a/droidlet/lowlevel/hello_robot/remote/remote_hello_saver.py b/droidlet/lowlevel/hello_robot/remote/remote_hello_saver.py
index de6bbfa404..3175a13831 100644
--- a/droidlet/lowlevel/hello_robot/remote/remote_hello_saver.py
+++ b/droidlet/lowlevel/hello_robot/remote/remote_hello_saver.py
@@ -36,7 +36,6 @@ def return_paths(self, id_):
         return img_folder, img_folder_dbg, depth_folder, lidar_folder, data_file
 
     def create_dirs(self, id_):
-
         img_folder, img_folder_dbg, depth_folder, lidar_folder, data_file = self.return_paths(id_)
 
         for x in [img_folder, img_folder_dbg, depth_folder, lidar_folder]:
diff --git a/droidlet/lowlevel/hello_robot/remote/stretch_ros_move_api.py b/droidlet/lowlevel/hello_robot/remote/stretch_ros_move_api.py
index 5dbf21a8d2..28a7f1ba83 100755
--- a/droidlet/lowlevel/hello_robot/remote/stretch_ros_move_api.py
+++ b/droidlet/lowlevel/hello_robot/remote/stretch_ros_move_api.py
@@ -133,7 +133,6 @@ def stop(self):
         return result
 
     def background_loop(self):
-
         rospy.Subscriber(
             "/stretch/joint_states", JointState, self._joint_states_callback, queue_size=1
         )
diff --git a/droidlet/lowlevel/hello_robot/remote/utils.py b/droidlet/lowlevel/hello_robot/remote/utils.py
index ec111ac30d..0e2f3dfb18 100644
--- a/droidlet/lowlevel/hello_robot/remote/utils.py
+++ b/droidlet/lowlevel/hello_robot/remote/utils.py
@@ -65,7 +65,6 @@ def is_obstacle_ahead(dist, depth_fn):
 
     # Check if a significantly large obstacle is present and filter out smaller noisy regions
     if np.sum(mask) / 255.0 > 0.01 * mask.shape[0] * mask.shape[1]:
-
         image_gray = cv2.cvtColor(cv2.bitwise_and(rgb, rgb, mask=mask), cv2.COLOR_BGR2GRAY)
         edges = cv2.Canny(image_gray, 100, 200)
         edges = cv2.dilate(edges, None)
diff --git a/droidlet/lowlevel/locobot/remote/policy/utils/model.py b/droidlet/lowlevel/locobot/remote/policy/utils/model.py
index 19bdfbf6aa..88d391063f 100644
--- a/droidlet/lowlevel/locobot/remote/policy/utils/model.py
+++ b/droidlet/lowlevel/locobot/remote/policy/utils/model.py
@@ -26,7 +26,6 @@ def forward(self, x):
 # https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/master/a2c_ppo_acktr/model.py#L82
 class NNBase(nn.Module):
     def __init__(self, recurrent, recurrent_input_size, hidden_size):
-
         super(NNBase, self).__init__()
         self._hidden_size = hidden_size
         self._recurrent = recurrent
diff --git a/droidlet/lowlevel/locobot/remote/pyrobot/habitat/base_control_utils.py b/droidlet/lowlevel/locobot/remote/pyrobot/habitat/base_control_utils.py
index 3f0ec4342c..d467a49890 100644
--- a/droidlet/lowlevel/locobot/remote/pyrobot/habitat/base_control_utils.py
+++ b/droidlet/lowlevel/locobot/remote/pyrobot/habitat/base_control_utils.py
@@ -25,7 +25,6 @@ class LocalActionServer(object):
     """docstring for LocalActionServer"""
 
     def __init__(self):
-
         self._lock = threading.RLock()
         self._state = LocalActionStatus.UNKOWN
 
diff --git a/droidlet/lowlevel/minecraft/pyworld/run_world.py b/droidlet/lowlevel/minecraft/pyworld/run_world.py
index b71f8095d7..4b1bcb4586 100644
--- a/droidlet/lowlevel/minecraft/pyworld/run_world.py
+++ b/droidlet/lowlevel/minecraft/pyworld/run_world.py
@@ -52,7 +52,6 @@ def block_generator(world):
 
 
 if __name__ == "__main__":
-
     ticker = Ticker(tick_rate=0.01, step_rate=0.2, ip="localhost", port=6002)
     ticker_thread = Thread(target=ticker.start, args=())
     ticker_thread.start()
diff --git a/droidlet/lowlevel/minecraft/pyworld/world.py b/droidlet/lowlevel/minecraft/pyworld/world.py
index 9f0ffe4e94..7353db294a 100644
--- a/droidlet/lowlevel/minecraft/pyworld/world.py
+++ b/droidlet/lowlevel/minecraft/pyworld/world.py
@@ -242,7 +242,7 @@ def blocks_to_dict(self):
     def get_idm_at_locs(self, xyzs: Sequence[XYZ]) -> Dict[XYZ, IDM]:
         """Return the ground truth block state"""
         d = {}
-        for (x, y, z) in xyzs:
+        for x, y, z in xyzs:
             B = self.get_blocks(x, x, y, y, z, z)
             d[(x, y, z)] = tuple(B[0, 0, 0, :])
         return d
diff --git a/droidlet/lowlevel/minecraft/small_scenes_with_shapes.py b/droidlet/lowlevel/minecraft/small_scenes_with_shapes.py
index f11795b8e6..34f2b339fa 100644
--- a/droidlet/lowlevel/minecraft/small_scenes_with_shapes.py
+++ b/droidlet/lowlevel/minecraft/small_scenes_with_shapes.py
@@ -265,7 +265,6 @@ def build_shape_scene(args):
     fence = getattr(args, "fence", False)
     blocks = build_base_world(args.SL, args.H, args.GROUND_DEPTH, fence=fence)
     if args.iglu_scenes:
-
         with open(args.iglu_scenes, "rb") as f:
             assets = pickle.load(f)
             sid = np.random.choice(list(assets.keys()))
diff --git a/droidlet/lowlevel/test/test_transforms3d.py b/droidlet/lowlevel/test/test_transforms3d.py
index 5527060079..f15fd92a9b 100644
--- a/droidlet/lowlevel/test/test_transforms3d.py
+++ b/droidlet/lowlevel/test/test_transforms3d.py
@@ -71,5 +71,4 @@ def test_open3d_pcd_transform(self):
 
 
 if __name__ == "__main__":
-
     unittest.main()
diff --git a/droidlet/memory/craftassist/mc_attributes.py b/droidlet/memory/craftassist/mc_attributes.py
index f580122b87..f3f1754df3 100644
--- a/droidlet/memory/craftassist/mc_attributes.py
+++ b/droidlet/memory/craftassist/mc_attributes.py
@@ -3,6 +3,7 @@
 """
 from droidlet.memory.memory_filters import Attribute
 
+
 # TODO sqlize these?
 # FIXME!!!!! rn this will not accurately count voxels in
 # InstSeg objects with given properties;
diff --git a/droidlet/memory/craftassist/mc_memory.py b/droidlet/memory/craftassist/mc_memory.py
index 1dfcde46a9..aa546e57f0 100644
--- a/droidlet/memory/craftassist/mc_memory.py
+++ b/droidlet/memory/craftassist/mc_memory.py
@@ -290,7 +290,7 @@ def update(self, perception_output: namedtuple = None, areas_to_perceive: List =
 
         # 5. Update the state of the world when a block is changed.
         if perception_output.changed_block_attributes:
-            for (xyz, idm) in perception_output.changed_block_attributes:
+            for xyz, idm in perception_output.changed_block_attributes:
                 # 5.1 Update old instance segmentation if needed
                 self.maybe_remove_inst_seg(xyz)
 
@@ -642,7 +642,7 @@ def _load_mob_types(self, mobs, mob_property_data, load_mob_types=True):
             return
 
         mob_name_to_properties = mob_property_data.get("name_to_properties", {})
-        for (name, m) in mobs.items():
+        for name, m in mobs.items():
             type_name = "spawn " + name
 
             # load single mob as schematics
diff --git a/droidlet/memory/craftassist/mc_memory_nodes.py b/droidlet/memory/craftassist/mc_memory_nodes.py
index 8ce3283321..8caf4960ea 100644
--- a/droidlet/memory/craftassist/mc_memory_nodes.py
+++ b/droidlet/memory/craftassist/mc_memory_nodes.py
@@ -723,7 +723,7 @@ def create(cls, memory, blocks: Sequence[Block]) -> str:
         >>> create(memory, blocks)
         """
         memid = cls.new(memory)
-        for ((x, y, z), (b, m)) in blocks:
+        for (x, y, z), (b, m) in blocks:
             memory.db_write(
                 """
                 INSERT INTO Schematics(uuid, x, y, z, bid, meta)
diff --git a/droidlet/memory/memory_filters.py b/droidlet/memory/memory_filters.py
index 5407fdf8ef..65a744531a 100644
--- a/droidlet/memory/memory_filters.py
+++ b/droidlet/memory/memory_filters.py
@@ -13,6 +13,7 @@
 ### in various filter interpreters
 ####################################################################################
 
+
 # attribute has function signature list(mems) --> list(value)
 class Attribute:
     def __init__(self, memory):
diff --git a/droidlet/memory/place_field.py b/droidlet/memory/place_field.py
index af4808da02..fe3b777955 100644
--- a/droidlet/memory/place_field.py
+++ b/droidlet/memory/place_field.py
@@ -116,7 +116,7 @@ def sync_traversible(self, locs, h=0):
         # overwrite traversibility map from slam service
         self.maps[h]["map"][:] = 0
         self.maps[h]["updated"][:] = self.get_time()
-        for (x, z) in locs:
+        for x, z in locs:
             i, j = self.real2map(x, z, h)
             s = max(i - self.map_size + 1, j - self.map_size + 1, -i, -j)
             if s > 0:
diff --git a/droidlet/perception/craftassist/low_level_perception.py b/droidlet/perception/craftassist/low_level_perception.py
index 357b53870a..67c7162a2f 100644
--- a/droidlet/perception/craftassist/low_level_perception.py
+++ b/droidlet/perception/craftassist/low_level_perception.py
@@ -104,7 +104,7 @@ def perceive(self, force=False):
         )
         # Changed blocks and their attributes
         perceive_info["changed_block_attributes"] = {}
-        for (xyz, idm) in self.agent.safe_get_changed_blocks():
+        for xyz, idm in self.agent.safe_get_changed_blocks():
             interesting, player_placed, agent_placed = self.on_block_changed(
                 xyz, idm, boring_blocks
             )
diff --git a/droidlet/perception/craftassist/shape_transforms.py b/droidlet/perception/craftassist/shape_transforms.py
index bd5cf7824a..09af14a47a 100644
--- a/droidlet/perception/craftassist/shape_transforms.py
+++ b/droidlet/perception/craftassist/shape_transforms.py
@@ -83,6 +83,7 @@ def moment_at_center(npy, sl):
 
 ## THICKEN #############################################
 
+
 # this doesn't preserve corners. should it?
 # separate deltas per dim?
 def thicker_blocks(blocks, delta=1):
@@ -198,7 +199,7 @@ def scale_sparse(blocks, lams=(1.0, 1.0, 1.0)):
     cell_szs = szs / big_szs
     big_szs = big_szs.astype("int32")
     big = np.zeros(tuple(big_szs) + (2,)).astype("int32")
-    for (x, y, z) in inp_dict.keys():
+    for x, y, z in inp_dict.keys():
         for i in range(flint(x * lams[0]), ceint(x * lams[0]) + 2):
             for j in range(flint(y * lams[1]), ceint(y * lams[1]) + 2):
                 for k in range(flint(z * lams[2]), ceint(z * lams[2]) + 2):
diff --git a/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/coco.py b/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/coco.py
index 6803c468fe..57fac6f224 100644
--- a/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/coco.py
+++ b/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/coco.py
@@ -138,7 +138,6 @@ def get_in_coco_format(self, idx):
 
 
 def make_coco_transforms(image_set):
-
     normalize = T.Compose(
         [T.ToTensor(), T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
     )
diff --git a/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/coco_panoptic.py b/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/coco_panoptic.py
index 2b5e456f80..7cfe75fc0d 100644
--- a/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/coco_panoptic.py
+++ b/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/coco_panoptic.py
@@ -78,7 +78,6 @@ def get_height_and_width(self, idx):
 
 
 def make_coco_panoptic_transforms(image_set):
-
     normalize = T.Compose(
         [
             T.ToTensor(),
diff --git a/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/house.py b/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/house.py
index b2fc895b66..ec7806d66f 100644
--- a/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/house.py
+++ b/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/house.py
@@ -81,6 +81,7 @@ def pad_to_sidelength(schematic, labels=None, nothing_id=0, sidelength=32):
 
 # TODO cut outliers
 
+
 # TODO simplify
 def fit_in_sidelength(schematic, labels=None, nothing_id=0, sl=32, max_shift=0):
     schematic, labels = pad_to_sidelength(
@@ -104,7 +105,6 @@ def fit_in_sidelength(schematic, labels=None, nothing_id=0, sl=32, max_shift=0):
 
 
 def make_example_from_raw(schematic, labels=None, augment={}, nothing_id=0, sl=32):
-
     max_shift = augment.get("max_shift", 0)
     s, l, o = fit_in_sidelength(
         schematic, labels=labels, nothing_id=nothing_id, max_shift=max_shift
diff --git a/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/lvis.py b/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/lvis.py
index 451986b02d..7765d57609 100644
--- a/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/lvis.py
+++ b/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/lvis.py
@@ -270,7 +270,6 @@ def get_in_coco_format(self, idx):
 
 
 def make_lvis_transforms(image_set):
-
     normalize = T.Compose(
         [T.ToTensor(), T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
     )
diff --git a/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/voc.py b/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/voc.py
index 907ebcd976..6ce4ea4ea1 100644
--- a/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/voc.py
+++ b/droidlet/perception/craftassist/voxel_models/detection-transformer/datasets/voc.py
@@ -32,7 +32,6 @@ def get_in_coco_format(self, idx: int):
 
 
 def make_voc_transforms(image_set, remove_difficult):
-
     normalize = T.Compose(
         [T.ToTensor(), T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
     )
diff --git a/droidlet/perception/craftassist/voxel_models/detection-transformer/models/semseg.py b/droidlet/perception/craftassist/voxel_models/detection-transformer/models/semseg.py
index ce3e79a3bc..71ab519db5 100644
--- a/droidlet/perception/craftassist/voxel_models/detection-transformer/models/semseg.py
+++ b/droidlet/perception/craftassist/voxel_models/detection-transformer/models/semseg.py
@@ -100,7 +100,6 @@ def fit_in_sidelength(schematic, labels=None, nothing_id=0, sl=32, max_shift=0):
 
 
 def make_example_from_raw(schematic, labels=None, augment={}, nothing_id=0, sl=32):
-
     max_shift = augment.get("max_shift", 0)
     s, l, o = fit_in_sidelength(
         schematic, labels=labels, nothing_id=nothing_id, max_shift=max_shift
diff --git a/droidlet/perception/craftassist/voxel_models/semantic_segmentation/data_loaders.py b/droidlet/perception/craftassist/voxel_models/semantic_segmentation/data_loaders.py
index 5482f51f4d..bdd9be4c0f 100644
--- a/droidlet/perception/craftassist/voxel_models/semantic_segmentation/data_loaders.py
+++ b/droidlet/perception/craftassist/voxel_models/semantic_segmentation/data_loaders.py
@@ -83,6 +83,7 @@ def pad_to_sidelength(schematic, labels=None, nothing_id=0, sidelength=32):
 
 # TODO cut outliers
 
+
 # TODO simplify
 def fit_in_sidelength(schematic, labels=None, nothing_id=0, sl=32, max_shift=0):
     """Adjust schematics to the center of the padded one"""
diff --git a/droidlet/perception/robot/handlers/label_propagate.py b/droidlet/perception/robot/handlers/label_propagate.py
index 458ad1e08e..2e947a3921 100644
--- a/droidlet/perception/robot/handlers/label_propagate.py
+++ b/droidlet/perception/robot/handlers/label_propagate.py
@@ -21,6 +21,7 @@
 CAMERA_HEIGHT = 0.6
 trans = np.array([0, 0, CAMERA_HEIGHT])
 
+
 # TODO: Consolidate camera intrinsics and their associated utils across locobot and habitat.
 def compute_uvone(height, width):
     intrinsic_mat_inv = np.linalg.inv(intrinsic_mat)
diff --git a/droidlet/perception/semantic_parsing/load_and_check_datasets.py b/droidlet/perception/semantic_parsing/load_and_check_datasets.py
index 5bf9a1c7b0..4879a76c6b 100644
--- a/droidlet/perception/semantic_parsing/load_and_check_datasets.py
+++ b/droidlet/perception/semantic_parsing/load_and_check_datasets.py
@@ -8,7 +8,7 @@ def get_ground_truth(no_ground_truth, ground_truth_data_dir):
     if not no_ground_truth:
         if os.path.isdir(ground_truth_data_dir):
             gt_data_directory = ground_truth_data_dir + "datasets/"
-            for (dirpath, dirnames, filenames) in os.walk(gt_data_directory):
+            for dirpath, dirnames, filenames in os.walk(gt_data_directory):
                 for f_name in filenames:
                     file = gt_data_directory + f_name
                     with open(file) as f:
diff --git a/droidlet/perception/semantic_parsing/nsp_templated_data_generation/build_scene.py b/droidlet/perception/semantic_parsing/nsp_templated_data_generation/build_scene.py
index 804b66ebac..63780bc09c 100644
--- a/droidlet/perception/semantic_parsing/nsp_templated_data_generation/build_scene.py
+++ b/droidlet/perception/semantic_parsing/nsp_templated_data_generation/build_scene.py
@@ -190,7 +190,7 @@ def get_good_ad(template_attributes, flat=False):
     if len(r) > 0:
         allowed_blocks = template_attributes.get("allowed_blocktypes")
         if allowed_blocks:
-            for (_, btype) in r:
+            for _, btype in r:
                 if btype not in allowed_blocks:
                     new_btype = random.choice(allowed_blocks)
                     text = text.replace(btype, new_btype)
@@ -507,7 +507,6 @@ def build_scene(template_attributes, sl=32, flat=False):
 
 
 if __name__ == "__main__":
-
     template_attributes = {"count": range(1, 5)}
     template_attributes["step"] = range(1, 10)
     template_attributes["non_shape_names"] = list(SPECIAL_SHAPES_CANONICALIZE.keys())
diff --git a/droidlet/perception/semantic_parsing/nsp_templated_data_generation/generate_data/action_node.py b/droidlet/perception/semantic_parsing/nsp_templated_data_generation/generate_data/action_node.py
index b10b8469ea..539e890ce4 100644
--- a/droidlet/perception/semantic_parsing/nsp_templated_data_generation/generate_data/action_node.py
+++ b/droidlet/perception/semantic_parsing/nsp_templated_data_generation/generate_data/action_node.py
@@ -222,7 +222,6 @@ def substitute_with_spans(action_description_split, d):
 
     if action_dict.get("answer_type", None):
         if action_dict["answer_type"] == "TAG" and "tag_name" in action_dict:
-
             # fix for tag values to new
             tag_val = action_dict["tag_name"]
             if tag_val == "action_reference_object_name":
diff --git a/droidlet/perception/semantic_parsing/nsp_templated_data_generation/generate_data/human_bot_dialogue.py b/droidlet/perception/semantic_parsing/nsp_templated_data_generation/generate_data/human_bot_dialogue.py
index f583e5dab5..7c538d14f6 100644
--- a/droidlet/perception/semantic_parsing/nsp_templated_data_generation/generate_data/human_bot_dialogue.py
+++ b/droidlet/perception/semantic_parsing/nsp_templated_data_generation/generate_data/human_bot_dialogue.py
@@ -114,7 +114,6 @@ def _generate_description(self):
         for j, templ in enumerate(self.template):
             result = []
             for i, key in enumerate(templ):
-
                 # get the text from template object
                 item = key.generate_description(arg_index=0, index=i, templ_index=j)
 
@@ -221,7 +220,6 @@ def _generate_description(self):
         for j, templ in enumerate(self.template):
             result = []
             for i, key in enumerate(templ):
-
                 # get the text from template object
                 item = key.generate_description(arg_index=0, index=i, templ_index=j)
 
diff --git a/droidlet/perception/semantic_parsing/nsp_templated_data_generation/template_objects/block_object.py b/droidlet/perception/semantic_parsing/nsp_templated_data_generation/template_objects/block_object.py
index d17b9756c5..0669395e68 100644
--- a/droidlet/perception/semantic_parsing/nsp_templated_data_generation/template_objects/block_object.py
+++ b/droidlet/perception/semantic_parsing/nsp_templated_data_generation/template_objects/block_object.py
@@ -11,6 +11,7 @@
 ### BLOCKOBJECT TEMPLATES ###
 #############################
 
+
 # TODO: refactor this function.
 def define_block_object_type(
     template_obj,
diff --git a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_add_return_quantity.py b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_add_return_quantity.py
index 5648cd69fc..e03a7a9551 100644
--- a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_add_return_quantity.py
+++ b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_add_return_quantity.py
@@ -38,7 +38,7 @@ def update_data(folder):
     file called : f_name + "_new.txt" (templated.txt -> templated_new.txt)
     """
     f = []
-    for (dirpath, dirnames, filenames) in walk(folder):
+    for dirpath, dirnames, filenames in walk(folder):
         for f_name in filenames:
             if f_name == "templated_modify.txt":
                 continue
diff --git a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_filters_in_dance_type.py b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_filters_in_dance_type.py
index 947f7ec11a..072ad720f4 100644
--- a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_filters_in_dance_type.py
+++ b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_filters_in_dance_type.py
@@ -70,7 +70,7 @@ def update_data(folder):
     file called : f_name + "_new.txt" (templated.txt -> templated_new.txt)
     """
     f = []
-    for (dirpath, dirnames, filenames) in walk(folder):
+    for dirpath, dirnames, filenames in walk(folder):
         for f_name in filenames:
             if f_name == "templated_modify.txt":
                 continue
diff --git a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_filters_in_location.py b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_filters_in_location.py
index 1ba9fcd9c9..c8d85f1129 100644
--- a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_filters_in_location.py
+++ b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_filters_in_location.py
@@ -39,7 +39,7 @@ def update_data(folder):
     performs update on the dataset and writes output to a new
     file called : f_name + "_new.txt" (templated.txt -> templated_new.txt)
     """
-    for (dirpath, dirnames, filenames) in walk(folder):
+    for dirpath, dirnames, filenames in walk(folder):
         for f_name in filenames:
             action_names = {}
             count = 0
diff --git a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_filters_in_schematics.py b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_filters_in_schematics.py
index 5f7cee0ab2..ae78bae331 100644
--- a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_filters_in_schematics.py
+++ b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_filters_in_schematics.py
@@ -51,7 +51,7 @@ def update_data(folder):
     file called : f_name + "_new.txt" (templated.txt -> templated_new.txt)
     """
     f = []
-    for (dirpath, dirnames, filenames) in walk(folder):
+    for dirpath, dirnames, filenames in walk(folder):
         for f_name in filenames:
             if f_name == "templated_modify.txt":
                 continue
diff --git a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_repeat_all_refactor.py b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_repeat_all_refactor.py
index 7b03986e4a..b0d651e959 100644
--- a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_repeat_all_refactor.py
+++ b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_repeat_all_refactor.py
@@ -102,7 +102,7 @@ def update_data(folder):
     file called : f_name + "_new.txt" (templated.txt -> templated_new.txt)
     """
     f = []
-    for (dirpath, dirnames, filenames) in walk(folder):
+    for dirpath, dirnames, filenames in walk(folder):
         for f_name in filenames:
             count = 0
             if f_name == "templated_modify.txt":
diff --git a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_repeat_in_action.py b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_repeat_in_action.py
index 1528ebecc1..025d497772 100644
--- a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_repeat_in_action.py
+++ b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_repeat_in_action.py
@@ -18,7 +18,7 @@ def update_data(folder):
     file called : f_name + "_new.txt" (templated.txt -> templated_new.txt)
     """
     f = []
-    for (dirpath, dirnames, filenames) in walk(folder):
+    for dirpath, dirnames, filenames in walk(folder):
         for f_name in filenames:
             action_names = {}
             count = 0
diff --git a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_repeat_in_filters.py b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_repeat_in_filters.py
index c26b724332..0a0af630fd 100644
--- a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_repeat_in_filters.py
+++ b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_repeat_in_filters.py
@@ -20,7 +20,7 @@ def update_schematic(folder):
     """
     f = []
     action_list = ["DIG", "BUILD"]
-    for (dirpath, dirnames, filenames) in walk(folder):
+    for dirpath, dirnames, filenames in walk(folder):
         for f_name in filenames:
             count = 0
             count1 = 0
@@ -93,7 +93,7 @@ def update_reference_object(folder):
     """
     f = []
     action_list = ["DESTROY", "SPAWN", "BUILD", "FILL", "GET", "SCOUT", "OTHERACTION"]
-    for (dirpath, dirnames, filenames) in walk(folder):
+    for dirpath, dirnames, filenames in walk(folder):
         for f_name in filenames:
             count = 0
             count1 = 0
diff --git a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_schematics_in_fill.py b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_schematics_in_fill.py
index a991d9a6dc..31238fc755 100644
--- a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_schematics_in_fill.py
+++ b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/code_mod_schematics_in_fill.py
@@ -17,7 +17,7 @@ def update_data(folder):
 
     """
     f = []
-    for (dirpath, dirnames, filenames) in walk(folder):
+    for dirpath, dirnames, filenames in walk(folder):
         for f_name in filenames:
             count = 0
             if f_name == "templated_modify.txt":
diff --git a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/post_process_update_fixed_values.py b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/post_process_update_fixed_values.py
index d7683f55d6..04b726ac07 100644
--- a/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/post_process_update_fixed_values.py
+++ b/droidlet/perception/semantic_parsing/nsp_transformer_model/data_processing_scripts/post_processing_code_mods/post_process_update_fixed_values.py
@@ -115,7 +115,7 @@ def update_data(folder):
     performs update on the dataset and writes output to a new
     file called : f_name + "_new.txt" (templated.txt -> templated_new.txt)
     """
-    for (dirpath, dirnames, filenames) in walk(folder):
+    for dirpath, dirnames, filenames in walk(folder):
         for f_name in filenames:
             if f_name == "templated_modify.txt":
                 continue
diff --git a/droidlet/perception/semantic_parsing/nsp_transformer_model/label_smoothing_loss.py b/droidlet/perception/semantic_parsing/nsp_transformer_model/label_smoothing_loss.py
index ad762513a2..b66378aa4c 100644
--- a/droidlet/perception/semantic_parsing/nsp_transformer_model/label_smoothing_loss.py
+++ b/droidlet/perception/semantic_parsing/nsp_transformer_model/label_smoothing_loss.py
@@ -1,6 +1,7 @@
 import torch
 import torch.nn as nn
 
+
 # --------------------------
 # Label smoothing loss
 # --------------------------
diff --git a/droidlet/perception/semantic_parsing/nsp_transformer_model/modeling_bert.py b/droidlet/perception/semantic_parsing/nsp_transformer_model/modeling_bert.py
index 2d75239068..029bb26729 100644
--- a/droidlet/perception/semantic_parsing/nsp_transformer_model/modeling_bert.py
+++ b/droidlet/perception/semantic_parsing/nsp_transformer_model/modeling_bert.py
@@ -633,7 +633,6 @@ def forward(
         all_expert_layer_outputs = sum_of_experts
 
         if getattr(self.config, "gradient_checkpointing", False) and self.training:
-
             if use_cache:
                 logger.warning(
                     "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
@@ -1200,7 +1199,6 @@ def forward(
 
 
 class BertLMHeadModel(BertPreTrainedModel):
-
     _keys_to_ignore_on_load_unexpected = [r"pooler"]
     _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
 
@@ -1353,7 +1351,6 @@ def _reorder_cache(self, past, beam_idx):
 
 
 class BertForMaskedLM(BertPreTrainedModel):
-
     _keys_to_ignore_on_load_unexpected = [r"pooler"]
     _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
 
@@ -1721,7 +1718,6 @@ def forward(
 
 
 class BertForTokenClassification(BertPreTrainedModel):
-
     _keys_to_ignore_on_load_unexpected = [r"pooler"]
 
     def __init__(self, config):
@@ -1800,7 +1796,6 @@ def forward(
 
 
 class BertForQuestionAnswering(BertPreTrainedModel):
-
     _keys_to_ignore_on_load_unexpected = [r"pooler"]
 
     def __init__(self, config):
diff --git a/droidlet/perception/semantic_parsing/nsp_transformer_model/modeling_utils.py b/droidlet/perception/semantic_parsing/nsp_transformer_model/modeling_utils.py
index a5d26bb656..facde9fc4f 100644
--- a/droidlet/perception/semantic_parsing/nsp_transformer_model/modeling_utils.py
+++ b/droidlet/perception/semantic_parsing/nsp_transformer_model/modeling_utils.py
@@ -150,7 +150,7 @@ class ModuleUtilsMixin:
     def _hook_rss_memory_pre_forward(module, *args, **kwargs):
         try:
             import psutil
-        except (ImportError):
+        except ImportError:
             raise ImportError(
                 "You need to install psutil (pip install psutil) to use memory tracing."
             )
@@ -164,7 +164,7 @@ def _hook_rss_memory_pre_forward(module, *args, **kwargs):
     def _hook_rss_memory_post_forward(module, *args, **kwargs):
         try:
             import psutil
-        except (ImportError):
+        except ImportError:
             raise ImportError(
                 "You need to install psutil (pip install psutil) to use memory tracing."
             )
@@ -1306,7 +1306,6 @@ def from_pretrained(
 
     @classmethod
     def _load_state_dict_into_model(cls, model, state_dict, pretrained_model_name_or_path):
-
         # Convert old format to new format if needed from a PyTorch state_dict
         old_keys = []
         new_keys = []
diff --git a/droidlet/perception/semantic_parsing/nsp_transformer_model/utils_caip.py b/droidlet/perception/semantic_parsing/nsp_transformer_model/utils_caip.py
index ee6ca12052..332a7d2e85 100644
--- a/droidlet/perception/semantic_parsing/nsp_transformer_model/utils_caip.py
+++ b/droidlet/perception/semantic_parsing/nsp_transformer_model/utils_caip.py
@@ -6,6 +6,7 @@
 import torch
 from .tokenization_utils import fixed_span_values
 
+
 #########
 # Node typing: checking the type of a specific sub-tree (dict value)
 #########
diff --git a/droidlet/perception/semantic_parsing/utils/interaction_logger.py b/droidlet/perception/semantic_parsing/utils/interaction_logger.py
index bb70532b9c..6698b3a8b0 100644
--- a/droidlet/perception/semantic_parsing/utils/interaction_logger.py
+++ b/droidlet/perception/semantic_parsing/utils/interaction_logger.py
@@ -16,7 +16,6 @@ def __init__(self):
         """
 
     def logInteraction(self, data):
-
         """Log interaction data.
 
        args:
diff --git a/droidlet/shared_data_struct/rotation.py b/droidlet/shared_data_struct/rotation.py
index fab21ff727..711ba8e078 100644
--- a/droidlet/shared_data_struct/rotation.py
+++ b/droidlet/shared_data_struct/rotation.py
@@ -47,6 +47,7 @@
     "UP": np.array([0, 1, 0]),
 }
 
+
 # FIXME add the xz_only option for mc also, shouldn't use yaw for determining "up"
 def transform(direction, yaw, pitch, inverted=False, xz_only=False):
     """Coordinate transforms with respect to current yaw/pitch of the viewer direction
diff --git a/droidlet/tools/artifact_scripts/fetch_artifacts_from_aws.py b/droidlet/tools/artifact_scripts/fetch_artifacts_from_aws.py
index 3b6f36d7f4..585f6f98cc 100644
--- a/droidlet/tools/artifact_scripts/fetch_artifacts_from_aws.py
+++ b/droidlet/tools/artifact_scripts/fetch_artifacts_from_aws.py
@@ -9,6 +9,7 @@
 ROOTDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../")
 print("Rootdir : %r" % ROOTDIR)
 
+
 # downloader with progress-bar
 # CC-by-SA: https://stackoverflow.com/a/53877507
 class DownloadProgressBar(tqdm):
diff --git a/droidlet/tools/crowdsourcing/sync_whitelists.py b/droidlet/tools/crowdsourcing/sync_whitelists.py
index a9c9511222..54ad73953e 100644
--- a/droidlet/tools/crowdsourcing/sync_whitelists.py
+++ b/droidlet/tools/crowdsourcing/sync_whitelists.py
@@ -81,7 +81,6 @@ def import_s3_lists(bucket: str):
 
 
 def add_workers_to_quals(add_list: list, qual: str):
-
     for turker in add_list:
         # First add the worker to the database, or retrieve them if they already exist
         try:
@@ -154,7 +153,6 @@ def update_lists(bucket: str, diff_dict: dict):
     for t in diff_dict.keys():
         for l in diff_dict[t].keys():
             for e in diff_dict[t][l].keys():
-
                 if e == "s3_exclusive" and len(diff_dict[t][l][e]) > 0:
                     add_workers_to_quals(diff_dict[t][l][e], qual_dict[t][l])
 
diff --git a/droidlet/tools/crowdsourcing/turk_as_oncall/static_run_with_qual.py b/droidlet/tools/crowdsourcing/turk_as_oncall/static_run_with_qual.py
index b7acd94700..fdbb32615f 100644
--- a/droidlet/tools/crowdsourcing/turk_as_oncall/static_run_with_qual.py
+++ b/droidlet/tools/crowdsourcing/turk_as_oncall/static_run_with_qual.py
@@ -25,7 +25,6 @@
 
 @task_script(default_config_file="run_with_qual")
 def main(operator: Operator, cfg: DictConfig) -> None:
-
     shared_state = SharedStaticTaskState(
         qualifications=[
             make_qualification_dict(ALLOWLIST_QUALIFICATION, QUAL_EXISTS, None),
diff --git a/droidlet/tools/crowdsourcing/vision_annotation_task/pilot_stats.py b/droidlet/tools/crowdsourcing/vision_annotation_task/pilot_stats.py
index 0786d32829..5b386390f0 100644
--- a/droidlet/tools/crowdsourcing/vision_annotation_task/pilot_stats.py
+++ b/droidlet/tools/crowdsourcing/vision_annotation_task/pilot_stats.py
@@ -1,9 +1,9 @@
-#%%
+# %%
 """
 Get stats and plot for vision annotation pilot tasks
 """
 
-#%%
+# %%
 from numpy import Inf, Infinity
 import json
 import math
@@ -17,7 +17,8 @@
 from mephisto.tools.data_browser import DataBrowser
 from mephisto.data_model.worker import Worker
 
-#%%
+
+# %%
 def check_run_status(run_id: int, qual_name: str) -> None:
     db = LocalMephistoDB()
     units = db.find_units(task_run_id=run_id)
@@ -88,7 +89,7 @@ def check_run_status(run_id: int, qual_name: str) -> None:
         pass
 
 
-#%%
+# %%
 def retrieve_units(run_id: int) -> list:
     db = LocalMephistoDB()
     units = db.find_units(task_run_id=run_id)
@@ -99,7 +100,7 @@ def retrieve_units(run_id: int) -> list:
     return completed_units
 
 
-#%%
+# %%
 def increment_dict(dict: dict, key: str) -> dict:
     temp_dict = dict
     if key not in temp_dict:
@@ -109,7 +110,7 @@ def increment_dict(dict: dict, key: str) -> dict:
     return temp_dict
 
 
-#%%
+# %%
 def plot_OS_browser(run_id: int) -> None:
     completed_units = retrieve_units(run_id)
     db = LocalMephistoDB()
@@ -138,7 +139,7 @@ def plot_OS_browser(run_id: int) -> None:
     return
 
 
-#%%
+# %%
 def timing_charts(run_id: int) -> None:
     completed_units = retrieve_units(run_id)
     db = LocalMephistoDB()
@@ -199,7 +200,7 @@ def timing_charts(run_id: int) -> None:
     print(feedback)
 
 
-#%%
+# %%
 def hit_timing(
     content: dict, starttime: int, endtime: int, unit_timing: dict
 ) -> Tuple[int, int, dict]:
@@ -214,7 +215,7 @@ def hit_timing(
     return starttime, endtime, unit_timing
 
 
-#%%
+# %%
 def calc_percentiles(data: list, label: str) -> None:
     real_data = [x for x in data if x > 0]
     tenth = np.percentile(real_data, 10)
@@ -225,7 +226,7 @@ def calc_percentiles(data: list, label: str) -> None:
     print(f"{label} nintieth percentile: {nintieth:.1f}")
 
 
-#%%
+# %%
 def plot_hist(
     dictionary: dict,
     ylabel: str,
@@ -244,7 +245,7 @@ def plot_hist(
     plt.show()
 
 
-#%%
+# %%
 def plot_hist_sorted(
     values: list,
     ylabel: str,
@@ -261,7 +262,7 @@ def plot_hist_sorted(
     plot_hist(vals_dict, target_val=target_val, xlabel=xlabel, ylabel=ylabel, ymax=ymax)
 
 
-#%%
+# %%
 def plot_scatter(
     xs: list,
     ys: list,
diff --git a/droidlet/tools/crowdsourcing/vision_annotation_task/run_annotation_with_qual.py b/droidlet/tools/crowdsourcing/vision_annotation_task/run_annotation_with_qual.py
index 156bd618ab..609eb13d2d 100644
--- a/droidlet/tools/crowdsourcing/vision_annotation_task/run_annotation_with_qual.py
+++ b/droidlet/tools/crowdsourcing/vision_annotation_task/run_annotation_with_qual.py
@@ -47,7 +47,6 @@ class TestScriptConfig(RunScriptConfig):
 
 @hydra.main(config_name="scriptconfig")
 def main(cfg: DictConfig) -> None:
-
     shared_state = SharedStaticTaskState(
         qualifications=[
             make_qualification_dict(ALLOWLIST_QUALIFICATION, QUAL_EXISTS, None),
diff --git a/droidlet/tools/crowdsourcing/vision_annotation_task/run_labeling_with_qual.py b/droidlet/tools/crowdsourcing/vision_annotation_task/run_labeling_with_qual.py
index 3933ba2ee3..1a6b6944f2 100644
--- a/droidlet/tools/crowdsourcing/vision_annotation_task/run_labeling_with_qual.py
+++ b/droidlet/tools/crowdsourcing/vision_annotation_task/run_labeling_with_qual.py
@@ -47,7 +47,6 @@ class TestScriptConfig(RunScriptConfig):
 
 @hydra.main(config_name="scriptconfig")
 def main(cfg: DictConfig) -> None:
-
     shared_state = SharedStaticTaskState(
         qualifications=[
             make_qualification_dict(ALLOWLIST_QUALIFICATION, QUAL_EXISTS, None),
diff --git a/droidlet/tools/hitl/vision_retrain/recover_unannotated_scenes.py b/droidlet/tools/hitl/vision_retrain/recover_unannotated_scenes.py
index b18efc0bed..1cddd6e305 100644
--- a/droidlet/tools/hitl/vision_retrain/recover_unannotated_scenes.py
+++ b/droidlet/tools/hitl/vision_retrain/recover_unannotated_scenes.py
@@ -23,7 +23,6 @@
 
 
 def main(batch_id: int):
-
     # Pull the completed annotated list from .hitl/batch_id
     print("Retrieving nominally annotated scene list")
     anno_dir = os.path.join(HITL_TMP_DIR, f"{batch_id}/annotated_scenes")
diff --git a/droidlet/tools/hitl/vision_retrain/vision_labeling_jobs.py b/droidlet/tools/hitl/vision_retrain/vision_labeling_jobs.py
index a723572041..85fa4fa408 100644
--- a/droidlet/tools/hitl/vision_retrain/vision_labeling_jobs.py
+++ b/droidlet/tools/hitl/vision_retrain/vision_labeling_jobs.py
@@ -88,7 +88,6 @@ def __init__(
         self._use_basic_shapes = use_basic_shapes
 
     def run(self) -> None:
-
         os.makedirs(f"{HITL_TMP_DIR}/{self._batch_id}/vision_labeling", exist_ok=True)
 
         try: